repo_name stringlengths 5-92 | path stringlengths 4-232 | copies stringclasses 22 values | size stringlengths 4-7 | content stringlengths 626-1.05M | license stringclasses 15 values | hash int64 -9,223,277,421,539,062,000 to 9,223,102,107B | line_mean float64 5.21-99.9 | line_max int64 12-999 | alpha_frac float64 0.25-0.96 | autogenerated bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|
aarora79/sitapt | sitapt/wrangle/wrangle.py | 1 | 6258 |
#!/usr/bin/env python
#title :wrangle.py
#description :Top level file for wrangle.py module in the SITAPT package
#author :aarora79
#date :20151003
#version :0.1
#usage :python ingest.py
#notes :
#python_version :2.7.10
#==============================================================================
import os
import sys
import argparse
import pkg_resources # part of setuptools
import wget
from bs4 import BeautifulSoup
import urlparse
from urlparse import urljoin
import shutil
import pickle
import pprint
import gzip
import json
#import submodules
from globals import globals
from utils import sa_logger
from pcap import sa_pcap
from pcap import pcap_globals
import dbif
#global variables for this file
RECORDS_IN_ONE_GO = 100000
GZ_EXTN = '.gz'
logger = sa_logger.init(globals.PACKAGE_NAME)
#private functions in this module
##############################################################
# decompress_gz_file(input_file)
# This function decompresses the input .csv.gz file into a .csv
# file. The script exits if there is a failure during decompression.
# inputs: input_file name.
# returns: name of the csv file if decompression is successful.
def decompress_gz_file(input_file):
    uncompressed_path = ''  # initialised so the return below is safe if decompression fails
try:
logger.info('decompressing ' + input_file + '...this could take a few seconds....')
sys.stdout.flush()
with gzip.open(input_file, 'rb') as in_file:
            # uncompress the contents of input_file into the 's' variable
s = in_file.read()
# get original filename (remove 3 characters from the end: ".gz")
uncompressed_path = input_file[:-3]
# store uncompressed file data from 's' variable
with open(uncompressed_path, 'wb') as out_file:
out_file.write(s)
logger.info('done decompressing ' + uncompressed_path + ' from ' + input_file)
except Exception as e:
        logger.error('error while decompressing files from ' + input_file + ' error message [' + str(e) + ']...')
else:
logger.info('file extraction completed...')
return uncompressed_path
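# Editor's note: a minimal, hypothetical usage sketch for decompress_gz_file; the
# file name below is illustrative only and not part of the original module:
#   csv_file = decompress_gz_file('data.csv.gz')
#   # on success csv_file == 'data.csv'; on failure the error is logged and the
#   # function returns an empty string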
def _make_list_of_files_in_the_data_lake(config):
dict_of_files = {}
    #get each file from the data lake, uncompress it, convert it into consumable format and
#then store the consumable format in a database
#start with copying and uncompressing the file
    data_lake = config['downloaddir'] + '\\' + globals.PACKAGE_NAME + '\\downloaded_files'
#look for folders and then for files inside those folders
logger.info('looking for the files in the lake ' + data_lake)
# traverse root directory, and list directories as dirs and files as files
for root, dirs, files in os.walk(data_lake):
path = root.split('/')
dir_name = os.path.basename(root)
if dir_name not in dict_of_files.keys():
dict_of_files[dir_name] = []
for file in files:
dict_of_files[dir_name].append(root + '\\' + str(file))
logger.info('following files exist in the links, now going to process them one by one..')
logger.info(dict_of_files)
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(dict_of_files)
return dict_of_files
def extract_info_and_store_in_db(file_name):
#read the file packet by packet and store it in mongo
    logger.info('about to begin file parsing and storage....' + file_name)
status, file_handle, pcap_file_header = sa_pcap.open_pcap_file(file_name)
logger.info(pcap_file_header)
counter = 0
exception_count = 0
record_list =[]
#create a collection in mongo
last_slash = file_name.rfind('\\')
db_collection_name = file_name[last_slash + 1:]
dbif.db_create_collection(globals.DATA_COLLECTION_DB_NAME, db_collection_name)
#keep going until the end of file was reached or until there is a file error
#pkt errors are ignored
while status != pcap_globals.PCAP_FILE_ERROR and status != pcap_globals.PCAP_FILE_EOF_REACHED:
#get next packet
status, pcap_rec_info, pkt_info = sa_pcap.get_next_packet(file_handle)
counter += 1
if status != pcap_globals.PCAP_FILE_OK:
exception_count += 1
logger.error('pkt number #' + str(counter) + ' had an exception')
else:
#dbif.db_add_record_to_collection(db_collection_name, pkt_info)
record_list.append(pkt_info)
if counter % RECORDS_IN_ONE_GO == 0:
dbif.db_add_records_to_collection(globals.DATA_COLLECTION_DB_NAME, db_collection_name, record_list)
record_list =[]
logger.info('inserted pkt# ' + str(counter) + ' into DB')
#insert pending records
dbif.db_add_records_to_collection(globals.DATA_COLLECTION_DB_NAME, db_collection_name, record_list)
logger.info('parsed ' + str(counter) + ' packets from this file. ' + str(exception_count) + ' packets had an exception.')
logger.info('inserted everything into DB')
def _uncompress_files_and_extract_info(config, dict_of_files):
#first copy over the file from the lake into a temp directory
logger.info('now getting files from the lake and processing them')
for key in dict_of_files.keys():
#key is the year
file_list = dict_of_files[key]
for file_name in file_list:
#copy the file
logger.info('getting ' + file_name )
bname = os.path.basename(file_name)
shutil.copy2(file_name, config['tempdir'])
#does it need to be uncompressed
if GZ_EXTN == file_name[-3:]:
uncompressed_file = decompress_gz_file(config['tempdir'] + '\\' + bname)
#read the file and store it in a consumable format in the DB
extract_info_and_store_in_db(uncompressed_file)
#public functions in this module
def wrangle_data(config):
#get logger object, probably already created
    logger.info('wrangling phase beginning...')
#first find out what all is in the lake
dict_of_files = _make_list_of_files_in_the_data_lake(config)
#ok now, copy over the files and uncompress them one by one
_uncompress_files_and_extract_info(config, dict_of_files)
| isc | -663,283,717,774,042,000 | 37.398773 | 125 | 0.6403 | false |
nimadini/Teammate | handlers/home/upload.py | 1 | 1318 |
__author__ = 'stanley'
from google.appengine.ext.webapp import blobstore_handlers
from domain.user import *
from google.appengine.api import users
from domain.image import Image
from domain.doc import Doc
import json
class UploadHandler(blobstore_handlers.BlobstoreUploadHandler):
def post(self):
req = self.request.get('type')
        if req == '':
return
usr = user_key(users.get_current_user().email()).get()
if usr is None:
return
upload_files = self.get_uploads('file')
if len(upload_files) == 0:
raise Exception("No file selected!") # TODO! :D
blob_info = upload_files[0]
file_name = blob_info.filename
# cover
if req == '0':
usr.cover_pic = Image(name=file_name, img=blob_info.key())
usr.put()
# resume
elif req == '1':
usr.reference.resume = Doc(name=file_name, document=blob_info.key())
usr.put()
# profile
elif req == '2':
usr.profile_pic = Image(name=file_name, img=blob_info.key())
usr.put()
else:
return
self.response.headers['Content-Type'] = 'application/json'
result = json.dumps({'successful': True})
self.response.write(result)
| apache-2.0 | 3,949,520,587,871,805,000 | 27.06383 | 80 | 0.574355 | false |
yaoxuanw007/forfun | leetcode/python/addTwoNumbers.py | 1 | 1031 |
# https://oj.leetcode.com/problems/add-two-numbers/
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @return a ListNode
def addTwoNumbers(self, l1, l2):
head, tail, curr, carry = None, None, None, 0
while l1 != None or l2 != None:
if l1 != None and l2 != None:
carry += l1.val + l2.val
l1, l2 = l1.next, l2.next
elif l1 != None:
carry += l1.val
l1 = l1.next
else:
carry += l2.val
l2 = l2.next
curr = ListNode(carry % 10)
carry /= 10
if head == None:
head = curr
if tail != None:
tail.next = curr
tail = curr
if carry > 0:
tail.next = ListNode(carry)
return head
s = Solution()
l1 = ListNode(2)
l1.next = ListNode(4)
l1.next.next = ListNode(3)
l2 = ListNode(5)
l2.next = ListNode(6)
l2.next.next = ListNode(4)
head = s.addTwoNumbers(l1,l2)
while head != None:
print head.val
head = head.next
print
| mit | 292,417,777,314,674,940 | 20.040816 | 51 | 0.57032 | false |
k-freeman/Abb1t | mods/log.py | 1 | 1737 |
#essential
import _thread as thread
from queue import *
#mod
import gzip, os
import json
import re
class log:
def __init__(self, bot):
self.bot = bot.bot
self.overseer = bot.overseer
self.description = ""
self.queue_in=Queue()
#self.queue_out=Queue()
thread.start_new_thread(self.run,())
#self.resttime=0
#self.lastcmd=0
self.logdir="logs"
# if you restart too often, better make it permanent:
self.message_ids = {}
def run(self):
while 1:
msg=self.queue_in.get() # get() is blocking
text=msg.get_text().lower()
from_id = msg.get_from_id()
chat_id=msg.get_chat_id() # will be the name of the gz
message_id=msg.get_message_id()
try:
os.mkdir(self.logdir)
except:
pass # exists already
with gzip.open("{}.gz".format(os.path.join(self.logdir,str(chat_id))),"at") as fw:
fw.write("{}\n".format(json.dumps(msg.raw_msg)))
if re.search(r'^(?:/|!)undelete ', text):# and int(from_id) == int(self.overseer): # undelete messages
try:
number = int(text.split(" ",1)[1])
except Exception as e:
print(e)
continue
self.bot.forwardMessage(chat_id,chat_id,self.message_ids[chat_id][-number])
#append it afterwards, so the undelete message does not count
if chat_id not in self.message_ids:
self.message_ids[chat_id]=[]
self.message_ids[chat_id].append(message_id)
def enqueue(self,msg):
self.queue_in.put(msg)
| mit | -8,973,747,618,887,172,000 | 30.017857 | 114 | 0.532527 | false |
dtrckd/pymake | pymake/util/utils.py | 1 | 7572 |
import sys, os, subprocess
from datetime import datetime
from collections import defaultdict
import hashlib
import json
from string import Template
import numpy as np
from builtins import input
basestring = (str, bytes)
from .colors import colored
#from itertools import cycle
class Cycle(object):
def __init__(self, seq):
self.seq = seq
self.it = np.nditer([seq])
def next(self):
return self.__next__()
def __next__(self):
try:
return next(self.it).item()
except StopIteration:
self.it.reset()
# Exception on nditer when seq is empty (infinite recursivity)
return self.next()
def reset(self):
return self.it.reset()
def copy(self):
return self.__class__(self.seq)
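# Editor's note: brief illustrative example (not part of the original module)
# showing that Cycle restarts automatically once the wrapped sequence is exhausted:
#   c = Cycle([1, 2, 3])
#   [c.next() for _ in range(5)]   # -> [1, 2, 3, 1, 2]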
def get_dest_opt_filled(parser):
''' Return the {dest} name of the options filled in the command line
Parameters
----------
parser : ArgParser
Returns
-------
set of string
'''
opts_in = [opt for opt in sys.argv if opt.startswith('-') and opt not in ['-vv', '-vvv']]
opt2dest_dict = dict( (opt, act.dest) for act in parser._get_optional_actions() for opt in act.option_strings )
dests_in = {opt2dest_dict[opt] for opt in opts_in}
return dests_in
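# Editor's note: hedged usage sketch for get_dest_opt_filled; the parser and
# option names are hypothetical:
#   parser = argparse.ArgumentParser()
#   parser.add_argument('-n', '--num', dest='num')
#   # with sys.argv == ['prog', '--num', '3'], get_dest_opt_filled(parser) -> {'num'}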
# Assign new values to an array according to a map list
def set_v_to(a, map):
new_a = a.copy()
    for k, c in dict(map).items():
new_a[a==k] = c
return new_a
# Re-order the confusion matrix in order to map the clusters (columns) to the best-matching classes according to purity
# One class by topics !
# It modify confu and map in-place
# Return: list of tuple that map topic -> class
import sys
sys.setrecursionlimit(10000)
def map_class2cluster_from_confusion(confu, map=None, cpt=0, minmax='max'):
assert(confu.shape[0] == confu.shape[1])
if minmax == 'max':
obj_f = np.argmax
else:
obj_f = np.argmin
if len(confu) -1 == cpt:
# Recursive stop condition
return sorted(map)
if map is None:
confu = confu.copy()
map = [(i, i) for i in range(len(confu))]
#map = np.array(map)
#K = confu.shape[0]
#C = confu.shape[1]
previous_assigned = [i[1] for i in map[:cpt]]
c_l = obj_f(np.delete(confu[cpt], previous_assigned))
# Get the right id of the class
for j in sorted(previous_assigned):
        # rectify c_l depending on which classes were already assigned
if c_l >= j:
c_l += 1
else:
break
m_l = confu[cpt, c_l]
# Get the right id of the topic
c_c = obj_f(confu[cpt:,c_l]) + cpt
m_c = confu[c_c, c_l]
if m_c > m_l:
# Move the line corresponding to the max for this class to the top
confu[[cpt, c_c], :] = confu[[c_c, cpt], :]
map[cpt], map[c_c] = map[c_c], map[cpt] # Doesn't work if it's a numpy array
return map_class2cluster_from_confusion(confu, map, cpt)
else:
# Map topic 1 to class c_l and return confu - topic 1 and class c_l
map[cpt] = (map[cpt][0], c_l)
cpt += 1
return map_class2cluster_from_confusion(confu, map, cpt)
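# Editor's note: hypothetical example for map_class2cluster_from_confusion. With
# the 3x3 confusion matrix below, the returned topic -> class assignment pairs
# topic 0 with class 1, topic 1 with class 0, and topic 2 with class 2:
#   confu = np.array([[1, 9, 0],
#                     [8, 2, 1],
#                     [0, 1, 7]])
#   map_class2cluster_from_confusion(confu)   # -> [(0, 1), (1, 0), (2, 2)]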
def drop_zeros(a_list):
#return [i for i in a_list if i>0]
return filter(lambda x: x != 0, a_list)
def nxG(y):
import networkx as nx
if type(y) is np.ndarray:
if (y == y.T).all():
# Undirected Graph
typeG = nx.Graph()
else:
# Directed Graph
typeG = nx.DiGraph()
G = nx.from_numpy_matrix(y, create_using=typeG)
else:
G = y
return G
#
#
# Common/Utils
#
#
def retrieve_git_info():
git_branch = subprocess.check_output(['git', 'rev-parse','--abbrev-ref' ,'HEAD']).strip().decode()
git_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip().decode()
return {'git_branch':git_branch, 'git_hash':git_hash}
def hash_objects(obj, algo='md5'):
""" Return a hash of the input """
hashalgo = getattr(hashlib, algo)
if isinstance(obj, (np.ndarray, list, tuple)):
# array of int
hashed_obj = hashalgo(np.asarray(obj).tobytes()).hexdigest()
elif isinstance(obj, str):
hashed_obj = hashalgo(obj.encode("utf-8")).hexdigest()
elif isinstance(obj, dict):
hashed_obj = hashalgo(json.dumps(obj, sort_keys=True).encode('utf8')).hexdigest()
else:
raise TypeError('Type of object unashable: %s' % (type(obj)))
return hashed_obj
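# Editor's note: small illustrative calls (not from the original source); equal
# content yields equal digests regardless of dict key order:
#   hash_objects('abc')                # md5 hex digest of the utf-8 encoded string
#   hash_objects({'b': 1, 'a': 2})     # json-dumped with sorted keys, then hashed
#   hash_objects([1, 2, 3])            # hashes the underlying numpy byte buffer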
def ask_sure_exit(question):
while True:
a = input(question+' ').lower()
if a in ('yes', 'y'):
break
elif a in ('no', 'n'):
exit(2)
else:
print("Enter either [y|n]")
def make_path(f):
bdir = os.path.dirname(f)
if not os.path.exists(bdir) and bdir:
os.makedirs(bdir)
#fn = os.path.basename(bdir)
#if not os.path.exists(fn) and fn:
# open(fn, 'a').close()
return bdir
def Now():
return datetime.now()
def nowDiff(last):
return datetime.now() - last
def ellapsed_time(text, since):
current = datetime.now()
delta = current - since
print(text + ' : %s' % (delta))
return current
def tail(filename, n_lines):
_tail = []
for i, line in enumerate(reverse_readline(filename)):
if i == n_lines:
break
_tail.append(line)
return _tail[::-1]
#import mmap
#def tail(filename, nlines):
# """Returns last n lines from the filename. No exception handling"""
# size = os.path.getsize(filename)
# with open(filename, "rb") as f:
# # for Windows the mmap parameters are different
# fm = mmap.mmap(f.fileno(), 0, mmap.MAP_SHARED, mmap.PROT_READ)
# try:
# for i in range(size - 1, -1, -1):
# if fm[i] == '\n':
# nlines -= 1
# if nlines == -1:
# break
# return fm[i + 1 if i else 0:].splitlines()
# finally:
# pass
#
def reverse_readline(filename, buf_size=8192):
"""a generator that returns the lines of a file in reverse order"""
with open(filename) as fh:
segment = None
offset = 0
fh.seek(0, os.SEEK_END)
file_size = remaining_size = fh.tell()
while remaining_size > 0:
offset = min(file_size, offset + buf_size)
fh.seek(file_size - offset)
buffer = fh.read(min(remaining_size, buf_size))
remaining_size -= buf_size
lines = buffer.split('\n')
# the first line of the buffer is probably not a complete line so
# we'll save it and append it to the last line of the next buffer
# we read
if segment is not None:
# if the previous chunk starts right from the beginning of line
                # do not concat the segment to the last line of the new chunk
# instead, yield the segment first
if buffer[-1] != '\n':
lines[-1] += segment
else:
yield segment
segment = lines[0]
for index in range(len(lines) - 1, 0, -1):
if len(lines[index]):
yield lines[index]
# Don't yield None if the file was empty
if segment is not None:
yield segment
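# Editor's note: hedged usage sketch for tail()/reverse_readline(); 'run.log' is
# a hypothetical file name:
#   last_five = tail('run.log', 5)           # last 5 lines, in file order
#   for line in reverse_readline('run.log'):
#       pass                                 # iterates lines from the end backwards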
class defaultdict2(defaultdict):
def __missing__(self, key):
if self.default_factory is None:
raise KeyError( key )
else:
ret = self[key] = self.default_factory(key)
return ret
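# Editor's note: brief example (not in the original file) of how defaultdict2
# differs from collections.defaultdict: the factory receives the missing key.
#   d = defaultdict2(lambda key: key.upper())
#   d['abc']    # -> 'ABC', and the computed value is stored in the dict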
| gpl-3.0 | -3,358,410,421,763,190,300 | 28.811024 | 115 | 0.569334 | false |
probml/pyprobml | scripts/polyfit_ridge_lasso_demo.py | 1 | 4961 |
# Ridge and lasso regression:
# visualize effect of changing lambda on degree 14 polynomial
# https://github.com/probml/pmtk3/blob/master/demos/polyfitRidgeLasso.m
# Duane Rich
"""
Ridge and lasso regression:
Visualize effect of changing lambda on degree 14 polynomial.
This is a simplified version of linregPolyVsRegDemo.m
These are the steps:
- Generate the data
- Create a preprocessor pipeline that applies a degree 14 polynomial
and rescales values to be within [-1, 1] (no hypers to CV)
- Create a pipeline with the preprocessor and a ridge estimator
- Create a pipeline with the preprocessor and a lasso estimator
- Create the grid where we show coefficients decrease as regularizers
increase (for both ridge and lasso)
- Plot fitted values vs y values for ridge and lasso (with standard errors)
- For increasing log values of lambda, plot the training and test error
for ridge regression.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#from polyDataMake import polyDataMake
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures
from sklearn.linear_model import Ridge, Lasso
from pyprobml_utils import save_fig
deg = 14
# Generate data and split into in and out of sample
#xtrain, ytrain, xtest, ytestNoisefree, ytest, sigma2 = polyDataMake(sampling='thibaux')
def make_1dregression_data(n=21):
np.random.seed(0)
xtrain = np.linspace(0.0, 20, n)
xtest = np.arange(0.0, 20, 0.1)
sigma2 = 4
w = np.array([-1.5, 1/9.])
fun = lambda x: w[0]*x + w[1]*np.square(x)
ytrain = fun(xtrain) + np.random.normal(0, 1, xtrain.shape) * \
np.sqrt(sigma2)
ytest= fun(xtest) + np.random.normal(0, 1, xtest.shape) * \
np.sqrt(sigma2)
return xtrain, ytrain, xtest, ytest
xtrain, ytrain, xtest, ytest = make_1dregression_data(n=21)
def shp(x):
return np.asarray(x).reshape(-1,1)
xtrain = shp(xtrain)
xtest = shp(xtest)
preprocess = Pipeline([('Poly', PolynomialFeatures(degree=deg, include_bias=False)),
('MinMax', MinMaxScaler((-1, 1)))])
ridge_pipe = Pipeline([('PreProcess', preprocess),
('Estimator', Ridge())])
lasso_pipe = Pipeline([('PreProcess', preprocess),
('Estimator', Lasso())])
ridge_lambdas = [0.00001, 0.1]
lasso_lambdas = [0.00001, 0.1]
coefs_by_lambda = {}
def extract_coefs(pipe, lambdas):
coefs_by_lambda = {}
for lamb in lambdas:
pipe.set_params(Estimator__alpha=lamb)
pipe.fit(xtrain, ytrain)
coefs_by_lambda[lamb] = pipe.named_steps['Estimator'].coef_.reshape(-1)
return pd.DataFrame(coefs_by_lambda, index=range(1, deg+1))
lasso_coefs_by_lambda = extract_coefs(lasso_pipe, lasso_lambdas)
lasso_coefs_by_lambda.columns = ['lamb1='+str(lm) for lm in lasso_lambdas]
ridge_coefs_by_lambda = extract_coefs(ridge_pipe, ridge_lambdas)
ridge_coefs_by_lambda.columns = ['lamb2='+str(lm) for lm in ridge_lambdas]
coefs = lasso_coefs_by_lambda.join(ridge_coefs_by_lambda)
print(coefs)
def make_plot_fit(pipe, lamb, num):
fig, ax = plt.subplots()
pipe.set_params(Estimator__alpha=lamb)
pipe.fit(xtrain, ytrain)
ypred = pipe.predict(xtest)
ax.plot(xtest, ypred, linewidth=3)
ax.scatter(xtrain, ytrain)
std_err = np.std(ypred - ytest)
ax.plot(xtest, ypred - std_err, linewidth=1,
linestyle='dotted', color='blue')
ax.plot(xtest, ypred + std_err, linewidth=1,
linestyle='dotted', color='blue')
ax.set_title('L{0} lambda = {1}'.format(str(num), str(lamb)[:6]))
ax.set_xlim(0, 20)
ax.set_ylim(-10, 20)
return fig, ax
for i, lamb in enumerate(ridge_lambdas):
fig_ridge, ax_ridge = make_plot_fit(ridge_pipe, lamb, 2)
save_fig('polyfitRidgeK' + str(i+1) + '.pdf')
for i, lamb in enumerate(lasso_lambdas):
fig_lasso, ax_lasso = make_plot_fit(lasso_pipe, lamb, 1)
save_fig('polyfitRidgeLassoK' + str(i+1) + '.pdf')
def mse(ypred, ytest):
return np.mean((ypred - ytest)**2)
def make_train_test_mse(pipe, log_lambdas):
train_mse = []
test_mse = []
for i, llamb in enumerate(log_lambdas):
pipe.set_params(Estimator__alpha=np.exp(llamb))
pipe.fit(xtrain, ytrain)
ypred_test = pipe.predict(xtest)
ypred_train = pipe.predict(xtrain)
train_mse.append(mse(ypred_train, ytrain))
test_mse.append(mse(ypred_test, ytest))
fig, ax = plt.subplots()
ax.plot(log_lambdas, train_mse, label='train mse', color='blue', marker='s', markersize=10)
ax.plot(log_lambdas, test_mse, label='test mse', color='red', marker='x', markersize=10)
ax.set_title('Mean Squared Error')
ax.set_xlabel('log lambda')
ax.set_xlim(-25, 5)
ax.legend(loc='upper left')
return fig, ax
fig, ax = make_train_test_mse(ridge_pipe, np.linspace(-24, 4, 10))
save_fig('polyfitRidgeUcurve.pdf')
plt.show()
| mit | 6,852,023,252,812,611,000 | 32.527027 | 95 | 0.668817 | false |
xlevus/flask-kibble | flask_kibble/query_composers.py | 1 | 7754 |
import sys
import flask
from werkzeug import cached_property
# from markupsafe import Markup
from google.appengine.ext import ndb
class UnboundComposer(object):
"""
Class to hold constructor arguments in while outside of a request.
"""
def __init__(self, composer_cls, *args, **kwargs):
self._cls = composer_cls
self._args = args
self._kwargs = kwargs
def __call__(self, kibble_view, query):
kw = dict(_kibble_view=kibble_view, _query=query, **self._kwargs)
return self._cls(*self._args, **kw)
class QueryComposer(object):
context_var = None
def __new__(cls, *args, **kwargs):
if '_kibble_view' in kwargs and '_query' in kwargs:
return super(QueryComposer, cls).__new__(cls)
else:
return UnboundComposer(cls, *args, **kwargs)
def __init__(self, _kibble_view=None, _query=None):
self.kibble_view = _kibble_view
self.query = _query
def get_query(self):
return self.query.filter()
def get_query_params(self):
return {}
def __getattr__(self, attr):
return getattr(
self.kibble_view,
self.context_var + '_' + attr)
class Paginator(QueryComposer):
"""
Paginates the query into smaller chunks.
"""
context_var = 'paginator'
PAGE_ARG = 'page'
PERPAGE_ARG = 'page-size'
DEFAULT_PAGE_SIZE = 20
def __init__(self, *args, **kwargs):
super(Paginator, self).__init__(*args, **kwargs)
self._total_objects = self.query.count_async()
def get_query_params(self):
return {
'limit': self.per_page,
'offset': self.per_page * (self.page_number - 1),
}
@cached_property
def per_page(self):
page_size = getattr(self, 'page_size', self.DEFAULT_PAGE_SIZE)
if self.PERPAGE_ARG in flask.request.args:
try:
page_size = int(flask.request.args[self.PERPAGE_ARG])
except ValueError:
pass
return min(
page_size,
getattr(self, "max_page_size", sys.maxint))
@property
def total_objects(self):
return self._total_objects.get_result()
@property
def page_number(self):
try:
p = flask.request.view_args.get(self.PAGE_ARG)
return p or int(flask.request.args.get(self.PAGE_ARG, '1'))
except ValueError:
return 1
@property
def pages(self):
from math import ceil
return int(ceil(self.total_objects / float(self.per_page)))
def iter_page_numbers(self, left_edge=2, left_current=2,
right_current=5, right_edge=2):
last = 0
for num in xrange(1, self.pages + 1):
if num <= left_edge or \
(num > self.page_number - left_current - 1
and
num < self.page_number + right_current) or \
num > self.pages - right_edge:
if last + 1 != num:
yield None
yield num
last = num
def url_for_page(self, number):
args = flask.request.view_args.copy()
args.update(flask.request.args)
args[self.PAGE_ARG] = number
return flask.url_for(flask.request.endpoint, **args)
@property
def has_next(self):
return self.page_number < self.pages
@property
def has_prev(self):
return self.page_number > 1
@property
def prev(self):
return self.page_number - 1
@property
def next(self):
return self.page_number + 1
class Filter(QueryComposer):
"""
Filter on column values.
"""
context_var = 'filter'
def __init__(self, *filters, **kwargs):
super(Filter, self).__init__(**kwargs)
if filters:
self.filters = filters
for f in self:
f.preload()
def __nonzero__(self):
return bool(self._filters)
def __iter__(self):
return iter(self._filters)
@property
def _filters(self):
return getattr(self, 'filters', [])
def get_query(self):
q = self.query
for f in self:
q = f.filter(self.kibble_view.model, q)
return q
SORT_ASC = '+'
SORT_DESC = '-'
class SortColumn(object):
ICONS = {
'numeric': {
SORT_ASC: 'glyphicon glyphicon-sort-by-order',
SORT_DESC: 'glyphicon glyphicon-sort-by-order-alt',
None: 'glyphicon glyphicon-sort'
},
'alphanumeric': {
SORT_ASC: 'glyphicon glyphicon-sort-by-alphabet',
SORT_DESC: 'glyphicon glyphicon-sort-by-alphabet-alt',
None: 'glyphicon glyphicon-sort'
},
'attributes': {
SORT_ASC: 'glyphicon glyphicon-sort-by-attributes',
SORT_DESC: 'glyphicon glyphicon-sort-by-attributes-alt',
None: 'glyphicon glyphicon-sort'
}
}
def __init__(self, column_header, field=None, default=None,
icon_set='attributes'):
self.column_header = column_header
self.field = field or column_header
self.default = default
self.icon_set = icon_set
def icon(self, order=None):
return self.ICONS[self.icon_set][order]
def apply(self, query, order):
prop = ndb.GenericProperty(self.field)
if order == SORT_DESC:
prop = -prop
return query.order(prop)
class Sort(QueryComposer):
"""
Sorts queries on specified columns.
class MyList(kibble.List):
sort_columns = (
kibble.SortColumn('a_field'),
)
"""
context_var = 'sort'
def __init__(self, sortable_columns=None, **kwargs):
super(Sort, self).__init__(**kwargs)
if sortable_columns:
self.sortable_columns = sortable_columns
@property
def _columns(self):
return {
s.column_header: s
for s in getattr(self, 'columns', ())
}
@property
def _default_column(self):
for c in self._columns.values():
if c.default:
return c
return None
def get_query(self):
order, column = self.current_column()
if order and column:
return column.apply(self.query, order)
return self.query
def is_sortable(self, column_header):
return self._columns.get(column_header, False)
def current_column(self):
c = flask.request.args.get(self.context_var)
try:
if c:
return (c[0], self._columns[c[1:]])
except KeyError:
pass
if self._default_column:
return self._default_column.default, self._default_column
return None, None
def icon_class(self, column_header):
order, curr_column = self.current_column()
if curr_column and column_header != curr_column.column_header:
order = None
return self._columns[column_header].icon(order)
def url_for(self, column_header):
curr_order, curr_col = self.current_column()
if curr_col and column_header != curr_col.column_header:
curr_order = None
# TODO: Move this to the SortColumn class and implement
# enabling/disabling of ASC/DESC ordering.
next_order = {
None: SORT_ASC + column_header,
SORT_ASC: SORT_DESC + column_header,
SORT_DESC: None,
}
args = flask.request.view_args.copy()
args.update(flask.request.args)
args[self.context_var] = next_order[curr_order]
return flask.url_for(flask.request.endpoint, **args)
| bsd-3-clause | -5,618,213,949,186,320,000 | 26.111888 | 73 | 0.560743 | false |
openstack/dragonflow | dragonflow/neutron/db/models/l3.py | 1 | 2013 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dragonflow.common import utils
from dragonflow.db.models import l3
def logical_router_from_neutron_router(router):
return l3.LogicalRouter(
id=router['id'],
topic=utils.get_obj_topic(router),
name=router.get('name'),
version=router['revision_number'],
routes=router.get('routes', []))
def build_logical_router_port(router_port_info, mac, network, unique_key):
return l3.LogicalRouterPort(
id=router_port_info['port_id'],
topic=utils.get_obj_topic(router_port_info),
lswitch=router_port_info['network_id'],
mac=mac,
network=network,
unique_key=unique_key)
def build_floating_ip_from_ovo_floating_ip(floating_ip):
return l3.FloatingIp(
id=floating_ip['id'],
topic=utils.get_obj_topic(floating_ip),
version=floating_ip['revision_number'],
lrouter=floating_ip.get('router_id', None),
lport=floating_ip.get('fixed_port_id', None),
fixed_ip_address=floating_ip.get('fixed_ip_address', None),
)
def build_floating_ip_from_neutron_floating_ip(floating_ip):
return l3.FloatingIp(
id=floating_ip['id'],
topic=utils.get_obj_topic(floating_ip),
version=floating_ip['revision_number'],
lrouter=floating_ip.get('router_id', None),
lport=floating_ip.get('port_id', None),
fixed_ip_address=floating_ip.get('fixed_ip_address', None),
)
| apache-2.0 | 3,655,962,001,205,559,000 | 35.6 | 78 | 0.66766 | false |
Edgar324/GeoVis | geovis/urls.py | 1 | 1084 |
"""geovis URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^geovismain/', include('geovismain.urls', namespace="geovismain")),
url(r'^weathervis/', include('weathervis.urls', namespace="weathervis")),
url(r'^scatter/', include('scatterdatareview.urls', namespace="scatterdatareview")),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| apache-2.0 | -8,448,952,761,217,233,000 | 42.36 | 88 | 0.699262 | false |
gerardroche/sublime_color_scheme_unit | lib/result.py | 1 | 5292 |
from timeit import default_timer as timer
class ResultPrinter():
def __init__(self, output, debug=False):
self.output = output
self.debug = debug
self.assertions = 0
self.progress_count = 0
self.tests = 0
self.tests_total = 0
def on_tests_start(self, tests):
self.tests_total = len(tests)
self.start_time = timer()
if self.debug:
self.output.write('Starting {} test{}:\n\n'.format(
len(tests),
's' if len(tests) > 1 else ''
))
for i, test in enumerate(tests, start=1):
self.output.write('%d) %s\n' % (i, test))
def on_tests_end(self, errors, skipped, failures, total_assertions):
self.output.write('\n\n')
self.output.write('Time: %.2f secs\n' % (timer() - self.start_time))
self.output.write('\n')
# ERRORS
if len(errors) > 0:
self.output.write("There %s %s error%s:\n" % (
'was' if len(errors) == 1 else 'were',
len(errors),
'' if len(errors) == 1 else 's',
))
self.output.write("\n")
for i, error in enumerate(errors, start=1):
self.output.write("%d) %s\n" % (i, error['message']))
self.output.write("\n%s:%d:%d\n" % (error['file'], error['row'], error['col']))
self.output.write("\n")
# FAILURES
if len(failures) > 0:
if len(errors) > 0:
self.output.write("--\n\n")
self.output.write("There %s %s failure%s:\n\n" % (
'was' if len(failures) == 1 else 'were',
len(failures),
'' if len(failures) == 1 else 's',
))
for i, failure in enumerate(failures, start=1):
self.output.write("%d) %s\n" % (i, failure['assertion']))
self.output.write("Failed asserting %s equals %s\n" % (
str(failure['actual']), str(failure['expected'])))
# TODO failure diff
# self.output.write("--- Expected\n")
# self.output.write("+++ Actual\n")
# self.output.write("@@ @@\n")
# self.output.write("{{diff}}\n\n")
self.output.write("%s:%d:%d\n" % (failure['file'], failure['row'], failure['col']))
self.output.write("\n")
# SKIPPED
if len(skipped) > 0:
if (len(errors) + len(failures)) > 0:
self.output.write("--\n\n")
self.output.write("There %s %s skipped test%s:\n" % (
'was' if len(skipped) == 1 else 'were',
len(skipped),
'' if len(skipped) == 1 else 's',
))
self.output.write("\n")
for i, skip in enumerate(skipped, start=1):
self.output.write("%d) %s\n" % (i, skip['message']))
self.output.write("\n%s:%d:%d\n" % (skip['file'], skip['row'], skip['col']))
self.output.write("\n")
# TOTALS
if len(errors) == 0 and len(failures) == 0:
self.output.write("OK (%d tests, %d assertions" % (self.tests, total_assertions))
if len(skipped) > 0:
self.output.write(", %d skipped" % (len(skipped)))
self.output.write(")\n")
else:
self.output.write("FAILURES!\n")
self.output.write("Tests: %d, Assertions: %d" % (self.tests, total_assertions))
if len(errors) > 0:
self.output.write(", Errors: %d" % (len(errors)))
self.output.write(", Failures: %d" % (len(failures)))
if len(skipped) > 0:
self.output.write(", Skipped: %d" % (len(skipped)))
self.output.write(".")
self.output.write("\n")
def on_test_start(self, test, data):
if self.debug:
settings = data.settings()
color_scheme = settings.get('color_scheme')
syntax = settings.get('syntax')
self.output.write('\nStarting test \'{}\'\n color scheme: \'{}\'\n syntax: \'{}\'\n'
.format(test, color_scheme, syntax))
def _writeProgress(self, c):
self.output.write(c)
self.progress_count += 1
if self.progress_count % 80 == 0:
self.output.write(' %d / %s (%3.1f%%)\n' % (
self.tests,
self.tests_total,
(self.tests / self.tests_total) * 100))
def on_test_end(self):
self.tests += 1
def addError(self, test, data):
self._writeProgress('E')
def addSkippedTest(self, test, data):
self._writeProgress('S')
if self.debug:
settings = data.settings()
color_scheme = settings.get('color_scheme')
syntax = settings.get('syntax')
self.output.write('\nSkipping test \'{}\'\n color scheme: \'{}\'\n syntax: \'{}\'\n'
.format(test, color_scheme, syntax))
def on_test_success(self):
self._writeProgress('.')
def on_test_failure(self):
self._writeProgress('F')
def on_assertion(self):
self.assertions += 1
| bsd-3-clause | 1,050,316,348,667,923,300 | 36.531915 | 99 | 0.483938 | false |
mrtrumbe/meliman | thetvdb.py | 1 | 9411 |
import urllib2
import zipfile
import StringIO
import random
from datetime import datetime
from xml.etree.ElementTree import parse, Element, SubElement
import metadata
MIRROR_URL = 'http://www.thetvdb.com/api/%s/mirrors.xml'
SERVER_TIME_URL = '/api/%s/updates/'
SERIES_LOOKUP_URL = '/api/GetSeries.php?seriesname=%s'
SERIES_URL = '/api/%s/series/%s/en.xml'
EPISODE_URL = '/api/%s/series/%i/default/%i/%i/en.xml'
EPISODE_HISTORY_URL = '/api/%s/series/%i/all/en.zip'
EPISODE_LOOKUP_BY_DATE_URL = '/api/GetEpisodeByAirDate.php?apikey=%s&seriesid=%i&airdate=%s&language=en'
API_KEY = "0403764A0DA51955"
class TheTvDb:
# this finds an acceptable mirror for future thetvdb requests.
def __init__(self, config, debug):
self.debug = debug
self.active_mirror = None
# this only needs to be called by other TheTvDb functions before they go to the server
def find_mirror(self):
if self.active_mirror is None:
full_url = MIRROR_URL % (API_KEY, )
mirrors_xml = parse(urllib2.urlopen(full_url))
mirrors = mirrors_xml.findall('Mirror')
random.seed()
mirror_to_get = random.randrange(0, len(mirrors), 1)
mirror = mirrors[mirror_to_get]
self.active_mirror = mirror.findtext('mirrorpath')
if self.debug:
print "Using thetvdb mirror: %s" % (self.active_mirror, )
# returns an integer timestamp
def get_server_time(self):
self.find_mirror()
full_url = self.active_mirror + SERVER_TIME_URL % (API_KEY, )
if self.debug:
print "Getting data for url: %s" % full_url
series_xml = parse(urllib2.urlopen(full_url))
data_element = series_xml.getroot()
to_return = int(data_element.attrib['time'])
if self.debug:
print " result: %i" % to_return
return to_return
# returns a metadata.Series object
def get_series_info(self, series_id):
self.find_mirror()
full_url = self.active_mirror + SERIES_URL % (API_KEY, series_id)
if self.debug:
print "Getting data for url: %s" % full_url
full_series = []
try:
full_series_xml = parse(urllib2.urlopen(full_url))
for full_s in full_series_xml.findall('Series'):
full_series.append(full_s)
except urllib2.HTTPError:
raise Exception('Could not find series with id %i' % (series_id, ))
if len(full_series) > 0:
if self.debug:
print " found %i series:" % len(full_series)
s_obj = self.parse_series_xml(full_series[0])
return s_obj
return None
# returns a list of metadata.Series object
def lookup_series_info(self, name):
self.find_mirror()
full_url = self.active_mirror + SERIES_LOOKUP_URL % (name.strip().replace(' ', '%20'),)
if self.debug:
print "Getting data for url: %s" % full_url
series_xml = parse(urllib2.urlopen(full_url))
series = [Series for Series in series_xml.findall('Series')]
# now that we have the id's, get the full series info
full_series = []
for s in series:
id = self.get_int(s, 'id', 0)
full_url = self.active_mirror + SERIES_URL % (API_KEY, id)
full_series_xml = parse(urllib2.urlopen(full_url))
for full_s in full_series_xml.findall('Series'):
full_series.append(full_s)
if self.debug:
print " found %i series:" % len(full_series)
to_return = []
for s in full_series:
s_obj = self.parse_series_xml(s)
to_return.append(s_obj)
if self.debug:
print " found series '%s'" % s_obj.title
return to_return
# returns a list of metadata.Episode objects:
def get_full_episode_list(self, series):
self.find_mirror()
full_url = self.active_mirror + EPISODE_HISTORY_URL % (API_KEY, series.id)
if self.debug:
print "Getting data for url: %s" % full_url
zip_stream = urllib2.urlopen(full_url)
zip_file_string = self.create_string_from_stream(zip_stream)
local_stream = StringIO.StringIO(zip_file_string)
zip_file = zipfile.ZipFile(local_stream)
episode_xml = zip_file.read('en.xml')
to_return = self.parse_episode_xml(episode_xml, series)
return to_return
# returns a metadata.Episode object
def get_specific_episode(self, series, season_number, episode_number):
self.find_mirror()
full_url = self.active_mirror + EPISODE_URL % (API_KEY, series.id, season_number, episode_number)
if self.debug:
print "Getting data for url: %s" % full_url
try:
xml_stream = urllib2.urlopen(full_url)
xml_file_string = self.create_string_from_stream(xml_stream)
to_return = self.parse_episode_xml(xml_file_string, series)
return to_return[0]
except urllib2.HTTPError:
return None
# returns a metadata.Episode object
def get_specific_episode_by_date(self, series, year, month, day):
self.find_mirror()
formatted_date='%i-%i-%i' % (year, month, day)
full_url = self.active_mirror + EPISODE_LOOKUP_BY_DATE_URL % (API_KEY, series.id, formatted_date)
if self.debug:
print "Getting data for url: %s" % full_url
try:
xml_stream = urllib2.urlopen(full_url)
xml_file_string = self.create_string_from_stream(xml_stream)
to_return = self.parse_episode_xml(xml_file_string, series)
return to_return[0]
except urllib2.HTTPError:
return None
def create_string_from_stream(self, stream):
to_return = ''
try:
while True:
to_return = to_return + stream.next()
except:
to_return = to_return
return to_return
def parse_series_xml(self, series_xml):
to_return = metadata.Series()
to_return.id = self.get_int(series_xml, 'id', -1)
to_return.zap2it_id = self.get_string(series_xml, 'zap2it_id', '')
to_return.imdb_id = self.get_string(series_xml, 'IMDB_ID', '')
to_return.title = self.get_string(series_xml, 'SeriesName', '')
to_return.description = self.get_string(series_xml, 'Overview', '')
to_return.actors = self.get_string_list(series_xml, 'Actors', '|', [])
to_return.genres = self.get_string_list(series_xml, 'Genre', '|', [])
to_return.content_rating = self.get_string(series_xml, 'ContentRating', '')
return to_return
def parse_episode_xml(self, xml_string, series):
xml_stream = StringIO.StringIO(xml_string)
xml_object = parse(xml_stream)
episodes = [Episode for Episode in xml_object.findall('Episode')]
to_return = []
for episode in episodes:
episode_metadata = metadata.Episode()
to_return.append(episode_metadata)
episode_metadata.series = series;
episode_metadata.title = self.get_string(episode, 'EpisodeName', '')
episode_metadata.description = self.get_string(episode, 'Overview', '')
episode_metadata.rating = self.get_float(episode, 'Rating', 0.0)
episode_metadata.season_number = self.get_int(episode, 'SeasonNumber', 0)
episode_metadata.episode_number = self.get_int(episode, 'EpisodeNumber', 0)
episode_metadata.director = self.get_string(episode, 'Director', '')
episode_metadata.host = self.get_string(episode, 'Host', '')
episode_metadata.writers = self.get_string_list(episode, 'Writer', ',', [])
episode_metadata.guest_stars = self.get_string_list(episode, 'GuestStars', '|', [])
episode_metadata.original_air_date = self.get_datetime(episode, 'FirstAired',
None)
episode_metadata.time = datetime.now()
return to_return
def get_string(self, xml_element, sub_element_name, default):
try:
return xml_element.findtext(sub_element_name)
except:
return default
def get_string_list(self, xml_element, sub_element_name, separator, default):
try:
text = xml_element.findtext(sub_element_name)
return [ t for t in text.split(separator) ]
except:
return [ t for t in default ]
def get_int(self, xml_element, sub_element_name, default):
try:
return int(xml_element.findtext(sub_element_name))
except:
return default
def get_float(self, xml_element, sub_element_name, default):
try:
return float(xml_element.findtext(sub_element_name))
except:
return default
def get_datetime(self, xml_element, sub_element_name, default):
try:
date_text = xml_element.findtext(sub_element_name)
return datetime.strptime(date_text, "%Y-%m-%d")
except ValueError:
if self.debug:
print "Error parsing string '%s' into datetime.\n" % date_text,
return default
except:
return default
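# Editor's note: hedged usage sketch; the config dict, series name and the
# availability of a live thetvdb mirror are assumptions for illustration only:
#   tvdb = TheTvDb(config={}, debug=True)
#   matches = tvdb.lookup_series_info('Firefly')
#   if matches:
#       episode = tvdb.get_specific_episode(matches[0], 1, 1)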
| bsd-3-clause | -5,145,120,445,678,961,000 | 31.677083 | 105 | 0.592179 | false |
silverlogic/blockhunt-back | blockhunt/stores/migrations/0004_auto_20160123_2044.py | 1 | 1174 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-23 20:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('stores', '0003_storeaddress_zip_code'),
]
operations = [
migrations.RenameField(
model_name='storeaddress',
old_name='point',
new_name='coords',
),
migrations.AlterField(
model_name='store',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stores', to='stores.StoreCategory'),
),
migrations.AlterField(
model_name='store',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stores', to='stores.StoreOwner'),
),
migrations.AlterField(
model_name='storeaddress',
name='store',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='address', to='stores.Store'),
),
]
| mit | -5,133,368,833,882,696,000 | 31.611111 | 131 | 0.603066 | false |
openstack/mistral | mistral/db/sqlalchemy/migration/alembic_migrations/versions/038_delete_delayed_calls_with_empty_key.py | 1 | 1357 |
# Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delete delayed calls with key=NULL.
Revision ID: 038
Revises: 037
Create Date: 2020-7-13 13:20:00
"""
# revision identifiers, used by Alembic.
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
revision = '038'
down_revision = '037'
def upgrade():
# See https://bugs.launchpad.net/mistral/+bug/1861988.
# Due to this bug there may be redundant delayed calls in DB.
# We need to delete all rows where the "key" column is None.
session = sa.orm.Session(bind=op.get_bind())
delayed_calls = table('delayed_calls_v2', column('key'))
with session.begin(subtransactions=True):
session.execute(
delayed_calls.delete().where(delayed_calls.c.key==None) # noqa
)
session.commit()
| apache-2.0 | 8,103,812,811,635,855,000 | 27.87234 | 75 | 0.715549 | false |
mpkato/openliveq | tests/test_question_features.py | 1 | 1378 |
import openliveq as olq
from openliveq.features import (answer_num, log_answer_num,
view_num, log_view_num, is_open, is_vote, is_solved, rank, updated_at)
import pytest
import os
from math import log
from .test_base import TestBase
class TestQuestionFeatures(TestBase):
def test_answer_num(self, q1, q2):
assert answer_num(q1) == 1
assert answer_num(q2) == 2
def test_view_num(self, q1, q2):
assert view_num(q1) == 952
assert view_num(q2) == 14593
def test_is_open(self, q1, q2):
assert is_open(q1) == 0
assert is_open(q2) == 0
def test_is_vote(self, q1, q2):
assert is_vote(q1) == 0
assert is_vote(q2) == 0
def test_is_solved(self, q1, q2):
assert is_solved(q1) == 1
assert is_solved(q2) == 1
def test_rank(self, q1, q2, q3):
assert rank(q1) == 1
assert rank(q2) == 1
assert rank(q3) == 2
def test_updated_at(self, q1, q2, q3):
assert updated_at(q1) > updated_at(q2)
assert updated_at(q2) > updated_at(q3)
@pytest.fixture
def fe(self):
return olq.FeatureExtractor()
@pytest.fixture
def q1(self, questions):
return questions[0]
@pytest.fixture
def q2(self, questions):
return questions[1]
@pytest.fixture
def q3(self, questions):
return questions[2]
| mit | 2,381,516,393,865,743,400 | 24.518519 | 74 | 0.597242 | false |
renyixiang/auto_test_baby | apis_test/common_methods/mysql.py | 1 | 1034 |
#encoding:utf-8
import MySQLdb
from apis_test.common_methods.config import *
from apis_test.common_methods.mylogs import logger
def mysql_self(type,sql):
if type == 'mysql':
conn=MySQLdb.connect(
host=datas['sit-mysql']['数据库hosts'],
port=int(datas['sit-mysql']['数据库端口']),
user=datas['sit-mysql']['用户名'],
passwd=datas['sit-mysql']['用户密码'],
db=datas['sit-mysql']['数据库名称'],
charset='utf8'
)
cur = conn.cursor()
        # SQL statement
# sql = "select * from s_user where username='17717392244'"
r = cur.execute(sql)
info = cur.fetchmany(r)
for i in info:
logger.info(i)
cur.close()
conn.commit()
conn.close()
return info
# mysql_self('mysql',"select * from s_user where username='17195864861'")
| gpl-2.0 | 3,406,893,547,768,958,000 | 30.935484 | 73 | 0.480808 | false |
tensorflow/data-validation | tensorflow_data_validation/utils/test_util.py | 1 | 18403 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for writing statistics generator tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import traceback
from typing import Callable, Dict, List, Optional, Tuple, Union
from absl.testing import absltest
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import pyarrow as pa
from tensorflow_data_validation import types
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow.python.util.protobuf import compare
from tensorflow_metadata.proto.v0 import statistics_pb2
def make_example_dict_equal_fn(
test: absltest.TestCase,
expected: List[types.Example]) -> Callable[[List[types.Example]], None]:
"""Makes a matcher function for comparing the example dict.
Args:
test: test case object.
expected: the expected example dict.
Returns:
A matcher function for comparing the example dicts.
"""
def _matcher(actual):
"""Matcher function for comparing the example dicts."""
try:
# Check number of examples.
test.assertLen(actual, len(expected))
for i in range(len(actual)):
for key in actual[i]:
# Check each feature value.
if isinstance(expected[i][key], np.ndarray):
test.assertEqual(
expected[i][key].dtype, actual[i][key].dtype,
'Expected dtype {}, found {} in actual[{}][{}]: {}'.format(
expected[i][key].dtype, actual[i][key].dtype, i, key,
actual[i][key]))
np.testing.assert_equal(actual[i][key], expected[i][key])
else:
test.assertEqual(
expected[i][key], actual[i][key],
'Unexpected value of actual[{}][{}]'.format(i, key))
except AssertionError:
raise util.BeamAssertException(traceback.format_exc())
return _matcher
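# Editor's note: illustrative use with Beam's testing utilities; the pipeline,
# transform and expected examples below are hypothetical:
#   with beam.Pipeline() as p:
#     result = p | beam.Create(input_examples) | MyExamplePTransform()
#     util.assert_that(result, make_example_dict_equal_fn(self, expected_examples))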
def make_dataset_feature_stats_list_proto_equal_fn(
test: absltest.TestCase,
expected_result: statistics_pb2.DatasetFeatureStatisticsList
) -> Callable[[List[statistics_pb2.DatasetFeatureStatisticsList]], None]:
"""Makes a matcher function for comparing DatasetFeatureStatisticsList proto.
Args:
test: test case object
expected_result: the expected DatasetFeatureStatisticsList proto.
Returns:
A matcher function for comparing DatasetFeatureStatisticsList proto.
"""
def _matcher(actual: List[statistics_pb2.DatasetFeatureStatisticsList]):
"""Matcher function for comparing DatasetFeatureStatisticsList proto."""
try:
test.assertLen(actual, 1,
'Expected exactly one DatasetFeatureStatisticsList')
test.assertLen(actual[0].datasets, len(expected_result.datasets))
sorted_actual_datasets = sorted(actual[0].datasets, key=lambda d: d.name)
sorted_expected_datasets = sorted(expected_result.datasets,
key=lambda d: d.name)
for i in range(len(sorted_actual_datasets)):
assert_dataset_feature_stats_proto_equal(test,
sorted_actual_datasets[i],
sorted_expected_datasets[i])
except AssertionError:
raise util.BeamAssertException(traceback.format_exc())
return _matcher
def assert_feature_proto_equal(
test: absltest.TestCase, actual: statistics_pb2.FeatureNameStatistics,
expected: statistics_pb2.FeatureNameStatistics) -> None:
"""Ensures feature protos are equal.
Args:
test: The test case.
actual: The actual feature proto.
expected: The expected feature proto.
"""
test.assertLen(actual.custom_stats, len(expected.custom_stats))
expected_custom_stats = {}
for expected_custom_stat in expected.custom_stats:
expected_custom_stats[expected_custom_stat.name] = expected_custom_stat
for actual_custom_stat in actual.custom_stats:
test.assertIn(actual_custom_stat.name, expected_custom_stats)
expected_custom_stat = expected_custom_stats[actual_custom_stat.name]
compare.assertProtoEqual(
test, expected_custom_stat, actual_custom_stat, normalize_numbers=True)
del actual.custom_stats[:]
del expected.custom_stats[:]
# Compare the rest of the proto without numeric custom stats
compare.assertProtoEqual(test, expected, actual, normalize_numbers=True)
def assert_dataset_feature_stats_proto_equal(
test: absltest.TestCase, actual: statistics_pb2.DatasetFeatureStatistics,
expected: statistics_pb2.DatasetFeatureStatistics) -> None:
"""Compares DatasetFeatureStatistics protos.
This function can be used to test whether two DatasetFeatureStatistics protos
contain the same information, even if the order of the features differs.
Args:
test: The test case.
actual: The actual DatasetFeatureStatistics proto.
expected: The expected DatasetFeatureStatistics proto.
"""
test.assertEqual(
expected.name, actual.name, 'Expected name to be {}, found {} in '
'DatasetFeatureStatistics {}'.format(expected.name, actual.name, actual))
test.assertEqual(
expected.num_examples, actual.num_examples,
'Expected num_examples to be {}, found {} in DatasetFeatureStatistics {}'
.format(expected.num_examples, actual.num_examples, actual))
test.assertLen(actual.features, len(expected.features))
expected_features = {}
for feature in expected.features:
expected_features[types.FeaturePath.from_proto(feature.path)] = feature
for feature in actual.features:
feature_path = types.FeaturePath.from_proto(feature.path)
if feature_path not in expected_features:
raise AssertionError(
'Feature path %s found in actual but not found in expected.' %
feature_path)
assert_feature_proto_equal(test, feature, expected_features[feature_path])
class CombinerStatsGeneratorTest(absltest.TestCase):
"""Test class with extra combiner stats generator related functionality."""
# Runs the provided combiner statistics generator and tests if the output
# matches the expected result.
def assertCombinerOutputEqual(
self, batches: List[types.ExampleBatch],
generator: stats_generator.CombinerStatsGenerator,
expected_feature_stats: Dict[types.FeaturePath,
statistics_pb2.FeatureNameStatistics],
expected_cross_feature_stats: Optional[Dict[
types.FeatureCross, statistics_pb2.CrossFeatureStatistics]] = None,
only_match_expected_feature_stats: bool = False,
) -> None:
"""Tests a combiner statistics generator.
This runs the generator twice to cover different behavior. There must be at
least two input batches in order to test the generator's merging behavior.
Args:
batches: A list of batches of test data.
generator: The CombinerStatsGenerator to test.
expected_feature_stats: Dict mapping feature name to FeatureNameStatistics
proto that it is expected the generator will return for the feature.
expected_cross_feature_stats: Dict mapping feature cross to
CrossFeatureStatistics proto that it is expected the generator will
return for the feature cross.
only_match_expected_feature_stats: if True, will only compare features
that appear in `expected_feature_stats`.
"""
generator.setup()
if expected_cross_feature_stats is None:
expected_cross_feature_stats = {}
def _verify(output):
"""Verifies that the output meeds the expectations."""
if only_match_expected_feature_stats:
features_in_stats = set(
[types.FeaturePath.from_proto(f.path) for f in output.features])
self.assertTrue(set(expected_feature_stats.keys())
.issubset(features_in_stats))
else:
self.assertEqual( # pylint: disable=g-generic-assert
len(output.features), len(expected_feature_stats),
'{}, {}'.format(output, expected_feature_stats))
for actual_feature_stats in output.features:
actual_path = types.FeaturePath.from_proto(actual_feature_stats.path)
expected_stats = expected_feature_stats.get(actual_path)
if (only_match_expected_feature_stats and expected_stats is None):
continue
compare.assertProtoEqual(
self,
actual_feature_stats,
expected_stats,
normalize_numbers=True)
self.assertEqual( # pylint: disable=g-generic-assert
len(result.cross_features), len(expected_cross_feature_stats),
'{}, {}'.format(result, expected_cross_feature_stats))
for actual_cross_feature_stats in result.cross_features:
cross = (actual_cross_feature_stats.path_x.step[0],
actual_cross_feature_stats.path_y.step[0])
compare.assertProtoEqual(
self,
actual_cross_feature_stats,
expected_cross_feature_stats[cross],
normalize_numbers=True)
# Run generator to check that merge_accumulators() works correctly.
accumulators = [
generator.add_input(generator.create_accumulator(), batch)
for batch in batches
]
result = generator.extract_output(
generator.merge_accumulators(accumulators))
_verify(result)
# Run generator to check that compact() works correctly after
# merging accumulators.
accumulators = [
generator.add_input(generator.create_accumulator(), batch)
for batch in batches
]
result = generator.extract_output(
generator.compact(generator.merge_accumulators(accumulators)))
_verify(result)
# Run generator to check that add_input() works correctly when adding
# inputs to a non-empty accumulator.
accumulator = generator.create_accumulator()
for batch in batches:
accumulator = generator.add_input(accumulator, batch)
result = generator.extract_output(accumulator)
_verify(result)
class _DatasetFeatureStatisticsComparatorWrapper(object):
"""Wraps a DatasetFeatureStatistics and provides a custom comparator.
This is to facilitate assertCountEqual().
"""
# Disable the built-in __hash__ (in python2). This forces __eq__ to be
# used in assertCountEqual().
__hash__ = None
def __init__(self, wrapped: statistics_pb2.DatasetFeatureStatistics):
self._wrapped = wrapped
self._normalized = statistics_pb2.DatasetFeatureStatistics()
self._normalized.MergeFrom(wrapped)
compare.NormalizeNumberFields(self._normalized)
def __eq__(self, other: '_DatasetFeatureStatisticsComparatorWrapper'):
return compare.ProtoEq(self._normalized, other._normalized) # pylint: disable=protected-access
def __repr__(self):
return self._normalized.__repr__()
class TransformStatsGeneratorTest(absltest.TestCase):
"""Test class with extra transform stats generator related functionality."""
def setUp(self):
super(TransformStatsGeneratorTest, self).setUp()
self.maxDiff = None # pylint: disable=invalid-name
# Runs the provided slicing aware transform statistics generator and tests
# if the output matches the expected result.
def assertSlicingAwareTransformOutputEqual(
self,
examples: List[Union[types.SlicedExample, types.Example]],
generator: stats_generator.TransformStatsGenerator,
expected_results: List[Union[
statistics_pb2.DatasetFeatureStatistics,
Tuple[types.SliceKey, statistics_pb2.DatasetFeatureStatistics]]],
metrics_verify_fn: Optional[Callable[[beam.metrics.metric.MetricResults],
None]] = None,
add_default_slice_key_to_input: bool = False,
add_default_slice_key_to_output: bool = False,
) -> None:
"""Tests a slicing aware transform statistics generator.
Args:
examples: Input sliced examples.
generator: A TransformStatsGenerator.
expected_results: Expected statistics proto results.
metrics_verify_fn: A callable which will be invoked on the resulting
beam.metrics.metric.MetricResults object.
add_default_slice_key_to_input: If True, adds the default slice key to
the input examples.
add_default_slice_key_to_output: If True, adds the default slice key to
the result protos.
"""
def _make_result_matcher(
test: absltest.TestCase,
expected_results: List[
Tuple[types.SliceKey, statistics_pb2.DatasetFeatureStatistics]]):
"""Makes matcher for a list of DatasetFeatureStatistics protos."""
def _equal(actual_results: List[
Tuple[types.SliceKey, statistics_pb2.DatasetFeatureStatistics]]):
"""Matcher for comparing a list of DatasetFeatureStatistics protos."""
if len(actual_results) == 1 and len(expected_results) == 1:
# If appropriate use proto matcher for better errors
test.assertEqual(expected_results[0][0], actual_results[0][0])
compare.assertProtoEqual(test, expected_results[0][1],
actual_results[0][1], normalize_numbers=True)
else:
test.assertCountEqual(
[(k, _DatasetFeatureStatisticsComparatorWrapper(v))
for k, v in expected_results],
[(k, _DatasetFeatureStatisticsComparatorWrapper(v))
for k, v in actual_results])
return _equal
if add_default_slice_key_to_input:
examples = [(None, e) for e in examples]
if add_default_slice_key_to_output:
expected_results = [(None, p) for p in expected_results]
options = beam.options.pipeline_options.PipelineOptions(
runtime_type_check=True)
with beam.Pipeline(options=options) as p:
result = p | beam.Create(examples) | generator.ptransform
util.assert_that(result, _make_result_matcher(self, expected_results))
pipeline_result = p.run()
if metrics_verify_fn:
metrics_verify_fn(pipeline_result.metrics())
class CombinerFeatureStatsGeneratorTest(absltest.TestCase):
"""Test class for combiner feature stats generator related functionality."""
# Runs the provided combiner feature statistics generator and tests if the
# output matches the expected result.
def assertCombinerOutputEqual(
self,
input_batches: List[types.ValueBatch],
generator: stats_generator.CombinerFeatureStatsGenerator,
expected_result: statistics_pb2.FeatureNameStatistics,
feature_path: types.FeaturePath = types.FeaturePath([''])) -> None:
"""Tests a feature combiner statistics generator.
    This runs the generator multiple times to cover different behavior. There must
    be at least two input batches in order to test the generator's merging behavior.
Args:
input_batches: A list of batches of test data.
generator: The CombinerFeatureStatsGenerator to test.
expected_result: The FeatureNameStatistics proto that it is expected the
generator will return.
feature_path: The FeaturePath to use, if not specified, will set a
default value.
"""
generator.setup()
# Run generator to check that merge_accumulators() works correctly.
accumulators = [
generator.add_input(generator.create_accumulator(), feature_path,
input_batch) for input_batch in input_batches
]
result = generator.extract_output(
generator.merge_accumulators(accumulators))
compare.assertProtoEqual(
self, result, expected_result, normalize_numbers=True)
# Run generator to check that compact() works correctly after
# merging accumulators.
accumulators = [
generator.add_input(generator.create_accumulator(), feature_path,
input_batch) for input_batch in input_batches
]
result = generator.extract_output(
generator.compact(generator.merge_accumulators(accumulators)))
compare.assertProtoEqual(
self, result, expected_result, normalize_numbers=True)
# Run generator to check that add_input() works correctly when adding
# inputs to a non-empty accumulator.
accumulator = generator.create_accumulator()
for input_batch in input_batches:
accumulator = generator.add_input(accumulator, feature_path, input_batch)
result = generator.extract_output(accumulator)
compare.assertProtoEqual(
self, result, expected_result, normalize_numbers=True)
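# Illustrative usage sketch (not part of this module; the generator class and the
# expected proto below are hypothetical placeholders): a concrete test would
# subclass CombinerFeatureStatsGeneratorTest and pass at least two value batches
# so that the merge/compact paths above are exercised.
#
#   class MyGeneratorTest(CombinerFeatureStatsGeneratorTest):
#
#     def test_basic(self):
#       generator = MyCombinerFeatureStatsGenerator()  # hypothetical generator
#       batches = [pa.array([[1.0], [2.0]]), pa.array([[3.0]])]
#       expected = statistics_pb2.FeatureNameStatistics()  # filled as needed
#       self.assertCombinerOutputEqual(batches, generator, expected)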
def make_arrow_record_batches_equal_fn(
test: absltest.TestCase, expected_record_batches: List[pa.RecordBatch]):
"""Makes a matcher function for comparing arrow record batches."""
def _matcher(actual_record_batches):
"""Arrow record batches matcher fn."""
test.assertLen(actual_record_batches, len(expected_record_batches))
for i in range(len(expected_record_batches)):
actual_record_batch = actual_record_batches[i]
expected_record_batch = expected_record_batches[i]
test.assertEqual(
expected_record_batch.num_columns,
actual_record_batch.num_columns,
'Expected {} columns, found {} in record_batch {}'.format(
expected_record_batch.num_columns,
actual_record_batch.num_columns, actual_record_batch))
for column_name, expected_column in zip(
expected_record_batch.schema.names, expected_record_batch.columns):
field_index = actual_record_batch.schema.get_field_index(column_name)
test.assertGreaterEqual(
field_index, 0, 'Unable to find column {}'.format(column_name))
actual_column = actual_record_batch.column(field_index)
test.assertTrue(
actual_column.equals(expected_column),
'{}: {} vs {}'.format(column_name, actual_column, expected_column))
return _matcher
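# Usage sketch (pipeline variables below are hypothetical): the returned matcher
# is meant to be handed to Beam's testing helper in the same way as the matchers
# above, e.g.
#   util.assert_that(
#       record_batches_pcoll,
#       make_arrow_record_batches_equal_fn(self, expected_record_batches))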
|
apache-2.0
| 6,273,145,978,485,591,000 | 39.895556 | 99 | 0.691463 | false |
goodmami/pydelphin
|
tests/derivation_test.py
|
1
|
21369
|
import warnings
import pytest
from delphin.derivation import (
Derivation as D,
UdfNode as N,
UdfTerminal as T,
UdfToken as Tk
)
class TestUdfNode():
def test_init(self):
n = N(1, 'entity')
assert n.id == 1
assert n.entity == 'entity'
assert n.score == -1.0
assert n.start == -1
assert n.end == -1
assert n.daughters == []
assert n.is_head() == True # it has no siblings
assert n.type == None
n = N(1, 'entity', 0.5, 1, 2, [], head=True, type='type')
assert n.id == 1
assert n.entity == 'entity'
assert n.score == 0.5
assert n.start == 1
assert n.end == 2
assert n.daughters == []
assert n.is_head() == True
assert n.type == 'type'
class TestDerivation():
def test_init(self):
with pytest.raises(TypeError): D()
with pytest.raises(TypeError): D(1)
t = D(1, 'some-thing')
t = D(1, 'some-thing', 0.5, 0, 3, [T('some-token')])
        # roots are special: id is None, entity is root, daughters must
        # contain exactly 1 node; rest are None
with pytest.raises(TypeError): t = D(None)
with pytest.raises(TypeError): t = D(None, 'some-root', 0.5)
with pytest.raises(TypeError): t = D(None, 'some-root', start=1)
with pytest.raises(TypeError): t = D(None, 'some-root', end=1)
with pytest.raises(ValueError):
t = D(None, 'some-root',
daughters=[N(1, 'some-thing'), N(2, 'some-thing')])
with pytest.raises(ValueError):
t = D(None, 'some-root', daughters=[T('some-token')])
t = D(None, 'some-root', daughters=[N(1, 'some-thing')])
t = D(None, 'some-root', None, None, None,
daughters=[N(1, 'some-thing')])
# root not as top
with pytest.raises(ValueError):
D(1, 'some-thing', daughters=[
N(None, 'some-root', daughters=[
N(2, 'a-lex', daughters=[T('some-token')])
])
])
def test_attributes(self):
t = D(1, 'some-thing')
assert t.id == 1
assert t.entity == 'some-thing'
assert t.score == -1
assert t.start == -1
assert t.end == -1
assert t.daughters == []
t = D(1, 'some-thing', 0.5, 1, 6, [T('some token')])
assert t.id == 1
assert t.entity == 'some-thing'
assert t.score == 0.5
assert t.start == 1
assert t.end == 6
assert t.daughters == [T('some token')]
t = D(None, 'some-root', daughters=[D(1, 'some-thing')])
assert t.id == None
assert t.entity == 'some-root'
assert t.score == None
assert t.start == None
assert t.end == None
assert len(t.daughters) == 1
def test_fromstring(self):
with pytest.raises(ValueError): D.from_string('')
# root with no children
with pytest.raises(ValueError): D.from_string('(some-root)')
# does not start with `(` or end with `)`
with pytest.raises(ValueError):
D.from_string(' (1 some-thing -1 -1 -1 ("token"))')
with pytest.raises(ValueError):
D.from_string(' (1 some-thing -1 -1 -1 ("token")) ')
# uneven parens
with pytest.raises(ValueError):
D.from_string('(1 some-thing -1 -1 -1 ("token")')
# ok
t = D.from_string('(1 some-thing -1 -1 -1 ("token"))')
assert t.id == 1
assert t.entity == 'some-thing'
assert t.score == -1.0
assert t.start == -1
assert t.end == -1
assert t.daughters == [T('token')]
# newlines in tree
t = D.from_string('''(1 some-thing -1 -1 -1
("token"))''')
assert t.id == 1
assert t.entity == 'some-thing'
assert t.score == -1.0
assert t.start == -1
assert t.end == -1
assert t.daughters == [T('token')]
# LKB-style terminals
t = D.from_string('''(1 some-thing -1 -1 -1
("to ken" 1 2))''')
assert t.id == 1
assert t.entity == 'some-thing'
assert t.score == -1.0
assert t.start == -1
assert t.end == -1
assert t.daughters == [T('to ken')] # start/end ignored
# TFS-style terminals
t = D.from_string(r'''(1 some-thing -1 -1 -1
("to ken" 2 "token [ +FORM \"to\" ]"
3 "token [ +FORM \"ken\" ]"))''')
assert t.id == 1
assert t.entity == 'some-thing'
assert t.score == -1.0
assert t.start == -1
assert t.end == -1
assert t.daughters == [
T('to ken', [Tk(2, r'token [ +FORM \"to\" ]'),
Tk(3, r'token [ +FORM \"ken\" ]')])
]
# longer example
t = D.from_string(r'''(root
(1 some-thing 0.4 0 5
(2 a-lex 0.8 0 1
("a" 1 "token [ +FORM \"a\" ]"))
(3 bcd-lex 0.5 2 5
("bcd" 2 "token [ +FORM \"bcd\" ]")))
)''')
assert t.entity == 'root'
assert len(t.daughters) == 1
top = t.daughters[0]
assert top.id == 1
assert top.entity == 'some-thing'
assert top.score == 0.4
assert top.start == 0
assert top.end == 5
assert len(top.daughters) == 2
lex = top.daughters[0]
assert lex.id == 2
assert lex.entity == 'a-lex'
assert lex.score == 0.8
assert lex.start == 0
assert lex.end == 1
assert lex.daughters == [T('a', [Tk(1, r'token [ +FORM \"a\" ]')])]
lex = top.daughters[1]
assert lex.id == 3
assert lex.entity == 'bcd-lex'
assert lex.score == 0.5
assert lex.start == 2
assert lex.end == 5
assert lex.daughters == [T('bcd',
[Tk(2, r'token [ +FORM \"bcd\" ]')])]
def test_str(self):
s = '(1 some-thing -1 -1 -1 ("token"))'
assert str(D.from_string(s)) == s
s = (r'(root (1 some-thing 0.4 0 5 (2 a-lex 0.8 0 1 '
r'("a" 1 "token [ +FORM \"a\" ]")) '
r'(3 bcd-lex 0.5 2 5 ("bcd" 2 "token [ +FORM \"bcd\" ]"))))')
assert str(D.from_string(s)) == s
def test_eq(self):
a = D.from_string('(1 some-thing -1 -1 -1 ("token"))')
# identity
b = D.from_string('(1 some-thing -1 -1 -1 ("token"))')
assert a == b
# ids and scores don't matter
b = D.from_string('(100 some-thing 0.114 -1 -1 ("token"))')
assert a == b
# tokens matter
b = D.from_string('(1 some-thing -1 -1 -1 ("nekot"))')
assert a != b
# and type of rhs
assert a != '(1 some-thing -1 -1 -1 ("token"))'
# and tokenization
b = D.from_string('(1 some-thing -1 2 7 ("token"))')
assert a != b
# and of course entities
b = D.from_string('(1 epyt-emos -1 -1 -1 ("token"))')
assert a != b
# and number of children
a = D.from_string('(1 x -1 -1 -1 (2 y -1 -1 -1 ("y")))')
b = D.from_string('(1 x -1 -1 -1 (2 y -1 -1 -1 ("y")) (3 z -1 -1 -1 ("z")))')
assert a != b
# and order of children
a = D.from_string('(1 x -1 -1 -1 (2 y -1 -1 -1 ("y")) (3 z -1 -1 -1 ("z")))')
b = D.from_string('(1 x -1 -1 -1 (3 z -1 -1 -1 ("z")) (2 y -1 -1 -1 ("y")))')
assert a != b
# and UDX properties when specified
a = D.from_string('(1 x -1 -1 -1 (2 ^y -1 -1 -1 ("y")) (3 z -1 -1 -1 ("z")))')
b = D.from_string('(1 x -1 -1 -1 (2 ^y -1 -1 -1 ("y")) (3 z -1 -1 -1 ("z")))')
assert a == b
b = D.from_string('(1 x -1 -1 -1 (2 y -1 -1 -1 ("y")) (3 ^z -1 -1 -1 ("z")))')
assert a != b
b = D.from_string('(1 x -1 -1 -1 (2 y -1 -1 -1 ("y")) (3 z -1 -1 -1 ("z")))')
assert a != b
a = D.from_string('(1 some-thing@some-type -1 -1 -1 ("token"))')
b = D.from_string('(1 some-thing@some-type -1 -1 -1 ("token"))')
assert a == b
b = D.from_string('(1 some-thing@another-type -1 -1 -1 ("token"))')
assert a != b
b = D.from_string('(1 some-thing -1 -1 -1 ("token"))')
assert a != b
def test_is_root(self):
a = D.from_string('(1 some-thing -1 -1 -1 ("token"))')
assert a.is_root() == False
a = D.from_string('(root (1 some-thing -1 -1 -1 ("token")))')
assert a.is_root() == True
assert a.daughters[0].is_root() == False
def test_is_head(self):
# NOTE: is_head() is undefined for nodes with multiple
# siblings, none of which are marked head (e.g. in plain UDF)
a = D.from_string('(root (1 some-thing -1 -1 -1'
' (2 some-thing -1 -1 -1 ("a"))'
' (3 some-thing -1 -1 -1 ("b"))))')
assert a.is_head() == True
node = a.daughters[0]
assert node.is_head() == True
assert node.daughters[0].is_head() == None
assert node.daughters[1].is_head() == None
# if one sibling is marked, all become decidable
a = D.from_string('(root (1 some-thing -1 -1 -1'
' (2 some-thing -1 -1 -1 ("a"))'
' (3 ^some-thing -1 -1 -1 ("b"))))')
assert a.is_head() == True
node = a.daughters[0]
assert node.is_head() == True
assert node.daughters[0].is_head() == False
assert node.daughters[1].is_head() == True
def test_entity(self):
a = D.from_string('(root (1 some-thing -1 -1 -1'
' (2 a-thing -1 -1 -1 ("a"))'
' (3 b-thing -1 -1 -1 ("b"))))')
assert a.entity == 'root'
node = a.daughters[0]
assert node.entity == 'some-thing'
assert node.daughters[0].entity == 'a-thing'
assert node.daughters[1].entity == 'b-thing'
a = D.from_string('(root (1 some-thing@some-type -1 -1 -1'
' (2 a-thing@a-type -1 -1 -1 ("a"))'
' (3 b-thing@b-type -1 -1 -1 ("b"))))')
assert a.entity == 'root'
node = a.daughters[0]
assert node.entity == 'some-thing'
assert node.daughters[0].entity == 'a-thing'
assert node.daughters[1].entity == 'b-thing'
def test_type(self):
a = D.from_string('(root (1 some-thing -1 -1 -1'
' (2 a-thing -1 -1 -1 ("a"))'
' (3 b-thing -1 -1 -1 ("b"))))')
assert a.type == None
node = a.daughters[0]
assert node.type == None
assert node.daughters[0].type == None
assert node.daughters[1].type == None
a = D.from_string('(root (1 some-thing@some-type -1 -1 -1'
' (2 a-thing@a-type -1 -1 -1 ("a"))'
' (3 b-thing@b-type -1 -1 -1 ("b"))))')
assert a.type == None
node = a.daughters[0]
assert node.type == 'some-type'
assert node.daughters[0].type == 'a-type'
assert node.daughters[1].type == 'b-type'
def test_basic_entity(self):
warnings.simplefilter('always')
# this works for both UDX and standard UDF
a = D.from_string('(root (1 some-thing -1 -1 -1'
' (2 a-thing -1 -1 -1 ("a"))'
' (3 b-thing -1 -1 -1 ("b"))))')
with pytest.warns(DeprecationWarning):
assert a.basic_entity() == 'root'
node = a.daughters[0]
assert node.daughters[0].basic_entity() == 'a-thing'
assert node.daughters[1].basic_entity() == 'b-thing'
a = D.from_string('(root (1 some-thing -1 -1 -1'
' (2 a-thing@a-type_le -1 -1 -1 ("a"))'
' (3 b-thing@b-type_le -1 -1 -1 ("b"))))')
with pytest.warns(DeprecationWarning):
assert a.basic_entity() == 'root'
node = a.daughters[0]
assert node.basic_entity() == 'some-thing'
assert node.daughters[0].basic_entity() == 'a-thing'
assert node.daughters[1].basic_entity() == 'b-thing'
def test_lexical_type(self):
warnings.simplefilter('always')
# NOTE: this returns None for standard UDF or non-preterminals
a = D.from_string('(root (1 some-thing -1 -1 -1'
' (2 a-thing -1 -1 -1 ("a"))'
' (3 b-thing -1 -1 -1 ("b"))))')
with pytest.warns(DeprecationWarning):
assert a.lexical_type() == None
node = a.daughters[0]
assert node.daughters[0].lexical_type() == None
assert node.daughters[1].lexical_type() == None
a = D.from_string('(root (1 some-thing -1 -1 -1'
' (2 a-thing@a-type_le -1 -1 -1 ("a"))'
' (3 b-thing@b-type_le -1 -1 -1 ("b"))))')
with pytest.warns(DeprecationWarning):
assert a.lexical_type() == None
node = a.daughters[0]
assert node.daughters[0].lexical_type() == 'a-type_le'
assert node.daughters[1].lexical_type() == 'b-type_le'
def test_preterminals(self):
a = D.from_string('(root (1 some-thing -1 -1 -1'
' (2 a-thing -1 -1 -1 ("a"))'
' (3 b-thing -1 -1 -1 ("b"))))')
assert [t.id for t in a.preterminals()] == [2, 3]
a = D.from_string('(root'
' (1 some-thing@some-type 0.4 0 5'
' (2 a-lex@a-type 0.8 0 1'
' ("a b"'
' 3 "token [ +FORM \\"a\\" ]"'
' 4 "token [ +FORM \\"b\\" ]"))'
' (5 b-lex@b-type 0.9 1 2'
' ("b"'
' 6 "token [ +FORM \\"b\\" ]"))))')
assert [t.id for t in a.preterminals()] == [2, 5]
def test_terminals(self):
a = D.from_string('(root (1 some-thing -1 -1 -1'
' (2 a-thing -1 -1 -1 ("a"))'
' (3 b-thing -1 -1 -1 ("b"))))')
assert [t.form for t in a.terminals()] == ['a', 'b']
a = D.from_string('(root'
' (1 some-thing@some-type 0.4 0 5'
' (2 a-lex@a-type 0.8 0 1'
' ("a b"'
' 3 "token [ +FORM \\"a\\" ]"'
' 4 "token [ +FORM \\"b\\" ]"))'
' (5 b-lex@b-type 0.9 1 2'
' ("b"'
' 6 "token [ +FORM \\"b\\" ]"))))')
assert [t.form for t in a.terminals()] == ['a b', 'b']
def test_to_udf(self):
s = '(1 some-thing -1 -1 -1 ("token"))'
assert D.from_string(s).to_udf(indent=None) == s
assert D.from_string(s).to_udf(indent=1) == (
'(1 some-thing -1 -1 -1\n'
' ("token"))'
)
s = (r'(root (1 some-thing 0.4 0 5 (2 a-lex 0.8 0 1 '
r'("a" 3 "token [ +FORM \"a\" ]")) '
r'(4 bcd-lex 0.5 2 5 ("bcd" 5 "token [ +FORM \"bcd\" ]"))))')
assert D.from_string(s).to_udf(indent=1) == (
'(root\n'
' (1 some-thing 0.4 0 5\n'
' (2 a-lex 0.8 0 1\n'
' ("a"\n'
' 3 "token [ +FORM \\"a\\" ]"))\n'
' (4 bcd-lex 0.5 2 5\n'
' ("bcd"\n'
' 5 "token [ +FORM \\"bcd\\" ]"))))'
)
s = (r'(root (1 some-thing 0.4 0 5 (2 a-lex 0.8 0 1 '
r'("a b" 3 "token [ +FORM \"a\" ]" 4 "token [ +FORM \"b\" ]"))))')
assert D.from_string(s).to_udf(indent=1) == (
'(root\n'
' (1 some-thing 0.4 0 5\n'
' (2 a-lex 0.8 0 1\n'
' ("a b"\n'
' 3 "token [ +FORM \\"a\\" ]"\n'
' 4 "token [ +FORM \\"b\\" ]"))))'
)
s = (r'(root (1 some-thing@some-type 0.4 0 5 (2 a-lex@a-type 0.8 0 1 '
r'("a b" 3 "token [ +FORM \"a\" ]" 4 "token [ +FORM \"b\" ]"))))')
assert D.from_string(s).to_udf(indent=1) == (
'(root\n'
' (1 some-thing 0.4 0 5\n'
' (2 a-lex 0.8 0 1\n'
' ("a b"\n'
' 3 "token [ +FORM \\"a\\" ]"\n'
' 4 "token [ +FORM \\"b\\" ]"))))'
)
def test_to_udx(self):
s = '(1 some-thing -1 -1 -1 ("token"))'
assert D.from_string(s).to_udx(indent=None) == s
s = (r'(root (1 some-thing@some-type 0.4 0 5 '
r'(2 a-lex@a-type 0.8 0 1 '
r'("a b" 3 "token [ +FORM \"a\" ]" 4 "token [ +FORM \"b\" ]")) '
r'(5 b-lex@b-type 0.9 1 2 '
r'("b" 6 "token [ +FORM \"b\" ]"))))')
assert D.from_string(s).to_udx(indent=1) == (
'(root\n'
' (1 some-thing@some-type 0.4 0 5\n'
' (2 a-lex@a-type 0.8 0 1\n'
' ("a b"\n'
' 3 "token [ +FORM \\"a\\" ]"\n'
' 4 "token [ +FORM \\"b\\" ]"))\n'
' (5 b-lex@b-type 0.9 1 2\n'
' ("b"\n'
' 6 "token [ +FORM \\"b\\" ]"))))'
)
def test_to_dict(self):
s = '(1 some-thing -1 -1 -1 ("token"))'
assert D.from_string(s).to_dict() == {
'id': 1,
'entity': 'some-thing',
'score': -1.0,
'start': -1,
'end': -1,
'form': 'token'
}
fields = ('id', 'entity', 'score')
# daughters and form are always shown
assert D.from_string(s).to_dict(fields=fields) == {
'id': 1,
'entity': 'some-thing',
'score': -1.0,
'form': 'token'
}
s = (r'(root (0 top@top-rule -1 -1 -1'
r' (1 a-lex@a-type -1 -1 -1 ("a b" 2 "token [ +FORM \"a\" ]"'
r' 3 "token [ +FORM \"b\" ]"))'
r' (4 ^c-lex@c-type -1 -1 -1 ("c" 5 "token [ +FORM \"c\" ]"))))')
assert D.from_string(s).to_dict() == {
'entity': 'root',
'daughters': [
{
'id': 0,
'entity': 'top',
'type': 'top-rule',
'score': -1.0,
'start': -1,
'end': -1,
'daughters': [
{
'id': 1,
'entity': 'a-lex',
'type': 'a-type',
'score': -1.0,
'start': -1,
'end': -1,
'form': 'a b',
'tokens': [
{'id': 2, 'tfs': r'token [ +FORM \"a\" ]'},
{'id': 3, 'tfs': r'token [ +FORM \"b\" ]'}
]
},
{
'id': 4,
'entity': 'c-lex',
'type': 'c-type',
'head': True,
'score': -1.0,
'start': -1,
'end': -1,
'form': 'c',
'tokens': [
{'id': 5, 'tfs': r'token [ +FORM \"c\" ]'}
]
}
]
}
]
}
assert D.from_string(s).to_dict(fields=fields) == {
'entity': 'root',
'daughters': [
{
'id': 0,
'entity': 'top',
'score': -1.0,
'daughters': [
{
'id': 1,
'entity': 'a-lex',
'score': -1.0,
'form': 'a b'
},
{
'id': 4,
'entity': 'c-lex',
'score': -1.0,
'form': 'c'
}
]
}
]
}
def test_from_dict(self):
s = '(root (1 some-thing -1 -1 -1 ("a")))'
d = {
'entity': 'root',
'daughters': [
{
'id': 1,
'entity': 'some-thing',
'form': 'a'
}
]
}
assert D.from_dict(d) == D.from_string(s)
s = (r'(root (1 ^some-thing@some-type -1 -1 -1 ("a b"'
r' 2 "token [ +FORM \"a\" ]"'
r' 3 "token [ +FORM \"b\" ]")))' )
d = {
'entity': 'root',
'daughters': [
{
'id': 1,
'entity': 'some-thing',
'type': 'some-type',
'head': True,
'form': 'a b',
'tokens': [
{'id': 2, 'tfs': r'token [ +FORM \"a\" ]'},
{'id': 3, 'tfs': r'token [ +FORM \"b\" ]'}
]
}
]
}
assert D.from_dict(d) == D.from_string(s)
|
mit
| -1,624,406,123,667,610,400 | 38.572222 | 86 | 0.402265 | false |
courtiol/choosiness
|
Visualization/VisualizationWithPygame/VisualizationOf2DEnvironment.py
|
1
|
7942
|
import pygame
import math
import CIndividual
from Visualization.VisualizationWithPygame.CVisualizationWithPygameBaseClass import CVisualizationWithPygameBaseClass
from Visualization.VisualizationWithPygame.VisualizationWithDiagrams import CNHistograms
from Tools.convertCollisionRateAndE import convert_collision_rate_to_e
"""
Visualization of a 2D-Environment. Individuals are represented as circles. States and gender of the individuals are
represented as colours.
"""
# ---------------------------------------No visualization-----------------------------------
class CNoVisualizationOfSimulation(CVisualizationWithPygameBaseClass):
"""
No visualization at all. Just information about shortcuts on the screen.
"""
def __init__(self, simulation, width_of_window, height_of_window):
CVisualizationWithPygameBaseClass.__init__(self, simulation, width_of_window, height_of_window)
self.colour_of_background = (255, 255, 255)
self.selected_individual = None
# overwrite
def init_screen(self):
CVisualizationWithPygameBaseClass.init_screen(self)
CVisualizationWithPygameBaseClass.screen.fill(self.colour_of_background)
self.print_text_on_screen("No visualization selected", self.simulation.settings.width/2,
self.simulation.settings.height/2-40, 30)
self.print_text_on_screen("p - pause/run", self.simulation.settings.width/2,
self.simulation.settings.height/2, 15)
self.print_text_on_screen("v - next visualization", self.simulation.settings.width/2,
self.simulation.settings.height/2+20, 15)
self.print_text_on_screen("i - current time step", self.simulation.settings.width/2,
self.simulation.settings.height/2+40, 15)
pygame.display.flip()
def __str__(self):
return "No visualization"
def give_information_about_selected_individual(self):
if self.selected_individual is not None:
print(str(self.selected_individual))
# ---------------------------------------2D-Visualization-----------------------------------
class C2DVisualizationOfSimulation(CVisualizationWithPygameBaseClass):
"""
    The class encapsulates the visual output of the simulation. In order to do that it makes use of pygame.
    It also requires a reference to the simulation instance.
"""
def __init__(self, simulation, width_of_window, height_of_window):
CVisualizationWithPygameBaseClass.__init__(self, simulation, width_of_window, height_of_window)
self.colour_of_environment = (255, 255, 255)
self.colour_of_males = (0, 0, 255) # blue
self.thickness_latency = 2
self.thickness_available = 0
self.colour_of_females = (255, 0, 0) # red
self.colour_of_dead_individual = (0, 0, 0) # black
self.selected_individual = None
def draw_simulation(self):
self._draw_environment()
self._draw_population(self.simulation.population)
pygame.display.flip()
# display
def _draw_environment(self):
"""
Sets the background colour, shows the current number of collisions and the average collision number per
step.
:return:
"""
CVisualizationWithPygameBaseClass.screen.fill(self.colour_of_environment)
estimated_e = convert_collision_rate_to_e(
collisions_per_time_step=self.simulation.settings.average_number_of_collisions_per_timestep,
population_size=self.simulation.population.populationSize,
proportion_males=self.simulation.population.sex_ratio)
text = 'collisions: '+str(self.simulation.settings.collision_counter)
text += " average: "+str(round(self.simulation.settings.average_number_of_collisions_per_timestep, 4))
text += " e: "+str(round(estimated_e, 4))
self.print_text_on_screen(text, self.width_of_window/2, self.height_of_window/2)
def _draw_population(self, population):
pygame.display.set_caption("Simulation with "+str(self.simulation.population.current_number_of_females) +
' females in red and '+str(self.simulation.population.current_number_of_males) +
' males in blue')
for p in population.males:
self._draw_individual(p)
for p in population.females:
self._draw_individual(p)
def _draw_individual(self, individual):
"""
Each individual is represented by a circle. The colour of the circle depends on the gender and state of the
individual.
:param individual:individual which is drawn.
:return:
"""
# thickness = 2
if individual.gender == CIndividual.MALE:
if individual.state == CIndividual.IN_LATENCY:
pygame.draw.circle(CVisualizationWithPygameBaseClass.screen, self.colour_of_males, (int(individual.x),
int(individual.y)),
self.simulation.env.itemSize, self.thickness_latency)
else:
pygame.draw.circle(CVisualizationWithPygameBaseClass.screen, self.colour_of_males, (int(individual.x),
int(individual.y)),
self.simulation.env.itemSize, self.thickness_available)
self.print_text_on_screen(str(round(individual.q, 4)), int(individual.x), int(individual.y), 15)
else:
if individual.state == CIndividual.IN_LATENCY:
pygame.draw.circle(CVisualizationWithPygameBaseClass.screen, self.colour_of_females, (int(individual.x),
int(individual.y)),
self.simulation.env.itemSize, self.thickness_latency)
else:
pygame.draw.circle(CVisualizationWithPygameBaseClass.screen, self.colour_of_females, (int(individual.x),
int(individual.y)),
self.simulation.env.itemSize, self.thickness_available)
self.print_text_on_screen(str(round(individual.phi, 4)), int(individual.x), int(individual.y), 15)
def give_information_about_selected_individual(self):
"""
        shows __str__ of the selected individual
:return:
"""
if self.selected_individual is not None:
print(str(self.selected_individual))
def select_individual(self, x, y):
"""
Searches in the environment for an individual with coordinates (x,y) and selects this individual
:param x: x-coordinate
:param y: y-coordinate
:return:-
"""
self.selected_individual = self.simulation.env.find_item(x, y, [self.simulation.population])
def __str__(self):
return "2D visualization"
def handle_user_event(self, event):
"""
overwritten method from the super class
:param event: user event
:return:
"""
# add user events for selecting individuals in the environment
if event.type == pygame.MOUSEBUTTONDOWN:
(mouse_x, mouse_y) = pygame.mouse.get_pos()
self.select_individual(mouse_x, mouse_y)
if self.selected_individual is not None:
self.give_information_about_selected_individual()
elif event.type == pygame.MOUSEBUTTONUP:
self.selected_individual = None
        # deal with other possible user events in the super class
        CVisualizationWithPygameBaseClass.handle_user_event(self, event)
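# Minimal usage sketch (assumes a fully configured simulation object from the
# surrounding framework; the window size values are arbitrary):
#
#   visualization = C2DVisualizationOfSimulation(simulation, 800, 600)
#   visualization.init_screen()
#   visualization.draw_simulation()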
|
gpl-3.0
| 4,464,538,815,037,431,000 | 49.585987 | 121 | 0.60753 | false |
daniaki/Enrich2
|
enrich2/tests/test_lib_barcode.py
|
1
|
10997
|
# Copyright 2016-2017 Alan F Rubin
#
# This file is part of Enrich2.
#
# Enrich2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Enrich2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Enrich2. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ..libraries.barcode import BarcodeSeqLib
from .utilities import load_config_data, create_file_path
from .methods import HDF5TestComponent
CFG_FILE = "barcode.json"
CFG_DIR = "data/config/barcode/"
READS_DIR = create_file_path("barcode/", "data/reads/")
RESULT_DIR = "data/result/barcode/"
LIBTYPE = 'barcode'
FILE_EXT = 'tsv'
FILE_SEP = '\t'
# -------------------------------------------------------------------------- #
#
# BARCODE INTEGRATED COUNT TESTING
#
# -------------------------------------------------------------------------- #
class TestBarcodeSeqLibCountsIntegratedFilters(unittest.TestCase):
def setUp(self):
cfg = load_config_data(CFG_FILE, CFG_DIR)
cfg['fastq']['reads'] = '{}/integrated.fq'.format(READS_DIR)
cfg['fastq']['filters']['max N'] = 0
cfg['fastq']['filters']['chastity'] = True
cfg['fastq']['filters']['avg quality'] = 38
cfg['fastq']['filters']['min quality'] = 20
cfg['fastq']['start'] = 4
cfg['fastq']['length'] = 3
cfg['fastq']['reverse'] = True
cfg['barcodes']['min count'] = 2
self.test_component = HDF5TestComponent(
store_constructor=BarcodeSeqLib, cfg=cfg, result_dir=RESULT_DIR,
file_ext=FILE_EXT, file_sep=FILE_SEP, save=False, verbose=False,
libtype='integrated', scoring_method='', logr_method='', coding=''
)
self.test_component.setUp()
def tearDown(self):
self.test_component.tearDown()
def test_all_hdf5_dataframes(self):
self.test_component.runTest()
# -------------------------------------------------------------------------- #
#
# BARCODE MINCOUNT COUNT TESTING
#
# -------------------------------------------------------------------------- #
class TestBarcodeSeqLibCountWithBarcodeMinCountSetting(unittest.TestCase):
def setUp(self):
cfg = load_config_data(CFG_FILE, CFG_DIR)
cfg['fastq']['reads'] = '{}/barcode_mincount.fq'.format(READS_DIR)
cfg['barcodes']['min count'] = 2
self.test_component = HDF5TestComponent(
store_constructor=BarcodeSeqLib, cfg=cfg, result_dir=RESULT_DIR,
file_ext=FILE_EXT, file_sep=FILE_SEP, save=False, verbose=False,
libtype='mincount', scoring_method='', logr_method='', coding=''
)
self.test_component.setUp()
def tearDown(self):
self.test_component.tearDown()
def test_all_hdf5_dataframes(self):
self.test_component.runTest()
# -------------------------------------------------------------------------- #
#
# BARCODE COUNTS ONLY MODE COUNT TESTING
#
# -------------------------------------------------------------------------- #
class TestBarcodeSeqLibCountCountsOnlyMode(unittest.TestCase):
def setUp(self):
cfg = load_config_data(CFG_FILE, CFG_DIR)
cfg['counts file'] = '{}/counts_only.tsv'.format(READS_DIR)
cfg['barcodes']['min count'] = 2
self.test_component = HDF5TestComponent(
store_constructor=BarcodeSeqLib, cfg=cfg, result_dir=RESULT_DIR,
file_ext=FILE_EXT, file_sep=FILE_SEP, save=False, verbose=False,
libtype='counts_only', scoring_method='', logr_method='', coding=''
)
self.test_component.setUp()
def tearDown(self):
self.test_component.tearDown()
def test_all_hdf5_dataframes(self):
self.test_component.runTest()
# -------------------------------------------------------------------------- #
#
# FASTQ FILTER AVERAGE QUALITY
#
# -------------------------------------------------------------------------- #
class TestBarcodeSeqLibWithAvgQualityFQFilter(unittest.TestCase):
def setUp(self):
cfg = load_config_data(CFG_FILE, CFG_DIR)
cfg['fastq']['reads'] = '{}/filter_avgq.fq'.format(READS_DIR)
cfg['fastq']['filters']['avg quality'] = 39
self.test_component = HDF5TestComponent(
store_constructor=BarcodeSeqLib, cfg=cfg, result_dir=RESULT_DIR,
file_ext=FILE_EXT, file_sep=FILE_SEP, save=False, verbose=False,
libtype='filter_avgq', scoring_method='', logr_method='', coding=''
)
self.test_component.setUp()
def tearDown(self):
self.test_component.tearDown()
def test_all_hdf5_dataframes(self):
self.test_component.runTest()
# -------------------------------------------------------------------------- #
#
# FASTQ FILTER MAX N
#
# -------------------------------------------------------------------------- #
class TestBarcodeSeqLibWithMaxNFQFilter(unittest.TestCase):
def setUp(self):
cfg = load_config_data(CFG_FILE, CFG_DIR)
cfg['fastq']['reads'] = '{}/filter_maxn.fq'.format(READS_DIR)
cfg['fastq']['filters']['max N'] = 1
self.test_component = HDF5TestComponent(
store_constructor=BarcodeSeqLib, cfg=cfg, result_dir=RESULT_DIR,
file_ext=FILE_EXT, file_sep=FILE_SEP, save=False, verbose=False,
libtype='filter_maxn', scoring_method='', logr_method='', coding=''
)
self.test_component.setUp()
def tearDown(self):
self.test_component.tearDown()
def test_all_hdf5_dataframes(self):
self.test_component.runTest()
# -------------------------------------------------------------------------- #
#
# FASTQ FILTER MIN QUALITY
#
# -------------------------------------------------------------------------- #
class TestBarcodeSeqLibWithMinQualFQFilter(unittest.TestCase):
def setUp(self):
cfg = load_config_data(CFG_FILE, CFG_DIR)
cfg['fastq']['reads'] = '{}/filter_minq.fq'.format(READS_DIR)
cfg['fastq']['filters']['min quality'] = 38
self.test_component = HDF5TestComponent(
store_constructor=BarcodeSeqLib, cfg=cfg, result_dir=RESULT_DIR,
file_ext=FILE_EXT, file_sep=FILE_SEP, save=False, verbose=False,
libtype='filter_minq', scoring_method='', logr_method='', coding=''
)
self.test_component.setUp()
def tearDown(self):
self.test_component.tearDown()
def test_all_hdf5_dataframes(self):
self.test_component.runTest()
# -------------------------------------------------------------------------- #
#
# FASTQ FILTER NOT CHASTE
#
# -------------------------------------------------------------------------- #
class TestBarcodeSeqLibWithChastityFQFilter(unittest.TestCase):
def setUp(self):
cfg = load_config_data(CFG_FILE, CFG_DIR)
cfg['fastq']['reads'] = '{}/filter_not_chaste.fq'.format(READS_DIR)
cfg['fastq']['filters']['chastity'] = True
self.test_component = HDF5TestComponent(
store_constructor=BarcodeSeqLib, cfg=cfg, result_dir=RESULT_DIR,
file_ext=FILE_EXT, file_sep=FILE_SEP, save=False, verbose=False,
libtype='filter_not_chaste', scoring_method='',
logr_method='', coding=''
)
self.test_component.setUp()
def tearDown(self):
self.test_component.tearDown()
def test_all_hdf5_dataframes(self):
self.test_component.runTest()
# -------------------------------------------------------------------------- #
#
# USE REVCOMP
#
# -------------------------------------------------------------------------- #
class TestBarcodeSeqLibWithRevcompSetting(unittest.TestCase):
def setUp(self):
cfg = load_config_data(CFG_FILE, CFG_DIR)
cfg['fastq']['reads'] = '{}/revcomp.fq'.format(READS_DIR)
cfg['fastq']['reverse'] = True
self.test_component = HDF5TestComponent(
store_constructor=BarcodeSeqLib, cfg=cfg, result_dir=RESULT_DIR,
file_ext=FILE_EXT, file_sep=FILE_SEP, save=False, verbose=False,
libtype='revcomp', scoring_method='', logr_method='', coding=''
)
self.test_component.setUp()
def tearDown(self):
self.test_component.tearDown()
def test_all_hdf5_dataframes(self):
self.test_component.runTest()
# -------------------------------------------------------------------------- #
#
# USE TRIM START SETTING
#
# -------------------------------------------------------------------------- #
class TestBarcodeSeqLibWithTrimStartSetting(unittest.TestCase):
def setUp(self):
cfg = load_config_data(CFG_FILE, CFG_DIR)
cfg['fastq']['reads'] = '{}/trim_start.fq'.format(READS_DIR)
cfg['fastq']['start'] = 4
self.test_component = HDF5TestComponent(
store_constructor=BarcodeSeqLib, cfg=cfg, result_dir=RESULT_DIR,
file_ext=FILE_EXT, file_sep=FILE_SEP, save=False, verbose=False,
libtype='trim_start', scoring_method='', logr_method='', coding=''
)
self.test_component.setUp()
def tearDown(self):
self.test_component.tearDown()
def test_all_hdf5_dataframes(self):
self.test_component.runTest()
# -------------------------------------------------------------------------- #
#
# USE TRIM LENGTH SETTING
#
# -------------------------------------------------------------------------- #
class TestBarcodeSeqLibWithTrimLenSetting(unittest.TestCase):
def setUp(self):
cfg = load_config_data(CFG_FILE, CFG_DIR)
cfg['fastq']['reads'] = '{}/trim_len.fq'.format(READS_DIR)
cfg['fastq']['length'] = 5
self.test_component = HDF5TestComponent(
store_constructor=BarcodeSeqLib, cfg=cfg, result_dir=RESULT_DIR,
file_ext=FILE_EXT, file_sep=FILE_SEP, save=False, verbose=False,
libtype='trim_len', scoring_method='', logr_method='', coding=''
)
self.test_component.setUp()
def tearDown(self):
self.test_component.tearDown()
def test_all_hdf5_dataframes(self):
self.test_component.runTest()
# -------------------------------------------------------------------------- #
#
# MAIN
#
# -------------------------------------------------------------------------- #
if __name__ == "__main__":
unittest.main()
|
gpl-3.0
| 8,759,446,518,685,914,000 | 34.474194 | 79 | 0.523234 | false |
nlapalu/SDDetector
|
SDDetector/Utils/AlignmentChainer.py
|
1
|
8484
|
#!/usr/bin/env python
import logging
from SDDetector.Entities.Chain import Chain
class AlignmentChainer(object):
def __init__(self, db, maxGap=3000, logLevel='ERROR'):
"""AlignmentChainer constructor"""
self.db = db
self.maxGap = maxGap
self.logLevel = logLevel
self.dIndex = {}
self.lChains = []
logging.basicConfig(level=logLevel)
    def assertRankForAlgmts(self, lAlgmts, algmt):
        """Return True if the alignments keep the same (or fully reversed) order on query and subject"""
        lAlgmts.append(algmt)
        lSortedSbjct = sorted(lAlgmts, key=lambda algmtc: algmtc.sstart)
        lSortedQuery = sorted(lAlgmts, key=lambda algmtc: algmtc.qstart)
        return lSortedSbjct == lSortedQuery or lSortedSbjct == lSortedQuery[::-1]
def chainAlignments(self, lAlgmts):
"""Build the list of chains and keep position of alignments in chains"""
for algmt in lAlgmts:
if algmt.id not in self.dIndex:
chain = Chain([algmt])
index = len(self.lChains)
self.lChains.append(chain)
self.dIndex[algmt.id] = [index]
lChainIdsCurrentAlgmt = self.dIndex[algmt.id]
lProximalAlgmts = self.db.selectProximalAlgmts(algmt.id, self.maxGap)
for proxAlgmt in lProximalAlgmts:
for chainId in lChainIdsCurrentAlgmt:
                    dist = self.distanceBetweenQueryAlgmts(algmt, proxAlgmt)
                    if 0 < dist < self.maxGap:
if proxAlgmt.id not in self.lChains[chainId].getIdListOfAlgmts():
self.lChains[chainId].lAlgmts.append(proxAlgmt)
if proxAlgmt.id in self.dIndex:
self.dIndex[proxAlgmt.id].append(chainId)
else:
self.dIndex[proxAlgmt.id] = [chainId]
self.removeInternalAlignments()
return
def removeInternalAlignments(self):
"""
        remove matches with internal similarity
a b c d
q1 --|-----|---|--------------|--
s1 ------------|----|-----|---|--
c' a' b' d'
"""
ltmp = []
for i,chain in enumerate(self.lChains):
lAlgmtIds = set(self.db.selectQueryOnlySuboptimalAlgmts(chain.getIdListOfAlgmts()))
            # set.union() returns a new set; assign the result so the subject-only suboptimal alignments are also included
            lAlgmtIds = lAlgmtIds.union(self.db.selectSubjectOnlySuboptimalAlgmts(chain.getIdListOfAlgmts()))
chain.deleteListOfAlgmts(lAlgmtIds)
for algmtId in lAlgmtIds:
index = self.dIndex[algmtId].index(i)
del(self.dIndex[algmtId][index])
if chain.getNbAlgmts() == 1:
ltmp.append(chain)
if chain.getNbAlgmts() > 1:
for i,algmt in enumerate(chain.sortListOfAlgmts()[:-1]):
if self.distanceBetweenQueryAlgmts(chain.sortListOfAlgmts()[i],chain.sortListOfAlgmts()[i+1]) < self.maxGap:
ltmp.append(chain)
self.lChains = ltmp
def distanceBetweenQueryAlgmts(self, algmt1, algmt2):
"""Compute and return the distance between 2 alignments
           hypothesis to check: algmt1 always starts before algmt2 """
if algmt1.qstart < algmt2.qstart:
return algmt2.qstart - algmt1.qend - 1
else:
return algmt1.qstart - algmt2.qend - 1
def distanceBetweenSbjctAlgmts(self, algmt1, algmt2):
"""Compute and return the distance between 2 alignments
           algmt1 starts before algmt2"""
if algmt1.sstart < algmt2.sstart:
return algmt2.sstart - algmt1.send - 1
else:
return algmt1.sstart - algmt2.send - 1
def distanceBetweenQuerySbjctAlgmts(self, algmt1, algmt2):
"""Compute and return the distance between 2 alignments
           hypothesis to check: algmt1 always starts before algmt2 """
if algmt1.sstart < algmt2.qstart:
return algmt2.qstart - algmt1.send - 1
else:
return algmt1.sstart - algmt2.qend - 1
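    # Worked example for the distance helpers above (hypothetical coordinates):
    # for two query-ordered alignments with qend=100 and qstart=150 the gap is
    # 150 - 100 - 1 = 49 bases, so the pair is chained only when 0 < 49 < maxGap.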
def sortListOfChains(self, lChains):
"""Return a sorted list of chains"""
ltmp = []
for i, chain in enumerate(lChains):
lAlgmts = chain.sortListOfAlgmts()
for j,algmt in enumerate(lAlgmts):
if j == 0:
sstartMin = algmt.sstart
sendMax = algmt.send
if algmt.sstart < sstartMin:
sstartMin = algmt.sstart
if algmt.send > sendMax:
sendMax = algmt.send
ltmp.append((algmt.sbjct, sstartMin, sendMax, algmt.query, i))
ltmp.sort(key=lambda x: ('{0:0>150}'.format(x[0]).lower(), x[1], x[2], '{0:0>150}'.format(x[3])))
return [ lChains[row[4]] for row in ltmp ]
def removeChainsWithInternalSimilarity(self, lChains):
"""
        Return the list of chains after removing chains that only reflect internal similarity
a b c d
chr1 --|-----|---|--------------|--------------------
chr1 ---------------------|-----|---|----------------|--
a' b' d' a'
internal similarity between both alignments
"""
ltmp = []
for i,chain in enumerate(lChains):
if chain.lAlgmts[0].query == chain.lAlgmts[0].sbjct:
if (chain.getSStart() >= chain.getQStart() and chain.getSStart() < chain.getQEnd()):
ltmp.append(i)
elif (chain.getSStart() <= chain.getQStart() and chain.getSEnd() > chain.getQStart()):
ltmp.append(i)
elif (chain.getSEnd() > chain.getQStart() and chain.getSEnd() <= chain.getQEnd()):
ltmp.append(i)
lChains = [ chain for i, chain in enumerate(lChains) if i not in ltmp ]
return self.sortListOfChains(lChains)
def removeOverlappingChains(self, lChains):
"""Return a non-overlapping list of chains"""
ltmp = []
for i, chain1 in enumerate(lChains[:-1]):
for j, chain2 in enumerate(lChains[i+1:]):
sTrue = False
qTrue = False
if (chain1.lAlgmts[0].query == chain2.lAlgmts[0].query and chain1.lAlgmts[0].sbjct == chain2.lAlgmts[0].sbjct):
if (chain1.getSStart() >= chain2.getSStart() and chain1.getSStart() < chain2.getSEnd()):
sTrue = True
elif (chain1.getSStart() <= chain2.getSStart() and chain1.getSEnd() > chain2.getSStart()):
sTrue = True
elif (chain1.getSEnd() > chain2.getSStart() and chain1.getSEnd() <= chain2.getSEnd()):
sTrue = True
if (chain1.getQStart() >= chain2.getQStart() and chain1.getQStart() < chain2.getQEnd()):
qTrue = True
elif (chain1.getQStart() <= chain2.getQStart() and chain1.getQEnd() > chain2.getQStart()):
qTrue = True
elif (chain1.getQEnd() > chain2.getQStart() and chain1.getQEnd() <= chain2.getQEnd()):
qTrue = True
if sTrue and qTrue:
if chain1.getLength() > chain2.getLength():
ltmp.append(i+1)
elif chain1.getLength() < chain2.getLength():
ltmp.append(i)
elif chain1.getLength() == chain2.getLength():
if chain1.getNbAlgmts() > chain2.getNbAlgmts():
ltmp.append(i)
elif chain1.getNbAlgmts() < chain2.getNbAlgmts():
ltmp.append(i+1)
elif chain1.getNbAlgmts() == chain2.getNbAlgmts():
if chain1.getAlgmtMaxLength() > chain2.getAlgmtMaxLength():
ltmp.append(i+1)
elif chain1.getAlgmtMaxLength() < chain2.getAlgmtMaxLength():
ltmp.append(i)
else:
ltmp.append(i+1)
lChains = [ chain for i, chain in enumerate(lChains) if i not in ltmp ]
return self.sortListOfChains(lChains)
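# Minimal usage sketch (the 'db' object is assumed to expose the query methods used
# above, e.g. selectProximalAlgmts; 'lAlgmts' is the alignment list from the caller):
#
#   chainer = AlignmentChainer(db, maxGap=3000)
#   chainer.chainAlignments(lAlgmts)
#   chains = chainer.removeOverlappingChains(chainer.lChains)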
|
gpl-3.0
| 1,982,953,328,056,818,200 | 38.277778 | 143 | 0.524163 | false |
dmpetrov/dataversioncontrol
|
tests/func/test_version.py
|
1
|
2664
|
import os
import re
import shutil
import pytest
from dvc.command.version import psutil
from dvc.main import main
@pytest.mark.parametrize("scm_init", [True, False])
def test_info_in_repo(scm_init, tmp_dir, caplog):
tmp_dir.init(scm=scm_init, dvc=True)
    # Create `.dvc/cache`, which is needed to check supported link types.
os.mkdir(tmp_dir.dvc.cache.local.cache_dir)
assert main(["version"]) == 0
assert re.search(r"DVC version: \d+\.\d+\.\d+", caplog.text)
assert re.search(r"Python version: \d\.\d\.\d", caplog.text)
assert re.search(r"Platform: .*", caplog.text)
assert re.search(r"Binary: (True|False)", caplog.text)
assert re.search(r"Package: .*", caplog.text)
assert re.search(r"Supported remotes: .*", caplog.text)
assert re.search(
r"(Cache: (.*link - (not )?supported(,\s)?){3})", caplog.text
)
if scm_init:
assert "Repo: dvc, git" in caplog.text
else:
assert "Repo: dvc (no_scm)" in caplog.text
def test_info_in_subdir(tmp_dir, scm, caplog):
dvc_subdir = tmp_dir / "subdir"
dvc_subdir.mkdir()
with dvc_subdir.chdir():
dvc_subdir.init(scm=False, dvc=True)
with dvc_subdir.dvc.config.edit() as conf:
del conf["core"]["no_scm"]
assert main(["version"]) == 0
assert "Repo: dvc (subdir), git" in caplog.text
def test_info_in_broken_git_repo(tmp_dir, dvc, scm, caplog):
shutil.rmtree(dvc.scm.dir)
assert main(["version"]) == 0
assert "Repo: dvc, git (broken)" in caplog.text
@pytest.mark.skipif(psutil is None, reason="No psutil.")
def test_fs_info_in_repo(tmp_dir, dvc, caplog):
os.mkdir(dvc.cache.local.cache_dir)
assert main(["version"]) == 0
assert "Filesystem type (cache directory): " in caplog.text
assert "Filesystem type (workspace): " in caplog.text
def test_info_outside_of_repo(tmp_dir, caplog):
assert main(["version"]) == 0
assert re.search(r"DVC version: \d+\.\d+\.\d+", caplog.text)
assert re.search(r"Python version: \d\.\d\.\d", caplog.text)
assert re.search(r"Platform: .*", caplog.text)
assert re.search(r"Binary: (True|False)", caplog.text)
assert re.search(r"Package: .*", caplog.text)
assert re.search(r"Supported remotes: .*", caplog.text)
assert not re.search(r"(Cache: (.*link - (not )?(,\s)?){3})", caplog.text)
assert "Repo:" not in caplog.text
@pytest.mark.skipif(psutil is None, reason="No psutil.")
def test_fs_info_outside_of_repo(tmp_dir, caplog):
assert main(["version"]) == 0
assert "Filesystem type (cache directory): " not in caplog.text
assert "Filesystem type (workspace): " in caplog.text
|
apache-2.0
| -242,218,295,400,082,000 | 31.096386 | 78 | 0.644144 | false |
telmomenezes/synthetic
|
synthetic/commands/prune.py
|
1
|
1297
|
from synthetic.consts import (DEFAULT_SAMPLE_RATE, DEFAULT_NODES,
DEFAULT_EDGES, DEFAULT_GEN_TYPE)
from synthetic.generator import load_generator
from synthetic.commands.command import Command, arg_with_default
class Prune(Command):
def __init__(self, cli_name):
Command.__init__(self, cli_name)
self.name = 'prune'
self.description = 'simplify generator program'
self.mandatory_args = ['prg', 'oprg']
self.optional_args = ['undir', 'sr', 'nodes', 'edges', 'gentype']
def run(self, args):
self.error_msg = None
prog = args['prg']
out_prog = args['oprg']
sr = arg_with_default(args, 'sr', DEFAULT_SAMPLE_RATE)
directed = not args['undir']
nodes = arg_with_default(args, 'nodes', DEFAULT_NODES)
edges = arg_with_default(args, 'edges', DEFAULT_EDGES)
gentype = arg_with_default(args, 'gentype', DEFAULT_GEN_TYPE)
print('nodes: {}'.format(nodes))
print('edges: {}'.format(edges))
# load and run generator
gen = load_generator(prog, directed, gentype)
gen.run(nodes, edges, sr)
# prune and save
gen.prog.dyn_pruning()
gen.prog.write(out_prog)
print('done.')
return True
|
mit
| -1,588,164,623,561,873,400 | 31.425 | 73 | 0.594449 | false |
stefanwebo/HotelReviewSearch
|
data/data_inspector.py
|
1
|
4335
|
import json
import os
def get_user_values(json_file):
    """Extract the user profile fields from the users JSON file and write one value list per field."""
username = []
age = []
gender = []
member_since = []
cities = []
contributions = []
countries = []
locations = []
miles = []
percentage = []
helpfulness = []
with open(json_file, 'r') as f:
content = f.read()
f.close()
json_file = json.dumps(json.loads(content), indent=4)
splitted = json_file.split('\n')
for line in splitted:
line = line.strip('[]').strip()
if '\": ' in line:
tmp = line.split('\": ')
key = tmp[0].strip('\":').strip()
value = tmp[1].strip('\",').strip()
if key == 'username':
username.append(value)
elif key == 'age':
if value not in age:
age.append(value)
elif key == 'gender':
if value not in gender:
gender.append(value)
elif key == 'member_since':
member_since.append(value)
elif key == 'cities':
cities.append(value)
elif key == 'contributions':
contributions.append(value)
elif key == 'countries':
countries.append(value)
elif key == 'location':
locations.append(value)
elif key == 'miles':
miles.append(value)
elif key == 'percentage':
percentage.append(value)
elif key == 'helpfulness':
helpfulness.append(value)
with open('separated_user_values/users.txt', 'w+') as file:
for element in username:
file.write(element)
file.write('\n')
file.close()
with open('separated_user_values/age.txt', 'w+') as file:
for element in age:
file.write(element)
file.write('\n')
file.close()
with open('separated_user_values/gender.txt', 'w+') as file:
for element in gender:
file.write(element)
file.write('\n')
file.close()
with open('separated_user_values/member_since.txt', 'w+') as file:
for element in member_since:
file.write(element)
file.write('\n')
file.close()
with open('separated_user_values/cities.txt', 'w+') as file:
for element in cities:
file.write(element)
file.write('\n')
file.close()
with open('separated_user_values/contributions.txt', 'w+') as file:
for element in contributions:
file.write(element)
file.write('\n')
file.close()
with open('separated_user_values/countries.txt', 'w+') as file:
for element in countries:
file.write(element)
file.write('\n')
file.close()
with open('separated_user_values/locations.txt', 'w+') as file:
for element in locations:
file.write(element)
file.write('\n')
file.close()
with open('separated_user_values/miles.txt', 'w+') as file:
for element in miles:
file.write(element)
file.write('\n')
file.close()
with open('separated_user_values/percentage.txt', 'w+') as file:
for element in percentage:
file.write(element)
file.write('\n')
file.close()
with open('separated_user_values/helpfulness.txt', 'w+') as file:
for element in helpfulness:
file.write(element)
file.write('\n')
file.close()
def format_json(json_file):
with open(json_file, 'r') as f:
content = f.read()
f.close()
filename = json_file[json_file.rfind('/') + 1:json_file.rfind('.json')] + "_formatted.json"
file = "formatted_json/users/" + filename
with open(file, 'w+') as f:
f.write(json.dumps(json.loads(content), sort_keys=True, indent=4))
f.close()
if __name__ == '__main__':
json_file = "/home/alex/Downloads/hotel_review_data/corpus-webis-tripad-14/users.json"
get_user_values(json_file)
# format_json(json_file)
# json_dir = "/home/alex/Downloads/hotel_review_data/corpus-webis-tripad-14/reviews/"
# for root, dirs, files in os.walk(json_dir, topdown=False):
# for file in files:
# format_json(os.path.join(root, file))
|
bsd-2-clause
| -7,816,203,830,080,477,000 | 29.104167 | 95 | 0.536332 | false |
anurag03/integration_tests
|
cfme/tests/v2v/test_v2v_migrations.py
|
1
|
26157
|
"""Test to validate End-to-End migrations- functional testing."""
import fauxfactory
import pytest
from cfme.fixtures.provider import (dual_network_template, dual_disk_template,
dportgroup_template, win7_template, win10_template, win2016_template, rhel69_template,
win2012_template, ubuntu16_template, rhel7_minimal)
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.markers.env_markers.provider import ONE_PER_VERSION
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.log import logger
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.ignore_stream('5.8'),
pytest.mark.provider(
classes=[RHEVMProvider],
selector=ONE_PER_VERSION,
required_flags=['v2v']
),
pytest.mark.provider(
classes=[VMwareProvider],
selector=ONE_PER_VERSION,
fixture_name='second_provider',
required_flags=['v2v']
)
]
def get_migrated_vm_obj(src_vm_obj, target_provider):
"""Returns the migrated_vm obj from target_provider."""
collection = target_provider.appliance.provider_based_collection(target_provider)
migrated_vm = collection.instantiate(src_vm_obj.name, target_provider)
return migrated_vm
@pytest.mark.parametrize('form_data_vm_obj_single_datastore', [['nfs', 'nfs', rhel7_minimal],
['nfs', 'iscsi', rhel7_minimal], ['iscsi', 'iscsi', rhel7_minimal],
['iscsi', 'nfs', rhel7_minimal], ['iscsi', 'local', rhel7_minimal]],
indirect=True)
def test_single_datastore_single_vm_migration(request, appliance, v2v_providers, host_creds,
conversion_tags,
form_data_vm_obj_single_datastore):
infrastructure_mapping_collection = appliance.collections.v2v_mappings
mapping = infrastructure_mapping_collection.create(form_data_vm_obj_single_datastore.form_data)
@request.addfinalizer
def _cleanup():
infrastructure_mapping_collection.delete(mapping)
# vm_obj is a list, with only 1 VM object, hence [0]
src_vm_obj = form_data_vm_obj_single_datastore.vm_list[0]
wait_for(lambda: src_vm_obj.ip_address is not None,
message="Waiting for VM to display IP in CFME",
fail_func=src_vm_obj.refresh_relationships,
delay=5, timeout=300)
migration_plan_collection = appliance.collections.v2v_plans
migration_plan = migration_plan_collection.create(
name="plan_{}".format(fauxfactory.gen_alphanumeric()), description="desc_{}"
.format(fauxfactory.gen_alphanumeric()), infra_map=mapping.name,
vm_list=form_data_vm_obj_single_datastore.vm_list, start_migration=True)
# explicit wait for spinner of in-progress status card
view = appliance.browser.create_view(navigator.get_class(migration_plan_collection, 'All').VIEW)
wait_for(func=view.progress_card.is_plan_started, func_args=[migration_plan.name],
message="migration plan is starting, be patient please", delay=5, num_sec=150,
handle_exception=True)
# wait until plan is in progress
wait_for(func=view.plan_in_progress, func_args=[migration_plan.name],
message="migration plan is in progress, be patient please",
delay=5, num_sec=1800)
view.migr_dropdown.item_select("Completed Plans")
view.wait_displayed()
logger.info("For plan %s, migration status after completion: %s, total time elapsed: %s",
migration_plan.name, view.migration_plans_completed_list.get_vm_count_in_plan(
migration_plan.name), view.migration_plans_completed_list.get_clock(
migration_plan.name))
# validate MAC address matches between source and target VMs
assert view.migration_plans_completed_list.is_plan_succeeded(migration_plan.name)
migrated_vm = get_migrated_vm_obj(src_vm_obj, v2v_providers.rhv_provider)
assert src_vm_obj.mac_address == migrated_vm.mac_address
@pytest.mark.parametrize('form_data_vm_obj_single_network', [['DPortGroup', 'ovirtmgmt',
dportgroup_template], ['VM Network', 'ovirtmgmt', rhel7_minimal]],
indirect=True)
def test_single_network_single_vm_migration(request, appliance, v2v_providers, host_creds,
conversion_tags,
form_data_vm_obj_single_network):
# This test will make use of migration request details page to track status of migration
infrastructure_mapping_collection = appliance.collections.v2v_mappings
mapping = infrastructure_mapping_collection.create(form_data_vm_obj_single_network.form_data)
@request.addfinalizer
def _cleanup():
infrastructure_mapping_collection.delete(mapping)
migration_plan_collection = appliance.collections.v2v_plans
migration_plan = migration_plan_collection.create(
name="plan_{}".format(fauxfactory.gen_alphanumeric()), description="desc_{}"
.format(fauxfactory.gen_alphanumeric()), infra_map=mapping.name,
vm_list=form_data_vm_obj_single_network.vm_list, start_migration=True)
# as migration is started, try to track progress using migration plan request details page
view = appliance.browser.create_view(navigator.get_class(migration_plan_collection, 'All').VIEW)
wait_for(func=view.progress_card.is_plan_started, func_args=[migration_plan.name],
message="migration plan is starting, be patient please", delay=5, num_sec=150,
handle_exception=True)
view.progress_card.select_plan(migration_plan.name)
view = appliance.browser.create_view(navigator.get_class(migration_plan_collection,
'Details').VIEW)
view.wait_displayed()
request_details_list = view.migration_request_details_list
vms = request_details_list.read()
# ideally this will always pass as request details list shows VMs in migration plan
# unless we have a bug
assert len(vms) > 0, "No VMs displayed on Migration Plan Request Details list."
wait_for(func=view.plan_in_progress, message="migration plan is in progress, be patient please",
delay=5, num_sec=2700)
assert (request_details_list.is_successful(vms[0]) and
not request_details_list.is_errored(vms[0]))
# validate MAC address matches between source and target VMs
src_vm = form_data_vm_obj_single_network.vm_list.pop()
migrated_vm = get_migrated_vm_obj(src_vm, v2v_providers.rhv_provider)
assert src_vm.mac_address == migrated_vm.mac_address
@pytest.mark.parametrize(
'form_data_dual_vm_obj_dual_datastore', [[['nfs', 'nfs',
rhel7_minimal], ['iscsi', 'iscsi', rhel7_minimal]]],
indirect=True
)
def test_dual_datastore_dual_vm_migration(request, appliance, v2v_providers, host_creds,
conversion_tags,
form_data_dual_vm_obj_dual_datastore, soft_assert):
# This test will make use of migration request details page to track status of migration
infrastructure_mapping_collection = appliance.collections.v2v_mappings
mapping = infrastructure_mapping_collection.create(form_data_dual_vm_obj_dual_datastore.
form_data)
@request.addfinalizer
def _cleanup():
infrastructure_mapping_collection.delete(mapping)
migration_plan_collection = appliance.collections.v2v_plans
migration_plan = migration_plan_collection.create(
name="plan_{}".format(fauxfactory.gen_alphanumeric()), description="desc_{}"
.format(fauxfactory.gen_alphanumeric()), infra_map=mapping.name,
vm_list=form_data_dual_vm_obj_dual_datastore.vm_list, start_migration=True)
# as migration is started, try to track progress using migration plan request details page
view = appliance.browser.create_view(navigator.get_class(migration_plan_collection, 'All').VIEW)
wait_for(func=view.progress_card.is_plan_started, func_args=[migration_plan.name],
message="migration plan is starting, be patient please", delay=5, num_sec=150,
handle_exception=True)
view.progress_card.select_plan(migration_plan.name)
view = appliance.browser.create_view(navigator.get_class(migration_plan_collection,
'Details').VIEW)
view.wait_displayed()
request_details_list = view.migration_request_details_list
vms = request_details_list.read()
wait_for(func=view.plan_in_progress, message="migration plan is in progress, be patient please",
delay=5, num_sec=1800)
for vm in vms:
soft_assert(request_details_list.is_successful(vm) and
not request_details_list.is_errored(vm))
src_vms_list = form_data_dual_vm_obj_dual_datastore.vm_list
# validate MAC address matches between source and target VMs
for src_vm in src_vms_list:
migrated_vm = get_migrated_vm_obj(src_vm, v2v_providers.rhv_provider)
soft_assert(src_vm.mac_address == migrated_vm.mac_address)
@pytest.mark.parametrize(
'form_data_vm_obj_dual_nics', [[['VM Network', 'ovirtmgmt'],
['DPortGroup', 'Storage - VLAN 33'], dual_network_template]],
indirect=True
)
def test_dual_nics_migration(request, appliance, v2v_providers, host_creds, conversion_tags,
form_data_vm_obj_dual_nics):
infrastructure_mapping_collection = appliance.collections.v2v_mappings
mapping = infrastructure_mapping_collection.create(form_data_vm_obj_dual_nics.form_data)
@request.addfinalizer
def _cleanup():
infrastructure_mapping_collection.delete(mapping)
migration_plan_collection = appliance.collections.v2v_plans
migration_plan = migration_plan_collection.create(
name="plan_{}".format(fauxfactory.gen_alphanumeric()), description="desc_{}"
.format(fauxfactory.gen_alphanumeric()), infra_map=mapping.name,
vm_list=form_data_vm_obj_dual_nics.vm_list, start_migration=True)
# explicit wait for spinner of in-progress status card
view = appliance.browser.create_view(navigator.get_class(migration_plan_collection, 'All').VIEW)
wait_for(func=view.progress_card.is_plan_started, func_args=[migration_plan.name],
message="migration plan is starting, be patient please", delay=5, num_sec=150,
handle_exception=True)
# wait until plan is in progress
wait_for(func=view.plan_in_progress, func_args=[migration_plan.name],
message="migration plan is in progress, be patient please",
delay=5, num_sec=2700)
view.migr_dropdown.item_select("Completed Plans")
view.wait_displayed()
logger.info("For plan %s, migration status after completion: %s, total time elapsed: %s",
migration_plan.name, view.migration_plans_completed_list.get_vm_count_in_plan(
migration_plan.name), view.migration_plans_completed_list.get_clock(
migration_plan.name))
assert view.migration_plans_completed_list.is_plan_succeeded(migration_plan.name)
# validate MAC address matches between source and target VMs
src_vm = form_data_vm_obj_dual_nics.vm_list.pop()
migrated_vm = get_migrated_vm_obj(src_vm, v2v_providers.rhv_provider)
assert src_vm.mac_address == migrated_vm.mac_address
@pytest.mark.parametrize('form_data_vm_obj_single_datastore', [['nfs', 'nfs', dual_disk_template]],
indirect=True)
def test_dual_disk_vm_migration(request, appliance, v2v_providers, host_creds, conversion_tags,
form_data_vm_obj_single_datastore):
infrastructure_mapping_collection = appliance.collections.v2v_mappings
mapping = infrastructure_mapping_collection.create(form_data_vm_obj_single_datastore.form_data)
@request.addfinalizer
def _cleanup():
infrastructure_mapping_collection.delete(mapping)
migration_plan_collection = appliance.collections.v2v_plans
migration_plan = migration_plan_collection.create(
name="plan_{}".format(fauxfactory.gen_alphanumeric()), description="desc_{}"
.format(fauxfactory.gen_alphanumeric()), infra_map=mapping.name,
vm_list=form_data_vm_obj_single_datastore.vm_list, start_migration=True)
# explicit wait for spinner of in-progress status card
view = appliance.browser.create_view(navigator.get_class(migration_plan_collection, 'All').VIEW)
wait_for(func=view.progress_card.is_plan_started, func_args=[migration_plan.name],
message="migration plan is starting, be patient please", delay=5, num_sec=150,
handle_exception=True)
# wait until plan is in progress
wait_for(func=view.plan_in_progress, func_args=[migration_plan.name],
message="migration plan is in progress, be patient please",
delay=5, num_sec=3600)
view.migr_dropdown.item_select("Completed Plans")
view.wait_displayed()
logger.info("For plan %s, migration status after completion: %s, total time elapsed: %s",
migration_plan.name, view.migration_plans_completed_list.get_vm_count_in_plan(
migration_plan.name), view.migration_plans_completed_list.get_clock(
migration_plan.name))
assert view.migration_plans_completed_list.is_plan_succeeded(migration_plan.name)
# validate MAC address matches between source and target VMs
src_vm = form_data_vm_obj_single_datastore.vm_list.pop()
migrated_vm = get_migrated_vm_obj(src_vm, v2v_providers.rhv_provider)
assert src_vm.mac_address == migrated_vm.mac_address
@pytest.mark.parametrize('form_data_multiple_vm_obj_single_datastore', [['nfs', 'nfs',
[win7_template, win10_template]], ['nfs', 'nfs',
[win2016_template, rhel69_template]], ['nfs', 'nfs',
[win2012_template, ubuntu16_template]]], indirect=True)
def test_migrations_different_os_templates(request, appliance, v2v_providers, host_creds,
conversion_tags,
form_data_multiple_vm_obj_single_datastore,
soft_assert):
infrastructure_mapping_collection = appliance.collections.v2v_mappings
mapping = infrastructure_mapping_collection.create(
form_data_multiple_vm_obj_single_datastore.form_data)
@request.addfinalizer
def _cleanup():
infrastructure_mapping_collection.delete(mapping)
migration_plan_collection = appliance.collections.v2v_plans
migration_plan = migration_plan_collection.create(
name="plan_{}".format(fauxfactory.gen_alphanumeric()), description="desc_{}"
.format(fauxfactory.gen_alphanumeric()), infra_map=mapping.name,
vm_list=form_data_multiple_vm_obj_single_datastore.vm_list, start_migration=True)
# as migration is started, try to track progress using migration plan request details page
view = appliance.browser.create_view(navigator.get_class(migration_plan_collection, 'All').VIEW)
wait_for(func=view.progress_card.is_plan_started, func_args=[migration_plan.name],
message="migration plan is starting, be patient please", delay=5, num_sec=150,
handle_exception=True)
view.progress_card.select_plan(migration_plan.name)
view = appliance.browser.create_view(navigator.get_class(migration_plan_collection,
'Details').VIEW)
view.wait_displayed()
request_details_list = view.migration_request_details_list
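    # Show more rows per page (likely so every VM of the multi-VM plan is visible when read below)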
view.items_on_page.item_select('15')
vms = request_details_list.read()
wait_for(func=view.plan_in_progress, message="migration plan is in progress, be patient please",
delay=5, num_sec=3600)
for vm in vms:
soft_assert(request_details_list.is_successful(vm) and
not request_details_list.is_errored(vm))
src_vms_list = form_data_multiple_vm_obj_single_datastore.vm_list
# validate MAC address matches between source and target VMs
for src_vm in src_vms_list:
migrated_vm = get_migrated_vm_obj(src_vm, v2v_providers.rhv_provider)
soft_assert(src_vm.mac_address == migrated_vm.mac_address)
def test_conversion_host_tags(appliance, v2v_providers):
"""Tests following cases:
1)Test Attribute in UI indicating host has/has not been configured as conversion host like Tags
2)Test converstion host tags
"""
tag1 = (appliance.collections.categories.instantiate(
display_name='V2V - Transformation Host *')
.collections.tags.instantiate(display_name='t'))
tag2 = (appliance.collections.categories.instantiate(
display_name='V2V - Transformation Method')
.collections.tags.instantiate(display_name='VDDK'))
# We just pick the first available host hence [0]
host = v2v_providers.rhv_provider.hosts.all()[0]
# Remove any prior tags
host.remove_tags(host.get_tags())
host.add_tag(tag1)
assert host.get_tags()[0].category.display_name in tag1.category.display_name
host.remove_tag(tag1)
host.add_tag(tag2)
assert host.get_tags()[0].category.display_name in tag2.category.display_name
host.remove_tag(tag2)
host.remove_tags(host.get_tags())
@pytest.mark.parametrize('conversion_tags, form_data_vm_obj_single_datastore', [['SSH',
['nfs', 'nfs', rhel7_minimal]]], indirect=True)
def test_single_vm_migration_with_ssh(request, appliance, v2v_providers, host_creds,
conversion_tags,
form_data_vm_obj_single_datastore):
infrastructure_mapping_collection = appliance.collections.v2v_mappings
mapping = infrastructure_mapping_collection.create(form_data_vm_obj_single_datastore.form_data)
@request.addfinalizer
def _cleanup():
infrastructure_mapping_collection.delete(mapping)
migration_plan_collection = appliance.collections.v2v_plans
migration_plan = migration_plan_collection.create(
name="plan_{}".format(fauxfactory.gen_alphanumeric()), description="desc_{}"
.format(fauxfactory.gen_alphanumeric()), infra_map=mapping.name,
vm_list=form_data_vm_obj_single_datastore.vm_list, start_migration=True)
# explicit wait for spinner of in-progress status card
view = appliance.browser.create_view(navigator.get_class(migration_plan_collection, 'All').VIEW)
wait_for(func=view.progress_card.is_plan_started, func_args=[migration_plan.name],
message="migration plan is starting, be patient please", delay=5, num_sec=150,
handle_exception=True)
# wait until plan is in progress
wait_for(func=view.plan_in_progress, func_args=[migration_plan.name],
message="migration plan is in progress, be patient please",
delay=5, num_sec=1800)
view.migr_dropdown.item_select("Completed Plans")
view.wait_displayed()
logger.info("For plan %s, migration status after completion: %s, total time elapsed: %s",
migration_plan.name,
view.migration_plans_completed_list.get_vm_count_in_plan(migration_plan.name),
view.migration_plans_completed_list.get_clock(migration_plan.name))
assert view.migration_plans_completed_list.is_plan_succeeded(migration_plan.name)
# validate MAC address matches between source and target VMs
src_vm = form_data_vm_obj_single_datastore.vm_list.pop()
migrated_vm = get_migrated_vm_obj(src_vm, v2v_providers.rhv_provider)
assert src_vm.mac_address == migrated_vm.mac_address
@pytest.mark.parametrize('form_data_vm_obj_single_datastore', [['nfs', 'nfs', rhel7_minimal]],
indirect=True)
@pytest.mark.parametrize('power_state', ['RUNNING', 'STOPPED'])
def test_single_vm_migration_power_state_tags_retirement(request, appliance, v2v_providers,
host_creds, conversion_tags,
form_data_vm_obj_single_datastore,
power_state):
    # Test that the VM power state and tags are preserved through migration
    # As this is a single-VM migration there is only one vm_obj, which we extract on the next line
src_vm = form_data_vm_obj_single_datastore.vm_list[0]
if power_state not in src_vm.mgmt.state:
if power_state == 'RUNNING':
src_vm.mgmt.start()
elif power_state == 'STOPPED':
src_vm.mgmt.stop()
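    # Tag the source VM and give it a retirement date so both can be checked on the migrated VM afterwards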
tag = (appliance.collections.categories.instantiate(display_name='Owner *').collections.tags
.instantiate(display_name='Production Linux Team'))
src_vm.add_tag(tag)
src_vm.set_retirement_date(offset={'hours': 1})
infrastructure_mapping_collection = appliance.collections.v2v_mappings
mapping = infrastructure_mapping_collection.create(form_data_vm_obj_single_datastore.form_data)
@request.addfinalizer
def _cleanup():
infrastructure_mapping_collection.delete(mapping)
migration_plan_collection = appliance.collections.v2v_plans
migration_plan = migration_plan_collection.create(
name="plan_{}".format(fauxfactory.gen_alphanumeric()), description="desc_{}"
.format(fauxfactory.gen_alphanumeric()), infra_map=mapping.name,
vm_list=form_data_vm_obj_single_datastore.vm_list, start_migration=True)
# explicit wait for spinner of in-progress status card
view = appliance.browser.create_view(navigator.get_class(migration_plan_collection, 'All').VIEW)
wait_for(func=view.progress_card.is_plan_started, func_args=[migration_plan.name],
message="migration plan is starting, be patient please", delay=5, num_sec=150,
handle_exception=True)
# wait until plan is in progress
wait_for(func=view.plan_in_progress, func_args=[migration_plan.name],
message="migration plan is in progress, be patient please",
delay=5, num_sec=1800)
view.migr_dropdown.item_select("Completed Plans")
view.wait_displayed()
logger.info("For plan %s, migration status after completion: %s, total time elapsed: %s",
migration_plan.name,
view.migration_plans_completed_list.get_vm_count_in_plan(migration_plan.name),
view.migration_plans_completed_list.get_clock(migration_plan.name))
assert view.migration_plans_completed_list.is_plan_succeeded(migration_plan.name)
# check power state on migrated VM
rhv_prov = v2v_providers.rhv_provider
migrated_vm = rhv_prov.mgmt.get_vm(src_vm.name)
assert power_state in migrated_vm.state
# check tags
vm_obj = appliance.collections.infra_vms.instantiate(migrated_vm.name, rhv_prov)
owner_tag = None
for t in vm_obj.get_tags():
if tag.display_name in t.display_name:
owner_tag = t
assert owner_tag is not None and tag.display_name in owner_tag.display_name
    # If 'Never' is not in the retirement date, a retirement date has been set.
assert 'Never' not in vm_obj.retirement_date
@pytest.mark.parametrize('host_creds, form_data_multiple_vm_obj_single_datastore', [['multi-host',
['nfs', 'nfs', [rhel7_minimal, ubuntu16_template, rhel69_template, win7_template]]]],
indirect=True)
def test_multi_host_multi_vm_migration(request, appliance, v2v_providers, host_creds,
conversion_tags, soft_assert,
form_data_multiple_vm_obj_single_datastore):
infrastructure_mapping_collection = appliance.collections.v2v_mappings
mapping = infrastructure_mapping_collection.create(
form_data_multiple_vm_obj_single_datastore.form_data)
@request.addfinalizer
def _cleanup():
infrastructure_mapping_collection.delete(mapping)
migration_plan_collection = appliance.collections.v2v_plans
migration_plan = migration_plan_collection.create(
name="plan_{}".format(fauxfactory.gen_alphanumeric()), description="desc_{}"
.format(fauxfactory.gen_alphanumeric()), infra_map=mapping.name,
vm_list=form_data_multiple_vm_obj_single_datastore.vm_list, start_migration=True)
# as migration is started, try to track progress using migration plan request details page
view = appliance.browser.create_view(navigator.get_class(migration_plan_collection, 'All').VIEW)
wait_for(func=view.progress_card.is_plan_started, func_args=[migration_plan.name],
message="migration plan is starting, be patient please", delay=5, num_sec=150,
handle_exception=True)
view.progress_card.select_plan(migration_plan.name)
view = appliance.browser.create_view(navigator.get_class(migration_plan_collection,
'Details').VIEW)
view.wait_displayed()
request_details_list = view.migration_request_details_list
vms = request_details_list.read()
view.items_on_page.item_select('15')
# testing multi-host utilization
def _is_migration_started():
for vm in vms:
if request_details_list.get_message_text(vm) != 'Migrating':
return False
return True
wait_for(func=_is_migration_started, message="migration is not started for all VMs, "
"be patient please", delay=5, num_sec=300)
hosts_dict = {key.name: [] for key in host_creds}
for vm in vms:
popup_text = request_details_list.read_additional_info_popup(vm)
        # open_additional_info_popup also closes an already-open popup in our case
request_details_list.open_additional_info_popup(vm)
if popup_text['Conversion Host'] in hosts_dict:
hosts_dict[popup_text['Conversion Host']].append(vm)
for host in hosts_dict:
logger.info("Host: {} is migrating VMs: {}".format(host, hosts_dict[host]))
assert len(hosts_dict[host]) > 0, ("Conversion Host: {} not being utilized for migration!"
.format(host))
wait_for(func=view.plan_in_progress, message="migration plan is in progress, be patient please",
delay=5, num_sec=14400)
for vm in vms:
soft_assert(request_details_list.is_successful(vm) and
not request_details_list.is_errored(vm))
|
gpl-2.0
| -3,145,448,637,582,939,600 | 50.490157 | 100 | 0.685209 | false |
creiht/komodomako
|
components/koMako_UDL_Language.py
|
1
|
1949
|
# Copyright (c) 2008-2012 Chuck Thier
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
# Komodo Mako language service.
import logging
from koXMLLanguageBase import koHTMLLanguageBase
log = logging.getLogger("koMakoLanguage")
#log.setLevel(logging.DEBUG)
def registerLanguage(registry):
log.debug("Registering language Mako")
registry.registerLanguage(KoMakoLanguage())
class KoMakoLanguage(koHTMLLanguageBase):
name = "Mako"
lexresLangName = "Mako"
_reg_desc_ = "%s Language" % name
_reg_contractid_ = "@activestate.com/koLanguage?language=%s;1" % name
_reg_categories_ = [("komodo-language", name)]
_reg_clsid_ = "5207496e-a7b8-4e10-ba35-f77c07fa3539"
defaultExtension = '.mako'
supportsSmartIndent = "brace"
lang_from_udl_family = {
'CSL': 'JavaScript',
'TPL': 'Mako',
'M': 'HTML',
'CSS': 'CSS',
'SSL': 'Python',
}
|
mit
| 8,720,785,554,609,959,000 | 36.480769 | 80 | 0.715752 | false |
msmexplorer/msmexplorer
|
examples/plot_trace2d.py
|
1
|
1515
|
"""
Two-dimensional trace plot
===========================
"""
from msmbuilder.example_datasets import FsPeptide
from msmbuilder.featurizer import DihedralFeaturizer
from msmbuilder.decomposition import tICA
from msmbuilder.cluster import MiniBatchKMeans
from msmbuilder.msm import MarkovStateModel
from matplotlib import pyplot as pp
import numpy as np
import msmexplorer as msme
rs = np.random.RandomState(42)
# Load Fs Peptide Data
trajs = FsPeptide().get().trajectories
# Extract Backbone Dihedrals
featurizer = DihedralFeaturizer(types=['phi', 'psi'])
diheds = featurizer.fit_transform(trajs)
# Perform Dimensionality Reduction
tica_model = tICA(lag_time=2, n_components=2)
tica_trajs = tica_model.fit_transform(diheds)
# Plot the 2D free energy surface (optional)
txx = np.concatenate(tica_trajs, axis=0)
ax = msme.plot_free_energy(
txx, obs=(0, 1), n_samples=100000,
random_state=rs,
shade=True,
clabel=True,
clabel_kwargs={'fmt': '%.1f'},
cbar=True,
cbar_kwargs={'format': '%.1f', 'label': 'Free energy (kcal/mol)'}
)
# Now plot the first trajectory on top of it to inspect its movement
msme.plot_trace2d(
data=tica_trajs[0], ts=0.2, ax=ax,
scatter_kwargs={'s': 2},
cbar_kwargs={'format': '%d', 'label': 'Time (ns)',
'orientation': 'horizontal'},
xlabel='tIC 1', ylabel='tIC 2'
)
# Finally, let's plot every trajectory to see the individual sampled regions
f, ax = pp.subplots()
msme.plot_trace2d(tica_trajs, ax=ax, xlabel='tIC 1', ylabel='tIC 2')
pp.show()
|
mit
| 6,836,271,037,440,017,000 | 29.3 | 76 | 0.70363 | false |
andrefreitas/schwa
|
schwa/test/git_extractor_test.py
|
1
|
15943
|
# Copyright (c) 2015 Faculty of Engineering of the University of Porto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Module with the Unit tests for the Git Extractor. """
import unittest
import tempfile
import os
import shutil
import time
import git
from schwa.extraction import GitExtractor
from schwa.repository import *
class TestGitExtractor(unittest.TestCase):
def setUp(self):
self.temp_dir = os.path.join(tempfile.gettempdir(), "repo-test")
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
self.repo = git.Repo.init(self.temp_dir)
self.repo.git.execute(["git", "config", "user.email", "[email protected]"])
self.repo.git.execute(["git", "config", "user.name", "Peter Griffin"])
def testExtraction(self):
code = """
package org.feup.meoarenacustomer.app;
import android.app.DownloadManager;
import com.loopj.android.http.*;
static class API {
static String getUrl() {
return url;
}
static void setUrl(String url) {
this.url = url;
}
private String url;
private final String PRODUCTION_URL = "http://neo.andrefreitas.pt:8081/api";
private static AsyncHttpClient client = new AsyncHttpClient();
static API(String url ){
this.url = url;
}
static API(){
this.url = PRODUCTION_URL;
}
static void login(String email, String password, AsyncHttpResponseHandler responseHandler){
RequestParams params = new RequestParams();
params.put("email", email);
params.put("password", password);
client.post(url + "/login", params, responseHandler);
}
static void register(String name, String email, String nif, String password, String ccNumber, String ccType, String ccValidity, AsyncHttpResponseHandler responseHandler){
RequestParams params = new RequestParams();
params.put("name", name);
params.put("email", email);
params.put("password", password);
params.put("nif", nif);
params.put("ccNumber", ccNumber);
params.put("ccType", ccType);
params.put("ccValidity", ccValidity);
client.post(url + "/customers", params, responseHandler);
}
static void getShows(AsyncHttpResponseHandler responseHandler) {
client.get(url + "/shows", responseHandler);
}
}
private class SOAPAPI{
private void login(String name){
params.put("email", email);
}
}"""
""" First Commit """
# Added API.java
file_path = os.path.join(self.temp_dir, "API.java")
f = open(file_path, "w")
f.write(code)
f.close()
self.repo.git.add(file_path)
# Added README.txt
file_path = os.path.join(self.temp_dir, "README.txt")
f = open(file_path, "w")
f.write("TODO: Write readme")
f.close()
self.repo.git.add(file_path)
self.repo.git.commit(m='First commit')
creation_timestamp = time.time()
code_b = """
package org.feup.meoarenacustomer.app;
import android.app.DownloadManager;
import com.loopj.android.http.*;
static class API {
static String getUrl() {
return url;
}
static void setUrl(String url) {
this.url = url;
}
private String url;
private final String PRODUCTION_URL = "http://neo.andrefreitas.pt:8081/api";
private static AsyncHttpClient client = new AsyncHttpClient();
static API(String url ){
this.url = url;
}
static API(){
this.url = PRODUCTION_URL;
}
// Modified method
static void login(String email, String password, AsyncHttpResponseHandler responseHandler){
RequestParams params = new RequestParams();
params.put("email", email);
client.post(url + "/login", params, responseHandler);
}
// Removed method register()
// Added method
static void recover(String name){
RequestParams params = new RequestParams();
params.put("name", name);
params.put("email", email);
}
// Added method
static void outputShows(AsyncHttpResponseHandler responseHandler) {
client.get(url + "/shows", responseHandler);
}
static void getShows(AsyncHttpResponseHandler responseHandler) {
client.get(url + "/shows", responseHandler);
}
}
private class JSONAPI{
private void recover(String name){
RequestParams params = new RequestParams();
params.put("name", name);
params.put("email", email);
}
}
"""
""" Second commit """
# Modified API.java
file_path = os.path.join(self.temp_dir, "API.java")
f = open(file_path, "w")
f.write(code_b)
f.close()
self.repo.git.add(file_path)
self.repo.git.commit(m='Second commit')
code = """/* CallingMethodsInSameClass.java
*
* illustrates how to call static methods a class
* from a method in the same class
*/
public class CallingMethodsInSameClass
{
public static void main(String[] args) {
printOne();
printOne();
printTwo();
}
public static void printOne() {
System.out.println("Hello World");
}
public static void printTwo() {
printOne();
printOne();
}
}"""
""" Third commit """
# Added CallingMethodsInSameClass.java
file_path = os.path.join(self.temp_dir, "CallingMethodsInSameClass.java")
f = open(file_path, "w")
f.write(code)
f.close()
self.repo.git.add(file_path)
# Modified README.txt
file_path = os.path.join(self.temp_dir, "README.txt")
f = open(file_path, "w")
f.write("AUTHORS: Peter Griffin - [email protected]")
f.close()
self.repo.git.add(file_path)
self.repo.git.commit(m='Third commit')
""" Fourth Commit """
# Deleted CallingMethodsInSameClass.java
file_path = os.path.join(self.temp_dir, "CallingMethodsInSameClass.java")
self.repo.git.rm(file_path)
# Renamed API.java to API2.java
file_path_a = os.path.join(self.temp_dir, "API.java")
file_path_b = os.path.join(self.temp_dir, "API2.java")
self.repo.git.mv(file_path_a, file_path_b)
self.repo.git.commit(m='Fourth commit')
""" Fifth Commit"""
# Modified README.txt
file_path = os.path.join(self.temp_dir, "README.txt")
f = open(file_path, "w")
f.write("AUTHORS: Peter Griffin and Louis Griffin")
f.close()
self.repo.git.add(file_path)
self.repo.git.commit(m='Fifth commit')
""" Sixth Commit"""
# Added ShadowTest.java
code = """public class ShadowTest {
public int x = 0;
class FirstLevel {
public int x = 1;
void methodInFirstLevel(int x) {
System.out.println("x = " + x);
System.out.println("this.x = " + this.x);
System.out.println("ShadowTest.this.x = " + ShadowTest.this.x);
}
}
public static void main(String... args) {
ShadowTest st = new ShadowTest();
ShadowTest.FirstLevel fl = st.new FirstLevel();
fl.methodInFirstLevel(23);
}
}"""
file_path = os.path.join(self.temp_dir, "ShadowTest.java")
f = open(file_path, "w")
f.write(code)
f.close()
self.repo.git.add(file_path)
self.repo.git.commit(m='Sixth commit')
""" Extract """
extractor = GitExtractor(self.temp_dir)
repository = extractor.extract(method_granularity=True, parallel=False)
""" Tests """
self.assertEqual(len(repository.commits), 5, msg="It should only extract commits related to code")
self.assertTrue(repository.begin_ts < creation_timestamp, msg="It should extract the timestamp of first commit")
# First commit
self.assertEqual(repository.commits[0].message, "First commit\n")
self.assertEqual(repository.commits[0].author, "[email protected]")
diffs = repository.commits[0].diffs
self.assertEqual(len(diffs), 10)
self.assertTrue(DiffFile(file_b="API.java", added=True) in diffs)
self.assertTrue(DiffClass(file_name="API.java", class_b="SOAPAPI", added=True) in diffs)
self.assertTrue(DiffClass(file_name="API.java", class_b="API", added=True) in diffs)
self.assertTrue(DiffMethod(file_name="API.java", class_name="API", method_b="getUrl", added=True) in diffs)
self.assertTrue(DiffMethod(file_name="API.java", class_name="API", method_b="setUrl", added=True) in diffs)
self.assertTrue(DiffMethod(file_name="API.java", class_name="API", method_b="API", added=True) in diffs)
self.assertTrue(DiffMethod(file_name="API.java", class_name="API", method_b="login", added=True) in diffs)
self.assertTrue(DiffMethod(file_name="API.java", class_name="API", method_b="register", added=True) in diffs)
self.assertTrue(DiffMethod(file_name="API.java", class_name="API", method_b="getShows", added=True) in diffs)
self.assertTrue(DiffMethod(file_name="API.java", class_name="SOAPAPI", method_b="login", added=True) in diffs)
# Second commit
self.assertEqual(repository.commits[1].message, "Second commit\n")
diffs = repository.commits[1].diffs
self.assertEqual(len(diffs), 10)
self.assertTrue(DiffFile(file_a="API.java", file_b="API.java", modified=True) in diffs)
self.assertTrue(DiffClass("API.java", class_a="API", class_b="API", modified=True) in diffs,
msg="It should recognize modified classes")
self.assertTrue(DiffMethod("API.java", class_name="API", method_a="login", method_b="login", modified=True)
in diffs, msg="It should recognize modified methods")
self.assertTrue(DiffMethod("API.java", class_name="API", method_a="register", removed=True) in diffs,
msg="It should recognize removed methods")
self.assertTrue(DiffMethod("API.java", class_name="API", method_b="recover", added=True) in diffs,
msg="It should recognize added methods")
self.assertTrue(DiffMethod("API.java", class_name="API", method_b="outputShows", added=True) in diffs,
msg="It should recognize added methods")
self.assertTrue(DiffClass("API.java", class_a="SOAPAPI", removed=True) in diffs,
msg="It should recognize removed classes")
self.assertTrue(DiffClass("API.java", class_b="JSONAPI", added=True) in diffs,
msg="It should recognize added classes")
self.assertTrue(DiffMethod("API.java", class_name="SOAPAPI", method_a="login", removed=True) in diffs,
msg="It should recognize removed methods")
self.assertTrue(DiffMethod("API.java", class_name="JSONAPI", method_b="recover", added=True) in diffs,
msg="It should recognize added methods")
# Third commit
self.assertEqual(repository.commits[2].message, "Third commit\n")
diffs = repository.commits[2].diffs
self.assertEqual(len(diffs), 5)
self.assertTrue(DiffFile(file_b="CallingMethodsInSameClass.java", added=True) in diffs)
self.assertTrue(DiffClass(file_name="CallingMethodsInSameClass.java", class_b="CallingMethodsInSameClass",
added=True) in diffs)
self.assertTrue(DiffMethod(file_name="CallingMethodsInSameClass.java", class_name="CallingMethodsInSameClass",
method_b="main", added=True) in diffs)
self.assertTrue(DiffMethod(file_name="CallingMethodsInSameClass.java", class_name="CallingMethodsInSameClass",
method_b="printOne", added=True) in diffs)
self.assertTrue(DiffMethod(file_name="CallingMethodsInSameClass.java", class_name="CallingMethodsInSameClass",
method_b="printTwo", added=True) in diffs)
# Fourth commit
self.assertEqual(repository.commits[3].message, "Fourth commit\n")
diffs = repository.commits[3].diffs
self.assertEqual(len(diffs), 2)
self.assertTrue(DiffFile(file_a="API.java", file_b="API2.java", renamed=True) in diffs)
self.assertTrue(DiffFile(file_a="CallingMethodsInSameClass.java", removed=True) in diffs)
# Sixth commit
self.assertEqual(repository.commits[4].message, "Sixth commit\n")
diffs = repository.commits[4].diffs
self.assertEqual(len(diffs), 5)
self.assertTrue(DiffFile(file_b="ShadowTest.java", added=True) in diffs)
self.assertTrue(DiffClass(file_name="ShadowTest.java", class_b="ShadowTest.FirstLevel",
added=True) in diffs, msg="It should recognize nested classes")
self.assertTrue(DiffClass(file_name="ShadowTest.java", class_b="ShadowTest",
added=True) in diffs)
self.assertTrue(DiffMethod(file_name="ShadowTest.java", class_name="ShadowTest.FirstLevel",
method_b="methodInFirstLevel", added=True) in diffs,
msg="It should recognize nested classes")
self.assertTrue(DiffMethod(file_name="ShadowTest.java", class_name="ShadowTest",
method_b="main", added=True) in diffs)
def tearDown(self):
shutil.rmtree(self.temp_dir)
|
mit
| -2,882,101,331,058,797,600 | 42.441417 | 186 | 0.580004 | false |
hostviralnetworks/nampy
|
nampy/test/__init__.py
|
1
|
1623
|
from __future__ import with_statement, absolute_import
import sys
from os import name as __name
available_tests = ['unit_tests']
del __name
from os.path import abspath as __abspath
from os.path import join as __join
from os.path import split as __split
from os.path import sep as __sep
nampy_directory = __abspath(__join(__split(__abspath(__file__))[0], ".."))
nampy_location = __abspath(__join(nampy_directory, ".."))
data_directory = nampy_directory + "/data/"
nampy_directory += '/core/'
humannet_filename = __join(data_directory, "HumanNet_v1_join_networkonly.txt")
recon_1_filename = __join(data_directory, "recon_1_nampy_v01")
del __abspath, __join, __split, __sep
def create_test_model(test_network_file = humannet_filename):
"""Returns a nampy model for testing.
test_network_file: a two-column network file to be read
"""
# from os import name as __name
from nampy.networkio import networkio
the_nampy_model = networkio.create_network_model_from_textfile('humannet', test_network_file, verbose = False)
return the_nampy_model
def create_test_suite():
"""create a unittest.TestSuite with available tests"""
from unittest import TestLoader, TestSuite
loader = TestLoader()
suite = TestSuite()
for test_name in available_tests:
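        # Import each available test module by name and add its tests to the suite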
exec("from . import " + test_name)
suite.addTests(loader.loadTestsFromModule(eval(test_name)))
return suite
suite = create_test_suite()
def test_all():
"""###running unit tests on nampy###"""
from unittest import TextTestRunner
TextTestRunner(verbosity=2).run(create_test_suite())
|
gpl-3.0
| 1,117,968,099,965,605,400 | 30.211538 | 114 | 0.688232 | false |
enovance/dci-control-server
|
tests/api/v1/test_identity.py
|
1
|
1451
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
def test_get_identity_admin(admin):
response = admin.get('/api/v1/identity')
assert response.status_code == 200
assert 'identity' in response.data
identity = response.data['identity']
assert identity['name'] == 'admin'
assert identity['team_name'] == 'admin'
assert identity['role_label'] == 'SUPER_ADMIN'
def test_get_identity_unauthorized(unauthorized):
response = unauthorized.get('/api/v1/identity')
assert response.status_code == 401
def test_get_identity_user(user):
response = user.get('/api/v1/identity')
assert response.status_code == 200
assert 'identity' in response.data
identity = response.data['identity']
assert identity['name'] == 'user'
assert identity['team_name'] == 'user'
assert identity['role_label'] == 'USER'
|
apache-2.0
| 7,780,515,505,378,716,000 | 33.547619 | 75 | 0.70572 | false |
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractSmoggytranslatesCom.py
|
1
|
1509
|
def extractSmoggytranslatesCom(item):
'''
Parser for 'smoggytranslates.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('i transmigrated into a stunning plaything!', 'i transmigrated into a stunning plaything!', 'translated'),
('taking my elder brothers as husbands', 'taking my elder brothers as husbands', 'translated'),
('ivory moonlight', 'ivory moonlight', 'translated'),
('The Men at Her Feet', 'The Men at Her Feet', 'translated'),
('anti-cheater strategies', 'anti-cheater strategies', 'translated'),
('the imperial doctor belongs to the princess', 'the imperial doctor belongs to the princess', 'translated'),
('kinky perfume system', 'kinky perfume system', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
bsd-3-clause
| -4,980,651,137,773,108,000 | 54.925926 | 136 | 0.505633 | false |
melodous/designate
|
designate/mdns/__init__.py
|
1
|
2093
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
cfg.CONF.register_group(cfg.OptGroup(
name='service:mdns', title="Configuration for mDNS Service"
))
OPTS = [
cfg.IntOpt('workers', default=None,
help='Number of mdns worker processes to spawn'),
cfg.StrOpt('host', default='0.0.0.0',
help='mDNS Bind Host'),
cfg.ListOpt('slave-nameserver-ips-and-ports', default=[],
help='Ips and ports of slave nameservers that are notified of '
'zone changes. The format of each item in the list is'
'"ipaddress:port"'),
cfg.IntOpt('notify-timeout', default=60,
help='The number of seconds to wait before the notify query '
'times out.'),
cfg.IntOpt('notify-retries', default=0,
help='The number of retries of a notify to a slave '
'nameserver. A notify-retries of 0 implies that on an '
'error after sending a NOTIFY, there would not be any '
'retries. A -ve number implies that NOTIFYs are not sent '
'at all'),
cfg.IntOpt('port', default=5354,
help='mDNS Port Number'),
cfg.IntOpt('tcp-backlog', default=100,
help='mDNS TCP Backlog'),
cfg.StrOpt('storage-driver', default='sqlalchemy',
help='The storage driver to use'),
]
cfg.CONF.register_opts(OPTS, group='service:mdns')
|
apache-2.0
| -5,668,967,250,996,036,000 | 42.604167 | 79 | 0.635452 | false |
jacquev6/RecursiveDocument
|
RecursiveDocument/tests/WrappingTestCase.py
|
1
|
5010
|
# coding: utf8
# Copyright 2013-2015 Vincent Jacques <[email protected]>
import unittest
import textwrap
from .. import Document, Section, Paragraph
class WrappingTestCase(unittest.TestCase):
__lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque facilisis nisi vel nibh luctus sit amet semper tellus gravida. Proin lorem libero, aliquet vitae suscipit ac, egestas sit amet velit. In justo nisi, porttitor vel fermentum id, feugiat eget eros. Nullam vulputate risus tempor odio suscipit sit amet ornare est rhoncus. Vestibulum malesuada mattis sollicitudin. Duis ac lectus ac neque semper euismod imperdiet nec eros. Ut ac odio libero. Morbi a diam quis libero volutpat euismod. Etiam gravida fringilla erat quis facilisis. Morbi venenatis malesuada dapibus. Phasellus libero dui, congue a tincidunt ut, cursus in risus. Ut sapien sapien, scelerisque at hendrerit sed, vestibulum a sem. Sed vitae odio vel est aliquam suscipit ut gravida quam. Morbi a faucibus ipsum. In eros orci, feugiat et scelerisque non, faucibus et eros."
__shortLorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque facilisis nisi vel nibh"
def setUp(self):
unittest.TestCase.setUp(self)
self.maxDiff = None
self.doc = Document()
def test_single_paragraph(self):
self.doc.add(Paragraph(self.__lorem))
self.assertEqual(
self.doc.format(),
textwrap.dedent(
# 70 chars ###########################################################
"""\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque
facilisis nisi vel nibh luctus sit amet semper tellus gravida. Proin
lorem libero, aliquet vitae suscipit ac, egestas sit amet velit. In
justo nisi, porttitor vel fermentum id, feugiat eget eros. Nullam
vulputate risus tempor odio suscipit sit amet ornare est rhoncus.
Vestibulum malesuada mattis sollicitudin. Duis ac lectus ac neque
semper euismod imperdiet nec eros. Ut ac odio libero. Morbi a diam
quis libero volutpat euismod. Etiam gravida fringilla erat quis
facilisis. Morbi venenatis malesuada dapibus. Phasellus libero dui,
congue a tincidunt ut, cursus in risus. Ut sapien sapien, scelerisque
at hendrerit sed, vestibulum a sem. Sed vitae odio vel est aliquam
suscipit ut gravida quam. Morbi a faucibus ipsum. In eros orci,
feugiat et scelerisque non, faucibus et eros."""
)
)
def test_paragraph_in_sub_section(self):
self.doc.add(Section("Section").add(Section("Sub-section").add(Paragraph(self.__lorem))))
self.assertEqual(
self.doc.format(),
textwrap.dedent(
# 70 chars ###########################################################
"""\
Section
Sub-section
Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Pellentesque facilisis nisi vel nibh luctus sit amet semper tellus
gravida. Proin lorem libero, aliquet vitae suscipit ac, egestas
sit amet velit. In justo nisi, porttitor vel fermentum id, feugiat
eget eros. Nullam vulputate risus tempor odio suscipit sit amet
ornare est rhoncus. Vestibulum malesuada mattis sollicitudin. Duis
ac lectus ac neque semper euismod imperdiet nec eros. Ut ac odio
libero. Morbi a diam quis libero volutpat euismod. Etiam gravida
fringilla erat quis facilisis. Morbi venenatis malesuada dapibus.
Phasellus libero dui, congue a tincidunt ut, cursus in risus. Ut
sapien sapien, scelerisque at hendrerit sed, vestibulum a sem. Sed
vitae odio vel est aliquam suscipit ut gravida quam. Morbi a
faucibus ipsum. In eros orci, feugiat et scelerisque non, faucibus
et eros."""
)
)
def test_long_section_titles(self):
self.doc.add(
Section("Section " + self.__shortLorem)
.add(
Section("Sub-section " + self.__shortLorem)
.add(Paragraph("Some text"))
)
)
self.assertEqual(
self.doc.format(),
textwrap.dedent(
# 70 chars ###########################################################
"""\
Section Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Pellentesque facilisis nisi vel nibh
Sub-section Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Pellentesque facilisis nisi vel nibh
Some text"""
)
)
|
mit
| 8,696,917,957,637,706,000 | 55.931818 | 866 | 0.603792 | false |
pathbreak/linode-cluster-toolkit
|
setup.py
|
1
|
3127
|
"""
A setuptools based setup module
Based on https://github.com/pypa/sampleproject/blob/master/setup.py
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='linode-cluster-toolkit',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.0',
description='A toolkit for provisioning, managing and orchestrating clusters on Linode cloud',
long_description=long_description,
# The project's main homepage.
url='http://github.com/pathbreak/linode-cluster-toolkit',
# Author details
author='Karthik Shiraly',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Systems Administration',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='linode cloud cluster',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'unittests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'requests',
'PyYAML',
'argparse',
'dpath',
'linode-api'
],
entry_points = {
'console_scripts': [
'linodetool=lct.linodetool.linodetool:main'
],
},
)
|
mit
| 2,185,480,559,804,743,700 | 31.237113 | 98 | 0.646626 | false |
lunixbochs/fs-uae-gles
|
launcher/fs_uae_launcher/fsui/wx/window.py
|
1
|
1687
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import wx
from ..common.element import Element
from .common import update_class
class Window(wx.Frame):
def __init__(self, parent=None, title=""):
wx.Frame.__init__(self, parent, -1, title)
#Element.init(self)
if parent:
wx_parent = parent.container
else:
wx_parent = None
self.container = wx.Panel(self)
self.container.get_window = self.get_window
#self._window = wx.Frame(wx_parent, -1, title)
#self._container = wx.Panel(self._window)
#self.on_create()
self.Bind(wx.EVT_SIZE, self.__resize_event)
self.Bind(wx.EVT_WINDOW_DESTROY, self.__destroy_event)
def get_window(self):
return self
def get_container(self):
return self.container
def show(self):
self.Show()
def destroy(self):
self.Destroy()
def set_title(self, title):
self.SetTitle(title)
def get_size(self):
return self.GetClientSize()
def set_size(self, size):
self.SetClientSize(size)
def on_create(self):
pass
def on_resize(self):
if self.layout:
#print("calling layout.set_size", self.get_size())
self.layout.set_size(self.get_size())
#self.layout.update()
def __resize_event(self, event):
self.on_resize()
event.Skip()
def on_destroy(self):
pass
def __destroy_event(self, event):
self.on_destroy()
update_class(Window)
|
gpl-2.0
| -4,518,472,023,826,819,000 | 23.560606 | 62 | 0.564315 | false |
hippo91/XVOF
|
xfv/src/cell/one_dimension_enriched_cell_hansbo.py
|
1
|
33139
|
# -*- coding: utf-8 -*-
"""
Implementing the Element1dEnriched class for Hansbo&Hansbo enrichment
"""
import os
from typing import Tuple
import numpy as np
from xfv.src.cell.one_dimension_cell import OneDimensionCell
from xfv.src.discontinuity.discontinuity import Discontinuity
from xfv.src.utilities.stress_invariants_calculation import compute_second_invariant
from xfv.src.fields.field import Field
# noinspection PyArgumentList
class OneDimensionHansboEnrichedCell(OneDimensionCell): # pylint: disable=too-many-public-methods
"""
A collection of 1d enriched elements. Treatment for Hansbo enrichment
"""
@staticmethod
def _compute_discontinuity_borders_velocity(epsilon: np.array,
u1g: np.array,
u1d: np.array,
u2g: np.array,
u2d: np.array) -> Tuple[np.array, np.array]:
"""
Compute the velocities of points at the discontinuity border
:param epsilon: relative position of the discontinuity inside the cell
:param u1g: classic velocity on left node (inside node)
:param u1d: additional dof velocity on left node
:param u2g: additional dof velocity on right node
:param u2d: classical velocity on right node (outside)
:return ug: velocity of the discontinuity left boundary
:return ud: velocity of the discontinuity right boundary
"""
ug = u2g * epsilon + u1g * (1. - epsilon) # pylint: disable=invalid-name
ud = u2d * epsilon + u1d * (1. - epsilon) # pylint: disable=invalid-name
return ug, ud
@classmethod
def compute_discontinuity_borders_velocity(cls, disc, node_velocity):
"""
Compute the velocities of points at the discontinuity border
:param disc: Discontinuity to be considered
:param node_velocity: array with nodes velocity
:return ug: velocity of the discontinuity left boundary
:return ud: velocity of the discontinuity right boundary
"""
epsilon = disc.position_in_ruptured_element
u1g = node_velocity[disc.mask_in_nodes]
u2d = node_velocity[disc.mask_out_nodes]
u2g = disc.enr_velocity_new[0]
u1d = disc.enr_velocity_new[1]
# ug, ud = cls._compute_discontinuity_borders_velocity(epsilon, u1g, u1d, u2g, u2d)
ug = u2g * epsilon + u1g * (1. - epsilon) # pylint: disable=invalid-name
ud = u2d * epsilon + u1d * (1. - epsilon) # pylint: disable=invalid-name
return ug, ud
def __init__(self, n_cells: int):
"""
Build the class OneDimensionHansboEnrichedCell
:param n_cells: total number of cells
"""
super(OneDimensionHansboEnrichedCell, self).__init__(n_cells)
#
print(self._fields_manager)
self._classical = np.ones(n_cells, dtype=np.bool, order='C')
# Cell parts geometry
self._left_part_size = Field(n_cells, np.zeros([n_cells]), np.zeros([n_cells]))
self._right_part_size = Field(n_cells, np.zeros([n_cells]), np.zeros([n_cells]))
# Additional dof: thermodynamics
self._enr_pressure = Field(n_cells, np.zeros([n_cells]), np.zeros([n_cells]))
self._enr_density = Field(n_cells, np.zeros([n_cells]), np.zeros([n_cells]))
self._enr_energy = Field(n_cells, np.zeros([n_cells]), np.zeros([n_cells]))
self._enr_artificial_viscosity = Field(n_cells, np.zeros([n_cells]), np.zeros([n_cells]))
self._enr_sound_velocity = Field(n_cells, np.zeros([n_cells]), np.zeros([n_cells]))
# Additional dof : elasticity / plasticity
self._enr_shear_modulus = Field(n_cells, np.zeros([n_cells]), np.zeros([n_cells]))
self._enr_yield_stress = Field(n_cells, np.zeros([n_cells]), np.zeros([n_cells]))
self._enr_stress = np.zeros([n_cells, 3])
self._enr_deviatoric_stress_current = np.zeros([n_cells, 3])
self._enr_deviatoric_stress_new = np.zeros([n_cells, 3])
self._enr_deviatoric_strain_rate = np.zeros([n_cells, 3])
self._enr_equivalent_plastic_strain_rate = np.zeros([n_cells])
self._enr_plastic_strain_rate = np.zeros([n_cells, 3])
# Indicator right part of enr cell is plastic
self.plastic_enr_cells = np.zeros([n_cells], dtype=bool)
def initialize_additional_cell_dof(self, disc: Discontinuity):
"""
Values to initialize the right part fields when discontinuity disc is created
:param disc: the current discontinuity
"""
enr_cell = disc.get_ruptured_cell_id
# Initialization of the current field value
self.enr_density.current_value[enr_cell] = \
np.copy(self.density.current_value[enr_cell])
self.enr_pressure.current_value[enr_cell] = \
np.copy(self.pressure.current_value[enr_cell])
self.enr_sound_velocity.current_value[enr_cell] = \
np.copy(self.sound_velocity.current_value[enr_cell])
self.enr_energy.current_value[enr_cell] = \
np.copy(self.energy.current_value[enr_cell])
self.enr_artificial_viscosity.current_value[enr_cell] = \
np.copy(self.pseudo.current_value[enr_cell])
self._enr_deviatoric_stress_current[enr_cell] = \
np.copy(self._deviatoric_stress_current[enr_cell])
self.enr_shear_modulus.current_value[enr_cell] = \
np.copy(self.shear_modulus.current_value[enr_cell])
self.enr_yield_stress.current_value[enr_cell] = \
np.copy(self.yield_stress.current_value[enr_cell])
# Initialization of new value field
# (so that the current value is not erased if the field is not updated in current step)
self.enr_density.new_value[enr_cell] = np.copy(self.density.new_value[enr_cell])
self.enr_pressure.new_value[enr_cell] = \
np.copy(self.pressure.new_value[enr_cell])
self.enr_sound_velocity.new_value[enr_cell] = \
np.copy(self.sound_velocity.new_value[enr_cell])
self.enr_energy.new_value[enr_cell] = np.copy(self.energy.new_value[enr_cell])
self.enr_artificial_viscosity.new_value[enr_cell] = \
np.copy(self.pseudo.new_value[enr_cell])
self.enr_shear_modulus.new_value[enr_cell] = \
np.copy(self.shear_modulus.new_value[enr_cell])
self.enr_yield_stress.new_value[enr_cell] = \
np.copy(self.yield_stress.new_value[enr_cell])
self._enr_deviatoric_stress_new[enr_cell] = \
np.copy(self._deviatoric_stress_new[enr_cell])
# Other quantities initialization
self._enr_deviatoric_strain_rate[enr_cell] = \
np.copy(self._deviatoric_strain_rate[enr_cell])
self._enr_stress[enr_cell] = np.copy(self._stress[enr_cell])
self._enr_equivalent_plastic_strain_rate[enr_cell] = \
np.copy(self._equivalent_plastic_strain_rate[enr_cell])
def reconstruct_enriched_hydro_field(self, classical_field: Field, enriched_field_name: str):
"""
True field reconstruction from the classical and enriched fields
:param classical_field: classical field
:param enriched_field_name: name of the enriched field
:return: complete field
:rtype: np.array
"""
# To build the coordinates of cell field, the cracked cells of discontinuities must be
# sorted by cell_id in order to manage shifts
enriched_field = getattr(self, enriched_field_name)
insertion_field = np.zeros([len(Discontinuity.discontinuity_list()), 2])
insertion_field[:, 0] = np.arange(self.number_of_cells)[self.enriched] # filtered indexes
insertion_field[:, 1] = enriched_field.current_value[self.enriched] # filtered field
res = np.copy(classical_field.current_value)
offset = 1
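        # Insert each enriched (right-part) value just after its ruptured cell; offset accounts for prior insertions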
for indice_cell_rompue in insertion_field[:, 0]:
res = np.insert(res, int(indice_cell_rompue) + offset, insertion_field[offset - 1, 1])
offset += 1
return res
def reconstruct_enriched_elasto_field(self, classical_field: np.array,
enriched_field_name: str):
"""
True field reconstruction from the classical and enriched fields
:param classical_field: classical field
:param enriched_field_name: name of the enriched field
:return: complete field
:rtype: np.array
"""
# To build the coordinates of cell field, the cracked cells of discontinuities must be
# sorted by cell_id in order to manage shifts
enriched_field = getattr(self, enriched_field_name)
insertion_field = np.zeros([len(Discontinuity.discontinuity_list()), 2])
insertion_field[:, 0] = np.arange(self.number_of_cells)[self.enriched] # filtered indexes
insertion_field[:, 1] = enriched_field[self.enriched, 0] # filtered field_xx
res = np.copy(classical_field[:, 0])
offset = 1
for indice_cell_rompue in insertion_field[:, 0]:
res = np.insert(res, int(indice_cell_rompue) + offset, insertion_field[offset - 1, 1])
offset += 1
return res
@property
def left_part_size(self):
"""
Accessor on the size of the left part of cracked cell field
"""
return self._left_part_size
@property
def right_part_size(self):
"""
Accessor on the size of the right part of cracked cell field
"""
return self._right_part_size
@property
def enr_pressure(self):
"""
Accessor on the right part of cracked cell pressure field
"""
return self._enr_pressure
@property
def enr_density(self):
"""
Accessor on the right part of cracked cell density field
"""
return self._enr_density
@property
def enr_sound_velocity(self):
"""
Accessor on the right part of cracked cell sound speed field
"""
return self._enr_sound_velocity
@property
def enr_energy(self):
"""
Accessor on the right part of cracked cell internal energy field
"""
return self._enr_energy
@property
def enr_artificial_viscosity(self):
"""
Accessor on the right part of cracked cell artificial viscosity field
"""
return self._enr_artificial_viscosity
@property
def enr_stress(self):
"""
Accessor on the right part of cracked cell stress at time t
"""
return self._enr_stress
@property
def enr_stress_xx(self):
"""
Accessor on the right part of cracked cell stress at time t
"""
return self._enr_stress[:, 0]
@property
def enr_deviatoric_stress_current(self):
"""
Accessor on the right part of cracked cell deviatoric stress at time t
"""
return self._enr_deviatoric_stress_current
@property
def enr_deviatoric_stress_new(self):
"""
Accessor on the right part of cracked cell deviatoric stress at time t+dt
"""
return self._enr_deviatoric_stress_new
@property
def enr_deviatoric_strain_rate(self):
"""
Accessor on the right part of cracked cell deviatoric strain rate at time t
"""
return self._enr_deviatoric_strain_rate
@property
def enr_shear_modulus(self):
"""
Accessor on the right part of cracked cell shear modulus field
"""
return self._enr_shear_modulus
@property
def enr_yield_stress(self):
"""
Accessor on the right part of cracked cell yield stress field
"""
return self._enr_yield_stress
@property
def enr_equivalent_plastic_strain_rate(self):
"""
Accessor on the right part of cracked cell equivalent plastic strain rate at time t
"""
return self._enr_equivalent_plastic_strain_rate
@property
def enr_plastic_strain_rate(self):
"""
Accessor on the right part of cracked cell plastic strain rate tensor at time t
"""
return self._enr_plastic_strain_rate
@property
def pressure_field(self):
"""
:return: pressure field
:rtype: np.array
"""
return self.reconstruct_enriched_hydro_field(self.pressure, "enr_pressure")
@property
def density_field(self):
"""
:return: density field
:rtype: np.array
"""
return self.reconstruct_enriched_hydro_field(self.density, "enr_density")
@property
def energy_field(self):
"""
:return: energy field
:rtype: np.array
"""
return self.reconstruct_enriched_hydro_field(self.energy, "enr_energy")
@property
def artificial_viscosity_field(self):
"""
:return: artificial viscosity field
:rtype: np.array
"""
return self.reconstruct_enriched_hydro_field(self.pseudo,
"enr_artificial_viscosity")
@property
def stress_xx_field(self):
"""
:return: sigma_xx field
:rtype: np.array
"""
return self.reconstruct_enriched_elasto_field(self.stress, "enr_stress")
@property
def deviatoric_stress_field(self):
"""
:return: (deviateur de sigma)_xx field
:rtype: np.array
"""
return self.reconstruct_enriched_elasto_field(self.deviatoric_stress_current,
"enr_deviatoric_stress_current")
@property
def classical(self):
"""
        :return: a mask where True indicates a classical cell
"""
return self._classical
@property
def enriched(self):
"""
        :return: a mask where True indicates an enriched cell
"""
return ~self.classical
def __str__(self):
message = "<--ENRICHED CELLS COLLECTION-->" + os.linesep
message += "Classical elements are:"
message += str(self.classical) + os.linesep
message += "Enriched elements are:"
message += str(self.enriched)
return message
def print_infos(self):
"""
        Print information about the cells
        TO BE REWRITTEN PROPERLY; THE NOTATIONS HAVE CHANGED
"""
message = "{}\n".format(self.__class__)
for disc in Discontinuity.discontinuity_list():
cell_i = disc.get_ruptured_cell_id
message += "---- Discontinuity {:} ----".format(disc.label)
# Density
            message += "==> classical density at t = {}\n". \
                format(self.density.current_value[cell_i])
            message += "==> enriched density at t = {}\n". \
                format(self.enr_density.current_value[cell_i])
            message += "==> classical density at t+dt = {}\n". \
                format(self.density.new_left_value[cell_i])
            message += "==> enriched density at t+dt = {}\n". \
                format(self.enr_density.new_value[cell_i])
            # Size of each part of the cracked cell
            message += "==> left size at t = {}\n". \
                format(disc.left_size.current_value)
            message += "==> right size at t = {}\n". \
                format(disc.right_size.current_value)
            message += "==> left size at t+dt = {}\n". \
                format(disc.left_size.new_value)
            message += "==> right size at t+dt = {}\n". \
                format(disc.right_size.new_value)
            # Pressure
            message += "==> left pressure at t = {}\n". \
                format(self.pressure.current_value[cell_i])
            message += "==> right pressure at t = {}\n". \
                format(self.enr_pressure.current_value[cell_i])
            message += "==> left pressure at t+dt = {}\n". \
                format(self.pressure.new_value[cell_i])
            message += "==> right pressure at t+dt = {}\n". \
                format(self.enr_pressure.new_value[cell_i])
            # Sound speed
            message += "==> left sound speed at t = {}\n". \
                format(self.sound_velocity.current_value[cell_i])
            message += "==> right sound speed at t = {}\n". \
                format(self.enr_sound_velocity.current_value[cell_i])
            message += "==> left sound speed at t+dt = {}\n". \
                format(self.sound_velocity.new_value[cell_i])
            message += "==> right sound speed at t+dt = {}\n". \
                format(self.enr_sound_velocity.new_value[cell_i])
            # Energy
            message += "==> left energy at t = {}\n". \
                format(self.energy.current_value[cell_i])
            message += "==> right energy at t = {}\n". \
                format(self.enr_energy.current_value[cell_i])
            message += "==> left energy at t+dt = {}\n". \
                format(self.energy.new_value[cell_i])
            message += "==> right energy at t+dt = {}\n". \
                format(self.enr_energy.new_value[cell_i])
            # Pseudo viscosity
            message += "==> left artificial viscosity = {}\n". \
                format(self.pseudo.current_value[cell_i])
            message += "==> right artificial viscosity = {}\n". \
                format(self.enr_artificial_viscosity.current_value[cell_i])
print(message)
def compute_enriched_elements_new_pressure(self, delta_t):
"""
Compute pressure, internal energy and sound velocity in left and right parts of
the enriched elements
:param delta_t: time step
"""
target_model = self.data.material_target.constitutive_model
# Fracture cannot occur on the projectile => check only the target model to know if
# elasticity or plasticity is activated
elasticity_activated = (target_model.elasticity_model is not None)
plasticity_activated = (target_model.plasticity_model is not None)
mask = self.enriched
if elasticity_activated or plasticity_activated:
self.enr_energy.current_value[mask] += \
OneDimensionCell.add_elastic_energy_method(
delta_t, self.enr_density.current_value[mask],
self.enr_density.new_value[mask],
self.enr_deviatoric_stress_current[mask],
self.enr_deviatoric_stress_new[mask],
self.enr_deviatoric_strain_rate[mask])
# Initialize local parameters :
density_right = self.enr_density.current_value[mask]
density_right_new = self.enr_density.new_value[mask]
pressure_right = self.enr_pressure.current_value[mask]
pressure_right_new = self.enr_pressure.new_value[mask]
energy_right = self.enr_energy.current_value[mask]
energy_right_new = self.enr_energy.new_value[mask]
pseudo_right = self.enr_artificial_viscosity.current_value[mask]
cson_right_new = self.enr_sound_velocity.new_value[mask]
# Call EOS :
energy_new_right_value, pressure_new_right_value, sound_velocity_new_right_value = \
OneDimensionCell.apply_equation_of_state(
self, self._target_eos,
density_right, density_right_new, pressure_right,
pressure_right_new, energy_right, energy_right_new,
pseudo_right, cson_right_new)
# Save results :
self.enr_pressure.new_value[mask] = pressure_new_right_value
self.enr_energy.new_value[mask] = energy_new_right_value
self.enr_sound_velocity.new_value[mask] = sound_velocity_new_right_value
def compute_enriched_elements_new_part_size(self, time_step, node_velocity):
"""
Compute the new size of each ruptured element part (left size and right size)
:param time_step: time step
:param node_velocity: array, node velocities
"""
for disc in Discontinuity.discontinuity_list():
u_left, u_right = (
OneDimensionHansboEnrichedCell.compute_discontinuity_borders_velocity(
disc, node_velocity))
u_node_left = node_velocity[disc.mask_in_nodes]
u_node_right = node_velocity[disc.mask_out_nodes]
self.left_part_size.new_value[disc.get_ruptured_cell_id] = (
self.left_part_size.current_value[disc.get_ruptured_cell_id]
+ (u_left - u_node_left) * time_step)
self.right_part_size.new_value[disc.get_ruptured_cell_id] = (
self.right_part_size.current_value[disc.get_ruptured_cell_id]
+ (u_node_right - u_right) * time_step)
def compute_enriched_elements_new_density(self):
"""
Compute the new densities for left and right parts of the ruptured element
(from mass conservation equation)
"""
mask = self.enriched
density_left = self.density.current_value[mask]
density_right = self.enr_density.current_value[mask]
size_left_current = self.left_part_size.current_value[mask]
size_left_new = self.left_part_size.new_value[mask]
size_right_current = self.right_part_size.current_value[mask]
size_right_new = self.right_part_size.new_value[mask]
self.density.new_value[mask] = density_left * size_left_current / size_left_new
self.enr_density.new_value[mask] = (density_right *
size_right_current / size_right_new)
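        # Worked example (illustrative, not from the original code): with
        # density_left = 8930.0 and a left part shrinking from 1.0e-3 to
        # 0.8e-3, the new density is 8930.0 * 1.0e-3 / 0.8e-3 = 11162.5,
        # i.e. the mass rho * size of each part is conserved.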
def compute_enriched_elements_new_pseudo(self, delta_t):
"""
Compute the new artificial viscosity of the enriched_cells
:param delta_t: time_step
"""
mask = self.enriched
if not mask.any():
return
# Left part :
density_left = self.density.current_value[mask]
density_left_new = self.density.new_value[mask]
sound_velocity_left = self.sound_velocity.current_value[mask]
size_left = self.left_part_size.new_value[mask]
pseudo_left = OneDimensionCell.compute_pseudo(delta_t, density_left, density_left_new,
size_left, sound_velocity_left,
self.data.numeric.a_pseudo,
self.data.numeric.b_pseudo)
# Right part :
density_right = self.enr_density.current_value[mask]
density_right_new = self.enr_density.new_value[mask]
sound_velocity_right = self.enr_sound_velocity.current_value[mask]
size_right = self.right_part_size.new_value[mask]
pseudo_right = OneDimensionCell.compute_pseudo(delta_t, density_right, density_right_new,
size_right, sound_velocity_right,
self.data.numeric.a_pseudo,
self.data.numeric.b_pseudo)
self.pseudo.new_value[mask] = pseudo_left
self.enr_artificial_viscosity.new_value[mask] = pseudo_right
def compute_enriched_stress_tensor(self):
"""
Compute the complete enriched stress tensor : sigma = -(p+q) I + S
"""
mask = self.enriched
# Right part
self.enr_stress[mask, 0] = \
self.enr_deviatoric_stress_new[mask, 0] - \
(self.enr_pressure.new_value[mask] +
self.enr_artificial_viscosity.new_value[mask])
self.enr_stress[mask, 1] = \
self.enr_deviatoric_stress_new[mask, 1] - \
(self.enr_pressure.new_value[mask] +
self.enr_artificial_viscosity.new_value[mask])
self.enr_stress[mask, 2] = \
self.enr_deviatoric_stress_new[mask, 2] - \
(self.enr_pressure.new_value[mask] +
self.enr_artificial_viscosity.new_value[mask])
def compute_enriched_deviatoric_strain_rate(self, dt: float, # pylint: disable=invalid-name
node_coord_new: np.array,
node_velocity_new: np.array) -> None:
"""
Compute the deviatoric strain rate for enriched cells
:param dt: time step
:param node_coord_new: array, new nodes coordinates
:param node_velocity_new: array, new nodes velocity
"""
disc_list = Discontinuity.discontinuity_list()
if not disc_list:
return
mask_nodes_in = Discontinuity.in_nodes.flatten()
mask_nodes_out = Discontinuity.out_nodes.flatten()
mask_cells_arr = Discontinuity.ruptured_cell_id.flatten()
eps_arr = Discontinuity.discontinuity_position.flatten()
u2g_arr = Discontinuity.enr_velocity_new[:, 0].flatten()
u1d_arr = Discontinuity.enr_velocity_new[:, 1].flatten()
u_noeuds_new_in_arr = node_velocity_new[mask_nodes_in]
u_noeuds_new_out_arr = node_velocity_new[mask_nodes_out]
x_noeuds_new_in_arr = node_coord_new[mask_nodes_in]
x_noeuds_new_out_arr = node_coord_new[mask_nodes_out]
xg_new_arr = np.concatenate((
x_noeuds_new_in_arr,
x_noeuds_new_in_arr + self.left_part_size.new_value[mask_cells_arr][np.newaxis].T),
axis=1)
xd_new_arr = np.concatenate((
x_noeuds_new_out_arr - self.right_part_size.new_value[mask_cells_arr][np.newaxis].T,
x_noeuds_new_out_arr), axis=1)
x_new_arr = np.concatenate((xg_new_arr, xd_new_arr))
u_discg_new_arr, u_discd_new_arr = self._compute_discontinuity_borders_velocity(
eps_arr, u_noeuds_new_in_arr[:, 0], u1d_arr, u2g_arr, u_noeuds_new_out_arr[:, 0])
ug_new_arr = np.concatenate((u_noeuds_new_in_arr, u_discg_new_arr[np.newaxis].T), axis=1)
ud_new_arr = np.concatenate((u_discd_new_arr[np.newaxis].T, u_noeuds_new_out_arr), axis=1)
u_new_arr = np.concatenate((ug_new_arr, ud_new_arr))
deviator_left, deviator_right = np.split(
OneDimensionCell.general_method_deviator_strain_rate(dt, x_new_arr, u_new_arr), 2)
self._deviatoric_strain_rate[mask_cells_arr] = deviator_left
self._enr_deviatoric_strain_rate[mask_cells_arr] = deviator_right
def compute_enriched_deviatoric_stress_tensor(self, node_coord_new, node_velocity_new,
delta_t):
"""
Compute the deviatoric part of the stress tensor
:param node_coord_new: array, new nodes coordinates
:param node_velocity_new: array, new nodes velocity
:param delta_t: float, time step
"""
self.compute_enriched_deviatoric_strain_rate(delta_t, node_coord_new, node_velocity_new)
        # Compute rotation rate tensor: W = 0 in 1D
# Left part
mask = self.enriched
if not mask.any():
return
G = self.shear_modulus.new_value[mask] # pylint: disable=invalid-name
self._deviatoric_stress_new[mask, 0] = (
self._deviatoric_stress_current[mask, 0] +
2. * G * self._deviatoric_strain_rate[mask, 0] * delta_t)
self._deviatoric_stress_new[mask, 1] = (
self._deviatoric_stress_current[mask, 1] +
2. * G * self._deviatoric_strain_rate[mask, 1] * delta_t)
self._deviatoric_stress_new[mask, 2] = (
self._deviatoric_stress_current[mask, 2] +
2. * G * self._deviatoric_strain_rate[mask, 2] * delta_t)
# Right part
Gd = self.enr_shear_modulus.new_value[mask] # pylint: disable=invalid-name
self._enr_deviatoric_stress_new[mask, 0] = \
self.enr_deviatoric_stress_current[mask, 0] + \
2. * Gd * self.enr_deviatoric_strain_rate[mask, 0] * delta_t
self._enr_deviatoric_stress_new[mask, 1] = \
self.enr_deviatoric_stress_current[mask, 1] + \
2. * Gd * self.enr_deviatoric_strain_rate[mask, 1] * delta_t
self._enr_deviatoric_stress_new[mask, 2] = \
self.enr_deviatoric_stress_current[mask, 2] + \
2. * Gd * self.enr_deviatoric_strain_rate[mask, 2] * delta_t
def compute_enriched_shear_modulus(self, shear_modulus_model):
"""
Compute the shear modulus for ruptured cell
:param shear_modulus_model: model to compute the shear modulus
"""
mask = self.enriched
if not mask.any():
return
self.enr_shear_modulus.new_value[mask] = \
shear_modulus_model.compute(self.enr_density.new_value[mask])
def apply_plasticity_enr(self, mask_mesh, delta_t):
"""
        Apply the plasticity treatment if the criterion is activated:
        - compute the yield stress
        - test the plasticity criterion
        - compute the plastic strain rate for plastic cells
"""
mask = np.logical_and(self.plastic_enr_cells, mask_mesh)
if not mask.any():
return
        # Right part: is the right part of the enriched cells plastic? => self.plastic_enr_cells
invariant_j2_el = np.sqrt(compute_second_invariant(self.enr_deviatoric_stress_new[mask]))
yield_stress = self.enr_yield_stress.new_value[mask]
shear_modulus = self.enr_shear_modulus.new_value[mask]
radial_return = self._compute_radial_return(invariant_j2_el, yield_stress)
dev_stress = self.enr_deviatoric_stress_new[mask]
self._plastic_strain_rate[mask] = \
self._compute_plastic_strain_rate_tensor(radial_return, shear_modulus,
delta_t, dev_stress)
self._equivalent_plastic_strain_rate[mask] = \
self._compute_equivalent_plastic_strain_rate(invariant_j2_el, shear_modulus,
yield_stress, delta_t)
self._deviatoric_stress_new[mask] *= radial_return[np.newaxis].T
def compute_enriched_yield_stress(self, yield_stress_model):
"""
Compute the yield stress for ruptured cells
:param yield_stress_model: model to compute the yield stress
"""
mask = self.enriched
self.enr_yield_stress.new_value[mask] = \
yield_stress_model.compute(self.enr_density.new_value[mask])
def compute_enriched_elements_new_time_step(self):
"""
Compute the new local time step.
The calculation is equivalent to a remeshing time step and thus underestimates the
time step for the enriched cells
"""
cfl = self.data.numeric.cfl
cfl_pseudo = self.data.numeric.cfl_pseudo
mask = self.enriched
# Left part
density_left = self.density.current_value[mask]
density_left_new = self.density.new_value[mask]
sound_velocity_left_new = self.sound_velocity.new_value[mask]
pseudo_left = self.pseudo.current_value[mask]
pseudo_left_new = self.pseudo.new_value[mask]
dt_g = OneDimensionCell.compute_time_step(cfl, cfl_pseudo, density_left, density_left_new,
self.left_part_size.new_value[mask],
sound_velocity_left_new,
pseudo_left, pseudo_left_new)
# Right part
density_right = self.enr_density.current_value[mask]
density_right_new = self.enr_density.new_value[mask]
sound_velocity_right_new = self.enr_sound_velocity.new_value[mask]
pseudo_right = self.enr_artificial_viscosity.current_value[mask]
pseudo_right_new = self.enr_artificial_viscosity.new_value[mask]
dt_d = OneDimensionCell.compute_time_step(cfl, cfl_pseudo, density_right, density_right_new,
self.right_part_size.new_value[mask],
sound_velocity_right_new,
pseudo_right, pseudo_right_new)
if mask.any():
self._dt[mask] = np.min(np.array([dt_g, dt_d]), axis=0)
def cell_enr_increment(self):
"""
Increment the enriched cell variables
"""
# Thermodynamics
self._enr_density.increment_values()
self._enr_pressure.increment_values()
self._enr_energy.increment_values()
self._enr_artificial_viscosity.increment_values()
self._enr_sound_velocity.increment_values()
# Kinematics
self._left_part_size.increment_values()
self._right_part_size.increment_values()
# Elasticity
self._enr_deviatoric_stress_current[:] = self._enr_deviatoric_stress_new[:]
|
gpl-3.0
| 287,237,958,040,930,720 | 43.425503 | 100 | 0.592168 | false |
sahid/warm
|
setup.py
|
1
|
1900
|
# Copyright 2013 Cloudwatt
#
# Author: Sahid Orentino Ferdjaoui <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import setuptools
execfile("warm/version.py")
setuptools.setup(
name="warm",
version=__version__,
author="Sahid Orentino Ferdjaoui (Cloudwatt)",
author_email="[email protected]",
description="Deploy a simple OpenStack environment from template",
long_description=(open(os.path.join(os.getcwd(), "README")).read()),
license="Apache License 2.0",
keywords=["openstack", "cloudwatt", "deploy", "cloud"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Environment :: OpenStack",
],
url="https://github.com/sahid/warm.git",
packages=["warm", "warm.components"],
install_requires=[
"requests>=1.1,<1.2.3",
"six>=1.4.1",
"Babel==1.3",
"distribute",
"pyyaml",
"python-openstackclient==0.2.2",
"python-neutronclient",
],
entry_points="""
[console_scripts]
warm = warm:main""",
)
|
apache-2.0
| -9,055,197,856,144,422,000 | 32.928571 | 77 | 0.641053 | false |
praw-dev/prawcore
|
setup.py
|
1
|
1794
|
"""prawcore setup.py."""
import re
from codecs import open
from os import path
from setuptools import setup
PACKAGE_NAME = "prawcore"
HERE = path.abspath(path.dirname(__file__))
with open(path.join(HERE, "README.rst"), encoding="utf-8") as fp:
README = fp.read()
with open(path.join(HERE, PACKAGE_NAME, "const.py"), encoding="utf-8") as fp:
VERSION = re.search('__version__ = "([^"]+)"', fp.read()).group(1)
extras = {
"ci": ["coveralls"],
"lint": ["black", "flake8", "pre-commit", "pydocstyle", "flynt"],
"test": [
"betamax >=0.8, <0.9",
"betamax_matchers >=0.4.0, <0.5",
"betamax-serializers >=0.2.0, <0.3",
"mock >=0.8",
"pytest",
"testfixtures >4.13.2, <7",
],
}
extras["dev"] = extras["lint"] + extras["test"]
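# Usage note (illustrative, not part of the upstream file): with extras_require
# defined this way, the optional dependency groups install via e.g.
# ``pip install prawcore[dev]`` or, from a checkout, ``pip install .[lint,test]``.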
setup(
name=PACKAGE_NAME,
author="Bryce Boe",
author_email="[email protected]",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
description="Low-level communication layer for PRAW 4+.",
extras_require=extras,
install_requires=["requests >=2.6.0, <3.0"],
python_requires="~=3.6",
keywords="praw reddit api",
license="Simplified BSD License",
long_description=README,
packages=[PACKAGE_NAME],
url="https://github.com/praw-dev/prawcore",
version=VERSION,
)
|
bsd-2-clause
| -4,833,178,104,992,566,000 | 30.473684 | 77 | 0.591416 | false |
tensorflow/graphics
|
tensorflow_graphics/geometry/representation/mesh/tests/mesh_test_utils.py
|
1
|
1686
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper routines for mesh unit tests.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def create_single_triangle_mesh():
r"""Creates a single-triangle mesh, in the z=0 plane and facing +z.
(0,1) 2
|\
| \
| \
(0,0) 0---1 (1,0)
Returns:
vertices: A [3, 3] float array
faces: A [1, 3] int array
"""
vertices = np.array(
((0, 0, 0), (1, 0, 0), (0, 1, 0)), dtype=np.float32)
faces = np.array(((0, 1, 2),), dtype=np.int32)
return vertices, faces
def create_square_triangle_mesh():
  r"""Creates a square mesh, in the z=0 plane and facing +z.
# (0,1) 2---3 (1,1)
# |\ /|
# | 4 |
# |/ \|
# (0,0) 0---1 (1,0)
Returns:
vertices: A [5, 3] float array
faces: A [4, 3] int array
"""
vertices = np.array(
((0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0.5, 0.5, 0)),
dtype=np.float32)
faces = np.array(
((0, 1, 4), (1, 3, 4), (3, 2, 4), (2, 0, 4)), dtype=np.int32)
return vertices, faces
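# Minimal usage sketch (added for illustration; relies only on the numpy import
# above):
#   verts, faces = create_square_triangle_mesh()
#   assert verts.shape == (5, 3) and faces.shape == (4, 3)
#   assert np.all(verts[:, 2] == 0.0)  # both helpers build meshes in the z=0 plane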
|
apache-2.0
| -2,725,058,304,536,747,500 | 26.193548 | 74 | 0.603796 | false |
dropseedlabs/configyaml
|
configyaml/config/list.py
|
1
|
2311
|
import yaml
from .base import AbstractNode
class ListNode(AbstractNode):
_list_item_class = None
def __init__(self, *args, **kwargs):
self._type = list
self._children = [] # so we can append, etc
if not hasattr(self, '_min_items_required'):
# by default, don't have to have anything
# set to 1 in custom __init__ to require at least 1 item
self._min_items_required = 0
if not self._list_item_class:
raise AttributeError('_list_item_class must be defined in subclasses of ListNode')
super(ListNode, self).__init__(*args, **kwargs)
def _validate_value(self):
if len(self._value) < self._min_items_required:
self._add_error(
title='Minimum items requirement not met',
description='Must have at least {} item(s)'.format(self._min_items_required)
)
for index, item in enumerate(self._value):
field_class = self._list_item_class
field = field_class(
value=item,
value_node=self._find_node_for_list_index(index),
context=self._context,
variables=self._variables,
parent=self,
key=index
)
self._children.append(field)
def _find_node_for_list_index(self, index):
if not self._value_node:
return None
if not isinstance(self._value_node, yaml.nodes.SequenceNode):
# the original node was not a list
# - could have been a variable string
#
# return the original node so that the yaml text can place the error there
return self._value_node
return self._value_node.value[index]
def __getitem__(self, key):
return self._children[key]
def __len__(self):
return len(self._children)
def _as_dict(self, redact=False):
if redact and self._should_redact():
return self._as_redacted_dict()
d = {
'items': [x._as_dict(redact=redact) for x in self._children],
}
if self._errors:
d['errors'] = [x.as_dict() for x in self._errors]
d.update(self._as_dict_to_inject(redact=redact))
return d
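# Illustrative subclass sketch (not part of the original module): a concrete
# list node only needs to point ``_list_item_class`` at the node class used for
# each item; ``HostnameNode`` below is a hypothetical scalar node type.
#   class HostnamesNode(ListNode):
#       _list_item_class = HostnameNode  # hypothetical item node class
#       def __init__(self, *args, **kwargs):
#           self._min_items_required = 1  # require at least one item
#           super(HostnamesNode, self).__init__(*args, **kwargs)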
|
mit
| 8,732,987,478,376,040,000 | 30.22973 | 94 | 0.555604 | false |
kgorman/WMG_speed
|
raw/process_raw_data.py
|
1
|
1733
|
#!/bin/python
#
# script to pre-process sign data into normalized form
# very specific to the file format the sign generates
# 2015 kg
#
import os
import csv
import datetime
import sys
dir = sys.argv[1]
csv_output_filename = "all_data.csv"
csv_write_file = open(csv_output_filename, 'ab')
def dow(date):
days=["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"]
dayNumber=date.weekday()
return days[dayNumber]
def makedate(file_name, time_stamp):
''' makes a date string out of the filename based on convention '''
if ':' in time_stamp:
date_string = file_name.split('.')[0].split('DS')[1]+' '+time_stamp
date_object = datetime.datetime.strptime(date_string, '%y%m%d %H:%M')
return date_object
else:
return None
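# Example of the naming convention handled above (illustrative): a file named
# 'DS150321.csv' with a row timestamp '14:05' yields
# datetime.datetime(2015, 3, 21, 14, 5); rows whose first column contains no
# ':' (e.g. header rows) fall through and return None.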
for root, dirs, files in os.walk(dir):
for file in files:
if file.endswith(".csv"):
f = open(os.path.join(root, file), 'r')
csv_file = csv.reader(f, delimiter=',', quotechar='|')
csv_output_file = csv.writer(csv_write_file, delimiter=',', quotechar='|')
for row in csv_file:
thedate = makedate(file, row[0])
# lets make a new csv with the correct stuff
if thedate:
newrow = []
newrow.append(str(thedate))
newrow.append(str(thedate.weekday()))
newrow.append(str(dow(thedate)))
newrow.append(str(thedate.strftime("%H")))
newrow.append(str(dir))
for i in range(1,5):
newrow.append(row[i])
csv_output_file.writerow(newrow)
f.close()
|
mit
| -8,885,233,380,556,486,000 | 29.403509 | 86 | 0.5603 | false |
jobiols/odoo-argentina
|
l10n_ar_partner/sale_config.py
|
1
|
1544
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api
from openerp.tools.safe_eval import safe_eval
class SaleConfiguration(models.TransientModel):
_inherit = 'sale.config.settings'
group_multiple_id_numbers = fields.Boolean(
"Allow Multiple Id Numbers on Partners",
        help="If you allow multiple Id Numbers, then a new tab for 'Id "
             "Numbers' will be added on the partner form view",
implied_group='l10n_ar_partner.group_multiple_id_numbers',
)
unique_id_numbers = fields.Boolean(
"Restrict Id Numbers to be Unique",
        help="If you set it to True, then we will check that partner Id Numbers "
             "(e.g. cuit, dni, etc.) are unique. The same number for partners in a "
             "child/parent relation is still allowed",
)
@api.multi
def get_default_unique_id_numbers(self):
unique_id_numbers = self.env['ir.config_parameter'].get_param(
"l10n_ar_partner.unique_id_numbers", 'False')
return {
'unique_id_numbers': safe_eval(unique_id_numbers),
}
@api.multi
def set_default_unique_id_numbers(self):
for record in self:
self.env['ir.config_parameter'].set_param(
"l10n_ar_partner.unique_id_numbers", record.unique_id_numbers)
|
agpl-3.0
| 7,941,889,687,674,843,000 | 39.631579 | 78 | 0.582254 | false |
mgstigler/cs3240-s15-team19
|
secure_witness/forms.py
|
1
|
2327
|
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.auth.forms import UserCreationForm
from django.db.models import Q
from secure_witness.models import UserProfile, Report
from django.forms.models import inlineformset_factory
from secure_witness.models import (
Folder,
Report,
)
class ReportForm(forms.ModelForm):
detailed = forms.CharField(widget=forms.Textarea(attrs={'cols':50, 'rows': 2}))
date = forms.DateField(widget=forms.DateInput(format='%d/%m/%Y'),
input_formats=('%d/%m/%Y'),
required=False)
location = forms.CharField(required=False)
keywords = forms.CharField(required=False)
time = forms.CharField(required=False)
folder = forms.ModelChoiceField(queryset=Folder.objects.all(), required=False)
# Display all groups except admin group
authorized_groups = forms.ModelMultipleChoiceField(queryset=Group.objects.filter(~Q(name='admins')),
required=False, widget=forms.CheckboxSelectMultiple)
class Meta:
model = Report
fields = ['folder', 'short', 'detailed', 'location', 'keywords', 'time', 'private', 'authorized_groups']
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput(), required=True)
class Meta:
model = User
fields = ['username', 'email', 'password']
class RegistrationForm(UserCreationForm):
email = forms.EmailField(required=True, widget=forms.TextInput(attrs={'placeholder' : 'Email address'}))
first_name = forms.CharField(required=True)
last_name = forms.CharField(required=True)
def clean_email(self):
email = self.cleaned_data["email"]
# If no user exists, then the email is valid
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
return email
# Otherwise, raise an exception
raise forms.ValidationError('Duplicate Email')
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
user.email = self.cleaned_data["email"]
if commit:
user.is_active = False
user.save()
return user
class Meta:
model = User
fields = ['first_name', 'last_name', 'email', 'username', 'password1', 'password2']
|
mit
| -3,412,109,564,408,642,600 | 35.375 | 112 | 0.675548 | false |
noah-dev/todo_django
|
login/views.py
|
1
|
1371
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, get_user_model, login, logout
from .forms import UserLoginForm
def login_view(request):
'''Login form for user authentication'''
# If the user is already logged in, redirect to the todo page
if request.user.is_authenticated:
return redirect('/todo/')
# If the user attempts to go to another url without logging in, remember
    # that url in next
next = request.GET.get('next')
form = UserLoginForm(request.POST or None)
# If the form is valid, attempt to authenticate and login the user
if form.is_valid():
username = form.cleaned_data.get("user")
password = form.cleaned_data.get("password")
user = authenticate(username=username, password=password)
login(request, user)
if next:
# Redirect to original url after logging in
return redirect(next)
# After logged in, go to todo page
return redirect('/todo/')
# If user is not logged in, show the login page.
return render(request, "login/login.html", {"form":form})
def logout_view(request):
'''Logout the user - go to /login/logout'''
logout(request)
# Redirect to login page after logging user out
return redirect('/login/')
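# A possible URLconf wiring for these views (illustrative only; the project's
# actual urls.py is not shown in this file):
#   from django.conf.urls import url
#   from login import views
#   urlpatterns = [
#       url(r'^login/$', views.login_view, name='login'),
#       url(r'^login/logout/$', views.logout_view, name='logout'),
#   ]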
|
mit
| 7,287,097,481,866,130,000 | 36.081081 | 76 | 0.671043 | false |
brianjimenez/lightdock
|
lightdock/test/mathutil/test_mtrandom.py
|
1
|
1899
|
"""Tests for MTGenerator class"""
from lightdock.mathutil.lrandom import MTGenerator
from nose.tools import assert_almost_equals
class TestMTGenerator:
def setUp(self):
self.generated = [0.376962302390386, 0.9267885077263207, 0.8434467391322422, 0.21404576995204339,
0.8717081122454375,
0.6364617457506916, 0.04239212615820076, 0.9529720601244589, 0.25568366821646715,
0.305900980269444,
0.4240801565062292, 0.5885140140252595, 0.1243916494838736, 0.686677304871223,
0.8322405233765763,
0.5123190504571955, 0.7944608177634946, 0.6249799602888791, 0.8248125234140684,
0.18093878439654187,
0.358677331460587, 0.46939438227304753, 0.1039841315397021, 0.9731064575430529,
0.6360295241926393,
0.0982257917564412, 0.5827761542521052, 0.4144259792060764, 0.1893963938663159,
0.16093249334871063,
0.46074351722343176, 0.07036985012480113, 0.5630778079431735, 0.5149278241079724,
0.12707991696432464,
0.875161140919984, 0.04052657292837836, 0.1720720684328083, 0.8171782611452408,
0.22081551531133514,
0.42421704427664764, 0.5595825645842261, 0.4355613053193895, 0.49413584628211227,
0.5047312502106898,
0.4827386173776421, 0.057609288544146486, 0.816336523703245, 0.282849141726241,
0.04773978879444429]
def tearDown(self):
pass
def test_get_random_number(self):
gen = MTGenerator(25)
for i in range(50):
assert_almost_equals(self.generated[i], gen())
|
gpl-3.0
| 7,815,284,650,805,520,000 | 51.75 | 107 | 0.589258 | false |
uber/ludwig
|
ludwig/train.py
|
1
|
16171
|
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import logging
import sys
from typing import List, Union
import pandas as pd
import yaml
from ludwig.api import LudwigModel
from ludwig.backend import ALL_BACKENDS, LOCAL, Backend, initialize_backend
from ludwig.contrib import contrib_command, contrib_import
from ludwig.globals import LUDWIG_VERSION
from ludwig.utils.defaults import default_random_seed
from ludwig.utils.misc_utils import check_which_config
from ludwig.utils.print_utils import logging_level_registry, print_ludwig
logger = logging.getLogger(__name__)
def train_cli(
config: dict = None,
config_file: str = None,
dataset: Union[str, dict, pd.DataFrame] = None,
training_set: Union[str, dict, pd.DataFrame] = None,
validation_set: Union[str, dict, pd.DataFrame] = None,
test_set: Union[str, dict, pd.DataFrame] = None,
training_set_metadata: Union[str, dict] = None,
data_format: str = None,
experiment_name: str = 'api_experiment',
model_name: str = 'run',
model_load_path: str = None,
model_resume_path: str = None,
skip_save_training_description: bool = False,
skip_save_training_statistics: bool = False,
skip_save_model: bool = False,
skip_save_progress: bool = False,
skip_save_log: bool = False,
skip_save_processed_input: bool = False,
output_directory: str = 'results',
gpus: Union[str, int, List[int]] = None,
gpu_memory_limit: int = None,
allow_parallel_threads: bool = True,
backend: Union[Backend, str] = None,
random_seed: int = default_random_seed,
logging_level: int =logging.INFO,
debug: bool = False,
**kwargs
) -> None:
"""*train* defines the entire training procedure used by Ludwig's
internals. Requires most of the parameters that are taken into the model.
Builds a full ludwig model and performs the training.
:param config: (dict) config which defines the different
parameters of the model, features, preprocessing and training.
:param config_file: (str, default: `None`) the filepath string
that specifies the config. It is a yaml file.
:param dataset: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing the entire dataset to be used for training.
If it has a split column, it will be used for splitting (0 for train,
1 for validation, 2 for test), otherwise the dataset will be
randomly split.
:param training_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing training data.
:param validation_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing validation data.
:param test_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing test data.
:param training_set_metadata: (Union[str, dict], default: `None`)
metadata JSON file or loaded metadata. Intermediate preprocessed
structure containing the mappings of the input
dataset created the first time an input file is used in the same
directory with the same name and a '.meta.json' extension.
:param data_format: (str, default: `None`) format to interpret data
sources. Will be inferred automatically if not specified. Valid
formats are `'auto'`, `'csv'`, `'excel'`, `'feather'`,
`'fwf'`, `'hdf5'` (cache file produced during previous training),
`'html'` (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
`'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
`'stata'`, `'tsv'`.
:param experiment_name: (str, default: `'experiment'`) name for
the experiment.
:param model_name: (str, default: `'run'`) name of the model that is
being used.
:param model_load_path: (str, default: `None`) if this is specified the
loaded model will be used as initialization
(useful for transfer learning).
:param model_resume_path: (str, default: `None`) resumes training of
the model from the path specified. The config is restored.
In addition to config, training statistics, loss for each
epoch and the state of the optimizer are restored such that
training can be effectively continued from a previously interrupted
training process.
:param skip_save_training_description: (bool, default: `False`) disables
saving the description JSON file.
:param skip_save_training_statistics: (bool, default: `False`) disables
saving training statistics JSON file.
:param skip_save_model: (bool, default: `False`) disables
saving model weights and hyperparameters each time the model
improves. By default Ludwig saves model weights after each epoch
the validation metric improves, but if the model is really big
that can be time consuming. If you do not want to keep
the weights and just find out what performance a model can get
with a set of hyperparameters, use this parameter to skip it,
but the model will not be loadable later on and the returned model
will have the weights obtained at the end of training, instead of
the weights of the epoch with the best validation performance.
:param skip_save_progress: (bool, default: `False`) disables saving
progress each epoch. By default Ludwig saves weights and stats
after each epoch for enabling resuming of training, but if
        the model is really big that can be time consuming and will use
twice as much space, use this parameter to skip it, but training
cannot be resumed later on.
:param skip_save_log: (bool, default: `False`) disables saving
TensorBoard logs. By default Ludwig saves logs for the TensorBoard,
but if it is not needed turning it off can slightly increase the
overall speed.
:param skip_save_processed_input: (bool, default: `False`) if input
dataset is provided it is preprocessed and cached by saving an HDF5
and JSON files to avoid running the preprocessing again. If this
parameter is `False`, the HDF5 and JSON file are not saved.
:param output_directory: (str, default: `'results'`) the directory that
will contain the training statistics, TensorBoard logs, the saved
model and the training progress files.
:param gpus: (list, default: `None`) list of GPUs that are available
for training.
:param gpu_memory_limit: (int, default: `None`) maximum memory in MB to
allocate per GPU device.
:param allow_parallel_threads: (bool, default: `True`) allow TensorFlow
to use multithreading parallelism to improve performance at
the cost of determinism.
:param backend: (Union[Backend, str]) `Backend` or string name
of backend to use to execute preprocessing / training steps.
:param random_seed: (int: default: 42) random seed used for weights
initialization, splits and any other random function.
:param debug: (bool, default: `False) if `True` turns on `tfdbg` with
`inf_or_nan` checks.
:param logging_level: (int) Log level that will be sent to stderr.
# Return
:return: (`None`)
"""
config = check_which_config(config,
config_file)
if model_load_path:
model = LudwigModel.load(
model_load_path,
logging_level=logging_level,
backend=backend,
gpus=gpus,
gpu_memory_limit=gpu_memory_limit,
allow_parallel_threads=allow_parallel_threads,
)
else:
model = LudwigModel(
config=config,
logging_level=logging_level,
backend=backend,
gpus=gpus,
gpu_memory_limit=gpu_memory_limit,
allow_parallel_threads=allow_parallel_threads,
)
model.train(
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
data_format=data_format,
experiment_name=experiment_name,
model_name=model_name,
model_resume_path=model_resume_path,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
output_directory=output_directory,
random_seed=random_seed,
debug=debug,
)
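# Minimal programmatic usage sketch (illustrative; the feature definitions are
# assumptions, not taken from this file):
#   train_cli(
#       config={'input_features': [{'name': 'text', 'type': 'text'}],
#               'output_features': [{'name': 'label', 'type': 'category'}]},
#       dataset='train.csv',
#       output_directory='results',
#   )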
def cli(sys_argv):
parser = argparse.ArgumentParser(
description='This script trains a model',
prog='ludwig train',
usage='%(prog)s [options]'
)
# ----------------------------
# Experiment naming parameters
# ----------------------------
parser.add_argument(
'--output_directory',
type=str,
default='results',
help='directory that contains the results'
)
parser.add_argument(
'--experiment_name',
type=str,
default='experiment',
help='experiment name'
)
parser.add_argument(
'--model_name',
type=str,
default='run',
help='name for the model'
)
# ---------------
# Data parameters
# ---------------
parser.add_argument(
'--dataset',
help='input data file path. '
'If it has a split column, it will be used for splitting '
'(0: train, 1: validation, 2: test), '
'otherwise the dataset will be randomly split'
)
parser.add_argument('--training_set', help='input train data file path')
parser.add_argument(
'--validation_set', help='input validation data file path'
)
parser.add_argument('--test_set', help='input test data file path')
parser.add_argument(
'--training_set_metadata',
help='input metadata JSON file path. An intermediate preprocessed file '
'containing the mappings of the input file created '
'the first time a file is used, in the same directory '
'with the same name and a .json extension'
)
parser.add_argument(
'--data_format',
help='format of the input data',
default='auto',
choices=['auto', 'csv', 'excel', 'feather', 'fwf', 'hdf5',
                 'html', 'json', 'jsonl', 'parquet', 'pickle', 'sas',
'spss', 'stata', 'tsv']
)
parser.add_argument(
'-sspi',
'--skip_save_processed_input',
help='skips saving intermediate HDF5 and JSON files',
action='store_true',
default=False
)
# ----------------
# Model parameters
# ----------------
config = parser.add_mutually_exclusive_group(required=True)
config.add_argument(
'-c',
'--config',
type=yaml.safe_load,
help='config'
)
config.add_argument(
'-cf',
'--config_file',
help='YAML file describing the model. Ignores --config'
)
parser.add_argument(
'-mlp',
'--model_load_path',
help='path of a pretrained model to load as initialization'
)
parser.add_argument(
'-mrp',
'--model_resume_path',
help='path of the model directory to resume training of'
)
parser.add_argument(
'-sstd',
'--skip_save_training_description',
action='store_true',
default=False,
help='disables saving the description JSON file'
)
parser.add_argument(
'-ssts',
'--skip_save_training_statistics',
action='store_true',
default=False,
help='disables saving training statistics JSON file'
)
parser.add_argument(
'-ssm',
'--skip_save_model',
action='store_true',
default=False,
help='disables saving weights each time the model improves. '
'By default Ludwig saves weights after each epoch '
             'the validation metric improves, but if the model is really big '
'that can be time consuming. If you do not want to keep '
'the weights and just find out what performance a model can get '
'with a set of hyperparameters, use this parameter to skip it'
)
parser.add_argument(
'-ssp',
'--skip_save_progress',
action='store_true',
default=False,
help='disables saving weights after each epoch. By default ludwig saves '
'weights after each epoch for enabling resuming of training, but '
'if the model is really big that can be time consuming and will '
'save twice as much space, use this parameter to skip it'
)
parser.add_argument(
'-ssl',
'--skip_save_log',
action='store_true',
default=False,
help='disables saving TensorBoard logs. By default Ludwig saves '
'logs for the TensorBoard, but if it is not needed turning it off '
'can slightly increase the overall speed'
)
# ------------------
# Runtime parameters
# ------------------
parser.add_argument(
'-rs',
'--random_seed',
type=int,
default=42,
help='a random seed that is going to be used anywhere there is a call '
'to a random number generator: data splitting, parameter '
'initialization and training set shuffling'
)
parser.add_argument(
'-g',
'--gpus',
nargs='+',
type=int,
default=None,
help='list of gpus to use'
)
parser.add_argument(
'-gml',
'--gpu_memory_limit',
type=int,
default=None,
help='maximum memory in MB to allocate per GPU device'
)
parser.add_argument(
'-dpt',
'--disable_parallel_threads',
action='store_false',
dest='allow_parallel_threads',
help='disable TensorFlow from using multithreading for reproducibility'
)
parser.add_argument(
"-b",
"--backend",
help='specifies backend to use for parallel / distributed execution, '
'defaults to local execution or Horovod if called using horovodrun',
choices=ALL_BACKENDS,
)
parser.add_argument(
'-dbg',
'--debug',
action='store_true',
default=False, help='enables debugging mode'
)
parser.add_argument(
'-l',
'--logging_level',
default='info',
help='the level of logging to use',
choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']
)
args = parser.parse_args(sys_argv)
args.logging_level = logging_level_registry[args.logging_level]
logging.getLogger('ludwig').setLevel(
args.logging_level
)
global logger
logger = logging.getLogger('ludwig.train')
args.backend = initialize_backend(args.backend)
if args.backend.is_coordinator():
print_ludwig('Train', LUDWIG_VERSION)
train_cli(**vars(args))
if __name__ == '__main__':
contrib_import()
contrib_command("train", *sys.argv)
cli(sys.argv[1:])
|
apache-2.0
| -4,580,385,739,438,529,500 | 37.686603 | 81 | 0.619937 | false |
eriksore/sdn
|
test3.py
|
1
|
1942
|
#!/usr/bin/python
"""
Script created by VND - Visual Network Description
"""
from mininet.net import Mininet
from mininet.node import Controller, RemoteController, OVSKernelSwitch, OVSLegacyKernelSwitch, UserSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.link import Link, TCLink
def topology():
"Create a network."
net = Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch )
print "*** Creating nodes"
s1 = net.addSwitch( 's1' )
s2 = net.addSwitch( 's2' )
s3 = net.addSwitch( 's3' )
s4 = net.addSwitch( 's4' )
s5 = net.addSwitch( 's5' )
s6 = net.addSwitch( 's6' )
h12 = net.addHost( 'h12', mac='00:00:00:00:00:12', ip='10.0.0.12/8' )
#print h12
h13 = net.addHost( 'h13', mac='00:00:00:00:00:13', ip='10.0.0.13/8' )
h14 = net.addHost( 'h14', mac='00:00:00:00:00:14', ip='10.0.0.14/8' )
h15 = net.addHost( 'h15', mac='00:00:00:00:00:15', ip='10.0.0.15/8' )
h16 = net.addHost( 'h16', mac='00:00:00:00:00:16', ip='10.0.0.16/8' )
h17 = net.addHost( 'h17', mac='00:00:00:00:00:17', ip='10.0.0.17/8' )
h18 = net.addHost( 'h18', mac='00:00:00:00:00:18', ip='10.0.0.18/8' )
h19 = net.addHost( 'h19', mac='00:00:00:00:00:19', ip='10.0.0.19/8' )
print "*** Creating links"
net.addLink(s6, h19, 3, 0)
net.addLink(s6, h18, 2, 0)
net.addLink(s5, h17, 3, 0)
net.addLink(s5, h16, 2, 0)
net.addLink(s3, h15, 3, 0)
net.addLink(s3, h14, 2, 0)
net.addLink(s2, h13, 3, 0)
net.addLink(s2, h12, 2, 0)
net.addLink(s4, s6, 3, 1)
net.addLink(s4, s5, 2, 1)
net.addLink(s1, s3, 3, 1, bw=10, delay='10ms', loss=1)
net.addLink(s1, s2, 2, 1)
net.addLink(s1, s4, 1, 1)
print "*** Starting network"
net.build()
print "*** Running CLI"
CLI( net )
print "*** Stopping network"
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
topology()
|
mit
| -6,639,555,770,369,265,000 | 31.366667 | 105 | 0.594233 | false |
mengxin891029/Project-4-Conference-Central-App
|
models.py
|
1
|
4806
|
#!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
__author__ = 'Marquis de Meng'
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty()
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.StringProperty(repeated=True)
sessionWishList=ndb.StringProperty(repeated=True)
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class Conference(ndb.Model):
"""Conference -- Conference object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty()
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty()
month = ndb.IntegerProperty()
endDate = ndb.DateProperty()
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) #DateTimeField()
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10) #DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
class Session(ndb.Model):
name = ndb.StringProperty(required=True)
highlights = ndb.StringProperty()
speaker = ndb.StringProperty()
duration = ndb.IntegerProperty()
type = ndb.StringProperty()
date = ndb.DateProperty()
startHour = ndb.IntegerProperty() # in 24 hour notation, so stores in IntegerProperty
class SessionForm(messages.Message):
name = messages.StringField(1)
highlights = messages.StringField(2)
speaker = messages.StringField(3)
duration = messages.IntegerField(4)
type = messages.StringField(5)
date = messages.StringField(6) # in Date Format
startHour = messages.IntegerField(7) # In 24 Hour Format
class SessionForms(messages.Message):
"""SessionForms -- multiple SessionForm inbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class SessionQueryForm(messages.Message):
field = messages.StringField(1)
class AddSessionToWishListForm(messages.Message):
sessionKey = messages.StringField(1)
|
apache-2.0
| -4,686,787,165,955,999,000 | 34.080292 | 93 | 0.703079 | false |
jendrikseipp/vulture
|
tests/test_unreachable.py
|
1
|
5106
|
from . import check_unreachable
from . import v
assert v # Silence pyflakes
def test_return_assignment(v):
v.scan(
"""\
def foo():
print("Hello World")
return
a = 1
"""
)
check_unreachable(v, 4, 1, "return")
def test_return_multiline_return_statements(v):
v.scan(
"""\
def foo():
print("Something")
return (something,
that,
spans,
over,
multiple,
lines)
print("Hello World")
"""
)
check_unreachable(v, 9, 1, "return")
def test_return_multiple_return_statements(v):
v.scan(
"""\
def foo():
return something
return None
return (some, statement)
"""
)
check_unreachable(v, 3, 2, "return")
def test_return_pass(v):
v.scan(
"""\
def foo():
return
pass
return something
"""
)
check_unreachable(v, 3, 2, "return")
def test_return_multiline_return(v):
v.scan(
"""
def foo():
return \
"Hello"
print("Unreachable code")
"""
)
check_unreachable(v, 4, 1, "return")
def test_return_recursive_functions(v):
v.scan(
"""\
def foo(a):
if a == 1:
return 1
else:
return foo(a - 1)
print("This line is never executed")
"""
)
check_unreachable(v, 6, 1, "return")
def test_return_semicolon(v):
v.scan(
"""\
def foo():
return; a = 1
"""
)
check_unreachable(v, 2, 1, "return")
def test_return_list(v):
v.scan(
"""\
def foo(a):
return
a[1:2]
"""
)
check_unreachable(v, 3, 1, "return")
def test_return_continue(v):
v.scan(
"""\
def foo():
if foo():
return True
continue
else:
return False
"""
)
check_unreachable(v, 4, 1, "return")
def test_raise_assignment(v):
v.scan(
"""\
def foo():
raise ValueError
li = []
"""
)
check_unreachable(v, 3, 1, "raise")
def test_multiple_raise_statements(v):
v.scan(
"""\
def foo():
a = 1
raise
raise KeyError
# a comment
b = 2
raise CustomDefinedError
"""
)
check_unreachable(v, 4, 4, "raise")
def test_return_with_raise(v):
v.scan(
"""\
def foo():
a = 1
return
raise ValueError
return
"""
)
check_unreachable(v, 4, 2, "return")
def test_return_comment_and_code(v):
v.scan(
"""\
def foo():
return
# This is a comment
print("Hello World")
"""
)
check_unreachable(v, 4, 1, "return")
def test_raise_with_return(v):
v.scan(
"""\
def foo():
a = 1
raise
return a
"""
)
check_unreachable(v, 4, 1, "raise")
def test_raise_error_message(v):
v.scan(
"""\
def foo():
raise SomeError("There is a problem")
print("I am unreachable")
"""
)
check_unreachable(v, 3, 1, "raise")
def test_raise_try_except(v):
v.scan(
"""\
def foo():
try:
a = 1
raise
except IOError as e:
print("We have some problem.")
raise
print(":-(")
"""
)
check_unreachable(v, 8, 1, "raise")
def test_raise_with_comment_and_code(v):
v.scan(
"""\
def foo():
raise
# This is a comment
print("Something")
return None
"""
)
check_unreachable(v, 4, 2, "raise")
def test_continue_basic(v):
v.scan(
"""\
def foo():
if bar():
a = 1
else:
continue
a = 2
"""
)
check_unreachable(v, 6, 1, "continue")
def test_continue_one_liner(v):
v.scan(
"""\
def foo():
for i in range(1, 10):
if i == 5: continue
print(1 / i)
"""
)
assert v.unreachable_code == []
def test_continue_nested_loops(v):
v.scan(
"""\
def foo():
a = 0
if something():
foo()
if bar():
a = 2
continue
# This is unreachable
a = 1
elif a == 1:
pass
else:
a = 3
continue
else:
continue
"""
)
check_unreachable(v, 9, 1, "continue")
def test_continue_with_comment_and_code(v):
v.scan(
"""\
def foo():
if bar1():
bar2()
else:
a = 1
continue
# Just a comment
raise ValueError
"""
)
check_unreachable(v, 8, 1, "continue")
def test_break_basic(v):
v.scan(
"""\
def foo():
for i in range(123):
break
# A comment
return
dead = 1
"""
)
check_unreachable(v, 5, 2, "break")
def test_break_one_liner(v):
v.scan(
"""\
def foo():
for i in range(10):
if i == 3: break
print(i)
"""
)
assert v.unreachable_code == []
def test_break_with_comment_and_code(v):
v.scan(
"""\
while True:
break
# some comment
print("Hello")
"""
)
check_unreachable(v, 4, 1, "break")
def test_while_true_else(v):
v.scan(
"""\
while True:
print("I won't stop")
else:
print("I won't run")
"""
)
check_unreachable(v, 4, 1, "else")
|
mit
| 9,188,205,456,608,774,000 | 14.151335 | 47 | 0.492754 | false |
alphatwirl/alphatwirl
|
alphatwirl/summary/Reader.py
|
1
|
2548
|
# Tai Sakuma <[email protected]>
import logging
from .WeightCalculatorOne import WeightCalculatorOne
##__________________________________________________________________||
class Reader:
def __init__(self, keyValComposer, summarizer,
nextKeyComposer=None,
weightCalculator=WeightCalculatorOne(),
collector=None, nevents=None):
self.keyValComposer = keyValComposer
self.summarizer = summarizer
self.collector = collector
self.weightCalculator = weightCalculator
self.nextKeyComposer = nextKeyComposer
self.nevents = nevents
self.ievent = 0
self._repr_pairs = [
('keyValComposer', self.keyValComposer),
('summarizer', self.summarizer),
('collector', self.collector),
('nextKeyComposer', self.nextKeyComposer),
('weightCalculator', self.weightCalculator),
('nevents', self.nevents),
]
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
', '.join(['{}={!r}'.format(n, v) for n, v in self._repr_pairs]),
)
def __str__(self):
nwidth = max(len(n) for n, _ in self._repr_pairs)
nwidth += 4
return '{}:\n{}'.format(
self.__class__.__name__,
'\n'.join(['{:>{}}: {!r}'.format(n, nwidth, v) for n, v in self._repr_pairs]),
)
def begin(self, event):
self.keyValComposer.begin(event)
def event(self, event):
if self.nevents is not None and self.nevents <= self.ievent:
return
self.ievent += 1
try:
keyvals = self.keyValComposer(event)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error(e)
logger.error(self)
raise
weight = self.weightCalculator(event)
for key, val in keyvals:
self.summarizer.add(key=key, val=val, weight=weight)
def end(self):
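        # When a next-key composer is configured, register the derived keys with the summarizer before collection.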
if self.nextKeyComposer is None:
return
for key in sorted(self.summarizer.keys()):
nextKeys = self.nextKeyComposer(key)
for nextKey in nextKeys:
self.summarizer.add_key(nextKey)
def merge(self, other):
self.summarizer += other.summarizer
def results(self):
return self.summarizer
def collect(self):
return self.collector(self)
##__________________________________________________________________||
|
bsd-3-clause
| -1,899,977,931,594,863,000 | 29.333333 | 90 | 0.527473 | false |
eti-p-doray/log6308
|
visu.py
|
1
|
13304
|
# encoding: UTF-8
# Copyright 2016 Google.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
#plt.style.use(["ggplot", "tensorflowvisu.mplstyle"])
#import matplotlib
#matplotlib.use('macosx') #this is the default on mac
#print("matplotlib version: " + matplotlib.__version__)
import matplotlib.animation as animation
from matplotlib import rcParams
import math
tf.set_random_seed(0)
# number of percentile slices for histogram visualisations
HISTOGRAM_BUCKETS = 7
# n = HISTOGRAM_BUCKETS (global)
# Buckets the data into n buckets so that there are an equal number of data points in
# each bucket. Returns n+1 bucket boundaries. Spreads the reaminder data.size % n more
# or less evenly among the central buckets.
# data: 1-D ndarray containing float data, MUST BE SORTED in ascending order
# n: integer, the number of desired output buckets
# return value: ndarray, 1-D vector of size n+1 containing the bucket boundaries
# the first value is the min of the data, the last value is the max
def probability_distribution(data):
n = HISTOGRAM_BUCKETS
data.sort()
bucketsize = data.size // n
bucketrem = data.size % n
buckets = np.zeros([n+1])
buckets[0] = data[0] # min
buckets[-1] = data[-1] # max
buckn = 0
rem = 0
remn = 0
k = 0
cnt = 0 # only for assert
lastval = data[0]
for i in range(data.size):
val = data[i]
buckn += 1
cnt += 1
if buckn > bucketsize+rem : ## crossing bucket boundary
cnt -= 1
k += 1
buckets[k] = (val + lastval) / 2
if (k<n+1):
cnt += 1
buckn = 1 # val goes into the new bucket
if k >= (n - bucketrem) // 2 and remn < bucketrem:
rem = 1
remn += 1
else:
rem = 0
lastval = val
assert i+1 == cnt
return buckets
def _empty_collection(collection):
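    # Remove every artist currently stored in the given matplotlib collection list.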
tempcoll = []
for a in (collection):
tempcoll.append(a)
for a in (tempcoll):
collection.remove(a)
def _display_time_histogram(ax, xdata, ydata, color):
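    # Clear the previous fills and draw nested percentile bands, symmetric around the median column, over the time axis.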
_empty_collection(ax.collections)
midl = HISTOGRAM_BUCKETS//2
midh = HISTOGRAM_BUCKETS//2
for i in range(int(math.ceil(HISTOGRAM_BUCKETS/2.0))):
ax.fill_between(xdata, ydata[:,midl-i], ydata[:,midh+1+i], facecolor=color, alpha=1.6/HISTOGRAM_BUCKETS)
if HISTOGRAM_BUCKETS % 2 == 0 and i == 0:
ax.fill_between(xdata, ydata[:,midl-1], ydata[:,midh], facecolor=color, alpha=1.6/HISTOGRAM_BUCKETS)
midl = midl-1
class NetflixDataVis:
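    # Live training dashboard: accuracy/loss curves, image panels and weight/bias percentile histograms, refreshed through a matplotlib animation.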
xmax = 0
y2max = 0
x1 = []
y1 = []
z1 = []
x2 = []
y2 = []
z2 = []
x3 = []
w3 = np.zeros([0,HISTOGRAM_BUCKETS+1])
b3 = np.zeros([0,HISTOGRAM_BUCKETS+1])
im1 = np.full((28*10,28*10,3),255, dtype='uint8')
im2 = np.full((28*10,28*10,3),255, dtype='uint8')
_animpause = False
_animation = None
_mpl_figure = None
_mlp_init_func = None
_mpl_update_func = None
_color4 = None
_color5 = None
def __set_title(self, ax, title, default=""):
if title is not None and title != "":
ax.set_title(title, y=1.02) # adjustment for plot title bottom margin
else:
ax.set_title(default, y=1.02) # adjustment for plot title bottom margin
# retrieve the color from the color cycle, default is 1
def __get_histogram_cyclecolor(self, colornum):
clist = rcParams['axes.prop_cycle']
ccount = 1 if (colornum is None) else colornum
colors = clist.by_key()['color']
for i, c in enumerate(colors):
if (i == ccount % 3):
return c
def __init__(self, title1=None, title2=None, title3=None, title4=None, title5=None, title6=None, histogram4colornum=None, histogram5colornum=None, dpi=70):
self._color4 = self.__get_histogram_cyclecolor(histogram4colornum)
self._color5 = self.__get_histogram_cyclecolor(histogram5colornum)
fig = plt.figure(figsize=(19.20, 10.80), dpi=dpi)
plt.gcf().canvas.set_window_title("MNIST")
fig.set_facecolor('#FFFFFF')
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
ax6 = fig.add_subplot(236)
self.__set_title(ax1, title1, default="Accuracy")
self.__set_title(ax2, title2, default="Cross entropy loss")
self.__set_title(ax3, title3, default="Training digits")
self.__set_title(ax4, title4, default="Weights")
self.__set_title(ax5, title5, default="Biases")
self.__set_title(ax6, title6, default="Test digits")
#ax1.set_figaspect(1.0)
# TODO: finish exporting the style modifications into a stylesheet
line1, = ax1.plot(self.x1, self.y1, label="training accuracy")
line2, = ax1.plot(self.x2, self.y2, label="test accuracy")
legend = ax1.legend(loc='lower right') # fancybox : slightly rounded corners
legend.draggable(True)
line3, = ax2.plot(self.x1, self.z1, label="training loss")
line4, = ax2.plot(self.x2, self.z2, label="test loss")
legend = ax2.legend(loc='upper right') # fancybox : slightly rounded corners
legend.draggable(True)
ax3.grid(False) # toggle grid off
ax3.set_axis_off()
imax1 = ax3.imshow(self.im1, animated=True, cmap='binary', vmin=0.0, vmax=1.0, interpolation='nearest', aspect=1.0)
ax6.grid(False) # toggle grid off
ax6.axes.get_xaxis().set_visible(False)
imax2 = ax6.imshow(self.im2, animated=True, cmap='binary', vmin=0.0, vmax=1.0, interpolation='nearest', aspect=1.0)
ax6.locator_params(axis='y', nbins=7)
# hack...
ax6.set_yticks([0, 280-4*56, 280-3*56, 280-2*56, 280-56, 280])
ax6.set_yticklabels(["100%", "98%", "96%", "94%", "92%", "90%"])
def _init():
ax1.set_xlim(0, 10) # initial value only, autoscaled after that
ax2.set_xlim(0, 10) # initial value only, autoscaled after that
ax4.set_xlim(0, 10) # initial value only, autoscaled after that
ax5.set_xlim(0, 10) # initial value only, autoscaled after that
ax1.set_ylim(0, 1) # important: not autoscaled
#ax1.autoscale(axis='y')
ax2.set_ylim(0, 2) # important: not autoscaled
return imax1, imax2, line1, line2, line3, line4
def _update():
# x scale: iterations
ax1.set_xlim(0, self.xmax+1)
ax2.set_xlim(0, self.xmax+1)
ax4.set_xlim(0, self.xmax+1)
ax5.set_xlim(0, self.xmax+1)
# four curves: train and test accuracy, train and test loss
line1.set_data(self.x1, self.y1)
line2.set_data(self.x2, self.y2)
line3.set_data(self.x1, self.z1)
line4.set_data(self.x2, self.z2)
#images
imax1.set_data(self.im1)
imax2.set_data(self.im2)
# histograms
_display_time_histogram(ax4, self.x3, self.w3, self._color4)
_display_time_histogram(ax5, self.x3, self.b3, self._color5)
#return changed artists
return imax1, imax2, line1, line2, line3, line4
def _key_event_handler(event):
if len(event.key) == 0:
return
else:
keycode = event.key
# pause/resume with space bar
if keycode == ' ':
self._animpause = not self._animpause
if not self._animpause:
_update()
return
# [p, m, n] p is the #of the subplot, [n,m] is the subplot layout
toggles = {'1':[1,1,1], # one plot
'2':[2,1,1], # one plot
'3':[3,1,1], # one plot
'4':[4,1,1], # one plot
'5':[5,1,1], # one plot
'6':[6,1,1], # one plot
'7':[12,1,2], # two plots
'8':[45,1,2], # two plots
'9':[36,1,2], # two plots
'escape':[123456,2,3], # six plots
'0':[123456,2,3]} # six plots
# other matplotlib keyboard shortcuts:
# 'o' box zoom
# 'p' mouse pan and zoom
# 'h' or 'home' reset
# 's' save
# 'g' toggle grid (when mouse is over a plot)
# 'k' toggle log/lin x axis
# 'l' toggle log/lin y axis
if not (keycode in toggles):
return
for i in range(6):
fig.axes[i].set_visible(False)
fignum = toggles[keycode][0]
if fignum <= 6:
fig.axes[fignum-1].set_visible(True)
fig.axes[fignum-1].change_geometry(toggles[keycode][1], toggles[keycode][2], 1)
ax6.set_aspect(25.0/40) # special case for test digits
elif fignum < 100:
fig.axes[fignum//10-1].set_visible(True)
fig.axes[fignum//10-1].change_geometry(toggles[keycode][1], toggles[keycode][2], 1)
fig.axes[fignum%10-1].set_visible(True)
fig.axes[fignum%10-1].change_geometry(toggles[keycode][1], toggles[keycode][2], 2)
ax6.set_aspect(1.0) # special case for test digits
elif fignum == 123456:
for i in range(6):
fig.axes[i].set_visible(True)
fig.axes[i].change_geometry(toggles[keycode][1], toggles[keycode][2], i+1)
ax6.set_aspect(1.0) # special case for test digits
plt.draw()
fig.canvas.mpl_connect('key_press_event', _key_event_handler)
self._mpl_figure = fig
self._mlp_init_func = _init
self._mpl_update_func = _update
def _update_xmax(self, x):
if (x > self.xmax):
self.xmax = x
def _update_y2max(self, y):
if (y > self.y2max):
self.y2max = y
def append_training_curves_data(self, x, accuracy, loss):
self.x1.append(x)
self.y1.append(accuracy)
self.z1.append(loss)
self._update_xmax(x)
def append_test_curves_data(self, x, accuracy, loss):
self.x2.append(x)
self.y2.append(accuracy)
self.z2.append(loss)
self._update_xmax(x)
self._update_y2max(accuracy)
def get_max_test_accuracy(self):
return self.y2max
def append_data_histograms(self, x, datavect1, datavect2, title1=None, title2=None):
self.x3.append(x)
datavect1.sort()
self.w3 = np.concatenate((self.w3, np.expand_dims(probability_distribution(datavect1), 0)))
datavect2.sort()
self.b3 = np.concatenate((self.b3, np.expand_dims(probability_distribution(datavect2), 0)))
self._update_xmax(x)
def update_image1(self, im):
self.im1 = im
def update_image2(self, im):
self.im2 = im
def is_paused(self):
return self._animpause
def animate(self, compute_step, iterations, train_data_update_freq=20, test_data_update_freq=100, one_test_at_start=True, more_tests_at_start=False, save_movie=False):
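        # Each animation frame runs train_data_update_freq compute steps, then redraws the figure (or writes frames to an mp4 when save_movie is set).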
def animate_step(i):
if (i == iterations // train_data_update_freq): #last iteration
compute_step(iterations, True, True)
else:
for k in range(train_data_update_freq):
n = i * train_data_update_freq + k
request_data_update = (n % train_data_update_freq == 0)
request_test_data_update = (n % test_data_update_freq == 0) and (n > 0 or one_test_at_start)
if more_tests_at_start and n < test_data_update_freq: request_test_data_update = request_data_update
compute_step(n, request_test_data_update, request_data_update)
# makes the UI a little more responsive
plt.pause(0.001)
if not self.is_paused():
return self._mpl_update_func()
self._animation = animation.FuncAnimation(self._mpl_figure, animate_step, int(iterations // train_data_update_freq + 1), init_func=self._mlp_init_func, interval=16, repeat=False, blit=False)
if save_movie:
mywriter = animation.FFMpegWriter(fps=24, codec='libx264', extra_args=['-pix_fmt', 'yuv420p', '-profile:v', 'high', '-tune', 'animation', '-crf', '18'])
self._animation.save("./tensorflowvisu_video.mp4", writer=mywriter)
else:
plt.show(block=True)
|
mit
| 6,400,653,905,988,676,000 | 38.360947 | 198 | 0.575015 | false |
hakanozadam/bal
|
run_bp_span_filter.py
|
1
|
1379
|
#!/bin/env python3
# AUTHORS:
# Hakan Ozadam
#
# Moore Laboratory
# UMASS Medical School / HHMI
# RNA Therapeutics Institute
# Albert Sherman Center, ASC4-1009
# 368 Plantation Street
# Worcester, MA 01605
# USA
#
#################################################################
import os, argparse
from bal.reference.extract_gene_sequences import ExtractGeneSequences
from bal.genomic_io.functions import bp_coverage_filter_sam
###################################################################
###################################################################
def main():
parser = argparse.ArgumentParser(description=
'''
Filter a given sam file using bp_span_coverage_filter
''')
parser.add_argument("-i" ,
help = "Input sam file" ,
required = True ,
metavar = "input_sam_file" ,
type = str)
parser.add_argument("-o" ,
help = "Output sam file" ,
required = True ,
metavar = "output_sam_file",
type = str)
arguments = parser.parse_args()
bp_coverage_filter_sam(arguments.i, arguments.o)
############################################################################
if __name__ == "__main__":
main()
|
gpl-2.0
| 432,496,338,644,076,700 | 29 | 76 | 0.433648 | false |
castlecms/castle.cms
|
castle/cms/services/google/analytics.py
|
1
|
1859
|
from plone.formwidget.namedfile.converter import b64decode_file
from plone.registry.interfaces import IRegistry
from zope.component import getUtility
from . import get_service
def get_ga_service(ga_scope=['https://www.googleapis.com/auth/analytics.readonly']):
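    # Build an authenticated Analytics v3 service from the API e-mail, service key file and property id stored in the registry; returns None if any is missing.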
registry = getUtility(IRegistry)
api_email = registry.get('castle.google_api_email', None)
api_key = registry.get('castle.google_api_service_key_file', None)
ga_id = registry.get('castle.google_analytics_id', None)
if not api_key or not api_email or not ga_id:
return
# Authenticate and construct service.
return get_service(
'analytics', 'v3', ga_scope, b64decode_file(api_key)[1], api_email)
def get_ga_profile(service):
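    # Walk the first account's web properties to find the configured property and return the id of its first view (profile).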
registry = getUtility(IRegistry)
ga_id = registry.get('castle.google_analytics_id', None)
if not ga_id:
return
# Use the Analytics service object to get the first profile id.
# Get a list of all Google Analytics accounts for this user
accounts = service.management().accounts().list().execute()
if accounts.get('items'):
# Get the first Google Analytics account.
account = accounts.get('items')[0].get('id')
# Get a list of all the properties for the first account.
properties = service.management().webproperties().list(
accountId=account).execute()
for item in properties['items']:
if ga_id == item['id']:
                # Get a list of all views (profiles) for the matching property.
profiles = service.management().profiles().list(
accountId=account,
webPropertyId=ga_id).execute()
if profiles.get('items'):
# return the first view (profile) id.
return profiles.get('items')[0].get('id')
return None
|
gpl-2.0
| -6,307,028,686,283,417,000 | 35.45098 | 84 | 0.639053 | false |
raychorn/knowu
|
django/djangononrelsample2/views/urls.py
|
1
|
1859
|
from django.conf.urls import patterns, include, url
from django.conf.urls import *
#from django.contrib import admin
from views import default as default_view
from views import contact as contact_view
from views import terms as terms_view
from views import data as data_view
from views import not_yet_implemented
from views import acceptable_use_policy
from views import create_data as create_data_view
from views import fetch_data as fetch_data_view
from views import unittests as unittests_view
__has_users__ = False
try:
from users.views import default as users_view
from users.views import login as users_login
from users.views import logout as users_logout
from users.views import register as users_register
from users.views import registeruser as users_registeruser
__has_users__ = True
except ImportError:
pass
#admin.autodiscover()
handler500 = 'djangotoolbox.errorviews.server_error'
urlpatterns = patterns('',
#(r'^admin/', include(admin.site.urls)),
('^_ah/warmup$', 'djangoappengine.views.warmup'),
(r'^data/', data_view),
(r'^not-yet-implemented/', not_yet_implemented),
(r'^acceptable-use-policy/', acceptable_use_policy),
(r'^contact/', contact_view),
(r'^terms/', terms_view),
(r'^createdata/', create_data_view),
(r'^get/data/(?P<statename>\w+)/$',fetch_data_view),
(r'^unittests/', unittests_view),
)
if (__has_users__):
urlpatterns += patterns('',
(r'^__admin__/', users_view),
(r'^login/', users_login),
(r'^logout/', users_logout),
(r'^register/', users_register),
(r'^registeruser/', users_registeruser),
)
else:
urlpatterns += patterns('',
(r'^__admin__/', not_yet_implemented),
)
urlpatterns += patterns('',
('^$', default_view),
)
|
lgpl-3.0
| 5,071,289,929,849,273,000 | 29.508475 | 62 | 0.652501 | false |
kyper-data/python-highcharts
|
highcharts/highcharts/options.py
|
1
|
22310
|
# -*- coding: UTF-8 -*-
from past.builtins import basestring
from .highchart_types import OptionTypeError, Series, SeriesOptions
from .common import Formatter, Events, Position, ContextButton, Options3d, ResetZoomButton, \
DrillUpButton, Labels, PlotBands, PlotLines, Title, Items, Navigation, Background, Breaks, \
DataClasses, DateTimeLabelFormats, Zones, Levels, Marker, \
JSfunction, ColorObject, CSSObject, SVGObject, CommonObject, ArrayObject
import json, datetime
# Base Option Class
class BaseOptions(object):
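    # Shared machinery for every option group: keyword options are checked against ALLOWED_OPTIONS and stored as attributes.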
def __init__(self,**kwargs):
self.update_dict(**kwargs)
def __display_options__(self):
print(json.dumps(self.__dict__, indent=4, sort_keys=True))
def __jsonable__(self):
return self.__dict__
    def __validate_options__(self, k, v, ov):
        if ov == NotImplemented:
            raise OptionTypeError("Option Type Currently Not Supported: %s" % k)
        if isinstance(v, dict) and isinstance(ov, dict):
            keys = list(v.keys())  # materialise so the first key is indexable on Python 3 as well
            if len(keys) > 1:
                raise NotImplementedError
            return isinstance(v[keys[0]], ov[keys[0]])
        return isinstance(v, ov)
def update_dict(self, **kwargs):
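        # Dispatch each keyword on its ALLOWED_OPTIONS entry: wrap the value in the matching helper (SeriesOptions, CommonObject, CSS/SVG, JS function, colour) or store plain values directly.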
for k, v in kwargs.items():
if k in self.ALLOWED_OPTIONS:
# if isinstance(self.ALLOWED_OPTIONS[k], tuple) and isinstance(self.ALLOWED_OPTIONS[k][0](), SeriesOptions):
if k in PlotOptions.ALLOWED_OPTIONS.keys():
if self.__getattr__(k):
self.__dict__[k].update(series_type=k, **v)
else:
v = SeriesOptions(series_type=k, **v)
self.__dict__.update({k:v})
elif isinstance(self.ALLOWED_OPTIONS[k], tuple) and isinstance(self.ALLOWED_OPTIONS[k][0](), CommonObject):
if isinstance(v, dict):
if self.__getattr__(k):
self.__dict__[k].update(v) #update dict
else: # first
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](**v)})
else:
OptionTypeError("Not An Accepted Input Type: %s, must be dictionary" % type(v))
elif isinstance(self.ALLOWED_OPTIONS[k], tuple) and isinstance(self.ALLOWED_OPTIONS[k][0](), ArrayObject):
if self.__getattr__(k): #existing attr
if isinstance(v, dict):
self.__dict__[k].update(v) # update array
elif isinstance(v, list):
for item in v:
self.__dict__[k].update(item) # update array
else:
OptionTypeError("Not An Accepted Input Type: %s, must be list or dictionary"
% type(v))
else: #first
if isinstance(v, dict):
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](**v)})
elif isinstance(v, list):
if len(v) == 1:
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](**v[0])})
else:
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](**v[0])})
for item in v[1:]:
self.__dict__[k].update(item)
else:
OptionTypeError("Not An Accepted Input Type: %s, must be list or dictionary"
% type(v))
elif isinstance(self.ALLOWED_OPTIONS[k], tuple) and \
(isinstance(self.ALLOWED_OPTIONS[k][0](), CSSObject) or isinstance(self.ALLOWED_OPTIONS[k][0](), SVGObject)):
if self.__getattr__(k):
for key, value in v.items(): # check if v has object input
self.__dict__[k].__options__().update({key:value})
v = self.__dict__[k].__options__()
# upating object
if isinstance(v, dict):
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](**v)})
else:
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](v)})
elif isinstance(self.ALLOWED_OPTIONS[k], tuple) and (isinstance(self.ALLOWED_OPTIONS[k][0](), JSfunction) or \
isinstance(self.ALLOWED_OPTIONS[k][0](), Formatter) or isinstance(self.ALLOWED_OPTIONS[k][0](), ColorObject)):
if isinstance(v, dict):
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](**v)})
else:
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](v)})
else:
self.__dict__.update({k:v})
else:
print(self.ALLOWED_OPTIONS)
print(k, v)
raise OptionTypeError("Not An Accepted Option Type: %s" % k)
def __getattr__(self, item):
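        # Also called explicitly from update_dict as an "is this option already set?" test: None when unset, True when present.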
if not item in self.__dict__:
return None # Attribute Not Set
else:
return True
class ChartOptions(BaseOptions):
ALLOWED_OPTIONS = {
"alignTicks": bool,
"animation": [bool, dict, basestring],
"backgroundColor": (ColorObject, basestring, dict),
"borderColor": (ColorObject, basestring, dict),
"borderRadius": int,
"borderWidth": int,
"className": basestring,
"defaultSeriesType": basestring,
"events": (Events, dict),
"height": [int,basestring],
"ignoreHiddenSeries": bool,
"inverted": bool,
"margin": list,
"marginBottom": int,
"marginLeft": int,
"marginRight": int,
"marginTop": int,
"options3d": (Options3d, dict),
"plotBackgroundColor": (ColorObject, basestring, dict),
"plotBackgroundImage": basestring,
"plotBorderColor": (ColorObject, basestring, dict),
"plotBorderWidth": int,
"plotShadow": bool,
"polar": bool,
"reflow": bool,
"renderTo": basestring,
"resetZoomButton": (ResetZoomButton, dict),
"selectionMarkerFill": basestring,
"shadow": bool,
"showAxes": bool,
"spacingBottom": int,
"spacingLeft": int,
"spacingRight": int,
"spacingTop": int,
"style": (CSSObject, dict),
"type": basestring,
"width": [int,basestring],
"zoomType": basestring,
}
class ColorAxisOptions(BaseOptions):
ALLOWED_OPTIONS = {
"dataClassColor": basestring,
"dataClasses": (DataClasses, dict),
"endOnTick": bool,
"events": (Events, dict),
"gridLineColor": (ColorObject, basestring, dict),
"gridLineDashStyle": basestring,
"gridLineWidth": [float, int],
"id": basestring,
"labels": (Labels, dict),
"lineColor": (ColorObject, basestring, dict),
"lineWidth": [float, int],
"marker": (Marker, dict),
"max": [float, int],
"maxColor": (ColorObject, basestring, dict),
"maxPadding": [float, int],
"min": [float, int],
"minColor": (ColorObject, basestring, dict),
"minPadding": [float, int],
"minorGridLineColor": (ColorObject, basestring, dict),
"minorGridLineDashStyle": basestring,
"minorGridLineWidth": int,
"minorTickColor": (ColorObject, basestring, dict),
"minorTickInterval": int,
"minorTickLength": int,
"minorTickPosition": basestring,
"minorTickWidth": int,
"reversed": bool,
"showFirstLabel": bool,
"showLastLabel": bool,
"startOfWeek": int,
"startOnTick": bool,
"stops": list,
"tickColor": (ColorObject, basestring, dict),
"tickInterval": int,
"tickLength": int,
"tickPixelInterval": int,
"tickPosition": basestring,
"tickPositioner": JSfunction,
"tickPositions": list,
"tickWidth": int,
"type": basestring,
}
class ColorsOptions(BaseOptions):
""" Special Case, this is simply just an array of colours """
def __init__(self):
self.colors = {}
def set_colors(self, colors):
if isinstance(colors, basestring):
self.colors = ColorObject(colors)
elif isinstance(colors, list) or isinstance(colors, dict):
self.colors = colors
else:
OptionTypeError("Not An Accepted Input Type: %s" % type(colors))
def __jsonable__(self):
return self.colors
class CreditsOptions(BaseOptions):
ALLOWED_OPTIONS = {
"enabled": bool,
"href": basestring,
"position": (Position, dict),
"style": (CSSObject, dict),
"text": basestring,
}
class DrilldownOptions(BaseOptions): #not implement yet, need work in jinjia
ALLOWED_OPTIONS = {
"activeAxisLabelStyle": (CSSObject, dict),
"activeDataLabelStyle": (CSSObject, dict),
"animation": NotImplemented, #(bool, dict), #not sure how to implement
"drillUpButton": (DrillUpButton, dict),
"series": (SeriesOptions, dict),
}
class ExportingOptions(BaseOptions):
ALLOWED_OPTIONS = {
"buttons": (ContextButton, dict),
"chartOptions": (ChartOptions, dict),
"enabled": bool,
"filename": basestring,
"formAttributes": NotImplemented,
"scale": int,
"sourceHeight": int,
"sourceWidth": int,
"type": basestring,
"url": basestring,
"width": int,
}
class GlobalOptions(BaseOptions):
ALLOWED_OPTIONS = {
"Date": NotImplemented,
"VMLRadialGradientURL": basestring,
"canvasToolsURL": basestring,
"getTimezoneOffset": (JSfunction, basestring),
"timezoneOffset": int,
"useUTC": bool,
}
class LabelsOptions(BaseOptions):
ALLOWED_OPTIONS = {
"items": (Items, dict),
"style": (CSSObject, dict),
}
class LangOptions(BaseOptions):
ALLOWED_OPTIONS = {
"decimalPoint": basestring,
"downloadJPEG": basestring,
"downloadPDF": basestring,
"downloadPNG": basestring,
"donwloadSVG": basestring,
"exportButtonTitle": basestring,
"loading": basestring,
"months": list,
"noData": basestring,
"numericSymbols": list,
"printButtonTitle": basestring,
"resetZoom": basestring,
"resetZoomTitle": basestring,
"shortMonths": list,
"thousandsSep": basestring,
"weekdays": list,
}
class LegendOptions(BaseOptions):
ALLOWED_OPTIONS = {
"align": basestring,
"backgroundColor": (ColorObject, basestring, dict),
"borderColor": (ColorObject, basestring, dict),
"borderRadius": int,
"borderWidth": int,
"enabled": bool,
"floating": bool,
"itemDistance": int,
"itemHiddenStyle": (CSSObject, dict),
"itemHoverStyle": (CSSObject, dict),
"itemMarginBottom": int,
"itemMarginTop": int,
"itemStyle": (CSSObject, dict),
"itemWidth": int,
"labelFormat": basestring,
"labelFormatter": (Formatter, JSfunction),
"layout": basestring,
"lineHeight": int,
"margin": int,
"maxHeight": int,
"navigation": (Navigation, dict),
"padding": int,
"reversed": bool,
"rtl": bool,
"shadow": bool,
"style": (CSSObject, dict),
"symbolHeight": int,
"symbolPadding": int,
"symbolRadius": int,
"symbolWidth": int,
"title": (Title, dict),
"useHTML": bool,
"verticalAlign": basestring,
"width": int,
"x": int,
"y": int,
}
class LoadingOptions(BaseOptions):
ALLOWED_OPTIONS = {
"hideDuration": int,
"labelStyle": (CSSObject, dict),
"showDuration": int,
"style": (CSSObject, dict),
}
class NavigationOptions(BaseOptions):
ALLOWED_OPTIONS = {
"buttonOptions": (ContextButton, dict),
"menuItemHoverStyle": (CSSObject, dict),
"menuItemStyle": (CSSObject, dict),
"menuStyle": (CSSObject, dict),
}
class PaneOptions(BaseOptions):
ALLOWED_OPTIONS = {
"background": (Background, list), #arrayObject
"center": list,
"endAngle": int,
"size": int,
"startAngle": int,
}
class PlotOptions(BaseOptions):
""" Another Special Case: Interface With all the different Highchart Plot Types Here """
ALLOWED_OPTIONS = {
"area": (SeriesOptions, dict),
"arearange": (SeriesOptions, dict),
"areaspline": (SeriesOptions, dict),
"areasplinerange": (SeriesOptions, dict),
"bar": (SeriesOptions, dict),
"boxplot": (SeriesOptions, dict),
"bubble": (SeriesOptions, dict),
"column": (SeriesOptions, dict),
"columnrange": (SeriesOptions, dict),
"errorbar": (SeriesOptions, dict),
"gauge": (SeriesOptions, dict),
"heatmap": (SeriesOptions, dict),
"line": (SeriesOptions, dict),
"pie": (SeriesOptions, dict),
"scatter": (SeriesOptions, dict),
"series": (SeriesOptions, dict),
"spline": (SeriesOptions, dict),
"treemap": (SeriesOptions, dict),
}
class SeriesData(BaseOptions):
""" Another Special Case: Stores Data Series in an array for returning to the chart object """
def __init__(self):
#self.__dict__.update([])
self = []
class SubtitleOptions(BaseOptions):
ALLOWED_OPTIONS = {
"align": basestring,
"floating": bool,
"style": (CSSObject, dict),
"text": basestring,
"useHTML": bool,
"verticalAlign": basestring,
"x": int,
"y": int,
}
class TitleOptions(BaseOptions):
ALLOWED_OPTIONS = {
"align": basestring,
"floating": bool,
"margin": int,
"style": (CSSObject, dict),
"text": basestring,
"useHTML": bool,
"verticalAlign": basestring,
"x": int,
"y": int,
}
class TooltipOptions(BaseOptions):
ALLOWED_OPTIONS = {
"animation": bool,
"backgroundColor": (ColorObject, basestring, dict),
"borderColor": (ColorObject, basestring, dict),
"borderRadius": int,
"borderWidth": int,
"crosshairs": [bool, list, dict],
"dateTimeLabelFormats": (DateTimeLabelFormats, dict),
"enabled": bool,
"followPointer": bool,
"followTouchMove": bool,
"footerFormat": basestring,
"formatter": (Formatter, JSfunction),
"headerFormat": basestring,
"pointFormat": basestring,
"pointFormatter": (Formatter, JSfunction),
"positioner": (JSfunction, basestring),
"shadow": bool,
"shared": bool,
"snap": int,
"style": (CSSObject, dict),
"useHTML": bool,
"valueDecimals": int,
"valuePrefix": basestring,
"valueSuffix": basestring,
"xDateFormat": basestring,
}
class xAxisOptions(BaseOptions):
ALLOWED_OPTIONS = {
"allowDecimals": bool,
"alternateGridColor": (ColorObject, basestring, dict),
"categories": list,
'crosshair': bool,
"dateTimeLabelFormats": (DateTimeLabelFormats, dict),
"endOnTick": bool,
"events": (Events, dict),
"gridLineColor": (ColorObject, basestring, dict),
"gridLineDashStyle": basestring,
"gridLineWidth": int,
"id": basestring,
"labels": (Labels, dict),
"lineColor": (ColorObject, basestring, dict),
"lineWidth": int,
"linkedTo": int,
"max": [float, int],
"maxPadding": [float, int],
"maxZoom": NotImplemented,
"min": [float, int],
"minPadding": [float, int],
"minRange": int,
"minTickInterval": int,
"minorGridLineColor": (ColorObject, basestring, dict),
"minorGridLineDashStyle": basestring,
"minorGridLineWidth": int,
"minorTickColor": (ColorObject, basestring, dict),
"minorTickInterval": int,
"minorTickLength": int,
"minorTickPosition": basestring,
"minorTickWidth": int,
"offset": bool,
"opposite": bool,
"plotBands": (PlotBands, list),
"plotLines": (PlotLines, list),
"reversed": bool,
"showEmpty": bool,
"showFirstLabel": bool,
"showLastLabel": bool,
"startOfWeek": int,
"startOnTick": bool,
"tickColor": (ColorObject, basestring, dict),
"tickInterval": int,
"tickLength": int,
"tickPixelInterval": int,
"tickPosition": basestring,
"tickPositioner": JSfunction,
"tickPositions": list,
"tickWidth": int,
"tickmarkPlacement": basestring,
"title": (Title, dict),
"type": basestring,
"units": list
}
class yAxisOptions(BaseOptions):
ALLOWED_OPTIONS = {
"allowDecimals": bool,
"alternateGridColor": (ColorObject, basestring, dict),
"breaks": (Breaks, dict),
"categories": list,
"ceiling": (int, float),
"dateTimeLabelFormats": (DateTimeLabelFormats, dict),
"endOnTick": bool,
"events": (Events, dict),
"floor": (int, float),
"gridLineColor": (ColorObject, basestring, dict),
"gridLineDashStyle": basestring,
"gridLineInterpolation": basestring,
"gridLineWidth": int,
"gridZIndex": int,
"id": basestring,
"labels": (Labels, dict),
"lineColor": (ColorObject, basestring, dict),
"lineWidth": int,
"linkedTo": int,
"max": [float, int],
"maxColor": (ColorObject, basestring, dict),
"maxPadding": [float, int],
"maxZoom": NotImplemented,
"min": [float, int],
"minColor": (ColorObject, basestring, dict),
"minPadding": [float, int],
"minRange": int,
"minTickInterval": int,
"minorGridLineColor": (ColorObject, basestring, dict),
"minorGridLineDashStyle": basestring,
"minorGridLineWidth": int,
"minorTickColor": (ColorObject, basestring, dict),
"minorTickInterval": int,
"minorTickLength": int,
"minorTickPosition": basestring,
"minorTickWidth": int,
"offset": bool,
"opposite": bool,
"plotBands": (PlotBands, list),
"plotLines": (PlotLines, list),
"reversed": bool,
"reversedStacks": bool,
"showEmpty": bool,
"showFirstLabel": bool,
"showLastLabel": bool,
"stackLabels": (Labels, dict),
"startOfWeek": int,
"startOnTick": bool,
"stops": list,
"tickAmount": int,
"tickColor": (ColorObject, basestring, dict),
"tickInterval": int,
"tickLength": int,
"tickPixelInterval": int,
"tickPosition": basestring,
"tickPositioner": (JSfunction, basestring),
"tickPositions": list,
"tickWidth": int,
"tickmarkPlacement": basestring,
"title": (Title, dict),
"type": basestring,
"units": list
}
class zAxisOptions(BaseOptions): #only for 3D plots
ALLOWED_OPTIONS = {
"allowDecimals": bool,
"alternateGridColor": (ColorObject, basestring, dict),
"breaks": (Breaks, dict),
"categories": list,
"ceiling": (int, float),
"dateTimeLabelFormats": (DateTimeLabelFormats, dict),
"endOnTick": bool,
"events": (Events, dict),
"floor": (int, float),
"gridLineColor": (ColorObject, basestring, dict),
"gridLineDashStyle": basestring,
"gridLineInterpolation": basestring,
"gridLineWidth": int,
"gridZIndex": int,
"id": basestring,
"labels": (Labels, dict),
"lineColor": (ColorObject, basestring, dict),
"lineWidth": int,
"linkedTo": int,
"max": [float, int],
"maxColor": (ColorObject, basestring, dict),
"maxPadding": [float, int],
"maxZoom": NotImplemented,
"min": [float, int],
"minColor": (ColorObject, basestring, dict),
"minPadding": [float, int],
"minRange": int,
"minTickInterval": int,
"minorGridLineColor": (ColorObject, basestring, dict),
"minorGridLineDashStyle": basestring,
"minorGridLineWidth": int,
"minorTickColor": (ColorObject, basestring, dict),
"minorTickInterval": int,
"minorTickLength": int,
"minorTickPosition": basestring,
"minorTickWidth": int,
"offset": bool,
"opposite": bool,
"plotBands": (PlotBands, list),
"plotLines": (PlotLines, list),
"reversed": bool,
"reversedStacks": bool,
"showEmpty": bool,
"showFirstLabel": bool,
"showLastLabel": bool,
"stackLabels": (Labels, dict),
"startOfWeek": int,
"startOnTick": bool,
"stops": list,
"tickAmount": int,
"tickColor": (ColorObject, basestring, dict),
"tickInterval": int,
"tickLength": int,
"tickPixelInterval": int,
"tickPosition": basestring,
"tickPositioner": (JSfunction, basestring),
"tickPositions": list,
"tickWidth": int,
"tickmarkPlacement": basestring,
"title": (Title, dict),
"type": basestring,
"units": list
}
class MultiAxis(object):
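    # Collects several xAxis or yAxis option objects so a chart can define more than one axis of the same kind.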
def __init__(self, axis):
AXIS_LIST = {
"xAxis": xAxisOptions,
"yAxis": yAxisOptions
}
self.axis = []
self.AxisObj = AXIS_LIST[axis]
def update(self, **kwargs):
self.axis.append(self.AxisObj(**kwargs))
def __jsonable__(self):
return self.axis
|
mit
| 3,091,499,673,361,825,000 | 32.957382 | 130 | 0.545719 | false |
Jacy-Wang/MyLeetCode
|
Atoi8.py
|
1
|
1129
|
class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
str = str.strip(' ')
opSign = False
op = ['-', '+']
for i in xrange(len(str)):
if i == 0:
if str[i] < '0' or str[i] > '9':
if str[i] not in op:
return 0
else:
opSign = True
elif i == 1:
if str[i] < '0' or str[i] > '9':
if opSign:
return 0
else:
return int(str[: i])
else:
if str[i] < '0' or str[i] > '9':
return Solution.decide(str[: i])
if len(str) == 0:
return 0
elif len(str) == 1 and opSign:
return 0
else:
return Solution.decide(str)
@staticmethod
def decide(s):
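        # Clamp the parsed value to the signed 32-bit integer range required by the problem.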
if int(s) > 2147483647:
return 2147483647
elif int(s) < -2147483648:
return -2147483648
return int(s)
|
gpl-2.0
| 1,976,696,723,448,190,000 | 27.948718 | 52 | 0.362267 | false |
pexip/pygobject
|
tests/test_overrides_glib.py
|
1
|
24264
|
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
from __future__ import absolute_import
import gc
import unittest
import gi
from gi.repository import GLib
from gi._compat import long_, integer_types
class TestGVariant(unittest.TestCase):
def test_create_simple(self):
variant = GLib.Variant('i', 42)
self.assertTrue(isinstance(variant, GLib.Variant))
self.assertEqual(variant.get_int32(), 42)
variant = GLib.Variant('s', '')
self.assertTrue(isinstance(variant, GLib.Variant))
self.assertEqual(variant.get_string(), '')
variant = GLib.Variant('s', 'hello')
self.assertTrue(isinstance(variant, GLib.Variant))
self.assertEqual(variant.get_string(), 'hello')
def test_create_variant(self):
variant = GLib.Variant('v', GLib.Variant('i', 42))
self.assertTrue(isinstance(variant, GLib.Variant))
self.assertTrue(isinstance(variant.get_variant(), GLib.Variant))
self.assertEqual(variant.get_type_string(), 'v')
self.assertEqual(variant.get_variant().get_type_string(), 'i')
self.assertEqual(variant.get_variant().get_int32(), 42)
variant = GLib.Variant('v', GLib.Variant('v', GLib.Variant('i', 42)))
self.assertEqual(variant.get_type_string(), 'v')
self.assertEqual(variant.get_variant().get_type_string(), 'v')
self.assertEqual(variant.get_variant().get_variant().get_type_string(), 'i')
self.assertEqual(variant.get_variant().get_variant().get_int32(), 42)
def test_create_tuple(self):
variant = GLib.Variant('()', ())
self.assertEqual(variant.get_type_string(), '()')
self.assertEqual(variant.n_children(), 0)
variant = GLib.Variant('(i)', (3,))
self.assertEqual(variant.get_type_string(), '(i)')
self.assertTrue(isinstance(variant, GLib.Variant))
self.assertEqual(variant.n_children(), 1)
self.assertTrue(isinstance(variant.get_child_value(0), GLib.Variant))
self.assertEqual(variant.get_child_value(0).get_int32(), 3)
variant = GLib.Variant('(ss)', ('mec', 'mac'))
self.assertEqual(variant.get_type_string(), '(ss)')
self.assertTrue(isinstance(variant, GLib.Variant))
self.assertTrue(isinstance(variant.get_child_value(0), GLib.Variant))
self.assertTrue(isinstance(variant.get_child_value(1), GLib.Variant))
self.assertEqual(variant.get_child_value(0).get_string(), 'mec')
self.assertEqual(variant.get_child_value(1).get_string(), 'mac')
# nested tuples
variant = GLib.Variant('((si)(ub))', (('hello', -1), (42, True)))
self.assertEqual(variant.get_type_string(), '((si)(ub))')
self.assertEqual(variant.unpack(), (('hello', -1), (long_(42), True)))
def test_new_tuple_sink(self):
# https://bugzilla.gnome.org/show_bug.cgi?id=735166
variant = GLib.Variant.new_tuple(GLib.Variant.new_tuple())
del variant
gc.collect()
def test_create_dictionary(self):
variant = GLib.Variant('a{si}', {})
self.assertTrue(isinstance(variant, GLib.Variant))
self.assertEqual(variant.get_type_string(), 'a{si}')
self.assertEqual(variant.n_children(), 0)
variant = GLib.Variant('a{si}', {'': 1, 'key1': 2, 'key2': 3})
self.assertEqual(variant.get_type_string(), 'a{si}')
self.assertTrue(isinstance(variant, GLib.Variant))
self.assertTrue(isinstance(variant.get_child_value(0), GLib.Variant))
self.assertTrue(isinstance(variant.get_child_value(1), GLib.Variant))
self.assertTrue(isinstance(variant.get_child_value(2), GLib.Variant))
self.assertEqual(variant.unpack(), {'': 1, 'key1': 2, 'key2': 3})
# nested dictionaries
variant = GLib.Variant('a{sa{si}}', {})
self.assertTrue(isinstance(variant, GLib.Variant))
self.assertEqual(variant.get_type_string(), 'a{sa{si}}')
self.assertEqual(variant.n_children(), 0)
d = {'': {'': 1, 'keyn1': 2},
'key1': {'key11': 11, 'key12': 12}}
variant = GLib.Variant('a{sa{si}}', d)
self.assertEqual(variant.get_type_string(), 'a{sa{si}}')
self.assertTrue(isinstance(variant, GLib.Variant))
self.assertEqual(variant.unpack(), d)
def test_create_array(self):
variant = GLib.Variant('ai', [])
self.assertEqual(variant.get_type_string(), 'ai')
self.assertEqual(variant.n_children(), 0)
variant = GLib.Variant('ai', [1, 2])
self.assertEqual(variant.get_type_string(), 'ai')
self.assertTrue(isinstance(variant, GLib.Variant))
self.assertTrue(isinstance(variant.get_child_value(0), GLib.Variant))
self.assertTrue(isinstance(variant.get_child_value(1), GLib.Variant))
self.assertEqual(variant.get_child_value(0).get_int32(), 1)
self.assertEqual(variant.get_child_value(1).get_int32(), 2)
variant = GLib.Variant('as', [])
self.assertEqual(variant.get_type_string(), 'as')
self.assertEqual(variant.n_children(), 0)
variant = GLib.Variant('as', [''])
self.assertEqual(variant.get_type_string(), 'as')
self.assertTrue(isinstance(variant, GLib.Variant))
self.assertTrue(isinstance(variant.get_child_value(0), GLib.Variant))
self.assertEqual(variant.get_child_value(0).get_string(), '')
variant = GLib.Variant('as', ['hello', 'world'])
self.assertEqual(variant.get_type_string(), 'as')
self.assertTrue(isinstance(variant, GLib.Variant))
self.assertTrue(isinstance(variant.get_child_value(0), GLib.Variant))
self.assertTrue(isinstance(variant.get_child_value(1), GLib.Variant))
self.assertEqual(variant.get_child_value(0).get_string(), 'hello')
self.assertEqual(variant.get_child_value(1).get_string(), 'world')
# nested arrays
variant = GLib.Variant('aai', [])
self.assertEqual(variant.get_type_string(), 'aai')
self.assertEqual(variant.n_children(), 0)
variant = GLib.Variant('aai', [[]])
self.assertEqual(variant.get_type_string(), 'aai')
self.assertEqual(variant.n_children(), 1)
self.assertEqual(variant.get_child_value(0).n_children(), 0)
variant = GLib.Variant('aai', [[1, 2], [3, 4, 5]])
self.assertEqual(variant.get_type_string(), 'aai')
self.assertEqual(variant.unpack(), [[1, 2], [3, 4, 5]])
def test_create_array_guchar(self):
variant = GLib.Variant('ay', [97, 97, 97])
assert variant.unpack() == [97, 97, 97]
variant = GLib.Variant('ay', b'aaa')
assert variant.unpack() == [97, 97, 97]
variant = GLib.Variant('ay', iter([1, 2, 3]))
assert variant.unpack() == [1, 2, 3]
with self.assertRaises(TypeError):
GLib.Variant('ay', u'aaa')
with self.assertRaises(TypeError):
GLib.Variant('ay', object())
def test_create_maybe(self):
variant = GLib.Variant('mai', None)
self.assertEqual(variant.get_type_string(), 'mai')
self.assertEqual(variant.n_children(), 0)
self.assertEqual(variant.unpack(), None)
variant = GLib.Variant('mai', [])
self.assertEqual(variant.get_type_string(), 'mai')
self.assertEqual(variant.n_children(), 1)
variant = GLib.Variant('mami', [None])
self.assertEqual(variant.get_type_string(), 'mami')
self.assertEqual(variant.n_children(), 1)
variant = GLib.Variant('mami', [None, 13, None])
self.assertEqual(variant.get_type_string(), 'mami')
self.assertEqual(variant.n_children(), 1)
array = variant.get_child_value(0)
self.assertEqual(array.n_children(), 3)
element = array.get_child_value(0)
self.assertEqual(element.n_children(), 0)
element = array.get_child_value(1)
self.assertEqual(element.n_children(), 1)
self.assertEqual(element.get_child_value(0).get_int32(), 13)
element = array.get_child_value(2)
self.assertEqual(element.n_children(), 0)
def test_create_complex(self):
variant = GLib.Variant('(as)', ([],))
self.assertEqual(variant.get_type_string(), '(as)')
self.assertEqual(variant.n_children(), 1)
self.assertEqual(variant.get_child_value(0).n_children(), 0)
variant = GLib.Variant('(as)', ([''],))
self.assertEqual(variant.get_type_string(), '(as)')
self.assertEqual(variant.n_children(), 1)
self.assertEqual(variant.get_child_value(0).n_children(), 1)
self.assertEqual(variant.get_child_value(0).get_child_value(0).get_string(), '')
variant = GLib.Variant('(as)', (['hello'],))
self.assertEqual(variant.get_type_string(), '(as)')
self.assertEqual(variant.n_children(), 1)
self.assertEqual(variant.get_child_value(0).n_children(), 1)
self.assertEqual(variant.get_child_value(0).get_child_value(0).get_string(), 'hello')
variant = GLib.Variant('a(ii)', [])
self.assertEqual(variant.get_type_string(), 'a(ii)')
self.assertEqual(variant.n_children(), 0)
variant = GLib.Variant('a(ii)', [(5, 6)])
self.assertEqual(variant.get_type_string(), 'a(ii)')
self.assertEqual(variant.n_children(), 1)
self.assertEqual(variant.get_child_value(0).n_children(), 2)
self.assertEqual(variant.get_child_value(0).get_child_value(0).get_int32(), 5)
self.assertEqual(variant.get_child_value(0).get_child_value(1).get_int32(), 6)
variant = GLib.Variant('(a(ii))', ([],))
self.assertEqual(variant.get_type_string(), '(a(ii))')
self.assertEqual(variant.n_children(), 1)
self.assertEqual(variant.get_child_value(0).n_children(), 0)
variant = GLib.Variant('(a(ii))', ([(5, 6)],))
self.assertEqual(variant.get_type_string(), '(a(ii))')
self.assertEqual(variant.n_children(), 1)
self.assertEqual(variant.get_child_value(0).n_children(), 1)
self.assertEqual(variant.get_child_value(0).get_child_value(0).n_children(), 2)
self.assertEqual(variant.get_child_value(0).get_child_value(0).get_child_value(0).get_int32(), 5)
self.assertEqual(variant.get_child_value(0).get_child_value(0).get_child_value(1).get_int32(), 6)
obj = {'a1': (1, True), 'a2': (2, False)}
variant = GLib.Variant('a{s(ib)}', obj)
self.assertEqual(variant.get_type_string(), 'a{s(ib)}')
self.assertEqual(variant.unpack(), obj)
obj = {'a1': (1, GLib.Variant('b', True)), 'a2': (2, GLib.Variant('y', 255))}
variant = GLib.Variant('a{s(iv)}', obj)
self.assertEqual(variant.get_type_string(), 'a{s(iv)}')
self.assertEqual(variant.unpack(), {'a1': (1, True), 'a2': (2, 255)})
obj = (1, {'a': {'a1': True, 'a2': False},
'b': {'b1': False},
'c': {}
},
'foo')
variant = GLib.Variant('(ia{sa{sb}}s)', obj)
self.assertEqual(variant.get_type_string(), '(ia{sa{sb}}s)')
self.assertEqual(variant.unpack(), obj)
obj = {"frequency": GLib.Variant('t', 738000000),
"hierarchy": GLib.Variant('i', 0),
"bandwidth": GLib.Variant('x', 8),
"code-rate-hp": GLib.Variant('d', 2.0 / 3.0),
"constellation": GLib.Variant('s', "QAM16"),
"guard-interval": GLib.Variant('u', 4)}
variant = GLib.Variant('a{sv}', obj)
self.assertEqual(variant.get_type_string(), 'a{sv}')
self.assertEqual(variant.unpack(),
{"frequency": 738000000,
"hierarchy": 0,
"bandwidth": 8,
"code-rate-hp": 2.0 / 3.0,
"constellation": "QAM16",
"guard-interval": 4
})
def test_create_errors(self):
# excess arguments
self.assertRaises(TypeError, GLib.Variant, 'i', 42, 3)
self.assertRaises(TypeError, GLib.Variant, '(i)', (42, 3))
# not enough arguments
self.assertRaises(TypeError, GLib.Variant, '(ii)', (42,))
# data type mismatch
self.assertRaises(TypeError, GLib.Variant, 'i', 'hello')
self.assertRaises(TypeError, GLib.Variant, 's', 42)
self.assertRaises(TypeError, GLib.Variant, '(ss)', 'mec', 'mac')
self.assertRaises(TypeError, GLib.Variant, '(s)', 'hello')
# invalid format string
self.assertRaises(TypeError, GLib.Variant, 'Q', 1)
# invalid types
self.assertRaises(TypeError, GLib.Variant, '(ii', (42, 3))
self.assertRaises(TypeError, GLib.Variant, '(ii))', (42, 3))
self.assertRaises(TypeError, GLib.Variant, 'a{si', {})
self.assertRaises(TypeError, GLib.Variant, 'a{si}}', {})
self.assertRaises(TypeError, GLib.Variant, 'a{iii}', {})
def test_unpack(self):
# simple values
res = GLib.Variant.new_int32(-42).unpack()
self.assertEqual(res, -42)
res = GLib.Variant.new_uint64(34359738368).unpack()
self.assertEqual(res, 34359738368)
res = GLib.Variant.new_boolean(True).unpack()
self.assertEqual(res, True)
res = GLib.Variant.new_object_path('/foo/Bar').unpack()
self.assertEqual(res, '/foo/Bar')
# variant
res = GLib.Variant('v', GLib.Variant.new_int32(-42)).unpack()
self.assertEqual(res, -42)
GLib.Variant('v', GLib.Variant('v', GLib.Variant('i', 42)))
self.assertEqual(res, -42)
# tuple
res = GLib.Variant.new_tuple(GLib.Variant.new_int32(-1),
GLib.Variant.new_string('hello')).unpack()
self.assertEqual(res, (-1, 'hello'))
# array
vb = GLib.VariantBuilder.new(gi._gi.variant_type_from_string('ai'))
vb.add_value(GLib.Variant.new_int32(-1))
vb.add_value(GLib.Variant.new_int32(3))
res = vb.end().unpack()
self.assertEqual(res, [-1, 3])
# dictionary
res = GLib.Variant('a{si}', {'key1': 1, 'key2': 2}).unpack()
self.assertEqual(res, {'key1': 1, 'key2': 2})
# maybe
v = GLib.Variant('mi', 1)
self.assertEqual(v.unpack(), 1)
v = GLib.Variant('mi', None)
self.assertEqual(v.unpack(), None)
v = GLib.Variant('mai', [])
self.assertEqual(v.unpack(), [])
v = GLib.Variant('m()', ())
self.assertEqual(v.unpack(), ())
v = GLib.Variant('mami', [None, 1, None])
self.assertEqual(v.unpack(), [None, 1, None])
def test_iteration(self):
# array index access
vb = GLib.VariantBuilder.new(gi._gi.variant_type_from_string('ai'))
vb.add_value(GLib.Variant.new_int32(-1))
vb.add_value(GLib.Variant.new_int32(3))
v = vb.end()
self.assertEqual(len(v), 2)
self.assertEqual(v[0], -1)
self.assertEqual(v[1], 3)
self.assertEqual(v[-1], 3)
self.assertEqual(v[-2], -1)
self.assertRaises(IndexError, v.__getitem__, 2)
self.assertRaises(IndexError, v.__getitem__, -3)
self.assertRaises(ValueError, v.__getitem__, 'a')
# array iteration
self.assertEqual([x for x in v], [-1, 3])
self.assertEqual(list(v), [-1, 3])
# tuple index access
v = GLib.Variant.new_tuple(GLib.Variant.new_int32(-1),
GLib.Variant.new_string('hello'))
self.assertEqual(len(v), 2)
self.assertEqual(v[0], -1)
self.assertEqual(v[1], 'hello')
self.assertEqual(v[-1], 'hello')
self.assertEqual(v[-2], -1)
self.assertRaises(IndexError, v.__getitem__, 2)
self.assertRaises(IndexError, v.__getitem__, -3)
self.assertRaises(ValueError, v.__getitem__, 'a')
# tuple iteration
self.assertEqual([x for x in v], [-1, 'hello'])
self.assertEqual(tuple(v), (-1, 'hello'))
# dictionary index access
vsi = GLib.Variant('a{si}', {'key1': 1, 'key2': 2})
vis = GLib.Variant('a{is}', {1: 'val1', 5: 'val2'})
self.assertEqual(len(vsi), 2)
self.assertEqual(vsi['key1'], 1)
self.assertEqual(vsi['key2'], 2)
self.assertRaises(KeyError, vsi.__getitem__, 'unknown')
self.assertEqual(len(vis), 2)
self.assertEqual(vis[1], 'val1')
self.assertEqual(vis[5], 'val2')
self.assertRaises(KeyError, vsi.__getitem__, 3)
# dictionary iteration
self.assertEqual(set(vsi.keys()), set(['key1', 'key2']))
self.assertEqual(set(vis.keys()), set([1, 5]))
# string index access
v = GLib.Variant('s', 'hello')
self.assertEqual(len(v), 5)
self.assertEqual(v[0], 'h')
self.assertEqual(v[4], 'o')
self.assertEqual(v[-1], 'o')
self.assertEqual(v[-5], 'h')
self.assertRaises(IndexError, v.__getitem__, 5)
self.assertRaises(IndexError, v.__getitem__, -6)
# string iteration
self.assertEqual([x for x in v], ['h', 'e', 'l', 'l', 'o'])
def test_split_signature(self):
self.assertEqual(GLib.Variant.split_signature('()'), [])
self.assertEqual(GLib.Variant.split_signature('s'), ['s'])
self.assertEqual(GLib.Variant.split_signature('as'), ['as'])
self.assertEqual(GLib.Variant.split_signature('(s)'), ['s'])
self.assertEqual(GLib.Variant.split_signature('(iso)'), ['i', 's', 'o'])
self.assertEqual(GLib.Variant.split_signature('(s(ss)i(ii))'),
['s', '(ss)', 'i', '(ii)'])
self.assertEqual(GLib.Variant.split_signature('(as)'), ['as'])
self.assertEqual(GLib.Variant.split_signature('(s(ss)iaiaasa(ii))'),
['s', '(ss)', 'i', 'ai', 'aas', 'a(ii)'])
self.assertEqual(GLib.Variant.split_signature('(a{iv}(ii)((ss)a{s(ss)}))'),
['a{iv}', '(ii)', '((ss)a{s(ss)})'])
def test_hash(self):
v1 = GLib.Variant('s', 'somestring')
v2 = GLib.Variant('s', 'somestring')
v3 = GLib.Variant('s', 'somestring2')
self.assertTrue(v2 in set([v1, v3]))
self.assertTrue(v2 in frozenset([v1, v3]))
self.assertTrue(v2 in {v1: '1', v3: '2'})
def test_compare(self):
        # Check if identical GVariant values are equal
def assert_equal(vtype, value):
self.assertEqual(GLib.Variant(vtype, value), GLib.Variant(vtype, value))
def assert_not_equal(vtype1, value1, vtype2, value2):
self.assertNotEqual(GLib.Variant(vtype1, value1), GLib.Variant(vtype2, value2))
numbers = ['y', 'n', 'q', 'i', 'u', 'x', 't', 'h', 'd']
for num in numbers:
assert_equal(num, 42)
assert_not_equal(num, 42, num, 41)
assert_not_equal(num, 42, 's', '42')
assert_equal('s', 'something')
assert_not_equal('s', 'something', 's', 'somethingelse')
assert_not_equal('s', 'something', 'i', 1234)
assert_equal('g', 'dustybinqhogx')
assert_not_equal('g', 'dustybinqhogx', 'g', 'dustybin')
assert_not_equal('g', 'dustybinqhogx', 'i', 1234)
assert_equal('o', '/dev/null')
assert_not_equal('o', '/dev/null', 'o', '/dev/zero')
assert_not_equal('o', '/dev/null', 'i', 1234)
assert_equal('(s)', ('strtuple',))
assert_not_equal('(s)', ('strtuple',), '(s)', ('strtuple2',))
assert_equal('a{si}', {'str': 42})
assert_not_equal('a{si}', {'str': 42}, 'a{si}', {'str': 43})
assert_equal('v', GLib.Variant('i', 42))
assert_not_equal('v', GLib.Variant('i', 42), 'v', GLib.Variant('i', 43))
def test_bool(self):
# Check if the GVariant bool matches the unpacked Pythonic bool
def assert_equals_bool(vtype, value):
self.assertEqual(bool(GLib.Variant(vtype, value)), bool(value))
# simple values
assert_equals_bool('b', True)
assert_equals_bool('b', False)
numbers = ['y', 'n', 'q', 'i', 'u', 'x', 't', 'h', 'd']
for number in numbers:
assert_equals_bool(number, 0)
assert_equals_bool(number, 1)
assert_equals_bool('s', '')
assert_equals_bool('g', '')
assert_equals_bool('s', 'something')
assert_equals_bool('o', '/dev/null')
assert_equals_bool('g', 'dustybinqhogx')
# arrays
assert_equals_bool('ab', [True])
assert_equals_bool('ab', [False])
for number in numbers:
assert_equals_bool('a' + number, [])
assert_equals_bool('a' + number, [0])
assert_equals_bool('as', [])
assert_equals_bool('as', [''])
assert_equals_bool('ao', [])
assert_equals_bool('ao', ['/'])
assert_equals_bool('ag', [])
assert_equals_bool('ag', [''])
assert_equals_bool('aai', [[]])
# tuples
assert_equals_bool('()', ())
for number in numbers:
assert_equals_bool('(' + number + ')', (0,))
assert_equals_bool('(s)', ('',))
assert_equals_bool('(o)', ('/',))
assert_equals_bool('(g)', ('',))
assert_equals_bool('(())', ((),))
# dictionaries
assert_equals_bool('a{si}', {})
assert_equals_bool('a{si}', {'': 0})
# complex types, always True
assert_equals_bool('(as)', ([],))
assert_equals_bool('a{s(i)}', {'': (0,)})
# variant types, recursive unpacking
assert_equals_bool('v', GLib.Variant('i', 0))
assert_equals_bool('v', GLib.Variant('i', 1))
def test_repr(self):
# with C constructor
v = GLib.Variant.new_uint32(42)
self.assertEqual(repr(v), "GLib.Variant('u', 42)")
# with override constructor
v = GLib.Variant('(is)', (1, 'somestring'))
self.assertEqual(repr(v), "GLib.Variant('(is)', (1, 'somestring'))")
def test_str(self):
# with C constructor
v = GLib.Variant.new_uint32(42)
self.assertEqual(str(v), 'uint32 42')
# with override constructor
v = GLib.Variant('(is)', (1, 'somestring'))
self.assertEqual(str(v), "(1, 'somestring')")
def test_parse_error(self):
# This test doubles as a test for GLib.Error marshaling.
source_str = 'abc'
with self.assertRaises(GLib.Error) as context:
GLib.Variant.parse(None, source_str, None, None)
e = context.exception
text = GLib.Variant.parse_error_print_context(e, source_str)
self.assertTrue(source_str in text)
def test_parse_error_exceptions(self):
source_str = 'abc'
self.assertRaisesRegexp(TypeError, 'Must be GLib.Error, not int',
GLib.Variant.parse_error_print_context,
42, source_str)
gerror = GLib.Error(message=42) # not a string
self.assertRaisesRegexp(TypeError, ".*Must be string, not int.*",
GLib.Variant.parse_error_print_context,
gerror, source_str)
gerror = GLib.Error(domain=42) # not a string
self.assertRaisesRegexp(TypeError, ".*Must be string, not int.*",
GLib.Variant.parse_error_print_context,
gerror, source_str)
gerror = GLib.Error(code='not an int')
self.assertRaisesRegexp(TypeError, ".*Must be number, not str.*",
GLib.Variant.parse_error_print_context,
gerror, source_str)
gerror = GLib.Error(code=GLib.MAXUINT)
self.assertRaisesRegexp(OverflowError,
".*not in range.*",
GLib.Variant.parse_error_print_context,
gerror, source_str)
class TestConstants(unittest.TestCase):
def test_basic_types_limits(self):
self.assertTrue(isinstance(GLib.MINFLOAT, float))
self.assertTrue(isinstance(GLib.MAXLONG, integer_types))
|
lgpl-2.1
| 7,875,772,412,101,172,000 | 40.195246 | 105 | 0.571299 | false |
Gekk0r/Orion-scan
|
actionsCamera.py
|
1
|
2984
|
import subprocess
from types import NoneType
import os.path
#import detectionCameras
def shoot(port):
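    # Trigger a capture on the camera at the given port via the gphoto2 CLI; the image stays on the camera.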
p = subprocess.Popen(["gphoto2", "--capture-image", "--port", port], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = p.communicate()
if "Error" in out[0]:
print("No camera found")
#print(out)
def downloadLastImage(port):
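    # List the files on the camera and download the most recent one unless a file with that name already exists locally.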
p = subprocess.Popen(["gphoto2", "--list-files", "--port", port], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = p.communicate()
if "Error" in out[0]:
print("No camera found")
return
else:
photoNumber = ""
for element in out:
if type(element) != NoneType:
features = element.split("\n")
photoName = features[-2].split(" ")[1]
photoNumber = features[-2].split(" ")[0].split("#")[1]
print photoNumber, photoName
if os.path.isfile(photoName):
print "already downloaded"
else:
p = subprocess.Popen(["gphoto2", "--get-file", photoNumber, "--port", port], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = p.communicate()
print out
def shootAndDownload(port):
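    # Capture an image and download it directly into the photos/ working directory.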
p = subprocess.Popen(["gphoto2", "--capture-image-and-download", "--port", port], stdout=subprocess.PIPE, stderr=subprocess.STDOUT,cwd="photos")
out = p.communicate()
print out
def erase_camera_files(port):
p = subprocess.Popen(["gphoto2", "--delete-all-files", "--recurse", "--port", port], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = p.communicate()
#print out
def save_camera_files(port, name, folder, degree_rotation=1, patterns=1, file_number=1, project_pattern=False):
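    # Download numbered files from the camera one by one, naming them by rotation angle and background-pattern index, until gphoto2 reports an error or failure.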
out = ""
index = 0
step = 0
while not ("Error".upper() in str(out).upper()) and not ("Failed".upper() in str(out).upper()):
path = name + "/" + folder
file = name+"_"+str(step*degree_rotation)+"_bg"+str(index+1) + ".jpg"
p = subprocess.Popen(["gphoto2", "--get-file", str(file_number), "--filename", path + file, "--port", port], stdout=subprocess.PIPE,stderr=subprocess.STDOUT, cwd="photos")
out = p.communicate()
file_number += 1
index += 1
if index % patterns == 0 or not project_pattern:
step += 1
index = 0
def download_file(port, file_number, name = ""):
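    # Fetch a single file by number; save it under the given name if provided, otherwise keep the camera's filename and force-overwrite any existing copy.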
print "gphoto port: " + port
if name != "":
file = name + ".jpg"
p = subprocess.Popen(["gphoto2", "--get-file", str(file_number), "--filename", file, "--port", port], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd="photos")
else:
p = subprocess.Popen(["gphoto2", "--get-file", str(file_number), "--port", port, "--force-overwrite"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd="photos")
out = p.communicate()
return out
# DEBUG
'''
port = []
device = detectionCameras.get_string_port_camera(port)
print("Banana")
a = download_file(device, 1)
print(a)
'''
|
gpl-3.0
| 65,000,279,693,979,550 | 34.535714 | 179 | 0.610255 | false |
zifeishan/braindump
|
util/config-generator/generate.py
|
1
|
11597
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
# Requires the click library. Install: `pip install click`
import click
@click.command()
@click.option('--output_file', '-o',
prompt='Specify the output path of the generated config file',
default='braindump.conf',
help='output path of generated config file')
@click.option('--app_home',
prompt='Specify APP_HOME, the base directory of the DeepDive application',
default='$WORKING_DIR',
help='the base directory of the DeepDive application')
@click.option('--dd_output_dir',
prompt='Specify DD_OUTPUT_DIR, the output folder of DeepDive',
default='"$WORKING_DIR/../../out"',
help='the output folder of DeepDive')
@click.option('--dbname',
prompt='Specify DBNAME, the name of your working database',
default='$DBNAME',
help='the name of your working database')
@click.option('--pguser',
prompt='Specify PGUSER, the user name to connect to database',
default='${PGUSER:-`whoami`}',
help='the user name to connect to database')
@click.option('--pgpassword',
prompt='Specify PGPASSWORD, the password of your database',
default='${PGPASSWORD:-}',
help='the password of your database')
@click.option('--pgport',
prompt='Specify PGPORT, the port to connect to the database',
default='${PGPORT:-5432}',
help='the port to connect to the database')
@click.option('--pghost',
prompt='Specify PGHOST, the host to connect to the database',
default='${PGHOST:-localhost}',
help='the host to connect to the database')
@click.option('--feature_tables',
prompt='Specify FEATURE_TABLES, all tables that contain features. Separated by space. e.g. "f1 f2 f3"',
default='',
help='all tables that contain features. Separated by space. e.g. "f1 f2 f3"')
@click.option('--feature_columns',
prompt='Specify FEATURE_COLUMNS, columns that contain features in the same order of FEATURE_TABLES. Separated by space. e.g. "c1 c2 c3"',
default='',
help='columns that contain features in the same order of FEATURE_TABLES. Separated by space. e.g. "c1 c2 c3"')
@click.option('--variable_tables',
prompt='Specify VARIABLE_TABLES, all tables that contain DeepDive variables (as defined in table schema). Separated by space. e.g. "v1 v2"',
default='',
help='all tables that contain DeepDive variables (as defined in table schema). Separated by space. e.g. "v1 v2"')
@click.option('--variable_columns',
prompt='Specify VARIABLE_COLUMNS, variable columns in the same order of VARIABLE_TABLES. Separated by space. e.g. "v1 v2"',
default='',
help='variable columns in the same order of VARIABLE_TABLES. Separated by space. e.g. "v1 v2"')
@click.option('--variable_words_columns',
prompt='Specify VARIABLE_WORDS_COLUMNS, if the variable is a mention, specify the words / description for the mention. This is used for a statistics with naive entity linking. If empty (""), do not count deduplicated mentions for that table. Separated by space. e.g. w1 ""',
default='',
help='if the variable is a mention, specify the words / description for the mention. This is used for a statistics with naive entity linking. If empty (""), do not count deduplicated mentions for that table. Separated by space. e.g. w1 ""')
@click.option('--variable_docid_columns',
prompt='Specify VARIABLE_DOCID_COLUMNS, if there is a field in the variable table that indicates doc_id. This is used to count how many documents have extractions. If empty (""), do not count for that table. Separated by space. e.g. "" did2',
default='',
help='specify if there is a field in the variable table that indicates doc_id. This is used to count how many documents have extractions. If empty (""), do not count for that table. Separated by space. e.g. "" did2')
@click.option('--code_config',
prompt='Specify CODE_CONFIG, a config file that specifies what in $APP_HOME to save as codes, one file/folder per line. Default file is: \napplication.conf\nudf',
default='',
help='a config file that specifies what in $APP_HOME to save as codes, one file/folder per line. Default file is: \napplication.conf\nudf\n')
@click.option('--num_sampled_docs',
prompt='Specify NUM_SAMPLED_DOCS',
default='100',
help='')
@click.option('--num_sampled_features',
prompt='Specify NUM_SAMPLED_FEATURES',
default='100',
help='')
@click.option('--num_sampled_supervision',
prompt='Specify NUM_SAMPLED_SUPERVISION',
default='500',
help='')
@click.option('--num_sampled_result',
prompt='Specify NUM_SAMPLED_RESULT',
default='1000',
help='')
@click.option('--num_top_entities',
prompt='Specify NUM_TOP_ENTITIES',
default='50',
help='')
@click.option('--sentence_table',
prompt='Specify SENTENCE_TABLE',
default='sentences',
help='')
@click.option('--sentence_table_doc_id_column',
prompt='Specify SENTENCE_TABLE_DOC_ID_COLUMN',
default='document_id',
help='')
@click.option('--sentence_table_sent_offset_column',
prompt='Specify SENTENCE_TABLE_SENT_OFFSET_COLUMN',
default='sentence_offset',
help='')
@click.option('--sentence_table_words_column',
prompt='Specify SENTENCE_TABLE_WORDS_COLUMN',
default='words',
help='')
@click.option('--send_result_with_git',
prompt='Specify SEND_RESULT_WITH_GIT',
default='false',
help='')
@click.option('--send_result_with_git_push',
prompt='Specify SEND_RESULT_WITH_GIT_PUSH',
default='false',
help='')
@click.option('--send_result_with_email',
prompt='Specify SEND_RESULT_WITH_EMAIL',
default='false',
help='')
@click.option('--stats_script',
prompt='Specify STATS_SCRIPT, a script to override default statistics reporting',
default='',
help='a script to override default statistics reporting')
@click.option('--supervision_sample_script',
prompt='Specify SUPERVISION_SAMPLE_SCRIPT, a script to override default supervision sampling',
default='',
help='a script to override default supervision sampling')
@click.option('--inference_sample_script',
prompt='Specify INFERENCE_SAMPLE_SCRIPT, a script to override default inference result sampling',
default='',
help='a script to override default inference result sampling')
def generate(output_file,
app_home,
dd_output_dir,
dbname,
pguser,
pgpassword,
pgport,
pghost,
feature_tables,
feature_columns,
variable_tables,
variable_columns,
variable_words_columns,
variable_docid_columns,
code_config,
num_sampled_docs,
num_sampled_features,
num_sampled_supervision,
num_sampled_result,
num_top_entities,
sentence_table,
sentence_table_doc_id_column,
sentence_table_sent_offset_column,
sentence_table_words_column,
send_result_with_git,
send_result_with_git_push,
send_result_with_email,
stats_script,
supervision_sample_script,
inference_sample_script):
"""A program that generates braindump.conf"""
click.echo(file=open(output_file, 'w'), message='''
########## Conventions. Do not recommend to change. ###########
# Set the utility files dir
export UTIL_DIR="$HOME/local/braindump"
# Report folder: use current
export REPORT_DIR="$WORKING_DIR/experiment-reports"
########## User-specified configurations ###########
# Directories
# Use absolute path if possible.
# Avoid using "pwd" or "dirname $0", they don't work properly.
# $WORKING_DIR is set to be the directory where braindump is running.
# (the directory that contains braindump.conf)
export APP_HOME=%s
# Specify deepdive out directory (DEEPDIVE_HOME/out)
export DD_OUTPUT_DIR=%s
# Database Configuration
export DBNAME=%s
export PGUSER=%s
export PGPASSWORD=%s
export PGPORT=%s
export PGHOST=%s
# Specify all feature tables.
# e.g. FEATURE_TABLES=(f1 f2 f3)
export FEATURE_TABLES=(%s)
export FEATURE_COLUMNS=(%s)
# Specify all variable tables
export VARIABLE_TABLES=(%s)
export VARIABLE_COLUMNS=(%s)
# Assume that in DeepDive, inference result tables will be named as [VARIABLE_TABLE]_[VARIABLE_COLUMN]_inference
# If the variable is a mention, specify the words / description for the mention.
# This is used for a statistics with naive entity linking. If empty, do not count deduplicated mentions.
# e.g. export VARIABLE_WORDS_COLUMNS=(w1 "" w3)
# In the examples above, the second element is left empty
export VARIABLE_WORDS_COLUMNS=(%s)
# Set variable docid columns to count distinct documents that have extractions
# export VARIABLE_DOCID_COLUMNS=(%s)
# Code configs to save
export CODE_CONFIG=%s
# Number of samples
export NUM_SAMPLED_DOCS=%s
export NUM_SAMPLED_FEATURES=%s
export NUM_SAMPLED_SUPERVISION=%s
export NUM_SAMPLED_RESULT=%s
export NUM_TOP_ENTITIES=%s
# Specify some tables for statistics
export SENTENCE_TABLE=%s
export SENTENCE_TABLE_DOC_ID_COLUMN=%s
export SENTENCE_TABLE_SENT_OFFSET_COLUMN=%s
export SENTENCE_TABLE_WORDS_COLUMN=%s
# Define how to send result. use "true" to activate.
export SEND_RESULT_WITH_GIT=%s
# If true, push after committing the report
export SEND_RESULT_WITH_GIT_PUSH=%s
export SEND_RESULT_WITH_EMAIL=%s
######## CUSTOM SCRIPTS ###########
# Leave blank for default stats report.
# Set to a location of a script (e.g. $APP_HOME/your_script) to use it instead of default
# Self-defined scripts for stats.
export STATS_SCRIPT=%s
export SUPERVISION_SAMPLE_SCRIPT=%s
export INFERENCE_SAMPLE_SCRIPT=%s
########## Conventions. Do not recommend to change. ###########
# Hack: use the last DD run as output dir
# Suppose out/ is under $DEEPDIVE_HOME/
# You may need to manually change it based on need
export DD_TIMESTAMP=`ls -t $DD_OUTPUT_DIR/ | head -n 1`
export DD_THIS_OUTPUT_DIR=$DD_OUTPUT_DIR/$DD_TIMESTAMP
''' % (app_home, dd_output_dir, dbname, pguser, pgpassword, pgport, pghost, feature_tables, feature_columns, variable_tables, variable_columns, variable_words_columns, variable_docid_columns, code_config, num_sampled_docs, num_sampled_features, num_sampled_supervision, num_sampled_result, num_top_entities, sentence_table, sentence_table_doc_id_column, sentence_table_sent_offset_column, sentence_table_words_column, send_result_with_git, send_result_with_git_push, send_result_with_email, stats_script, supervision_sample_script, inference_sample_script))
if __name__ == '__main__':
generate()
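# Hedged usage sketch (not part of the original repo; the file and database
# names below are made up). Any option omitted on the command line is
# prompted for interactively by click:
#
#     python generate.py --output_file braindump.conf --dbname deepdive_db \
#         --app_home /path/to/deepdive/app --pguser deepdive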
|
mit
| -8,824,576,774,812,026,000 | 44.661417 | 557 | 0.633612 | false |
zenoss/pywbem
|
pywbem/_cliutils.py
|
1
|
1928
|
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""
Internal module with utility stuff for command line programs.
"""
from __future__ import print_function, absolute_import
import argparse
class SmartFormatter(argparse.HelpFormatter):
"""Formatter class for `argparse`, that respects newlines in help strings.
Idea and code from: https://stackoverflow.com/a/22157136
Usage:
If an argparse argument help text starts with 'R|', it will be treated
as a *raw* string that does line formatting on its own by specifying
newlines appropriately. The string should not exceed 55 characters per
line. Indentation handling is still applied automatically and does not
need to be specified within the string.
Otherwise, the strings are formatted as normal and newlines are
treated like blanks.
Limitations:
It seems this only works for the `help` argument of
`ArgumentParser.add_argument()`, and not for group descriptions,
and usage, description, and epilog of ArgumentParser.
"""
def _split_lines(self, text, width):
if text.startswith('R|'):
return text[2:].splitlines()
return argparse.HelpFormatter._split_lines(self, text, width)
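# Hedged usage sketch (the parser and option names are hypothetical, not from
# this module): pass SmartFormatter as formatter_class and prefix a help
# string with 'R|' to keep its explicit newlines.
#
#     parser = argparse.ArgumentParser(formatter_class=SmartFormatter)
#     parser.add_argument('--mode',
#                         help='R|fast: skip validation\nsafe: validate first')
#     args = parser.parse_args(['--mode', 'fast'])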
|
lgpl-2.1
| 5,009,814,393,952,857,000 | 37.56 | 78 | 0.721992 | false |
snicoper/snicoper.com
|
tests/unit/authentication/test_backends.py
|
1
|
5343
|
from django.contrib.auth.hashers import make_password
from django.urls import reverse
from authentication import settings as auth_settings
from .base_auth import BaseAuthTest
class EmailOrUsernameModelBackend(BaseAuthTest):
def __init__(self, *args, **kwargs):
self._default_auth_type = auth_settings.AUTH_TYPE
super().__init__(*args, **kwargs)
def setUp(self):
super().setUp()
self.url = reverse('authentication:login')
self.logout()
def tearDown(self):
"""Asegurarse de que AUTH_TYPE queda con los valores por defecto."""
super().tearDown()
setattr(auth_settings, 'AUTH_TYPE', self._default_auth_type)
def test_auth_type_default_username(self):
"""Prueba authenticate con AUTH_TYPE por defecto."""
form_data = {
'username': self.user.username,
'password': '123'
}
response = self.client.post(self.url, data=form_data)
expected_url = reverse('accounts:profile')
self.assertRedirects(
response=response,
expected_url=expected_url,
status_code=302,
target_status_code=200
)
def test_auth_type_default_email(self):
"""No permite login con email, es el default."""
form_data = {
'username': self.user.email,
'password': '123'
}
response = self.client.post(self.url, data=form_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'authentication/login.html')
        # The user is not logged in.
user = response.context['user']
self.assertTrue(user.is_anonymous)
def test_auth_type_both(self):
"""Con AUTH_TYPE='both'."""
setattr(auth_settings, 'AUTH_TYPE', 'both')
# Email.
form_data = {
'username': self.user.email,
'password': '123'
}
response = self.client.post(self.url, data=form_data)
expected_url = reverse('accounts:profile')
self.assertRedirects(
response=response,
expected_url=expected_url,
status_code=302,
target_status_code=200
)
self.client.logout()
# Username.
form_data = {
            'username': self.user.username,
'password': '123'
}
response = self.client.post(self.url, data=form_data)
expected_url = reverse('accounts:profile')
self.assertRedirects(
response=response,
expected_url=expected_url,
status_code=302,
target_status_code=200
)
def test_auth_type_email(self):
"""Con AUTH_TYPE='email', username fallara."""
setattr(auth_settings, 'AUTH_TYPE', 'email')
# Email.
form_data = {
'username': self.user.email,
'password': '123'
}
response = self.client.post(self.url, data=form_data)
expected_url = reverse('accounts:profile')
self.assertRedirects(
response=response,
expected_url=expected_url,
status_code=302,
target_status_code=200
)
self.client.logout()
        # Username will fail.
form_data = {
'username': self.user.username,
'password': '123'
}
response = self.client.post(self.url, data=form_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'authentication/login.html')
        # The user is not logged in.
user = response.context['user']
self.assertTrue(user.is_anonymous)
def test_case_sensitive_en_username_y_password(self):
"""Nombre de usuario, email y password es case sensitive."""
setattr(auth_settings, 'AUTH_TYPE', 'both')
        # Change the user's password.
raw_passord = 'aBcDe123'
self.user.password = make_password(raw_passord)
self.user.save()
        # An uppercase username will not log in.
form_data = {
'username': self.user.username.upper(),
'password': raw_passord
}
response = self.client.post(self.url, data=form_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'authentication/login.html')
user = response.context['user']
self.assertTrue(user.is_anonymous)
        # An uppercase email will not log in.
form_data = {
'username': self.user.email.upper(),
'password': raw_passord
}
response = self.client.post(self.url, data=form_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'authentication/login.html')
user = response.context['user']
self.assertTrue(user.is_anonymous)
        # An uppercase password will not log in.
form_data = {
'username': self.user.email,
'password': raw_passord.upper()
}
response = self.client.post(self.url, data=form_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'authentication/login.html')
user = response.context['user']
self.assertTrue(user.is_anonymous)
|
mit
| -3,895,250,486,134,387,000 | 31.168675 | 76 | 0.586704 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/eggs/GeneTrack-2.0.0_beta_1_dev_48da9e998f0caf01c5be731e926f4b0481f658f0-py2.7.egg/tests/authorize_tests.py
|
1
|
2225
|
import testlib
import os, unittest, random
from django.test import utils
from django.db import connection
from django.conf import settings
from genetrack import conf, util, logger
from genetrack.server.scripts import initializer
from genetrack.server.web import html
from django.contrib.auth.models import User
from django.core.files import File
class AuthorizeTest( unittest.TestCase ):
"""
Tests the models
"""
def setUp(self):
"Setting up the tests database for data insert"
self.old_name = settings.DATABASE_NAME
utils.setup_test_environment()
connection.creation.create_test_db(verbosity=0, autoclobber=True)
options = util.Params(test_mode=True, delete_everything=False, flush=False, verbosity=0)
fname = conf.testdata('test-users.csv')
initializer.load_users(fname, options)
def tearDown(self):
"Tearing down the database after test"
connection.creation.destroy_test_db(self.old_name, 0)
utils.teardown_test_environment()
def test_data_creation(self):
"""
Create datasets
"""
# it seems that importing it earlier messes up the test database setup
from genetrack.server.web import authorize
john = User.objects.get(username='johndoe')
project = authorize.create_project(user=john, name="Test project")
stream = File( open(conf.testdata('test-users.csv')) )
data = authorize.create_data(user=john, pid=project.id, stream=stream, name="Test data")
# project counts update
project = authorize.get_project(user=john, pid=project.id)
self.assertEqual(project.data_count, 1)
# testing data deletion
authorize.delete_data(user=john, pid=project.id, dids=[data.id])
project = authorize.get_project(user=john, pid=project.id)
self.assertEqual(project.data_count, 0)
def test_two(self):
pass
def get_suite():
"Returns the testsuite"
tests = [
AuthorizeTest,
]
return testlib.make_suite( tests )
if __name__ == '__main__':
suite = get_suite()
logger.disable('DEBUG')
unittest.TextTestRunner(verbosity=2).run( suite )
|
gpl-3.0
| -4,114,702,407,533,310,000 | 32.223881 | 96 | 0.667416 | false |
devilry/devilry-django
|
devilry/devilry_gradingsystem/tests/views/admin/base.py
|
1
|
2490
|
from devilry.devilry_gradingsystem.pluginregistry import GradingSystemPluginInterface
from devilry.project.develop.testhelpers.corebuilder import UserBuilder
class AdminViewTestMixin(object):
"""
Mixin class for the grading system admin views. They all take
``assignmentid`` as kwarg, and they all require the same permissions,
so we can allow them to share some code.
"""
def login(self, user):
self.client.login(username=user.shortname, password='test')
def get_as(self, user, *args, **kwargs):
"""
Login as the given user and run self.client.get(). Assumes
that self.url is defined.
"""
self.login(user)
return self.client.get(self.url, *args, **kwargs)
def post_as(self, user, *args, **kwargs):
"""
Login as the given user and run self.client.post(). Assumes
that self.url is defined.
"""
self.login(user)
return self.client.post(self.url, *args, **kwargs)
def test_get_not_admin_404(self):
nobody = UserBuilder('nobody').user
response = self.get_as(nobody)
self.assertEqual(response.status_code, 404)
class MockPointsPluginApi(GradingSystemPluginInterface):
id = 'mock_gradingsystemplugin_points'
title = 'Mock points'
description = 'Mock points description.'
def get_edit_feedback_url(self, deliveryid):
return '/mock/points/edit_feedback/{}'.format(deliveryid)
class MockApprovedPluginApi(GradingSystemPluginInterface):
id = 'mock_gradingsystemplugin_approved'
title = 'Mock approved'
description = 'Mock approved description.'
sets_passing_grade_min_points_automatically = True
sets_max_points_automatically = True
def get_edit_feedback_url(self, deliveryid):
return '/mock/approved/edit_feedback/{}'.format(deliveryid)
def get_passing_grade_min_points(self):
return 1
def get_max_points(self):
return 1
class MockRequiresConfigurationPluginApi(GradingSystemPluginInterface):
id = 'mock_gradingsystemplugin_requiresconfiguration'
title = 'Mock requiresconfiguration'
description = 'Mock requiresconfiguration description.'
requires_configuration = True
def get_edit_feedback_url(self, deliveryid):
return '/mock/requiresconfiguration/edit_feedback/{}'.format(deliveryid)
def get_configuration_url(self):
return '/mock/requiresconfiguration/configure/{}'.format(self.assignment.id)
|
bsd-3-clause
| -8,947,477,255,239,868,000 | 33.583333 | 85 | 0.693173 | false |
WestpointLtd/pytls
|
ssl2.py
|
1
|
1674
|
#!/usr/bin/python
import sys
import socket
import logging
from optparse import OptionParser
from tls import *
def make_hello():
hello = SSL2ClientHelloMessage.create(cipher_specs=ssl2_ciphers, challenge='0123456789abcdef')
record = SSL2Record.create(hello.bytes)
return record.bytes
def test_ssl2(hostname, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logging.debug('Connecting...')
s.settimeout(5)
s.connect((hostname, port))
starttls(s, port, 'auto')
f = s.makefile('rw', 0)
f.write(make_hello())
record = read_ssl2_record(f)
message = SSL2HandshakeMessage.from_bytes(record.message())
    print 'Message Type:\t', message.message_types.get(message.message_type(), 'Unknown'), message.message_type()
print 'Version:\t', hex(message.server_version())
print 'Ciphers:'
for cipher in message.cipher_specs():
print '\t\t',ssl2.cipher_suites.get(cipher, 'Unknown'), hex(cipher)
def main():
options = OptionParser(usage='%prog server [options]',
description='Test for Python SSL')
options.add_option('-p', '--port',
type='int', default=443,
help='TCP port to test (default: 443)')
options.add_option('-d', '--debug', action='store_true', dest='debug',
default=False,
help='Print debugging messages')
opts, args = options.parse_args()
if len(args) < 1:
options.print_help()
return
if opts.debug:
logging.basicConfig(level=logging.DEBUG)
test_ssl2(args[0], opts.port)
if __name__ == '__main__':
main()
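# Hedged usage sketch (the hostname is hypothetical): connects to the given
# server, sends an SSLv2 ClientHello and prints the hello message type,
# version and offered ciphers.
#
#     python ssl2.py example.com --port 443 --debug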
|
mit
| 9,129,691,756,246,998,000 | 26.9 | 112 | 0.615293 | false |
trek10inc/cloudformation-toolbox
|
deploy/ecs-logs.py
|
1
|
4767
|
#!/usr/local/bin/python
import argparse
import json
import boto3
import pprint
import time
import random
import os
import stat
import time
from boto3.session import Session
pp = pprint.PrettyPrinter(indent=2)
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
#####
# Parsing of Arguments
#####
parser = argparse.ArgumentParser(description='Follow the logs of a container in your service stack')
parser.add_argument('--key', required=False, help='AWS Access Key Id')
parser.add_argument('--sshkey', required=True, help='SSH Key content, not just the file name! Use `cat key.pem` to read in a file to the command line')
parser.add_argument('--secret', required=False, help='AWS Secret Access Key')
parser.add_argument('--stack', required=True, help='The Stack name (ex: Production)')
parser.add_argument('--region', required=False, help='The region of the stack (ex: us-east-1)', default='eu-west-1')
parser.add_argument('--private', required=False, default=False, help='Connect via public or private IP')
args = parser.parse_args()
print("Stack is " + args.stack + ".", flush=True)
# Writes out so it is usable in SSH
keyfile = open('/root/.ssh/id_rsa', 'w')
keyfile.write(args.sshkey)
keyfile.close()
os.chmod('/root/.ssh/id_rsa', 0o600)
if args.key:
print(bcolors.OKGREEN + 'Using provided aws access keys' + bcolors.ENDC)
session = Session(aws_access_key_id=args.key,
aws_secret_access_key=args.secret,
region_name=args.region)
else:
print(bcolors.OKGREEN + 'Letting aws sdk find access keys' + bcolors.ENDC)
session = Session(region_name=args.region)
cfnClient = session.client('cloudformation')
ecsClient = session.client('ecs')
ec2Client = session.client('ec2')
# Get parameters from old template and marshall them for update
try:
response = cfnClient.describe_stacks(
StackName=args.stack
)
resources = cfnClient.describe_stack_resources(
StackName=args.stack
)
except Exception as e:
print(e)
print(bcolors.FAIL + "That is not a valid stack name, or you do not have permission to access this stack!" + bcolors.ENDC)
exit()
print("Marshalling outputs and paramenters")
# Get output references for finding related resources
for output in response['Stacks'][0]['Outputs']:
if output['OutputKey'] == 'ClusterName':
cluster_name = output['OutputValue']
# Dumb thing we have to do to get the running image / tag
for parameter in response['Stacks'][0]['Parameters']:
if parameter['ParameterKey'] == 'ContainerDockerOrganization':
docker_organization = parameter['ParameterValue']
if parameter['ParameterKey'] == 'ContainerDockerImage':
docker_image = parameter['ParameterValue']
if parameter['ParameterKey'] == 'ContainerDockerTag':
docker_tag = parameter['ParameterValue']
for resource in resources['StackResources']:
if resource['ResourceType'] == 'AWS::ECS::Service':
ecs_service = resource['PhysicalResourceId']
full_image_reference = docker_organization + "/" + docker_image + ":" + docker_tag
availability = {
'up': 0,
'down': 0,
}
taskList = ecsClient.list_tasks(
cluster=cluster_name,
serviceName=ecs_service,
desiredStatus='RUNNING'
)
limit = 7
while len(taskList['taskArns']) == 0 and limit > 0:
print('task not running, trying again in five seconds')
time.sleep(5)
limit -= 1
taskList = ecsClient.list_tasks(
cluster=cluster_name,
serviceName=ecs_service,
desiredStatus='RUNNING'
)
if len(taskList['taskArns']) == 0:
print (bcolors.FAIL, 'Unable to find task, try again')
exit(1)
tasks = ecsClient.describe_tasks(
cluster=cluster_name,
tasks=taskList['taskArns']
)
containerInstanceArns = [t['containerInstanceArn'] for t in tasks['tasks']]
containerInstances = ecsClient.describe_container_instances(
cluster=cluster_name,
containerInstances=containerInstanceArns
)
instance_id = containerInstances['containerInstances'][0]['ec2InstanceId']
ec2Instances = ec2Client.describe_instances(
InstanceIds=[
instance_id
]
)
if args.private:
    ip_address = ec2Instances['Reservations'][0]['Instances'][0]['PrivateIpAddress']
else:
    ip_address = ec2Instances['Reservations'][0]['Instances'][0]['PublicIpAddress']
print (bcolors.OKGREEN + "Getting logs from host, ip address: ", ip_address, bcolors.ENDC)
os.system("ssh -oStrictHostKeyChecking=no ec2-user@" + ip_address +
''' "docker logs -f \`docker ps -a | grep ''' + args.stack + ''' | head -n 1 | awk '{ print \$1}'\`" ''')
|
apache-2.0
| -7,983,265,479,317,270,000 | 29.954545 | 152 | 0.682819 | false |
justnoise/blastor
|
master_blastor.py
|
1
|
2316
|
import asyncio
import time
import subprocess
import sys
import os
from aiohttp import web
PRIVOXY_LOGFILE = '/var/log/privoxy/logfile'
MAX_IDLE_TIME = 30 * 60 # 30 minutes
LIFETIME = 12 * 60 * 60 # 12 hours
kill_at = time.time() + LIFETIME
async def handle_deathclock(request):
data = await request.post()
new_time = data.get('time')
set_deathclock(new_time)
return web.Response(text='OK')
async def handle_delete(request):
set_deathclock(5)
return web.Response(text='OK')
async def handle_newnym(request):
# throw it to the wind. It'll work! If it don't, I'll rewrite this
cmd = """echo -e 'authenticate ""\nsignal newnym\nquit\n' | nc localhost 9051"""
subprocess.Popen(['/bin/bash', '-c', cmd])
return web.Response(text='OK')
def set_deathclock(seconds_from_now):
global kill_at
print("Setting deathclock to {}s".format(seconds_from_now))
seconds_from_now = float(seconds_from_now)
kill_at = time.time() + seconds_from_now
async def deathclock_loop():
while True:
if time.time() > kill_at:
print("killing process due deathclock expiration")
sys.stdout.flush()
sys.exit(0)
await asyncio.sleep(1)
async def log_loop(logfile):
last_filesize = -1
last_change_time = time.time()
while True:
try:
filesize = os.stat(logfile).st_size
if filesize != last_filesize:
last_filesize = filesize
last_change_time = time.time()
except OSError:
pass
if last_change_time + MAX_IDLE_TIME < time.time():
print("killing process due to unchanging log filesize")
sys.stdout.flush()
sys.exit(0)
await asyncio.sleep(10)
def run_services():
subprocess.run('service tor start', shell=True)
subprocess.run('service privoxy start', shell=True)
def main():
loop = asyncio.get_event_loop()
app = web.Application(loop=loop)
app.router.add_post('/deathclock', handle_deathclock)
app.router.add_delete('/', handle_delete)
app.router.add_get('/newnym', handle_newnym)
run_services()
loop.create_task(deathclock_loop())
loop.create_task(log_loop(PRIVOXY_LOGFILE))
web.run_app(app)
if __name__ == '__main__':
main()
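# Hedged sketch of the HTTP control API defined in main() (aiohttp's
# web.run_app listens on port 8080 by default; adjust if your deployment maps
# another port):
#
#     curl -X POST -d 'time=3600' http://localhost:8080/deathclock  # push back the kill time
#     curl http://localhost:8080/newnym                             # ask Tor for a new circuit
#     curl -X DELETE http://localhost:8080/                         # shut down ~5 seconds later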
|
mit
| -7,459,833,621,495,296,000 | 25.62069 | 84 | 0.62867 | false |
AIFDR/inasafe-django
|
django_project/realtime/utils.py
|
2
|
3950
|
# coding=utf-8
import logging
import os
from zipfile import ZipFile
from realtime.app_settings import LOGGER_NAME, REPORT_TEMPLATES
LOGGER = logging.getLogger(LOGGER_NAME)
def split_layer_ext(layer_path):
"""Split layer file by basename and extension.
We need this to accommodate parsing base_layer.aux.xml into
base_layer with ext .aux.xml, which is not provided os.path.splitext
:param layer_path: The path to the base file of the layer
:type layer_path: str
:return: A tuple of basename and extension: (basename, ext)
:rtype: (str, str)
"""
split = layer_path.split('.')
# take the first as basename and the rest as ext
    basename = split[0]
    return basename, '.'.join([''] + split[1:])
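# Hedged example of the behaviour documented above (the outputs are what the
# implementation returns, not values taken from project data):
#
#     split_layer_ext('base_layer.aux.xml')  # -> ('base_layer', '.aux.xml')
#     split_layer_ext('hazard.tif')          # -> ('hazard', '.tif')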
def zip_inasafe_layer(layer_path):
"""Zip associated file for InaSAFE layer.
:return: Path of newly zipped file
"""
# We zip files with the same basename
dirname = os.path.dirname(layer_path)
basename = os.path.basename(layer_path)
basename_without_ext = split_layer_ext(basename)[0]
zip_name = os.path.join(
dirname, basename_without_ext + '.zip')
with ZipFile(zip_name, 'w') as zf:
for root, dirs, files in os.walk(dirname):
for f in files:
f_basename, ext = split_layer_ext(f)
if f_basename == basename_without_ext and not ext == '.zip':
filename = os.path.join(root, f)
arcname = os.path.relpath(filename, dirname)
zf.write(filename, arcname=arcname)
return zip_name
def zip_inasafe_analysis_result(analysis_path):
"""Zip associated file for InaSAFE Analysis result."""
# We zip files with the same basename
dirname = os.path.dirname(analysis_path)
basename = os.path.basename(analysis_path)
basename_without_ext = split_layer_ext(basename)[0]
zip_name = os.path.join(
dirname, basename_without_ext + '.zip')
with ZipFile(zip_name, 'w') as zf:
for root, dirs, files in os.walk(dirname):
for f in files:
f_basename, ext = split_layer_ext(f)
if not ext == '.zip':
filename = os.path.join(root, f)
arcname = os.path.relpath(filename, dirname)
zf.write(filename, arcname=arcname)
return zip_name
def substitute_layer_order(layer_order_template, source_dict):
"""Replace references in layer_order_template according to source_dict.
Substitute entry that starts with @ if any
"""
layer_order = []
for layer in layer_order_template:
if layer.startswith('@'):
# substitute layer
keys = layer[1:].split('.')
try:
# Recursively find indexed keys' value
value = source_dict
for k in keys:
value = value[k]
# substitute if we find replacement
layer = value
except BaseException as e:
LOGGER.exception(e)
# Let layer order contains @ sign so it can be parsed
# by InaSAFE Headless instead (and decide if it will fail).
layer_order.append(layer)
return layer_order
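# Hedged example (keys and layer names are hypothetical): entries starting
# with '@' are resolved against source_dict by their dotted path, all other
# entries are kept unchanged.
#
#     substitute_layer_order(['@hazard.layer_path', 'basemap'],
#                            {'hazard': {'layer_path': 'flood.tif'}})
#     # -> ['flood.tif', 'basemap']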
def celery_worker_connected(celery_app, worker_name):
"""Check worker exists."""
pongs = celery_app.control.ping()
for pong in pongs:
for key in pong:
if worker_name in key:
return True
return False
def template_paths(hazard_type, locale='en'):
"""Internal function to return template paths."""
return REPORT_TEMPLATES[hazard_type][locale]
def template_names(hazard_type, locale='en'):
"""Internal function to return template output name."""
template_filename = template_paths(hazard_type, locale)
basename = os.path.basename(template_filename)
output_name, _ = os.path.splitext(basename)
return output_name
|
bsd-2-clause
| 332,064,118,970,813,300 | 31.377049 | 76 | 0.614177 | false |
Neurosim-lab/netpyne
|
examples/intervalSaveWeights/init.py
|
1
|
1562
|
"""
init.py
Starting script to run NetPyNE-based M1 model.
Usage:
python init.py # Run simulation, optionally plot a raster
MPI usage:
mpiexec -n 4 nrniv -python -mpi init.py
"""
import matplotlib; matplotlib.use('Agg') # to avoid graphics error in servers
from netpyne import sim
from cfg import cfg
from netParams import netParams
import os
### This is an example function run at an interval during the simulation
### This function saves weights every time it runs (every cfg.intervalRun) and
### saves the full simulation data at the coarser cfg.saveInterval.
def saveWeights(t):
# if a list for weights is not initialized make one
if not hasattr(sim, 'allWeights'):
sim.allWeights=[]
# save the weights
for cell in sim.net.cells:
for conn in cell.conns:
sim.allWeights.append(float(conn['hObj'].weight[0]))
# if the sim time matches the saveInterval then save data
# NOTE: intervalRun must divide evenly into saveInterval (saveInterval % intervalRun == 0)
if (round(t, 4) % cfg.saveInterval == 0):
sim.intervalSave(t)
print("Starting sim ...")
(pops, cells, conns, stims, rxd, simData) = sim.create(netParams, cfg, output=True)
# we run with an interval function defined above
# if you just want to save the data and not the weights you can use the sim.intervalSave function instead of saveWeights
sim.runSimWithIntervalFunc(cfg.intervalRun, saveWeights)
# we run fileGather() instead of gather
sim.fileGather()
sim.analyze()
sim.checkOutput('M1detailed')
|
mit
| -8,196,858,167,986,852,000 | 28.471698 | 120 | 0.721511 | false |
ecell/ecell3
|
ecell/frontend/session-monitor/ecell/ui/osogo/GtkSessionMonitor.py
|
1
|
25607
|
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# This file is part of the E-Cell System
#
# Copyright (C) 1996-2016 Keio University
# Copyright (C) 2008-2016 RIKEN
# Copyright (C) 2005-2009 The Molecular Sciences Institute
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#
# E-Cell System is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# E-Cell System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with E-Cell System -- see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#END_HEADER
#
# Design: Kenta Hashimoto <[email protected]>
# Design and application Framework: Koichi Takahashi <[email protected]>
# Programming:
# Yuki Fujita
# Yoshiya Matsubara
# Yuusuke Saito
# Masahiro Sugimoto <[email protected]>'
# Gabor Bereczki <[email protected]>
# at E-Cell Project, Lab. for Bioinformatics, Keio University.
#
import os
import os.path
import sys
import traceback
import ConfigParser
import gtk
import gobject
from ecell.Session import *
from ecell.ui.osogo.config import *
from ecell.ui.osogo.ModelWalker import *
import ecell.ui.osogo.MainWindow as MainWindow
import ecell.ui.osogo.EntityListWindow as EntityListWindow
import ecell.ui.osogo.LoggerWindow as LoggerWindow
import ecell.ui.osogo.InterfaceWindow as InterfaceWindow
import ecell.ui.osogo.StepperWindow as StepperWindow
import ecell.ui.osogo.BoardWindow as BoardWindow
import ecell.ui.osogo.LoggingPolicy as LoggingPolicy
import ecell.ui.osogo.OsogoPluginManager as OsogoPluginManager
from ecell.ui.osogo.DataGenerator import *
class GtkSessionMonitor(object):
def __init__(self):
"""sets up the osogo session, creates Mainwindow and other fundamental
windows but doesn't show them"""
self.theSession = None
self.theModelWalker = None
self.theMessageMethod = None
self.theDataGenerator = None
self.updateCallbackList = []
# -------------------------------------
# reads defaults from osogo.ini
# -------------------------------------
self.theConfigDB = ConfigParser.ConfigParser()
self.theIniFileName = os.path.join( home_dir, '.ecell', 'osogo.ini' )
theDefaultIniFileName = os.path.join( conf_dir, 'osogo.ini' )
if not os.path.isfile( self.theIniFileName ):
# get from default
self.theConfigDB.read( theDefaultIniFileName )
# try to write into home dir
self.saveParameters()
else:
# read from default
self.theConfigDB.read(self.theIniFileName)
self.theUpdateInterval = 150
self.stuckRequests = 0
self.theStepSizeOrSec = 1.0
self.theRunningFlag = False
# -------------------------------------
# creates PluginManager
# -------------------------------------
self.thePluginManager = OsogoPluginManager.OsogoPluginManager( self )
self.thePluginManager.loadAll()
# -------------------------------------
# creates FundamentalWindow
# -------------------------------------
# key:window name(str) value:window instance
self.theFundamentalWindows = {}
# creates fundamental windows
aLoggerWindow = LoggerWindow.LoggerWindow( self )
anInterfaceWindow = InterfaceWindow.InterfaceWindow( self )
aStepperWindow = StepperWindow.StepperWindow( self )
aBoardWindow = BoardWindow.BoardWindow( self )
aMainWindow = MainWindow.MainWindow( self )
# saves them to map
self.theFundamentalWindows['LoggerWindow'] = aLoggerWindow
self.theFundamentalWindows['InterfaceWindow'] = anInterfaceWindow
self.theFundamentalWindows['StepperWindow'] = aStepperWindow
self.theFundamentalWindows['BoardWindow'] = aBoardWindow
self.theFundamentalWindows['MainWindow'] = aMainWindow
# key:EntityListWindow instance value:None
# In deleteEntityListWindow method, an instance of EntityListWindow is
# accessed directory. The sequence information of EntityListWindow does
# not need. So the references to EntityListWindow instances should be
# held dict's key. Values of dict are not also imported.
self.theEntityListInstanceMap = {}
# -------------------------------------
# creates MainWindow
# -------------------------------------
self.theMainWindow = aMainWindow
def GUI_interact(self):
"hands over controlto the user (gtk.main_loop())"
gtk.main()
def QuitGUI( self ):
""" quits gtk.main_loop() after saving changes """
gtk.main_quit()
def doesExist( self, aWindowName ):
""" aWindowName: (str) name of Window
returns True if window is opened
False if window is not opened
checks both plugin and fundamental windows
"""
# check fundamentalwindows
if self.theFundamentalWindows.has_key(aWindowName):
return self.theFundamentalWindows[ aWindowName ].exists()
# check entity list windows
        if aWindowName == 'EntityListWindow' and len( self.theEntityListInstanceMap ) > 0:
return True
# check pluginwindow instances
aPluginInstanceList = self.thePluginManager.thePluginTitleDict.keys()
for aPluginInstance in aPluginInstanceList:
if aWindowName == self.thePluginManager.thePluginTitleDict[aPluginInstance]:
return True
return False
def openWindow( self, aWindowName, rootWidget = None, rootWindow = None ):
"""opens up window and returns aWindowname instance
aWindowName --- Window name (str)
Returns FundamentalWindow or EntityListWindow list
"""
# When the WindowName does not match, create nothing.
if self.theFundamentalWindows.has_key( aWindowName ):
if rootWidget == None:
self.theFundamentalWindows[ aWindowName ].openWindow()
else:
self.theFundamentalWindows[ aWindowName ].openWindow(rootWidget, rootWindow)
self.theMainWindow.updateButtons()
return self.theFundamentalWindows[ aWindowName ]
elif aWindowName == 'EntityListWindow':
return self.createEntityListWindow()
else:
message( "No such WindowType (%s) " %aWindowName )
return None
def getWindow( self, aWindowName ):
"""
aWindowName --- Window name (str)
Returns FundamentalWindow or EntityListWindow list
"""
# check fundamentalwindows
if self.theFundamentalWindows.has_key(aWindowName):
return self.theFundamentalWindows[aWindowName]
# check entity list windows
if aWindowName == 'EntityListWindow':
return self.theEntityListInstanceMap.keys()
# check pluginwindow instances
aPluginInstanceList = self.thePluginManager.thePluginTitleDict.keys()
        for aPluginInstance in aPluginInstanceList:
            if aWindowName == self.thePluginManager.thePluginTitleDict[aPluginInstance]:
                return aPluginInstance
return None
def displayWindow( self, aWindowName ):
"""When the Window is not created, calls its openWidow() method.
When already created, move it to the top of desktop.
aWindowName --- window name(str)
Return None
[None]:When the WindowName does not matched, creates nothing.
"""
# When the WindowName does not match, creates nothing.
if not self.theFundamentalWindows.has_key( aWindowName ):
message ( "No such WindowType (%s) " %aWindowName )
return None
# When the Window is already created, move it to the top of desktop
if self.theFundamentalWindows[aWindowName].exists():
self.theFundamentalWindows[aWindowName].present()
pass
else:
self.theFundamentalWindows[aWindowName].openWindow()
self.theFundamentalWindows[aWindowName].update()
def toggleWindow( self, aWindowName, aNewState=None ):
aState = self.theFundamentalWindows[aWindowName].exists()
if aNewState is None:
aNewState = not aState
if aState != aNewState:
if aNewState:
self.theFundamentalWindows[aWindowName].openWindow()
self.theFundamentalWindows[aWindowName].update()
else:
self.theFundamentalWindows[aWindowName].close()
if self.theFundamentalWindows['MainWindow'].exists():
self.theFundamentalWindows['MainWindow'].update()
def createPluginWindow(self, aType, aFullPNList):
""" opens and returns _PluginWindow instance of aType showing aFullPNList
returns None if pluginwindow could not have been created """
anInstance = self.thePluginManager.createInstance( aType, aFullPNList)
if anInstance == None:
self.message ( 'Pluginwindow has not been created. %s may not be a valid plugin type' %aType )
return anInstance
def createPluginOnBoard(self, aType, aFullPNList):
""" creates and adds plugin to pluginwindow and returns plugininstance """
aBoardWindow = self.getWindow('BoardWindow')
if aBoardWindow == None:
self.message('Board Window does not exist. Plugin cannot be added.')
return None
return aBoardWindow.addPluginWindows( aType, aFullPNList)
def openLogPolicyWindow(self, aLogPolicy, aTitle ="Set log policy" ):
""" pops up a modal dialog window
with aTitle (str) as its title
and displaying loggingpolicy
and with an OK and a Cancel button
users can set logging policy
returns:
logging policy if OK is pressed
None if cancel is pressed
"""
aLogPolicyWindow = LoggingPolicy.LoggingPolicy( self, aLogPolicy, aTitle )
return aLogPolicyWindow.return_result()
def createEntityListWindow( self, rootWidget = 'EntityListWindow', aStatusBar=None ):
"""creates and returns an EntityListWindow
"""
anEntityListWindow = None
# when Model is already loaded.
if self.theSession is not None:
# creates new EntityListWindow instance
anEntityListWindow = EntityListWindow.EntityListWindow( self, rootWidget, aStatusBar )
anEntityListWindow.openWindow()
# saves the instance into map
self.theEntityListInstanceMap[ anEntityListWindow ] = None
# updates all fundamental windows
self.updateFundamentalWindows()
else:
anEntityListWindow = EntityListWindow.EntityListWindow( self, rootWidget, aStatusBar )
anEntityListWindow.openWindow()
# saves the instance into map
self.theEntityListInstanceMap[ anEntityListWindow ] = None
return anEntityListWindow
def registerUpdateCallback( self, aFunction ):
self.updateCallbackList.append( aFunction )
def deleteEntityListWindow( self, anEntityListWindow ):
"""deletes the reference to the instance of EntityListWindow
anEntityListWindow --- an instance of EntityListWindow(EntityListWindow)
Return None
[Note]: When the argument is not anEntityListWindow, throws exception.
When this has not the reference to the argument, does nothing.
"""
# When the argument is not anEntityListWindow, throws exception.
if anEntityListWindow.__class__.__name__ != 'EntityListWindow':
raise "(%s) must be EntityListWindow" %anEntityListWindow
# deletes the reference to the PropertyWindow instance on the EntityListWindow
self.thePluginManager.deletePropertyWindowOnEntityListWinsow( anEntityListWindow.thePropertyWindow )
# deletes the reference to the EntityListWindow instance
if self.theEntityListInstanceMap.has_key( anEntityListWindow ):
anEntityListWindow.close()
del self.theEntityListInstanceMap[ anEntityListWindow ]
def __updateByTimeOut( self, arg ):
"""when time out, calls updates method()
Returns None
"""
if not gtk.events_pending():
self.updateWindows()
if self.stuckRequests > 0:
self.stuckRequests -= 1
elif self.theUpdateInterval >=225:
self.theUpdateInterval /=1.5
else:
self.stuckRequests +=1
if self.stuckRequests >6:
self.theUpdateInterval *= 1.5
self.stuckRequests = 3
self.theTimer = gobject.timeout_add( int(self.theUpdateInterval), self.__updateByTimeOut, 0 )
def __removeTimeOut( self ):
"""removes time out
Returns None
"""
gobject.source_remove( self.theTimer )
def updateWindows( self ):
self.theMainWindow.update()
self.updateFundamentalWindows()
# updates all plugin windows
self.thePluginManager.updateAllPluginWindow()
for aFunction in self.updateCallbackList:
apply( aFunction )
def setUpdateInterval(self, Secs):
"plugins are refreshed every secs seconds"
self.theMainWindow.theUpdateInterval = Secs
def getUpdateInterval(self ): #
"returns the rate by plugins are refreshed "
return self.theMainWindow.theUpdateInterval
def updateFundamentalWindows( self ):
"""updates fundamental windows
Return None
"""
# updates all fundamental windows
for aFundamentalWindow in self.theFundamentalWindows.values():
aFundamentalWindow.update()
# updates all EntityListWindow
for anEntityListWindow in self.theEntityListInstanceMap.keys():
anEntityListWindow.update()
#update MainWindow
self.theMainWindow.update()
def __readIni(self,aPath):
"""read osogo.ini file
an osogo.ini file may be in the given path
        that has an osogo section or others but no default
argument may be a filename as well
"""
# first delete every section apart from default
for aSection in self.theConfigDB.sections():
self.theConfigDB.remove(aSection)
# gets pathname
if not os.path.isdir( aPath ):
aPath=os.path.dirname( aPath )
# checks whether file exists
aFilename = os.path.join( aPath, 'osogo.ini' )
if not os.path.isfile( aFilename ):
# self.message('There is no osogo.ini file in this directory.\n Falling back to system defauls.\n')
return None
# tries to read file
try:
self.message('Reading osogo.ini file from directory [%s]' %aPath)
self.theConfigDB.read( aFilename )
# catch exceptions
except:
            self.message(' error while executing ini file [%s]' %aFilename)
anErrorMessage = '\n'.join( traceback.format_exception( sys.exc_type,sys.exc_value,sys.exc_traceback ) )
self.message(anErrorMessage)
def getParameter(self, aParameter):
"""tries to get a parameter from ConfigDB
if the param is not present in either osogo or default section
raises exception and quits
"""
# first try to get it from osogo section
if self.theConfigDB.has_section('osogo'):
if self.theConfigDB.has_option('osogo',aParameter):
return self.theConfigDB.get('osogo',aParameter)
# gets it from default
return self.theConfigDB.get('DEFAULT',aParameter)
def setParameter(self, aParameter, aValue):
"""tries to set a parameter in ConfigDB
if the param is not present in either osogo or default section
raises exception and quits
"""
# first try to set it in osogo section
if self.theConfigDB.has_section('osogo'):
if self.theConfigDB.has_option('osogo',aParameter):
self.theConfigDB.set('osogo',aParameter, str(aValue))
else:
# sets it in default
self.theConfigDB.set('DEFAULT',aParameter, str(aValue))
def saveParameters( self ):
"""tries to save all parameters into a config file in home directory
"""
try:
aDirName = os.path.dirname( self.theIniFileName )
if not os.path.exists( aDirName ):
os.makedirs( aDirName )
fp = open( self.theIniFileName, 'w' )
self.theConfigDB.write( fp )
except:
self.message("Could not save preferences into file %s.\n Please check permissions for home directory.\n"%self.theIniFileName)
def getLogPolicyParameters( self ):
"""
gets logging policy from config database
"""
logPolicy = []
logPolicy.append ( int( self.getParameter( 'logger_min_step' ) ) )
logPolicy.append ( float ( self.getParameter( 'logger_min_interval' ) ) )
logPolicy.append ( int( self.getParameter( 'end_policy' ) ) )
logPolicy.append ( int (self.getParameter( 'available_space' ) ) )
if logPolicy[0]<=0 and logPolicy[1]<=0:
logPolicy[0]=1
return logPolicy
def setLogPolicyParameters( self, logPolicy ):
"""
saves logging policy into config database
"""
self.setParameter( 'logger_min_step', logPolicy[0] )
self.setParameter( 'logger_min_interval', logPolicy[1] )
self.setParameter( 'end_policy' , logPolicy[2] )
self.setParameter( 'available_space' ,logPolicy[3] )
self.saveParameters()
def interact( self, parameters={} ):
self.theSession.interact( parameters )
def unload( self ):
if self.theSession is None:
return
self.stop()
self.theSession = None
self.theModelWalker = None
self.theDataGenerator = None
self.updateWindows()
def newSession( self ):
self.theSession = Session()
self.theModelWalker = ModelWalker( self.theSession.theSimulator )
self.theDataGenerator = DataGenerator( self )
self.theSession.theSimulator.setEventHandler( lambda:
gtk.events_pending() and gtk.main_iteration() )
self.theSession.setMessageMethod( self.theMessageMethod )
def loadModel( self, aModel ):
#self.__readIni( aModel )
self.unload()
self.newSession()
self.theSession.loadModel( aModel )
def saveModel( self , aModel ):
if self.theSession is None:
raise Exception( "Model is not loaded" )
        self.theSession.saveModel( aModel )
def setMessageMethod( self, aMethod ):
self.theMessageMethod = aMethod
if self.theSession is not None:
self.theSession.setMessageMethod( aMethod )
def restoreMessageMethod( self ):
if self.theSession is None:
return
self.theSession.restoreMessageMethod()
def message( self, message ):
if self.theMessageMethod is not None:
self.theMessageMethod( message )
#self._synchronize()
def run( self , time = '' ):
"""
if already running: do nothing
if time is given, run for the given time
if time is not given:
if Mainwindow is not opened create a stop button
set up a timeout rutin and Running Flag
"""
if self.theSession is None:
raise Exception("Model is not loaded")
if self.theRunningFlag == True:
return
if time == '' and not self.doesExist('MainWindow'):
self.openWindow('MainWindow')
try:
self.theRunningFlag = True
self.theTimer = gobject.timeout_add( self.theUpdateInterval, self.__updateByTimeOut, False )
aCurrentTime = self.getCurrentTime()
self.message("%15s"%aCurrentTime + ":Start\n" )
self.theSession.run( time )
self.theRunningFlag = False
self.__removeTimeOut()
except:
anErrorMessage = traceback.format_exception(sys.exc_type,sys.exc_value,sys.exc_traceback)
self.message(anErrorMessage)
self.theRunningFlag = False
self.__removeTimeOut()
self.updateWindows()
def stop( self ):
""" stop Simulation, remove timeout, set Running flag to false
"""
if self.theSession is None:
raise Exception("Model is not loaded")
try:
if self.theRunningFlag == True:
self.theSession.stop()
aCurrentTime = self.getCurrentTime()
self.message( ("%15s"%aCurrentTime + ":Stop\n" ))
self.__removeTimeOut()
self.theRunningFlag = False
except:
anErrorMessage = traceback.format_exception(sys.exc_type,sys.exc_value,sys.exc_traceback)
self.message(anErrorMessage)
self.updateWindows()
#self._synchronize()
def step( self, num = None ):
""" step according to num, if num is not given,
according to set step parameters
"""
if self.theSession is None:
raise Exception("Model is not loaded")
if self.theRunningFlag == True:
return
if num == None:
#set it to 1
num = 1
self.message( "Zero step value overridden to 1\n" )
try:
self.theRunningFlag = True
self.message( "Step\n" )
self.theTimer = gobject.timeout_add( self.theUpdateInterval, self.__updateByTimeOut, 0 )
self.theSession.step( int( num ) )
self.theRunningFlag = False
self.__removeTimeOut()
except:
anErrorMessage = traceback.format_exception(sys.exc_type,sys.exc_value,sys.exc_traceback)
self.message( anErrorMessage )
self.theRunningFlag = False
self.updateWindows()
def isRunning(self):
return self.theRunningFlag
def getNextEvent( self ):
if self.theSession is None:
raise Exception("Model is not loaded")
return self.theSession.getNextEvent()
def getCurrentTime( self ):
if self.theSession is None:
return float("nan")
return self.theSession.getCurrentTime()
def getStepperList( self ):
if self.theSession is None:
raise Exception("Model is not loaded")
return self.theSession.getStepperList()
def createStepperStub( self, id ):
if self.theSession is None:
raise Exception("Model is not loaded")
return self.theSession.createStepperStub( id )
def getEntityList( self, entityType, systemPath ):
if self.theSession is None:
raise Exception("Model is not loaded")
return self.theSession.getEntityList( entityType, systemPath )
def createEntityStub( self, fullid ):
if self.theSession is None:
raise Exception("Model is not loaded")
return self.theSession.createEntityStub( fullid )
def getEntityProperty( self, fullPN ):
if self.theSession is None:
raise Exception("Model is not loaded")
return self.theSession.getEntityProperty( fullPN )
def getEntityPropertyAttributes( self, fullPN ):
if self.theSession is None:
raise Exception("Model is not loaded")
return self.theSession.getEntityPropertyAttributes( createFullPNString( fullPN ) )
def setEntityProperty( self, fullPN, aValue ):
if self.theSession is None:
raise Exception("Model is not loaded")
self.theSession.setEntityProperty( createFullPNString( fullPN ), aValue )
def getLoggerList( self ):
if self.theSession is None:
raise Exception("Model is not loaded")
return self.theSession.getLoggerList()
def createLoggerStub( self, fullpn ):
if self.theSession is None:
raise Exception("Model is not loaded")
return self.theSession.createLoggerStub( fullpn )
def saveLoggerData( self, fullpn=0, aSaveDirectory='./Data', aStartTime=-1, anEndTime=-1, anInterval=-1 ):
if self.theSession is None:
raise Exception("Model is not loaded")
self.theSession.saveLoggerData( fullpn, aSaveDirectory, aStartTime, anEndTime, anInterval )
def getDataGenerator( self ):
if self.theSession is None:
raise Exception("Model is not loaded")
return self.theDataGenerator
|
lgpl-3.0
| 1,528,708,790,385,297,400 | 35.529244 | 137 | 0.624361 | false |
sosuke-k/cornel-movie-dialogs-corpus-storm
|
mdcorpus/tests/test_parser.py
|
1
|
2015
|
"""Testing for Parser"""
from unittest import TestCase
from nose.tools import eq_
from sets import Set
from mdcorpus.parser import *
class ParserTestCase(TestCase):
def setUp(self):
self.parser = Parser()
def tearDown(self):
print "done"
def test_movie_titles_metadata(self):
line = "m0 +++$+++ 10 things i hate about you +++$+++ 1999 +++$+++ 6.90 +++$+++ 62847 +++$+++ ['comedy', 'romance']\n"
l = self.parser.movie_titles_metadata(line)
eq_(l[0], 0)
eq_(l[1], "10 things i hate about you")
eq_(l[2], 1999)
eq_(l[3], 6.90)
eq_(l[4], 62847)
eq_(l[5], Set(["comedy", "romance"]))
def test_movie_characters_metadata(self):
line = "u0 +++$+++ BIANCA +++$+++ m0 +++$+++ 10 things i hate about you +++$+++ f +++$+++ 4\n"
l = self.parser.movie_characters_metadata(line)
eq_(l[0], 0)
eq_(l[1], "BIANCA")
eq_(l[2], 0)
eq_(l[3], "10 things i hate about you")
eq_(l[4], "f")
eq_(l[5], 4)
def test_movie_conversations(self):
line = "u0 +++$+++ u2 +++$+++ m0 +++$+++ ['L194', 'L195', 'L196', 'L197']\n"
l = self.parser.movie_conversations(line)
eq_(l[0], 0)
eq_(l[1], 2)
eq_(l[2], 0)
eq_(l[3], [194, 195, 196, 197])
def test_movie_lines(self):
line = "L203 +++$+++ u2 +++$+++ m0 +++$+++ CAMERON +++$+++ Seems like she could get a date easy enough...\n"
l = self.parser.movie_lines(line)
eq_(l[0], 203)
eq_(l[1], 2)
eq_(l[2], 0)
eq_(l[3], "CAMERON")
eq_(l[4], "Seems like she could get a date easy enough...")
def test_raw_script_urls(self):
line = "m0 +++$+++ 10 things i hate about you +++$+++ http://www.dailyscript.com/scripts/10Things.html\n"
l = self.parser.raw_script_urls(line)
eq_(l[0], 0)
eq_(l[1], "10 things i hate about you")
eq_(l[2], "http://www.dailyscript.com/scripts/10Things.html")
|
mit
| -7,247,241,451,031,049,000 | 32.583333 | 126 | 0.5067 | false |
jeff-alves/Tera
|
ui/custom_checkbox.py
|
1
|
4508
|
import wx
from game.services.icon_database import IconDatabase
from util.util import img_transform
class CustomCheckBox(wx.PyControl):
def __init__(self, parent, img_unchecked, img_checked=None, img_hover=None, color_checked=None, color_unchecked=None, color_hover=None, width=12, height=12, name=None):
wx.PyControl.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.NO_BORDER, wx.DefaultValidator, "CustomCheckBox")
self.parent = parent
self.name = name
self.width = width
self.height = height
self.InitializeBitmaps(img_unchecked, img_checked if img_checked else img_unchecked, color_checked, color_unchecked, color_hover)
self.checked = False
self.hover = False
self.SetBackgroundColour(parent.GetBackgroundColour())
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_LEFT_UP, self.OnMouseUp)
self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnterWindow)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
def OnEnterWindow(self, event):
if not self.IsEnabled(): return
self.hover = True
self.Refresh()
def OnLeaveWindow(self, event):
if not self.IsEnabled(): return
self.hover = False
self.Refresh()
def OnMouseUp(self, event):
if not self.IsEnabled():
return
self.checked = not self.IsChecked()
self.SendCheckBoxEvent()
event.Skip()
def OnSize(self, event):
self.Refresh()
def InitializeBitmaps(self, img_unchecked, img_checked, color_checked, color_unchecked, color_hover):
self.bitmaps = {"CheckedEnable": img_transform(IconDatabase()[img_checked].GetImage(), width=self.width, height=self.height, color=color_checked),
"UnCheckedEnable": img_transform(IconDatabase()[img_unchecked].GetImage(), width=self.width, height=self.height, color=color_unchecked),
"CheckedHoverEnable": img_transform(IconDatabase()[img_checked].GetImage(), width=self.width, height=self.height, color=color_hover),
"UnCheckedHoverEnable": img_transform(IconDatabase()[img_unchecked].GetImage(), width=self.width, height=self.height, color=color_hover),
"CheckedDisable": img_transform(IconDatabase()[img_checked].GetImage(), width=self.width, height=self.height, color=color_checked, gray=True),
"UnCheckedDisable": img_transform(IconDatabase()[img_unchecked].GetImage(), width=self.width, height=self.height, color=color_unchecked, gray=True)}
def GetBitmap(self):
if self.IsEnabled():
if self.IsChecked():
if self.IsHover(): return self.bitmaps["CheckedHoverEnable"]
return self.bitmaps["CheckedEnable"]
else:
if self.IsHover(): return self.bitmaps["UnCheckedHoverEnable"]
return self.bitmaps["UnCheckedEnable"]
else:
if self.IsChecked():
return self.bitmaps["CheckedDisable"]
else:
return self.bitmaps["UnCheckedDisable"]
def DoGetBestSize(self):
best = wx.Size(self.width, self.height)
self.CacheBestSize(best)
return best
def SetForegroundColour(self, colour):
wx.PyControl.SetForegroundColour(self, colour)
self.Refresh()
def SetBackgroundColour(self, colour):
wx.PyControl.SetBackgroundColour(self, colour)
self.Refresh()
def Enable(self, enable=True):
wx.PyControl.Enable(self, enable)
self.Refresh()
def IsChecked(self):
return self.checked
def IsHover(self):
return self.hover
def SetChecked(self, value=True):
self.checked = value
self.SendCheckBoxEvent()
def OnPaint(self, event):
dc = wx.BufferedPaintDC(self)
dc.Clear()
dc.DrawBitmap(self.GetBitmap(), 0, 0, True)
def OnEraseBackground(self, event):
pass
def SendCheckBoxEvent(self):
checkEvent = wx.CommandEvent(wx.wxEVT_COMMAND_CHECKBOX_CLICKED, self.GetId())
checkEvent.SetInt(int(self.checked))
checkEvent.SetEventObject(self)
self.GetEventHandler().ProcessEvent(checkEvent)
self.Refresh()
if self.name: self.parent.parent.config.WriteBool(self.name, self.checked)
|
mit
| 5,005,441,229,452,019,000 | 39.621622 | 172 | 0.64929 | false |
douglaskastle/mutagen
|
tests/test_oggspeex.py
|
1
|
2242
|
# -*- coding: utf-8 -*-
import os
import shutil
from mutagen._compat import cBytesIO
from mutagen.ogg import OggPage
from mutagen.oggspeex import OggSpeex, OggSpeexInfo, delete
from tests import TestCase, DATA_DIR
from tests.test_ogg import TOggFileTypeMixin
from tempfile import mkstemp
class TOggSpeex(TestCase, TOggFileTypeMixin):
Kind = OggSpeex
def setUp(self):
original = os.path.join(DATA_DIR, "empty.spx")
fd, self.filename = mkstemp(suffix='.ogg')
os.close(fd)
shutil.copy(original, self.filename)
self.audio = self.Kind(self.filename)
def tearDown(self):
os.unlink(self.filename)
def test_module_delete(self):
delete(self.filename)
self.scan_file()
self.failIf(OggSpeex(self.filename).tags)
def test_channels(self):
self.failUnlessEqual(2, self.audio.info.channels)
def test_sample_rate(self):
self.failUnlessEqual(44100, self.audio.info.sample_rate)
def test_bitrate(self):
self.failUnlessEqual(0, self.audio.info.bitrate)
def test_invalid_not_first(self):
page = OggPage(open(self.filename, "rb"))
page.first = False
self.failUnlessRaises(IOError, OggSpeexInfo, cBytesIO(page.write()))
def test_vendor(self):
self.failUnless(
self.audio.tags.vendor.startswith("Encoded with Speex 1.1.12"))
self.failUnlessRaises(KeyError, self.audio.tags.__getitem__, "vendor")
def test_not_my_ogg(self):
fn = os.path.join(DATA_DIR, 'empty.oggflac')
self.failUnlessRaises(IOError, type(self.audio), fn)
self.failUnlessRaises(IOError, self.audio.save, fn)
self.failUnlessRaises(IOError, self.audio.delete, fn)
def test_multiplexed_in_headers(self):
shutil.copy(
os.path.join(DATA_DIR, "multiplexed.spx"), self.filename)
audio = self.Kind(self.filename)
audio.tags["foo"] = ["bar"]
audio.save()
audio = self.Kind(self.filename)
self.failUnlessEqual(audio.tags["foo"], ["bar"])
def test_mime(self):
self.failUnless("audio/x-speex" in self.audio.mime)
def test_init_padding(self):
self.assertEqual(self.audio.tags._padding, 0)
|
gpl-2.0
| -8,287,115,952,533,721,000 | 31.028571 | 78 | 0.658787 | false |
OmkarPathak/pygorithm
|
pygorithm/searching/interpolation_search.py
|
1
|
1522
|
"""
Author: SHARAD BHAT
Created On: 22nd August 2017
- Best O(1)
- Average O(log(logn))
- Worst O(n)
"""
import inspect
def search(_list, target):
"""
    This function performs an interpolation search
    on a sorted list and returns the index
    of the item if found, otherwise returns False
    :param _list: list to search
    :param target: item to search for
    :return: index of the item if found, otherwise False
"""
if type(_list) is not list:
raise TypeError("interpolation search only accepts lists, not {}".format(str(type(_list))))
# First element
low = 0
# Last element
high = len(_list) - 1
# List is assumed to be sorted
while low <= high and target >= _list[low] and target <= _list[high]:
position = low + int(((float(high - low) / (_list[high] - _list[low])) * (target - _list[low])))
if _list[position] == target:
return position
# If target is greater, search in right half
if _list[position] < target:
low = position + 1
# If target is smaller, search in left half
else:
high = position - 1
return False
def time_complexities():
"""
Return information on functions
time complexity
:return: string
"""
return "Best Case: O(1), Average Case: O(log(logn)), Worst Case: O(logn)"
def get_code():
"""
easily retrieve the source code
of the function
:return: source code
"""
return inspect.getsource(search)
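# --- Editor's illustrative usage (an assumption, not part of the original module) ---
# A minimal sketch of calling search() on a small sorted list; the list values
# and the expected results in the comments are examples only.
if __name__ == "__main__":
    _demo = [2, 5, 8, 12, 16, 23, 38, 56, 72, 91]
    print(search(_demo, 23))        # 23 sits at index 5
    print(search(_demo, 40))        # 40 is absent, so False is returned
    print(time_complexities())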
|
mit
| 2,176,310,217,265,764,400 | 22.415385 | 104 | 0.606439 | false |
florianfesti/boxes
|
boxes/generators/regularbox.py
|
1
|
4696
|
#!/usr/bin/env python3
# Copyright (C) 2013-2014 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
from boxes.generators.bayonetbox import BayonetBox
import copy
class RegularBox(BayonetBox):
"""Box with regular polygon as base"""
ui_group = "Box"
def __init__(self):
Boxes.__init__(self)
self.addSettingsArgs(edges.FingerJointSettings)
self.buildArgParser("h", "outside")
self.argparser.add_argument(
"--radius", action="store", type=float, default=50.0,
help="inner radius if the box (at the corners)")
self.argparser.add_argument(
"--n", action="store", type=int, default=5,
help="number of sides")
self.argparser.add_argument(
"--top", action="store", type=str, default="none",
choices=["none", "hole", "angled hole", "angled lid", "angled lid2", "round lid", "bayonet mount"],
help="style of the top and lid")
self.argparser.add_argument(
"--alignment_pins", action="store", type=float, default=1.0,
help="diameter of the alignment pins for bayonet lid")
self.lugs=6
def render(self):
r, h, n = self.radius, self.h, self.n
if self.outside:
            r = r - self.thickness / math.cos(math.radians(360/(2*n)))
if self.top == "none":
h = self.adjustSize(h, False)
elif "lid" in self.top and self.top != "angled lid":
h = self.adjustSize(h) - self.thickness
else:
h = self.adjustSize(h)
t = self.thickness
fingerJointSettings = copy.deepcopy(self.edges["f"].settings)
fingerJointSettings.setValues(self.thickness, angle=360./n)
fingerJointSettings.edgeObjects(self, chars="gGH")
r, sh, side = self.regularPolygon(n, radius=r)
with self.saved_context():
self.regularPolygonWall(corners=n, r=r, edges='F', move="right")
if self.top == "angled lid":
self.regularPolygonWall(corners=n, r=r, edges='e', move="right")
self.regularPolygonWall(corners=n, r=r, edges='E', move="right")
elif self.top in ("angled hole", "angled lid2"):
self.regularPolygonWall(corners=n, r=r, edges='F', move="right",
callback=[lambda:self.regularPolygonAt(
0, 0, n, h=sh-t)])
if self.top == "angled lid2":
self.regularPolygonWall(corners=n, r=r, edges='E', move="right")
elif self.top in ("hole", "round lid"):
self.regularPolygonWall(corners=n, r=r, edges='F', move="right",
hole=(sh-t)*2)
if self.top == "round lid":
self.parts.disc(sh*2, move="right")
if self.top == "bayonet mount":
self.diameter = 2*sh
self.parts.disc(sh*2-0.1*t, callback=self.lowerCB,
move="right")
self.regularPolygonWall(corners=n, r=r, edges='F',
callback=[self.upperCB], move="right")
self.parts.disc(sh*2, move="right")
self.regularPolygonWall(corners=n, r=r, edges='F', move="up only")
side = 2 * math.sin(math.radians(180.0/n)) * r
fingers = self.top in ("hole", "angled hole", "round lid",
"angled lid2", "bayonet mount")
if n % 2:
for i in range(n):
self.rectangularWall(side, h, move="right",
edges="fgfG" if fingers else "fgeG")
else:
for i in range(n//2):
self.rectangularWall(side, h, move="right",
edges="fGfG" if fingers else "fGeG")
self.rectangularWall(side, h, move="right",
edges="fgfg" if fingers else "fgeg")
|
gpl-3.0
| -4,863,978,343,435,678,000 | 41.690909 | 111 | 0.550256 | false |
Acidity/PyPermissions
|
tests/permissions.py
|
1
|
3709
|
import unittest
from pypermissions.permission import Permission
class BasicPermissionTests(unittest.TestCase):
def setUp(self):
self.p1 = Permission("test.1.hello")
self.p2 = Permission("test.2.hello")
self.p3 = Permission("test")
self.p4 = Permission("test.1.hello")
self.ps1 = {self.p1, self.p2}
self.ps2 = {self.p1, self.p4}
self.ps3 = {self.p1}
self.wp1 = Permission("test.1.*")
self.wp2 = Permission("test.2.hello")
self.wp3 = Permission("test")
self.wp4 = Permission("test.1.*")
self.wp5 = Permission("test.1.goodbye")
self.wp6 = Permission("test.1")
self.wp7 = Permission("*")
self.wp8 = Permission("test.*.hello")
def test_equal(self):
self.assertEqual(self.p1, self.p4)
self.assertNotEqual(self.p1, self.p2)
self.assertNotEqual(self.p1, self.p3)
self.assertEqual(self.ps2, self.ps3)
def test_grants_permission(self):
self.assertTrue(self.p1.grants_permission(self.p1))
self.assertTrue(self.p1.grants_permission(self.p4))
self.assertFalse(self.p1.grants_permission(self.p2))
self.assertFalse(self.p1.grants_permission(self.p3))
self.assertFalse(self.p3.grants_permission(self.p1))
self.assertTrue(self.wp1.grants_permission(self.wp1))
self.assertTrue(self.wp1.grants_permission(self.wp4))
self.assertFalse(self.wp1.grants_permission(self.wp2))
self.assertFalse(self.wp1.grants_permission(self.wp3))
self.assertFalse(self.wp3.grants_permission(self.wp1))
self.assertTrue(self.wp1.grants_permission(self.wp5))
self.assertFalse(self.wp1.grants_permission(self.wp6))
self.assertTrue(self.wp7.grants_permission(self.wp1))
self.assertTrue(self.wp7.grants_permission(self.wp2))
self.assertTrue(self.wp7.grants_permission(self.wp3))
self.assertTrue(self.wp7.grants_permission(self.wp4))
self.assertTrue(self.wp7.grants_permission(self.wp5))
self.assertTrue(self.wp7.grants_permission(self.wp6))
self.assertTrue(self.wp8.grants_permission(self.wp2))
self.assertFalse(self.wp8.grants_permission(self.wp1))
def test_grants_any_permission(self):
self.assertTrue(self.p1.grants_any_permission(self.ps1))
self.assertTrue(self.p2.grants_any_permission(self.ps1))
self.assertFalse(self.p3.grants_any_permission(self.ps1))
self.assertTrue(self.p4.grants_any_permission(self.ps1))
def test_segments(self):
self.assertEqual(self.p1.segments, ["test", "1", "hello"])
self.assertEqual(self.p2.segments, ["test", "2", "hello"])
self.assertEqual(self.p3.segments, ["test"])
self.assertEqual(self.p1.segments, self.p4.segments)
def test_is_end_wildcard(self):
self.assertTrue(self.wp1.is_end_wildcard)
self.assertTrue(self.wp4.is_end_wildcard)
self.assertTrue(self.wp7.is_end_wildcard)
self.assertFalse(self.wp8.is_end_wildcard)
self.assertFalse(self.p1.is_end_wildcard)
self.assertFalse(self.p2.is_end_wildcard)
self.assertFalse(self.p3.is_end_wildcard)
self.assertFalse(self.p4.is_end_wildcard)
def test_is_wildcard(self):
self.assertTrue(self.wp1.is_wildcard)
self.assertTrue(self.wp4.is_wildcard)
self.assertTrue(self.wp7.is_wildcard)
self.assertTrue(self.wp8.is_wildcard)
self.assertFalse(self.p1.is_wildcard)
self.assertFalse(self.p2.is_wildcard)
self.assertFalse(self.p3.is_wildcard)
self.assertFalse(self.p4.is_wildcard)
if __name__ == "__main__":
unittest.main()
|
mit
| 1,619,232,180,412,081,000 | 39.315217 | 66 | 0.662712 | false |
MartinHvidberg/gdbot
|
Find_Love.py
|
1
|
11112
|
'''
Find "List Of Valid Elements" cases in any field in any FC in NIS
Created on 31. July 2014
@author: [email protected] / [email protected]
'''
strName = "Find Love"
strVer = "1.0.0"
strBuild = "'140805"
### History
# Ver. 1.0.0 - First working version
# Partly based on "gdbot_rc_domainvalidation.py
# Remove '' from output love
# Sort love as integers
# Ver. 1.0.1 - by halpe
# Output file is now the gdbot file
# Domains are read from the NIS, no longer hard-coded
### To do
# Look for XXX in the code
import sys
from datetime import datetime # for datetime.now()
import arcpy
import arcEC # My recycled Easy-arcpy helper functions
timStart = datetime.now()
# *** Main
arcEC.SetMsg("'"+strName+"' ver. "+strVer+" build "+strBuild,0)
# *** Manage input parameters ***
strFDS = r"Database Connections/[email protected]/NIS.Nautical"
strFileName = "validation.gdbot"
# ** Harvest strings from GUI
if arcpy.GetParameterAsText(0) != '':
arcEC.SetMsg("GUI said",0)
strFDS = arcpy.GetParameterAsText(0)
strFileName = arcpy.GetParameterAsText(1)
arcEC.SetMsg("Input Feature dataset: "+strFDS,0)
arcEC.SetMsg("Output filename: "+strFileName,0)
# *** Open output file
arcpy.env.overwriteOutput = True
try:
f = open(strFileName, 'w')
except IOError, e:
print e.errno
print e
strErrorMessage = "Error - Can't open output file: "+strFileName+" system says: " + str(e.errno) + " : " + str(e)
arcEC.SetMsg(strErrorMessage,2)
sys.exit(strErrorMessage)
f.write("# Source: FindLove.py\n")
f.write("\n")
f.write("% file_title = Find Love\n")
f.write("% gdbot_syntax_version = N/A\n")
f.write("\n")
f.write("% log_file = \n")
f.write("% log_email = ([email protected], [email protected]) # Who to mail if I need human help\n\n")
f.write("\n")
f.write(": ruleID : ruleTitle : Mode : FC : FCsubtype : Condition : ActionType : Action : Comments)\n")
dicT = dict()
# *** Open DOMAIN ***
strConn = "Database Connections/[email protected]"
domains = arcpy.da.ListDomains(strConn)
dicDom = dict()
for domain in domains:
codedVals = domain.codedValues
try: # some have them as string
del codedVals['-32767']
except:
pass
try: # some have them as ints
del codedVals[-32767]
except:
pass
dicDom[domain.name] = codedVals
# *** Open FDS Descriptions ***
arcEC.SetMsg("Open Description",0)
dicDescribtion = arcpy.Describe(strFDS)
strReport = ""
strReport += "\n - catalog path: " + dicDescribtion.catalogPath
strReport += "\n - name: " + dicDescribtion.name
strReport += "\n - data type: " + dicDescribtion.dataType
strReport += "\n - children expanded: " + str(dicDescribtion.childrenExpanded)
strReport += "\n - children count: " + str(len(dicDescribtion.children))
arcEC.SetMsg(strReport,0)
if len(dicDescribtion.children) > 0:
arcEC.SetMsg(" Analyzing Descriptions of layers: "+str(len(dicDescribtion.children)),0)
numFCcounter = 0
for dicChildDescribtion in dicDescribtion.children:
numFCcounter += 1
numFieldCounter = 0
FCname = dicChildDescribtion.baseName # XXX remove 'NIS.' from here
if FCname[:4] == "NIS.":
FCname = FCname[4:]
arcEC.SetMsg("\n====== FC ====== " + str(numFCcounter) + " of " + str(len(dicDescribtion.children)) + " ====== " + FCname + " =============", 0)
for objField in dicChildDescribtion.fields:
# Build dic of dic of (fieldtype/fieldname)
ft = str(objField.type)
fn = str(objField.name)
if not ft in dicT.keys():
dicT[ft] = dict()
if not fn in dicT[ft].keys():
dicT[ft][fn] = 1
else:
newCount = dicT[ft][fn] + 1
dicT[ft][fn] = newCount
numFieldCounter += 1
# Look for Love in this field
if objField.type in ("Geometry", "OID", "Blob","Date", "Double", "Guid", "Integer", "SmallInteger"): # Considered irrelevant, or can't hold love
pass#arcEC.SetMsg(str(objField.name) + " type: "+ str(objField.type) + " ... Irrelevant!",0)
elif objField.type in ("String"): # relevant!
# =================================================================================================================
# # S-57 - attribute type L (list)
# * list (L): The expected input is a list of one or more numbers selected
# from a list of pre-defined attribute values. Where more than one
# value is used, they must normally be separated by commas but
# in special cases slashes (/) may be used.
# The abbreviation for this type is L.
if objField.name in ("CATACH", "CATAIR", "CATBRG", "CATDPG", "CATHAF", "CATHLK", "CATLIT", "CATLMK", "CATLND", "CATMPA", "CATOFP", "CATPIP", "CATREA", "CATROS", "CATRSC", "CATSCF", "CATSIT", "CATSIW", "CATSPM", "CATVEG", "COLOUR", "COLPAT", "FUNCTN", "LITVIS", "NATCON", "NATQUA", "NATSUR", "PRODCT", "QUASOU", "RESTRN", "STATUS", "SURTYP", "TECSOU"):
#arcEC.SetMsg(" ====== Found L string ======" + str(objField.name),0)
if objField.domain != "":
arcEC.SetMsg(str(objField.name) + " ====== List field with Domain.",0)
else:
### Handle LIST field without Esri-Domain, but with S-57 "list of valid elements"
if "NAUTICAL_"+objField.name in dicDom.keys():
lst_love = dicDom["NAUTICAL_"+objField.name].keys()
lst_love = sorted(lst_love, key=int)
str_love = str(lst_love).replace('\'','').replace('"','').replace('[','(').replace(']',')').replace(' ','').replace('u','') # CATHLK seems to come as miscoded unicode
else:
str_love = "()"
gdbot = ": 136-{0}-{1} : {2} field {3} violated S-57 list of valid elements : LOVE : {2} : * : {3} % {4} : LOG : FCSUBTYPE : ".format(("000"+str(numFCcounter))[-3:], ("000"+str(numFieldCounter))[-3:], FCname, objField.name, str_love)
f.write(gdbot+"\n")
arcEC.SetMsg(gdbot, 0)
continue
# # S-57 - attribute type E (enumerated)
# * enumerated (E): The expected input is a number selected from a list of
# pre-defined attribute values. Exactly one value must be chosen.
# The abbreviation for this type is E.
elif objField.name in ("$JUSTH", "$JUSTV", "$SPACE", "$TINTS", "BCNSHP", "BOYSHP", "BUISHP", "CAT_TS", "CATBUA", "CATCAM", "CATCAN", "CATCBL", "CATCHP", "CATCOA", "CATCON", "CATCOV", "CATCRN", "CATCTR", "CATDAM", "CATDIS", "CATDOC", "CATFIF", "CATFNC", "CATFOG", "CATFOR", "CATFRY", "CATGAT", "CATICE", "CATINB", "CATLAM", "CATMFA", "CATMOR", "CATNAV", "CATOBS", "CATOLB", "CATPIL", "CATPLE", "CATPRA", "CATPYL", "CATQUA", "CATRAS", "CATROD", "CATRTB", "CATRUN", "CATSEA", "CATSIL", "CATSLC", "CATSLO", "CATTRK", "CATTSS", "CATWAT", "CATWED", "CATWRK", "CATZOC", "CATZOC-ED31", "CONDTN", "CONRAD", "CONVIS", "DUNITS", "EXCLIT", "EXPSOU", "HORDAT", "HUNITS", "JRSDTN", "LITCHR", "MARSYS", "PUNITS", "QUAPOS", "SIGGEN", "T_ACWL", "T_MTOD", "TOPSHP", "TRAFIC", "VERDAT", "WATLEV"):
continue
# # S-57 - attribute type F (float)
# * float (F): The expected input is a floating point numeric value with defined range, resolution, units and format.
# The abbreviation for this type is F.
elif objField.name in ("$CSIZE", "$SCALE", "BURDEP", "CURVEL", "DRVAL1", "DRVAL2", "ELEVAT", "ESTRNG", "HEIGHT", "HORACC", "HORCLR", "HORLEN", "HORWID", "ICEFAC", "LIFCAP", "ORIENT", "POSACC", "RADIUS", "SECTR1", "SECTR2", "SIGPER", "SOUACC", "VALACM", "VALDCO", "VALLMA", "VALMAG", "VALMXR", "VALNMR", "VALSOU", "VERACC", "VERCCL", "VERCLR", "VERCOP", "VERCSA", "VERLEN"): # ...
continue
# # S-57 - attribute type I (integer)
# * integer (I): The expected input is an integer numeric value with defined range, units and format.
# The abbreviation for this type is I.
elif objField.name in ("CSCALE", "MLTYLT", "SCAMAX", "SCAMIN", "SCVAL1", "SCVAL2", "SDISMN", "SDISMX", "SIGFRQ", "T_TINT"):
continue
# # S-57 - attribute type A (coded string)
elif objField.name in ("$CHARS", "$SCODE", "AGENCY", "COMCHA", "CPDATE", "DATEND", "DATSTA", "NATION", "NMDATE", "PEREND", "PERSTA", "PRCTRY", "RADWAL", "RECDAT", "RECIND", "RYRMGV", "SHIPAM", "SIGGRP", "SIGSEQ", "SORDAT", "SORIND", "SUREND", "SURSTA", "T_HWLW", "T_THDF", "T_TSVL", "T_VAHC", "TIMEND", "TIMSTA", "TS_TSP", "TS_TSV"):
continue
# # S-57 - attribute type S ()
# * free text (S): The expected input is a free-format alphanumeric string.
# It may be a file name which points to a text or graphic file.
# The abbreviation for this type is S.
elif objField.name in ("$NTXST", "$TXSTR", "CALSGN", "CLSDEF", "CLSNAM", "INFORM", "NINFOM", "NOBJNM", "NPLDST", "NTXTDS", "OBJNAM", "PICREP", "PILDST", "PUBREF", "SURATH", "SYMINS", "TXTDSC"):
continue
# # Known Esri fields, i.e. Not S-57.
elif objField.name in ("DELETE_COMMENT", "EDITOR", "EDITOR_COMMENT", "NIS_EDITOR", "NIS_EDITOR_COMMENT", "NIS_VERIFIER", "PARENTID", "VERIFIER"):
continue
# # Known GST fields, i.e. Not S-57.
elif objField.name in ("GST_LINTXT", "GST_NID"):
continue
                # # Known fields with unknown descent. XXX <--- Have Esri explain these XXX
elif objField.name in ("DSNM", "LNAM", "MAPID", "NAME", "NOID", "VI_NAME"):
continue
# =================================================================================================================
else:
arcEC.SetMsg(str(objField.name) + " ... Should this be checked ?",0)
else: # Unexpected field type
arcEC.SetMsg("Unexpected field type found: " + str(objField.type) + "Field: " + str(objField.name) + " in " + dicChildDescribtion.baseName, 1)
else:
pass#arcEC.SetMsg(" Field have domain: " + objField.name,0)
else:
arcEC.SetMsg("No Feature Layers found",2)
#for ftype in dicT.keys():
# for fname in dicT[ftype].keys():
# f.write(str(ftype) + "\t " + str(fname) + " \t " + str(dicT[str(ftype)][str(fname)]) + "\n")
# *** All Done - Cleaning up ***
f.close()
timEnd = datetime.now()
durRun = timEnd-timStart
arcEC.SetMsg("Python stript duration (h:mm:ss.dddddd): "+str(durRun),0)
# *** End of Script ***
# Music that accompanied the coding of this script:
# AC/DC - Back in Black
# Bob Marley - No woman, no cry
|
apache-2.0
| 5,876,315,571,980,581,000 | 52.423077 | 794 | 0.559395 | false |
csdms/bob
|
bob/cmd.py
|
1
|
1966
|
#! /usr/bin/env python
from __future__ import absolute_import
from .subcommands import list, show, build, clean, pack
def main():
import argparse
parser = argparse.ArgumentParser(description='Let Bob do the building')
subparsers = parser.add_subparsers()
list_parser = subparsers.add_parser('list', help='list known packages')
list_parser.set_defaults(func=list)
show_parser = subparsers.add_parser('show', help='show build scripts')
show_parser.add_argument('packages', nargs='*', help='packages to build')
show_parser.set_defaults(func=show)
build_parser = subparsers.add_parser('build', help='build packages')
build_parser.add_argument('packages', nargs='*', help='packages to build')
build_parser.add_argument('-p', '--package-file',
type=argparse.FileType('r'),
help='package file')
build_parser.add_argument('--run', default=False, action='store_true',
help='packages to build')
build_parser.set_defaults(func=build)
clean_parser = subparsers.add_parser('clean', help='clean build files')
clean_parser.add_argument('packages', nargs='*', help='packages to build')
clean_parser.add_argument('--run', default=False, action='store_true',
help='packages to build')
clean_parser.set_defaults(func=clean)
pack_parser = subparsers.add_parser('pack', help='pack up a distribution')
pack_parser.add_argument('prefix', help='path to folder to pack')
pack_parser.add_argument('name', help='distribution name')
pack_parser.add_argument('-v', '--version', default=None,
help='distribution version')
pack_parser.add_argument('--force', default=False, action='store_true',
help='overwrite existing files')
pack_parser.set_defaults(func=pack)
args = parser.parse_args()
args.func(args)
|
mit
| -8,297,377,659,961,194,000 | 40.829787 | 78 | 0.637843 | false |
bwhmather/python-payment-terminal
|
payment_terminal/drivers/bbs/tests/test_messages.py
|
2
|
3632
|
import unittest
from payment_terminal.drivers.bbs.fields import ConstantField
import payment_terminal.drivers.bbs.messages as m
class TestBBSMessages(unittest.TestCase):
def test_message_meta(self):
class TestMessage(m.BBSMessage):
normal_field = "nothing interesting"
pitch_field = ConstantField(b'constant')
self.assertTrue(hasattr(TestMessage, '_fields'))
self.assertTrue(hasattr(TestMessage, 'pitch_field'))
self.assertTrue(hasattr(TestMessage, 'normal_field'))
def test_message_inheritance(self):
class BaseMessage(m.BBSMessage):
first = ConstantField(b'one')
second = ConstantField(b'two')
third = ConstantField(b'three')
class ChildMessage(BaseMessage):
second = ConstantField(b'overridden')
fourth = ConstantField(b'four')
self.assertEqual(
list(ChildMessage._fields.keys()),
['first', 'second', 'third', 'fourth']
)
self.assertEqual(
[field.value for field in ChildMessage._fields.values()],
[b'one', b'overridden', b'three', b'four']
)
def test_pack_display_text(self):
self.assertEqual(
b'\x41100Hello World',
m.DisplayTextMessage("Hello World").pack()
)
self.assertEqual(
b'\x41000Prompt customer',
m.DisplayTextMessage(
"Prompt customer", prompt_customer=False
).pack()
)
self.assertEqual(
b'\x41110Expects input',
m.DisplayTextMessage(
"Expects input", expects_input=True
).pack()
)
def test_unpack_display_text(self):
message = m.DisplayTextMessage.unpack(b'\x41000Hello World')
self.assertFalse(message.prompt_customer)
self.assertFalse(message.expects_input)
self.assertEqual(message.text, "Hello World")
message = m.DisplayTextMessage.unpack(b'\x41100Prompt customer')
self.assertTrue(message.prompt_customer)
self.assertFalse(message.expects_input)
self.assertEqual(message.text, "Prompt customer")
message = m.DisplayTextMessage.unpack(b'\x41010Expects input')
self.assertFalse(message.prompt_customer)
self.assertTrue(message.expects_input)
self.assertEqual(message.text, "Expects input")
def test_pack_print_text(self):
self.assertEqual(
m.PrintTextMessage(commands=[
('write', "First"),
('cut-partial'),
('write', "Second"),
('cut-through'),
]).pack(),
b'\x42\x20\x22\x2aFirst\x0eSecond\x0c'
)
def test_unpack_print_text(self):
message = m.PrintTextMessage.unpack(
b'\x42\x20\x22\x2aFirst\x0eSecond\x0c'
)
self.assertEqual(
message.commands,
[
('write', "First"),
('cut-partial'),
('write', "Second"),
('cut-through'),
]
)
def test_pack_reset_timer(self):
self.assertEqual(m.ResetTimerMessage(60).pack(), b'\x43060')
try:
m.ResetTimerMessage(6000).pack()
except ValueError:
pass
else:
self.fail()
def test_unpack_reset_timer(self):
self.assertEqual(m.ResetTimerMessage.unpack(b'\x43060').seconds, 60)
try:
m.ResetTimerMessage.unpack(b'\x43abc')
except:
pass
else:
self.fail()
|
bsd-3-clause
| 8,351,303,565,162,628,000 | 30.042735 | 76 | 0.570209 | false |
joshsamara/game-website
|
game_comments/views.py
|
1
|
3752
|
from __future__ import absolute_import
from crispy_forms.utils import render_crispy_form
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import models
from django.http import JsonResponse
from django.utils.html import escape
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.http import require_POST
import django_comments
from django_comments import signals
from django_comments.views.comments import CommentPostBadRequest
@csrf_protect
@require_POST
@login_required
def game_comment_post(request, next=None, using=None):
"""
Post a comment.
Modified version of the default post view for django_comments
"""
# Fill out some initial data fields from an authenticated user, if present
data = request.POST.copy()
if request.user.is_authenticated():
if not data.get('name', ''):
data["name"] = request.user.get_full_name() or request.user.get_username()
if not data.get('email', ''):
data["email"] = request.user.email
# Look up the object we're trying to comment about
ctype = data.get("content_type")
object_pk = data.get("object_pk")
if ctype is None or object_pk is None:
return CommentPostBadRequest("Missing content_type or object_pk field.")
try:
model = models.get_model(*ctype.split(".", 1))
target = model._default_manager.using(using).get(pk=object_pk)
except TypeError:
return CommentPostBadRequest(
"Invalid content_type value: %r" % escape(ctype))
except AttributeError:
return CommentPostBadRequest(
"The given content-type %r does not resolve to a valid model." % \
escape(ctype))
except ObjectDoesNotExist:
return CommentPostBadRequest(
"No object matching content-type %r and object PK %r exists." % \
(escape(ctype), escape(object_pk)))
except (ValueError, ValidationError) as e:
return CommentPostBadRequest(
"Attempting go get content-type %r and object PK %r exists raised %s" % \
(escape(ctype), escape(object_pk), e.__class__.__name__))
# Do we want to preview the comment?
preview = "preview" in data
# Construct the comment form
form = django_comments.get_form()(target, data=data)
# Check security information
if form.security_errors():
return CommentPostBadRequest(
"The comment form failed security verification: %s" % \
escape(str(form.security_errors())))
# If there are errors or if we requested a preview show the comment
if form.errors or preview:
return JsonResponse({
'form_html': render_crispy_form(form),
'success': False,
})
# Otherwise create the comment
comment = form.get_comment_object()
comment.ip_address = request.META.get("REMOTE_ADDR", None)
if request.user.is_authenticated():
comment.user = request.user
# Signal that the comment is about to be saved
responses = signals.comment_will_be_posted.send(
sender=comment.__class__,
comment=comment,
request=request
)
for (receiver, response) in responses:
if response == False:
return CommentPostBadRequest(
"comment_will_be_posted receiver %r killed the comment" % receiver.__name__)
# Save the comment and signal that it was saved
comment.save()
signals.comment_was_posted.send(
sender=comment.__class__,
comment=comment,
request=request
)
target.push_notification()
return JsonResponse({
'success': True,
})
|
mit
| -7,891,929,186,340,703,000 | 34.742857 | 92 | 0.662846 | false |
springfiles/upq
|
jobs/download.py
|
1
|
1078
|
# This file is part of the "upq" program used on springfiles.com to manage file
# uploads, mirror distribution etc. It is published under the GPLv3.
#
#Copyright (C) 2011 Daniel Troeder (daniel #at# admin-box #dot# com)
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
# downloads a file
from upqjob import UpqJob
from upqdb import UpqDB
from time import time
import os
import shutil
import requests
class Download(UpqJob):
"""
"download url:$url"
"""
def run(self):
url=self.jobdata['url']
filename=os.path.basename(url)
tmpfile=os.path.join(self.getcfg('temppath', '/tmp'), filename)
self.jobdata['file']=tmpfile
self.logger.debug("going to download %s", url)
try:
response = requests.get(url, stream=True, verify=False)
with open(tmpfile, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
del response
self.logger.debug("downloaded to %s", tmpfile)
except Exception as e:
self.logger.error(str(e))
return False
return True
|
gpl-3.0
| 5,155,227,144,408,807,000 | 25.95 | 79 | 0.717069 | false |
tdwyer/Viridian-gw
|
AmpacheTools/guifunctions.py
|
1
|
2425
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
### BEGIN LICENSE
# Copyright (C) 2010 Dave Eddy <[email protected]>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import pygtk
pygtk.require("2.0")
import gtk
import os
"""
GTK Helper functions
"""
def create_single_column_tree_view(column_name, model, sort_column=None):
"""Create a treeview by passing a column_name and a model (gtk.ListStore())."""
tree_view = gtk.TreeView(model)
tree_view.set_rules_hint(True)
column = create_column(column_name, 0, sort_column)
tree_view.append_column(column)
return tree_view
def create_column(column_name, column_id, sort_column=None, pixbuf=False):
"""Helper function for treeviews, this will return a column ready to be appended."""
if pixbuf:
renderer_text = gtk.CellRendererPixbuf()
column = gtk.TreeViewColumn(column_name)
column.pack_start(renderer_text, expand=False)
column.add_attribute(renderer_text, 'pixbuf', 0)
else:
renderer_text = gtk.CellRendererText()
column = gtk.TreeViewColumn(column_name, renderer_text, text=column_id)
if sort_column != None:
column.set_sort_column_id(sort_column)
else:
column.set_sort_column_id(column_id)
return column
def create_image_pixbuf(file, width, height=None):
"""Helper function to create a pixel buffer from a file of a set width and height."""
if height == None:
height = width
image = gtk.gdk.pixbuf_new_from_file(file).scale_simple(width, height, gtk.gdk.INTERP_BILINEAR)
return image
def hyperlink(url, text=None):
"""Returns a button that acts as a hyperlink."""
if text == None:
text = url
label = gtk.Label("<span foreground='blue' underline='low'>"+text+"</span>")
label.set_use_markup(True)
button = gtk.Button()
button.add(label)
button.set_relief(gtk.RELIEF_NONE)
button.connect('clicked', lambda x_: os.popen("gnome-open '%s' &" % (url)))
return button
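# --- Editor's hedged usage sketch (an assumption, not part of the original module) ---
# Shows how the helpers above are typically combined; the model contents and the
# file/URL names below are illustrative, not taken from the surrounding project.
# model = gtk.ListStore(str)
# model.append(["Some Artist"])
# tree_view = create_single_column_tree_view("Artists", model)
# pixbuf = create_image_pixbuf("album_art.png", 64)
# link_button = hyperlink("http://ampache.org", "Ampache")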
|
gpl-3.0
| -8,954,783,973,329,544,000 | 34.676471 | 96 | 0.730722 | false |
pgleeson/TestArea
|
models/GranCell_Dan_analysis/F_I_plot.py
|
1
|
1406
|
#
#
# A file which generates a frequency vs current curve for various cells
#
# Author: Padraig Gleeson
#
# This file has been developed as part of the neuroConstruct project
# This work has been funded by the Medical Research Council and the
# Wellcome Trust
#
#
from sys import *
from java.io import File
from GenerateF_ICurve import FreqVsCurrentGenerator
from math import *
simConfig="TestTonic"
preStimAmp = -0.05
preStimDel = 0
preStimDur = 200
stimAmpLow = 0.0
stimAmpInc = 0.002
stimAmpHigh = 0.1
stimDel = preStimDur
stimDur = 1000
simDuration = preStimDur + stimDur # ms
analyseStartTime = stimDel + 300 # So it's firing at a steady rate...
analyseStopTime = simDuration
analyseThreshold = -20 # mV
# Change this number to the number of processors you wish to use on your local machine
maxNumSimultaneousSims = 4
# Load neuroConstruct project
projFile = File("VSCSGranCell.neuro.XML")
gen = FreqVsCurrentGenerator()
gen.generateF_ICurve(projFile,
"NEURON",
simConfig,
preStimAmp, preStimDel, preStimDur,
stimAmpLow, stimAmpInc, stimAmpHigh,
stimDel, stimDur,
simDuration,
analyseStartTime, analyseStopTime,
analyseThreshold,
maxNumSimultaneousSims)
|
gpl-2.0
| -1,169,122,728,037,807,400 | 20.31746 | 86 | 0.64936 | false |
OpenSourcePolicyCenter/taxdata
|
puf_data/finalprep.py
|
1
|
19683
|
import os
import sys
import numpy as np
import pandas
from impute_itmexp import impute_itemized_expenses
from impute_pencon import impute_pension_contributions
BENPUF = False # set temporarily to True to generate a benpuf.csv file
# BENPUF = False will generate a puf.csv file without any benefits variables
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
def main():
"""
Contains all the logic of the puf_data/finalprep.py script.
"""
# (*) Read unprocessed input file into a Pandas Dataframe
cps_matched_puf_path = os.path.join(CUR_PATH, 'cps-matched-puf.csv')
data = pandas.read_csv(cps_matched_puf_path)
# Rename certain CPS variables
renames = {
'XHID': 'h_seq',
'XFID': 'ffpos',
'xstate': 'fips'
}
data = data.rename(columns=renames)
# - Check the PUF year
max_flpdyr = max(data['flpdyr'])
if max_flpdyr == 2008:
data = transform_2008_varnames_to_2009_varnames(data)
else: # if PUF year is 2009+
data = age_consistency(data)
# - Make recid variable be a unique integer key:
data = create_new_recid(data)
# - Make several variable names be uppercase as in SOI PUF:
data = capitalize_varnames(data)
# - Impute cmbtp variable to estimate income on Form 6251 but not in AGI:
cmbtp_standard = data['e62100'] - data['e00100'] + data['e00700']
zero = np.zeros(len(data.index))
medical_limit = np.maximum(zero, data['e17500'] -
np.maximum(zero, data['e00100']) * 0.075)
med_adj = np.minimum(medical_limit,
0.025 * np.maximum(zero, data['e00100']))
stx_adj = np.maximum(zero, data['e18400'])
cmbtp_itemizer = (cmbtp_standard + data['p04470'] + data['e21040'] -
data['e18500'] - data['e20800'] - stx_adj - med_adj)
cmbtp = np.where(data['FDED'] == 1, cmbtp_itemizer, cmbtp_standard)
data['cmbtp'] = np.where(data['f6251'] == 1, cmbtp, 0.)
# - Split earnings variables into taxpayer (p) and spouse (s) amounts:
data = split_earnings_variables(data, max_flpdyr)
# - Add AGI bin indicator used for adjustment factors:
data = add_agi_bin(data)
# - Replace e20500 with g20500:
data = replace_20500(data)
# - Remove variables not expected by Tax-Calculator:
if max_flpdyr >= 2009:
data = remove_unused_variables(data)
# - Remove benefits variables when BENPUF is False:
if not BENPUF:
data = remove_benefits_variables(data)
# - Convert data to integers:
data = data.round(0).astype('int64')
# - Impute itemized expense amounts for non-itemizers:
data = impute_itemized_expenses(data.copy())
# - Impute pension contributions:
data = impute_pension_contributions(data.copy())
# - Rename 'filer' to 'data_source'
data = data.rename(columns={'filer': 'data_source'})
# - Write processed data to the final CSV-formatted file:
if BENPUF:
write_path = os.path.join(CUR_PATH, 'benpuf.csv')
data.to_csv(write_path, index=False)
else:
write_path = os.path.join(CUR_PATH, 'puf.csv')
data.to_csv(write_path, index=False)
return 0
# end of main function code
def create_new_recid(data):
"""
Construct unique recid.
"""
data['recid'] = data.index + 1
return data
def age_consistency(data):
"""
Construct age_head from agerange if available; otherwise use CPS value.
Construct age_spouse as a normally-distributed agediff from age_head.
"""
# set random-number-generator seed so that always get same random numbers
np.random.seed(seed=123456789)
# generate random integers to smooth age distribution in agerange
shape = data['age_head'].shape
agefuzz8 = np.random.randint(0, 9, size=shape)
agefuzz9 = np.random.randint(0, 10, size=shape)
agefuzz10 = np.random.randint(0, 11, size=shape)
agefuzz15 = np.random.randint(0, 16, size=shape)
# assign age_head using agerange midpoint or CPS age if agerange absent
data['age_head'] = np.where(data['agerange'] == 0,
data['age_head'],
(data['agerange'] + 1 - data['dsi']) * 10)
# smooth the agerange-based age_head within each agerange
data['age_head'] = np.where(np.logical_and(data['agerange'] == 1,
data['dsi'] == 0),
data['age_head'] - 3 + agefuzz9,
data['age_head'])
data['age_head'] = np.where(np.logical_and(data['agerange'] == 2,
data['dsi'] == 0),
data['age_head'] - 4 + agefuzz9,
data['age_head'])
data['age_head'] = np.where(np.logical_and(data['agerange'] == 3,
data['dsi'] == 0),
data['age_head'] - 5 + agefuzz10,
data['age_head'])
data['age_head'] = np.where(np.logical_and(data['agerange'] == 4,
data['dsi'] == 0),
data['age_head'] - 5 + agefuzz10,
data['age_head'])
data['age_head'] = np.where(np.logical_and(data['agerange'] == 5,
data['dsi'] == 0),
data['age_head'] - 5 + agefuzz10,
data['age_head'])
data['age_head'] = np.where(np.logical_and(data['agerange'] == 6,
data['dsi'] == 0),
data['age_head'] - 5 + agefuzz15,
data['age_head'])
data['age_head'] = np.where(np.logical_and(data['agerange'] == 1,
data['dsi'] == 1),
data['age_head'] - 0 + agefuzz8,
data['age_head'])
data['age_head'] = np.where(np.logical_and(data['agerange'] == 2,
data['dsi'] == 1),
data['age_head'] - 2 + agefuzz8,
data['age_head'])
data['age_head'] = np.where(np.logical_and(data['agerange'] == 3,
data['dsi'] == 1),
data['age_head'] - 4 + agefuzz10,
data['age_head'])
# convert zero age_head to one
data['age_head'] = np.where(data['age_head'] == 0,
1, data['age_head'])
# assign age_spouse relative to age_head if married;
# if head is not married, set age_spouse to zero;
# if head is married but has unknown age, set age_spouse to one;
# do not specify age_spouse values below 15
adiff = np.random.normal(0.0, 4.0, size=shape)
agediff = np.int_(adiff.round())
age_sp = data['age_head'] + agediff
age_spouse = np.where(age_sp < 15, 15, age_sp)
data['age_spouse'] = np.where(data['mars'] == 2,
np.where(data['age_head'] == 1,
1, age_spouse),
0)
return data
def capitalize_varnames(data):
"""
Capitalize some variable names.
"""
renames = {
'dsi': 'DSI',
'eic': 'EIC',
'fded': 'FDED',
'flpdyr': 'FLPDYR',
'mars': 'MARS',
'midr': 'MIDR',
'xtot': 'XTOT',
'recid': 'RECID',
}
data = data.rename(columns=renames)
return data
def remove_unused_variables(data):
"""
Delete non-benefit variables not expected by Tax-Calculator.
"""
data['s006'] = data['matched_weight'] * 100
UNUSED_READ_VARS = [
'agir1', 'efi', 'elect', 'flpdmo', 'wage_head', 'wage_spouse',
'f3800', 'f8582', 'f8606', 'f8829', 'f8910',
'n20', 'n25', 'n30', 'prep', 'schb', 'schcf', 'sche',
'tform', 'ie', 'txst', 'xfpt', 'xfst',
'xocah', 'xocawh', 'xoodep', 'xopar', 'agerange',
's008', 's009', 'wsamp', 'txrt', 'matched_weight',
'e01000', 'e03260', 'e09400', 'e24516', 'e62720', 'e62730',
'e62740', 'e05100', 'e05800', 'e08800', 'e15360', 'p04470',
'e00100', 'e20800', 'e21040', 'e62100', 'e59560', 'p60100',
'e19550', 'e20550', 'e20600', 'e19700', 'e02500', 'e07200',
'e87870', 'e30400', 'e24598', 'e11300', 'e30500',
'e07180', 'e53458', 'e33000', 'e25940', 'e12000', 'p65400',
'e15210', 'e24615', 'e07230', 'e11100', 'e10900',
'e11582', 'e11583', 'e25920', 's27860', 'e10960', 'e59720',
'e87550', 'e26190', 'e53317', 'e53410', 'e04600', 'e26390',
'e15250', 'p65300', 'p25350', 'e06500', 'e10300', 'e26170',
'e26400', 'e11400', 'p25700', 'e04250', 'e07150', 'e60000',
'e59680', 'e24570', 'e11570', 'e53300', 'e10605', 'e22320',
'e26160', 'e22370', 'e53240', 'p25380', 'e10700', 'e09600',
'e06200', 'e24560', 'p61850', 'e25980', 'e53280', 'e25850',
'e25820', 'e10950', 'e68000', 'e26110', 'e58950', 'e26180',
'e04800', 'e06000', 'e87880', 't27800', 'e06300', 'e59700',
'e26100', 'e05200', 'e87875', 'e82200', 'e25860', 'e07220',
'e11070', 'e11550', 'e11580', 'p87482', 'e20500', 'FDED',
'e11900', 'e18600', 'e25960', 'e15100', 'p27895', 'e12200',
'nu18_dep', 'e11601', 'e11603', 'e11602', 'e25550', 'f8867',
'f8949']
MORE_UNUSED_READ_VARS = [
'jcps88',
'jcps89',
'jcps80',
'jcps81',
'jcps82',
'jcps83',
'jcps84',
'jcps85',
'jcps86',
'jcps87',
'zwaspt',
'e52872',
'jcps19',
'jcps18',
'jcps17',
'jcps16',
'jcps15',
'jcps14',
'jcps13',
'jcps12',
'jcps11',
'jcps10',
'icps4',
'icps5',
'jcps68',
'jcps69',
'cweight',
'jcps63',
'jcps60',
'jcps61',
'jcps66',
'jcps67',
'jcps64',
'jcps65',
'cpsseq',
'jcps97',
'jcps96',
'jcps95',
'jcps94',
'jcps93',
'jcps92',
'jcps91',
'jcps90',
'jcps99',
'jcps98',
'soiseq',
'jcps79',
'jcps78',
'wt',
'jcps71',
'jcps70',
'jcps73',
'jcps72',
'jcps75',
'jcps74',
'jcps77',
'jcps76',
'jcps62',
'jcps44',
'jcps45',
'jcps46',
'jcps47',
'jcps40',
'jcps41',
'jcps42',
'jcps43',
'zwassp',
'jcps48',
'jcps49',
'p86421',
'prodseq',
'jcps53',
'jcps52',
'jcps51',
'jcps50',
'jcps57',
'jcps56',
'jcps55',
'jcps54',
'jcps59',
'jcps58',
'icps6',
'jcps4',
'finalseq',
'jcps100',
'e07140',
'jcps26',
'jcps27',
'jcps24',
'jcps25',
'jcps22',
'jcps23',
'jcps20',
'jcps21',
'jcps28',
'jcps29',
'e52852',
'jcps35',
'jcps34',
'jcps37',
'jcps36',
'jcps31',
'jcps30',
'jcps33',
'jcps32',
'jcps39',
'jcps38',
'jcps7',
'jcps6',
'jcps5',
'icps7',
'jcps3',
'jcps2',
'jcps1',
'icps3',
'icps8',
'icps9',
'jcps9',
'jcps8']
ALL_UNUSED_READ_VARS = UNUSED_READ_VARS + MORE_UNUSED_READ_VARS
data = data.drop(ALL_UNUSED_READ_VARS, 1)
NEW_POST_PR83_UNUSED_READ_VARS = [
'SOISEQ',
'age_dep1', 'age_dep2', 'age_dep3', 'age_dep4', 'age_dep5',
'age_oldest', 'age_youngest',
'cpsseq',
'e07140',
'e52852',
'e52872',
'finalseq',
'ftpt_head', 'ftpt_spouse',
'gender_head', 'gender_spouse',
'h_seq',
'hga_head', 'hga_spouse',
'head_age',
'i',
'jcps25', 'jcps28', 'jcps35', 'jcps38',
'medicaid',
'medicarex_dep',
'num_medicaid',
'num_medicare',
'num_snap',
'num_ss',
'num_ssi',
'num_vet',
'p86421',
'peridnum',
'prodseq',
'snap_dep',
'snap_participationp',
'snap_participations',
'sp_ptr',
'spouse_age',
'ss',
'ssi_dep',
'ssi_participationp',
'ssi_participations',
'vb',
'vb_participationp',
'vb_participations',
'vbp',
'vbs',
'wt']
# data = data.drop(NEW_POST_PR83_UNUSED_READ_VARS, 1)
data = data.fillna(value=0)
return data
def remove_benefits_variables(data):
"""
Delete benefits variables.
"""
BENEFIT_VARS = [
'ssi', 'ssip', 'ssis', 'ssi_participation',
'snap', 'snapp', 'snaps', 'snap_participation',
'medicarex', 'medicarexp', 'medicarexs']
# data = data.drop(BENEFIT_VARS, 1)
return data
def transform_2008_varnames_to_2009_varnames(data):
"""
Convert 2008 IRS-SOI PUF variable names into 2009 PUF variable names.
"""
data['e18400'] = data['e18425'] + data['e18450']
# drop unused variables only existing in 2008 IRS-SOI PUF
UNUSED = {'e18425', 'e18450', 'e25370', 'e25380', 'state',
'e87500', 'e87510', 'e87520', 'e87540'}
data = data.drop(UNUSED, 1)
# drop variables not expected by Tax-Calculator
UNUSED_READ_VARS = {
'agir1', 'efi', 'elect', 'flpdmo',
'f3800', 'f8582', 'f8606',
'n20', 'n25', 'prep', 'schb', 'schcf', 'sche',
'tform', 'ie', 'txst', 'xfpt', 'xfst',
'xocah', 'xocawh', 'xoodep', 'xopar',
's008', 's009', 'wsamp', 'txrt',
'e30400', 'e24598', 'e11300', 'e24535', 'e30500',
'e07180', 'e53458', 'e33000', 'e25940', 'e12000', 'p65400',
'e24615', 'e07230', 'e11100', 'e10900', 'e11581',
'e11582', 'e11583', 'e25920', 's27860', 'e59720',
'e87550', 'e26190', 'e53317', 'e53410', 'e04600', 'e26390',
'p65300', 'p25350', 'e06500', 'e10300', 'e26170',
'e26400', 'e11400', 'p25700', 'e04250', 'e07150',
'e59680', 'e24570', 'e11570', 'e53300', 'e10605', 'e22320',
'e26160', 'e22370', 'e53240', 'e10700', 'e09600',
'e06200', 'e24560', 'p61850', 'e25980', 'e53280', 'e25850',
'e25820', 'e68000', 'e26110', 'e58950', 'e26180',
'e04800', 'e06000', 't27800', 'e06300', 'e59700',
'e26100', 'e05200', 'e82200', 'e25860', 'e07220',
'e11900', 'e25960', 'p27895', 'e12200'}
data = data.drop(UNUSED_READ_VARS, 1)
return data
def split_earnings_variables(data, data_year):
"""
Split earnings subject to FICA or SECA taxation between taxpayer and spouse
"""
# split wage-and-salary earnings subject to FICA taxation
total = np.where(data['MARS'] == 2,
data['wage_head'] + data['wage_spouse'], 0).astype(float)
frac_p = np.where(total != 0, data['wage_head'] / total, 1.)
frac_s = 1.0 - frac_p
data['e00200p'] = np.around(frac_p * data['e00200'], 2)
data['e00200s'] = np.around(frac_s * data['e00200'], 2)
# specify FICA-SECA maximum taxable earnings (mte) for data_year
if data_year == 2008:
mte = 102000
elif data_year == 2009:
mte = 106800
elif data_year == 2011:
mte = 106800
else:
raise ValueError('illegal SOI PUF data year {}'.format(data_year))
# total self-employment earnings subject to SECA taxation
# (minimum handles a few secatip values slightly over the mte cap)
secatip = np.minimum(mte, data['e30400']) # for taxpayer
secatis = np.minimum(mte, data['e30500']) # for spouse
# split self-employment earnings subject to SECA taxation
# ... compute secati?-derived frac_p and frac_s
total = np.where(data['MARS'] == 2, secatip + secatis, 0).astype(float)
frac_p = np.where(total != 0, secatip / total, 1.)
frac_s = 1.0 - frac_p
# ... split e00900 (Schedule C) and e02100 (Schedule F) net earnings/loss
data['e00900p'] = np.around(frac_p * data['e00900'], 2)
data['e00900s'] = np.around(frac_s * data['e00900'], 2)
data['e02100p'] = np.around(frac_p * data['e02100'], 2)
data['e02100s'] = np.around(frac_s * data['e02100'], 2)
# ... estimate Schedule K-1 box 14 self-employment earnings/loss
# ... Note: secati? values fall in the [0,mte] range.
# ... So, if sum of e00900? and e02100? is negative and secati? is
# ... zero, we make a conservative assumption and set box14 to zero
# ... (rather than to a positive number), but we allow the estimate
# ... of box 14 to be negative (that is, represent a loss).
nonbox14 = data['e00900p'] + data['e02100p']
box14 = np.where(np.logical_and(nonbox14 <= 0, secatip <= 0),
0.,
secatip - nonbox14)
data['k1bx14p'] = box14.round(2)
nonbox14 = data['e00900s'] + data['e02100s']
box14 = np.where(np.logical_and(nonbox14 <= 0, secatis <= 0),
0.,
secatis - nonbox14)
data['k1bx14s'] = box14.round(2)
# ... check consistency of self-employment earnings estimates
raw = data['e00900p'] + data['e02100p'] + data['k1bx14p']
estp = np.where(raw < 0, 0., np.where(raw > mte, mte, raw))
raw = data['e00900s'] + data['e02100s'] + data['k1bx14s']
ests = np.where(raw < 0, 0., np.where(raw > mte, mte, raw))
assert np.allclose(estp, secatip, rtol=0.0, atol=0.01)
assert np.allclose(ests, secatis, rtol=0.0, atol=0.01)
return data
def add_agi_bin(data):
"""
Add an AGI bin indicator used in Tax-Calc to apply adjustment factors
"""
agi = pandas.Series([0] * len(data.e00100))
agi[data.e00100 < 0] = 0
agi[(data.e00100 >= 0) & (data.e00100 < 5000)] = 1
agi[(data.e00100 >= 5000) & (data.e00100 < 10000)] = 2
agi[(data.e00100 >= 10000) & (data.e00100 < 15000)] = 3
agi[(data.e00100 >= 15000) & (data.e00100 < 20000)] = 4
agi[(data.e00100 >= 20000) & (data.e00100 < 25000)] = 5
agi[(data.e00100 >= 25000) & (data.e00100 < 30000)] = 6
agi[(data.e00100 >= 30000) & (data.e00100 < 40000)] = 7
agi[(data.e00100 >= 40000) & (data.e00100 < 50000)] = 8
agi[(data.e00100 >= 50000) & (data.e00100 < 75000)] = 9
agi[(data.e00100 >= 75000) & (data.e00100 < 100000)] = 10
agi[(data.e00100 >= 100000) & (data.e00100 < 200000)] = 11
agi[(data.e00100 >= 200000) & (data.e00100 < 500000)] = 12
agi[(data.e00100 >= 500000) & (data.e00100 < 1e6)] = 13
agi[(data.e00100 >= 1e6) & (data.e00100 < 1.5e6)] = 14
agi[(data.e00100 >= 1.5e6) & (data.e00100 < 2e6)] = 15
agi[(data.e00100 >= 2e6) & (data.e00100 < 5e6)] = 16
agi[(data.e00100 >= 5e6) & (data.e00100 < 1e7)] = 17
agi[(data.e00100 >= 1e7)] = 18
data['agi_bin'] = agi
return data
def replace_20500(data):
"""
Replace e20500, net casualty losses, with g20500, gross casualty losses
(gross loss values less than 10% AGI are unknown and assumed to be zero)
"""
gross = np.where(data.e20500 > 0.,
data.e20500 + 0.10 * np.maximum(0., data.e00100),
0.)
data['g20500'] = np.int_(gross.round())
return data
if __name__ == '__main__':
sys.exit(main())
|
mit
| 5,739,250,510,163,735,000 | 34.401079 | 79 | 0.518823 | false |
0snug0/pyflare
|
zoneimporter.py
|
1
|
2021
|
#! /usr/bin/python
# zoneimporter.py works and is tested for A, AAAA, TXT, MX, and NS records to be uploaded to CloudFlare using APIv4
# MUST be in bind format
# Make sure you have added your api key and email address to this file
# This is not a CloudFlare-supported script, but happy to help out; send bug reports or requests to [email protected]
# USAGE:
# python zoneimporter.py file.txt zone
# python zoneimporter.py foobar.com.txt foobar.com
import requests, json
from sys import argv
###### CONFIG ######
cf_api = ''
cf_email = ''
cf_zone_name = argv[2]
cf_endpoint = 'https://api.cloudflare.com/client/v4/zones/'
headers = {'X-Auth-Email': cf_email, 'X-Auth-Key': cf_api, 'Content-Type': 'application/json'}
def getZoneID(zone_name):
url = cf_endpoint + '?name=' +zone_name
r = requests.get(url, headers=headers)
return json.loads(r.text)
def recList():
    bind_file = open(argv[1])
rec_list = []
for line in bind_file:
if 'IN\t' in line:
rec_list.append(line.split())
return rec_list
def buildDNSDict(rec_type, rec_name, rec_content, rec_ttl, priority=0):
if rec_type.lower() == 'mx':
data = {"type": rec_type, "name": rec_name, "content": rec_content, "ttl": rec_ttl, 'priority': priority}
else:
data = {"type": rec_type, "name": rec_name, "content": rec_content, "ttl": rec_ttl}
return data
def postToCF(dns_dict):
cf_zone_id = getZoneID(cf_zone_name)['result'][0]['id']
url = cf_endpoint + cf_zone_id + '/dns_records'
r = requests.post(url, data=dns_dict, headers=headers)
response = json.loads(r.text)
if response['success']:
print 'Completed \n'
else:
print 'ERROR:', response['errors'][0]['message'], '\n'
for rec in recList():
print rec[0], rec[3], ''.join(rec[4:])
if rec[3].lower() == 'mx':
dns_dict = buildDNSDict(rec[3], rec[0], rec[5], rec[1], priority = rec[4])
elif rec[3].lower() == 'txt':
dns_dict = buildDNSDict(rec[3], rec[0], ''.join(rec[4:]), rec[1])
else:
dns_dict = buildDNSDict(rec[3], rec[0], rec[4], rec[1])
postToCF(json.dumps(dns_dict))
|
mit
| -4,555,443,536,895,921,000 | 32.147541 | 118 | 0.665017 | false |
crs4/omero.biobank
|
test/kb/test_data_sample.py
|
1
|
2194
|
# BEGIN_COPYRIGHT
# END_COPYRIGHT
import os, unittest, logging
logging.basicConfig(level=logging.ERROR)
from bl.vl.kb import KnowledgeBase as KB
from kb_object_creator import KBObjectCreator
OME_HOST = os.getenv("OME_HOST", "localhost")
OME_USER = os.getenv("OME_USER", "root")
OME_PASS = os.getenv("OME_PASS", "romeo")
class TestKB(KBObjectCreator):
def __init__(self, name):
super(TestKB, self).__init__(name)
self.kill_list = []
def setUp(self):
self.kb = KB(driver='omero')(OME_HOST, OME_USER, OME_PASS)
def tearDown(self):
self.kill_list.reverse()
for x in self.kill_list:
self.kb.delete(x)
self.kill_list = []
def check_object(self, o, conf, otype):
try:
self.assertTrue(isinstance(o, otype))
for k in conf.keys():
v = conf[k]
# FIXME this is omero specific...
if hasattr(v, 'ome_obj'):
self.assertEqual(getattr(o, k).id, v.id)
self.assertEqual(type(getattr(o, k)), type(v))
elif hasattr(v, '_id'):
self.assertEqual(getattr(o, k)._id, v._id)
else:
self.assertEqual(getattr(o, k), v)
except:
pass
def test_data_sample(self):
conf, ds = self.create_data_sample()
self.kill_list.append(ds.save())
self.check_object(ds, conf, self.kb.DataSample)
def test_snp_markers_set(self):
conf, sms = self.create_snp_markers_set()
self.kill_list.append(sms.save())
self.check_object(sms, conf, self.kb.SNPMarkersSet)
def test_genotype_data_sample(self):
conf, gds = self.create_genotype_data_sample()
self.kill_list.append(gds.save())
self.check_object(gds, conf, self.kb.GenotypeDataSample)
def test_data_object(self):
conf, do = self.create_data_object()
self.kill_list.append(do.save())
self.check_object(do, conf, self.kb.DataObject)
def suite():
suite = unittest.TestSuite()
suite.addTest(TestKB('test_data_sample'))
suite.addTest(TestKB('test_snp_markers_set'))
suite.addTest(TestKB('test_genotype_data_sample'))
suite.addTest(TestKB('test_data_object'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
runner.run((suite()))
|
gpl-2.0
| -1,286,266,242,549,782,800 | 26.772152 | 62 | 0.649043 | false |
KholdStare/generators-to-coroutines
|
generators_to_coroutines/descriptor_magic.py
|
1
|
2405
|
import copy
import six
class BindingExtensionDescriptor(object):
""" Descriptor for a function that can be attached to a bound method,
and become bound to the same object instance.
For example:
obj.method.extended(*args)
extended(*args) would be called called with obj as an implicit parameter,
i.e. 'extended' would be a bound method if it was attached using this
descriptor.
The attaching process is complicated... (see other functions/classes in
this module.)"""
def __init__(self, func):
self.func = func
def __get__(self, boundMethod, type=None):
obj = boundMethod.__self__
return six.create_bound_method(self.func, obj)
def wrapMethodAndAttachDescriptors(descriptors):
""" Given a dictionary mapping names to descriptor objects, create a
decorator for attaching these descriptors to a bound method. """
class WrapMethod(object):
""" A descriptor that wraps a method, and intercepts calls to __get__,
to inject other descriptors onto instances of the bound method. """
def __init__(self, func):
self.func = func
self.descriptors = descriptors
def __get__(self, obj, type=None):
method = six.create_bound_method(self.func, obj)
if obj is not None:
return _wrapAlreadyBoundMethodAndAttachDescriptors(
method, self.descriptors)
return method
return WrapMethod
class BoundMethodWrapper(object):
""" Wraps a bound method in an object, to allow invoking of descriptors on
said method. """
def __init__(self, boundMethod):
self.boundMethod = boundMethod
def __call__(self, *args, **kwargs):
return self.boundMethod(*args, **kwargs)
def __getattr__(self, attr):
return self.boundMethod.__getattribute__(attr)
def _wrapAlreadyBoundMethodAndAttachDescriptors(boundMethod, descriptors):
""" Given a bound method and descriptors, wrap the method appropriately so
the descriptors are properly attached and invoked. """
# need a specialized copy of the wrapping class to attach these
# descriptors
localBoundMethodWrapperClass = copy.copy(BoundMethodWrapper)
for key in descriptors:
setattr(localBoundMethodWrapperClass, key, descriptors[key])
return localBoundMethodWrapperClass(boundMethod)
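# Hypothetical usage sketch (the class, method and helper names below are
# invented for illustration and are not part of this module): attach a helper
# so that accessing it through the bound method binds it to the same instance.
#
#     descriptors = {'doubled': BindingExtensionDescriptor(lambda self, x: 2 * self.base(x))}
#
#     class Example(object):
#         @wrapMethodAndAttachDescriptors(descriptors)
#         def base(self, x):
#             return x + 1
#
#     e = Example()
#     e.base(1)           # -> 2, the wrapped method still behaves normally
#     e.base.doubled(1)   # -> 4, the helper runs bound to the same instance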
|
bsd-2-clause
| 3,636,101,478,478,068,700 | 31.066667 | 78 | 0.675676 | false |
blesscat/flux_line_bot
|
fluxclient/printer/flux_raft.py
|
1
|
13555
|
#!/usr/bin/env python3
import sys
import time
import math
import re
import os
from queue import Queue
from math import ceil, sqrt
import numpy as np
class Raft():
def __init__(self):
self.move_re = re.compile(r"G1 ?([XYZEF] ?\-?\d+\.?\d+)?([XYZEF] ?\-?\d+\.?\d+)?([XYZEF] ?\-?\d+\.?\d+)?([XYZEF] ?\-?\d+\.?\d+)?")
self.axis_re = re.compile(r"([XYZEF]) ?(\-?\d+\.?\d+)")
self.extrusion = 0.0759617 * 0.8
self.line_width = 0.4
self.resolution = 0.3
self.first_layer = 0.3
self.layer_height = 0.2
self.count = 3
self.z_space = 0.12
self.width = ceil(172 / self.resolution)
self.grid = [[]]
self.gcode = []
def print_start_gcode(self):
code = """M107 ; disable fan
M104 S220 ; set temperature
G28 ; home all axes
G1 Z5 F5000 ; lift nozzle
M109 S200 ; wait for temperature to be reached
G21 ; set units to millimeters
G90 ; use absolute coordinates
M82 ; use absolute distances for extrusion
G92 E0 ; reset extrusion distance
G1 Z0.3 F9000.000 ; move to next layer (0)
G1 E-2.00000 F2400.00000 ; retract
G92E0
G1 F1800
"""
print(code, file=self.output_stream)
def process(self, gcode, debug=False):
#Process all gcode on first few layers, and fill the grid, skip skirt...
self.grid = self.fill_grid(gcode)
#Select all connected islands, find the edge points at each one
islands = self.find_islands()
print(";Islands found %d" % len(islands), file=sys.stderr)
#Print start gcode
self.print_start_gcode()
#Print raft gcode
raft_gcode = self.generate_gcode(islands)
#Debug output
if debug and os.environ.get("flux_debug") == '1':
self.output_grid()
        #Print other gcode ( uplift Z by self.count*self.layer_height+self.z_space )
skip = 15
for line in gcode:
#Skip first 15 lines
if skip > 0:
skip = skip - 1
continue
#Lift Z
            line = re.sub(r"Z ?(-?[\d.]+)", self.z_rep, line)
print(line, end="", file=self.output_stream)
def z_rep(self, matchobj):
z_old = float(matchobj.group(1))
return "Z" + str(z_old + self.count * self.layer_height + self.z_space)
def generate_gcode(self, islands):
island_id = 0
for island in islands:
island_id = island_id + 1
print(";Island #%d" % island_id, file=self.output_stream)
edge = island
x = edge[0][0]
y = edge[0][1]
last_point = [0, 0]
sorted_edge = [[x, y]]
#Traverse the edge
while len(edge) > 0:
if self.is_edge(edge, x - 1, y):
x = x - 1
elif self.is_edge(edge, x + 1, y):
x = x + 1
elif self.is_edge(edge, x, y - 1):
y = y - 1
elif self.is_edge(edge, x, y + 1):
y = y + 1
elif self.is_edge(edge, x - 1, y - 1):
(x, y) = (x - 1, y - 1)
elif self.is_edge(edge, x + 1, y + 1):
(x, y) = (x + 1, y + 1)
elif self.is_edge(edge, x + 1, y - 1):
(x, y) = (x + 1, y - 1)
elif self.is_edge(edge, x - 1, y + 1):
(x, y) = (x - 1, y + 1)
if last_point[0] == x and last_point[1] == y:
x = edge[0][0]
y = edge[0][1]
continue
else:
edge.remove([x, y])
self.grid[x][y] = 4
last_point = [x, y]
sorted_edge.append([x, y])
#Outline of raft
print("G92 E0", file=self.output_stream)
print("G1 Z%lf" % self.first_layer, file=self.output_stream)
extruded = 0
(x_min, y_min, x_max, y_max) = (999999, 999999, -1, -1)
last_point = [0, 0]
for pt in sorted_edge:
if x_min > pt[0]:
x_min = pt[0]
if x_max < pt[0]:
x_max = pt[0]
if y_min > pt[1]:
y_min = pt[1]
if y_max < pt[1]:
y_max = pt[1]
x = self.m2g(pt[0])
y = self.m2g(pt[1])
if last_point[0] == 0 and last_point[1] == 0:
last_point = [x, y]
e = self.dist(last_point[0], last_point[1], x, y) * self.extrusion
extruded = extruded + e
last_point = [x, y]
print("G1X%lfY%lfE%lf" % (x, y, extruded), file=self.output_stream)
#Infill of raft
horizontal_lines = abs(ceil((y_max - y_min) * self.resolution / self.line_width))
vertical_lines = abs(ceil((x_max - x_min) * self.resolution / self.line_width))
print("Lines / Horizontal %lf Vertical %lf" % (horizontal_lines, vertical_lines), file=sys.stderr)
print("Xmin %lf Xmax %lf Ymin %lf Ymax %lf" % (x_min, x_max, y_min, y_max), file=sys.stderr)
width = len(self.grid)
for l in range(0, self.count):
print("G1 Z%lf" % (self.first_layer + l * self.layer_height), file=self.output_stream)
if l % 2 == 0:
for r in range(0, horizontal_lines):
y = self.g2m(self.m2g(y_min) + self.line_width * r)
range_of_x = range(0, width)
if r % 2 == 1:
range_of_x = reversed(range_of_x)
inside = False
fill_start = 0
for x in range_of_x:
if self.grid[x][y] > 0 and not inside:
inside = True
fill_start = self.m2g(x)
elif self.grid[x][y] == 0 and inside:
e = abs(self.m2g(x) - fill_start) * self.extrusion
extruded = extruded + e
print("G1 X%lf Y%lf ; H line" % (fill_start, self.m2g(y)), file=self.output_stream)
print("G1 X%lf Y%lf E%lf" % (self.m2g(x), self.m2g(y), extruded), file=self.output_stream)
inside = False
else:
for r in range(0, vertical_lines):
x = self.g2m(self.m2g(x_min) + self.line_width * r)
range_of_y = range(0, width)
if r % 2 == 1:
range_of_y = reversed(range_of_y)
inside = False
fill_start = 0
for y in range_of_y:
if self.grid[x][y] > 0 and not inside:
inside = True
fill_start = self.m2g(y)
elif self.grid[x][y] == 0 and inside:
e = abs(self.m2g(y) - fill_start) * self.extrusion
extruded = extruded + e
print("G1 X%lf Y%lf ; V line" % (self.m2g(x), fill_start), file=self.output_stream)
print("G1 X%lf Y%lf E%lf" % (self.m2g(x), self.m2g(y), extruded), file=self.output_stream)
inside = False
def is_edge(self, edge, x, y):
if [x, y] in edge and self.check_grid(x, y) == 3:
return True
else:
return False
def dist(self, x, y, x2, y2):
return sqrt((x - x2) * (x - x2) + (y - y2) * (y - y2))
def m2g(self, val):
return (val - ceil(86 / self.resolution)) * self.resolution
def g2m(self, val):
return round(val / self.resolution) + ceil(86 / self.resolution)
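    # Worked example of the grid<->gcode mapping above, assuming resolution is
    # the constructor default of 0.3: ceil(86 / 0.3) == 287, so
    # m2g(0) == (0 - 287) * 0.3 == -86.1 and g2m(-86.1) == -287 + 287 == 0,
    # i.e. grid index 0 sits at roughly the -86 mm edge of the 172 mm build area.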
def check_grid(self, x, y):
if x >= 0 and y >= 0 and x < self.width and y < self.width:
return self.grid[x][y]
return 0
def fill_grid(self, gcode):
self.grid = np.zeros((ceil(172 / self.resolution), ceil(172 / self.resolution)))
self.width = ceil(172 / self.resolution)
print("Grid size %d^2" % len(self.grid), file=sys.stderr)
x = y = z = 0
expansion = self.expansion / self.resolution
last_point = [0, 0]
min_division = self.resolution * expansion / 10.0
for ln in range(0, len(gcode)):
line = gcode[ln]
if "skirt" in line:
continue
if z > 2:
print("Gcode parsing end", file=sys.stderr)
break
if self.move_re.match(line):
for (axis, number) in self.axis_re.findall(line):
if axis == 'X':
x = float(number)
if axis == 'Y':
y = float(number)
if axis == 'Z':
z = float(number)
if (x - last_point[0]) * (x - last_point[0]) + (y - last_point[1]) * (y - last_point[1]) > min_division * min_division:
denom = int(ceil(sqrt((x - last_point[0]) * (x - last_point[0]) + (y - last_point[1]) * (y - last_point[1])) / min_division))
for numer in range(0, denom + 1):
lx = last_point[0] + (x - last_point[0]) * numer / denom
ly = last_point[1] + (y - last_point[1]) * numer / denom
(rx, ry) = (self.g2m(lx), self.g2m(ly))
self.fill_circle(rx, ry, 0, 0, expansion * expansion)
else:
(rx, ry) = (self.g2m(x), self.g2m(y))
self.fill_circle(rx, ry, 0, 0, expansion * expansion)
last_point = [x, y]
return self.grid
def fill_circle(self, rx, ry, x, y, r):
Q = Queue()
Q.put([x, y])
while not Q.empty():
n = Q.get()
(x, y) = n
if x * x + y * y < r:
if rx + x >= 0 and ry + y >= 0 and rx + x < self.width and ry + y < self.width and self.grid[rx + x][ry + y] == 0:
self.grid[rx + x][ry + y] = 1
Q.put([x - 1, y])
Q.put([x + 1, y])
Q.put([x, y - 1])
Q.put([x, y + 1])
#find all connected islands
def find_islands(self):
islands = []
#iterate all points on grid
for x in range(0, self.width):
for y in range(0, self.width):
if self.grid[x][y] == 1:
edge = []
self.find_all_connected_points(edge, x, y)
islands.append(edge)
return islands
#flood grouping
def find_all_connected_points(self, edge, x, y):
grid = self.grid
width = len(self.grid)
Q = Queue()
if x >= width or y >= width or x < 0 or y < 0:
return
if self.grid[x][y] != 1:
return
Q.put([x, y])
while not Q.empty():
n = Q.get()
(x, y) = n
if self.grid[x][y] == 1:
#Edge detection
#If surrounded by filled area, then it's not
if self.check_grid(x, y - 1) > 0 and self.check_grid(x - 1, y) > 0 and self.check_grid(x + 1, y) > 0 and self.check_grid(x, y + 1) > 0:
self.grid[x][y] = 2
else: # n is on edge
edge.append([x, y])
self.grid[x][y] = 3
if self.check_grid(x - 1, y) == 1:
Q.put([x - 1, y])
if self.check_grid(x + 1, y) == 1:
Q.put([x + 1, y])
if self.check_grid(x, y - 1) == 1:
Q.put([x, y - 1])
if self.check_grid(x, y + 1) == 1:
Q.put([x, y + 1])
#debug tool
def output_grid(self):
from PIL import Image
im = np.zeros((self.width, self.width, 3))
for x in range(0, self.width):
for y in range(0, self.width):
if self.grid[x][y] == 1:
im[x][y] = [0, 255, 0]
elif self.grid[x][y] == 2:
im[x][y] = [0, 0, 255]
elif self.grid[x][y] == 3:
im[x][y] = [255, 0, 0]
elif self.grid[x][y] == 4:
im[x][y] = [0, 128, 255]
else:
im[x][y] = [255, 255, 255]
Image.fromarray(im.astype(np.uint8)).save("grid.png")
def main(self, gcode, output_stream, debug):
if type(gcode) == str:
with open(gcode) as f:
gcode = f.readlines()
self.output_stream = output_stream
self.resolution = 0.5
        self.expansion = 10  # flux only param : equals the raft expansion
self.first_layer = 0.3 # equal to first layer height
self.layer_height = 0.2 # equal to layer height
self.count = 3 # equal to raft layers
        self.process(gcode, debug)
if __name__ == '__main__':
#Read gcode
fname = sys.argv[1]
with open(fname) as f:
gcode = f.readlines()
raft = Raft()
raft.main(gcode, output_stream=sys.stdout, debug=True)
|
agpl-3.0
| -6,356,251,802,322,692,000 | 37.183099 | 151 | 0.441387 | false |
datawire/ambassador
|
python/ambassador/ir/irretrypolicy.py
|
1
|
1500
|
from typing import Any, TYPE_CHECKING
from ..config import Config
from ..utils import RichStatus
from .irresource import IRResource
if TYPE_CHECKING:
from .ir import IR # pragma: no cover
class IRRetryPolicy (IRResource):
def __init__(self, ir: 'IR', aconf: Config,
rkey: str="ir.retrypolicy",
kind: str="IRRetryPolicy",
name: str="ir.retrypolicy",
**kwargs) -> None:
# print("IRRetryPolicy __init__ (%s %s %s)" % (kind, name, kwargs))
super().__init__(
ir=ir, aconf=aconf, rkey=rkey, kind=kind, name=name,
**kwargs
)
def setup(self, ir: 'IR', aconf: Config) -> bool:
if not self.validate_retry_policy():
self.post_error("Invalid retry policy specified: {}".format(self))
return False
return True
def validate_retry_policy(self) -> bool:
retry_on = self.get('retry_on', None)
is_valid = False
if retry_on in {'5xx', 'gateway-error', 'connect-failure', 'retriable-4xx', 'refused-stream', 'retriable-status-codes'}:
is_valid = True
return is_valid
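    # Illustrative values for the check above: retry_on='gateway-error' or
    # retry_on='5xx' validate, while an unknown token (or a comma-separated
    # list such as '5xx,connect-failure') is rejected as a whole.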
def as_dict(self) -> dict:
raw_dict = super().as_dict()
for key in list(raw_dict):
if key in ["_active", "_errored", "_referenced_by", "_rkey",
"kind", "location", "name", "namespace", "metadata_labels"]:
raw_dict.pop(key, None)
return raw_dict
|
apache-2.0
| 4,995,557,643,552,938,000 | 29.612245 | 128 | 0.553333 | false |
raycarnes/stock-logistics-warehouse
|
stock_available/models/product_product.py
|
2
|
2081
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This module is copyright (C) 2014 Numérigraphe SARL. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from openerp.addons import decimal_precision as dp
class ProductProduct(models.Model):
"""Add a field for the stock available to promise.
Useful implementations need to be installed through the Settings menu or by
installing one of the modules stock_available_*
"""
_inherit = 'product.product'
@api.one
@api.depends('virtual_available')
def _immediately_usable_qty(self):
"""No-op implementation of the stock available to promise.
By default, available to promise = forecasted quantity.
        Must be overridden by another module that actually implements
        the computation."""
self.immediately_usable_qty = self.virtual_available
immediately_usable_qty = fields.Float(
digits=dp.get_precision('Product Unit of Measure'),
compute='_immediately_usable_qty',
string='Available to promise',
help="Stock for this Product that can be safely proposed "
"for sale to Customers.\n"
"The definition of this value can be configured to suit "
"your needs")
|
agpl-3.0
| 3,671,457,567,160,154,600 | 40.6 | 79 | 0.64375 | false |
rwl/pylon
|
examples/pyreto/bandit.py
|
1
|
1933
|
__author__ = 'Richard Lincoln, [email protected]'
""" This example demonstrates how to use the discrete Roth-Erev reinforcement
learning algorithms to learn the n-armed bandit task. """
import pylab
import scipy
from pybrain.rl.agents import LearningAgent
from pybrain.rl.explorers import BoltzmannExplorer #@UnusedImport
from pybrain.rl.experiments import Experiment
from pyreto.bandit import BanditEnvironment, BanditTask
from pyreto.roth_erev import RothErev, PropensityTable #@UnusedImport
from pyreto.roth_erev import VariantRothErev #@UnusedImport
payouts = scipy.array([[200.0, 300.0, 100.0], # Expected value: 210
[900.0, 400.0, 600.0], # Expected value: 510
[700.0, 600.0, 550.0], # Expected value: 595
[150.0, 50.0, 1000.0], # Expected value: 147.5
[700.0, 800.0, 900.0]]) # Expected value: 790
distrib = scipy.array([[0.7, 0.2, 0.1],
[0.1, 0.6, 0.3],
[0.4, 0.2, 0.3],
[0.5, 0.45, 0.05],
[0.3, 0.5, 0.2]])
env = BanditEnvironment(payouts, distrib)
task = BanditTask(env)
table = PropensityTable(payouts.shape[0])
table.initialize(500.0)
#learner = RothErev(experimentation=0.55, recency=0.3)
learner = VariantRothErev(experimentation=0.65, recency=0.3)
learner.explorer = BoltzmannExplorer(tau=100.0, decay=0.9995)
agent = LearningAgent(table, learner)
experiment = Experiment(task, agent)
epis = int(1e1)
batch = 2
avgRewards = scipy.zeros(epis)
allActions = scipy.zeros(epis * batch)
c = 0
for i in range(epis):
experiment.doInteractions(batch)
avgRewards[i] = scipy.mean(agent.history["reward"])
allActions[c:c + batch] = agent.history["action"].flatten() + 1
agent.learn()
agent.reset()
c += batch
pylab.figure(figsize=(16, 6))
#pylab.plot(avgRewards)
pylab.plot(allActions)
pylab.show()
|
apache-2.0
| -6,356,878,838,941,284,000 | 30.688525 | 77 | 0.651319 | false |
parpg/parpg
|
tools/pychan_designer/pychan_designer.py
|
1
|
14194
|
#! /usr/bin/env python2
from __future__ import print_function
import sys
import os
import logging
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from fife import fife
from fife.extensions.basicapplication import ApplicationBase, Setting
from fife.extensions import pychan
from parpg.common.optionparser import OptionParser, OptionError
from parpg.common.utils import dedent_chomp
from parpg import gui
USAGE_MESSAGE = '''\
usage: pychan_designer.py [-h] xml_script_path
Load a pychan xml script and display the gui element it contains.
-h show this help message
-v increase the verbosity of console output; may be
specified multiple times
-q decrease the verbosity of console output; may be
specified multiple times
'''
def is_child(widget, parent):
"""
Recursively search a widget hierarchy to determine if the
widget is a decendent of parent.
"""
if widget is None or parent is None:
result = False
elif hasattr(parent, 'children'):
if widget in parent.children:
result = True
else:
result = False
for child in parent.children:
if is_child(widget, child):
result = True
break
else:
result = False
return result
class LabelLogHandler(logging.Handler):
def __init__(self, text_box, level=logging.NOTSET):
assert hasattr(text_box, 'text') and hasattr(text_box, 'adaptLayout')
logging.Handler.__init__(self, level=level)
self.text_box = text_box
def emit(self, record):
message= self.format(record)
self.text_box.text = unicode(message, 'utf8')
self.text_box.adaptLayout()
class TextBoxLogHandler(LabelLogHandler):
def emit(self, record):
message= self.format(record)
self.text_box.text = '\n'.join([self.text_box.text, message])
self.text_box.adaptLayout()
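# Minimal wiring sketch for the handlers above (the widget name is an
# assumption here; setupLogging() below does the real wiring):
#
#     status_bar = gui.findChild(name='statusBar')
#     handler = LabelLogHandler(status_bar, logging.INFO)
#     logging.getLogger('PychanDesignerApplication').addHandler(handler)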
class GuichanDesignerApplication(ApplicationBase):
def __init__(self, settings_file='settings-dist.xml'):
setting = Setting(settings_file=settings_file)
super(GuichanDesignerApplication, self).__init__(setting=setting)
# PyChanDesigner fonts
pychan.loadFonts('fonts/freefont.fontdef')
pychan.setupModalExecution(self.mainLoop, self.breakFromMainLoop)
# pychan default settings need an overwrite, because we don't like some aspects (like opaque widgets)
screen_width, screen_height = \
[int(dimension) for dimension in
setting.get('FIFE', 'ScreenResolution').split('x')]
self.xml_script_path = ''
self.active_widget = None
self.selected_widget = None
self.widget_stack = []
self.logger = logging.getLogger('PychanDesignerApplication')
self.xml_editor = pychan.loadXML('gui/xml_editor.xml')
self.console = pychan.loadXML('gui/console.xml')
with file('gui/pychan_designer.xml') as xml_file:
self.gui = pychan.loadXML(xml_file)
self.gui.min_size = (screen_width, screen_height)
self.gui.max_size = (screen_width, screen_height)
editor = self.gui.findChild(name='editor')
editor.content = self.xml_editor
self.gui.mapEvents(
{
'exitButton': self.quit,
'reloadButton': self.reloadXml,
'applyButton': self.applyXml,
'saveButton': self.saveXml,
'xmlEditorTab': self.showXmlEditor,
'consoleTab': self.showConsole,
}
)
self.gui.adaptLayout()
self.gui.show()
def showXmlEditor(self):
editor = self.gui.findChild(name='editor')
editor.content = self.xml_editor
def showConsole(self):
editor = self.gui.findChild(name='editor')
editor.content = self.console
def setupLogging(self, level):
console = self.console
self.logger.setLevel(level)
console_handler = TextBoxLogHandler(console, level)
console_formatter = \
logging.Formatter('%(levelname)s: %(message)s')
console_handler.setFormatter(console_formatter)
self.logger.addHandler(console_handler)
status_bar = self.gui.findChild(name='statusBar')
status_bar_handler = LabelLogHandler(status_bar, logging.ERROR)
self.logger.addHandler(status_bar_handler)
def _applyActivateEventCapture(self, widget):
widget.capture(self._activateCallback, 'mouseEntered')
widget.capture(self._deactivateCallback, 'mouseExited')
def selectWidget(self):
widget = self.widget_stack[0]
self.selected_widget = widget
property_viewer = self.gui.findChild(name='propertyViewer')
columns = property_viewer.content
name_rows = columns.findChild(name='propertyNameColumnRows')
value_rows = columns.findChild(name='propertyValueColumnRows')
name_rows.removeAllChildren()
value_rows.removeAllChildren()
assert len(name_rows.children) == 0 and \
len(value_rows.children) == 0, \
'propertyViewer was not properly cleared!'
for attribute in sorted(widget.ATTRIBUTES,
cmp=lambda a, b: cmp(a.name, b.name)):
name = attribute.name
name_label = pychan.Label(text=unicode(name, 'utf8'))
name_label.font = 'FreeMono'
name_label.background_color = (250, 250, 250)
name_container = pychan.HBox()
name_container.border_size = 1
name_container.base_color = (250, 250, 250)
name_container.addChild(name_label)
alternate_name = '_'.join(['old', name])
if hasattr(widget, alternate_name):
value = getattr(widget, alternate_name)
else:
value = getattr(widget, name)
if isinstance(value, fife.Color):
value = (value.r, value.g, value.b, value.a)
elif isinstance(value, fife.GuiFont):
value = value.name
elif isinstance(value, fife.GuiImage):
# FIXME Technomage 2011-01-27: Unfortunately I haven't found a
# way to display the image path only, so for now it's being
# skipped.
continue
value_label = pychan.TextField(text=unicode(repr(value), 'utf8'))
value_label.font = 'FreeMono'
value_label.background_color = (220, 220, 220)
value_label.min_size = (0, 20)
value_container = pychan.HBox()
value_container.border_size = 1
value_container.min_size = (0, 24)
value_container.base_color = (250, 250, 250)
value_container.addChild(value_label)
name_container.min_size = value_container.min_size
value_label.capture(
self._createChangeAttributeCallback(name, widget,
type(attribute)),
'keyPressed'
)
name_rows.addChild(name_container)
value_rows.addChild(value_container)
columns.adaptLayout()
if self.selected_widget is not None:
self.unhighlightWidget(self.selected_widget)
self.highlightWidget(widget)
def _createChangeAttributeCallback(self, attribute_name, wrapped_widget,
attribute_type):
def _changeAttributeCallback(widget, event):
if (event.getKey().getValue() == pychan.events.guichan.Key.ENTER):
try:
value = eval(widget.text)
setattr(wrapped_widget, attribute_name, value)
except (ValueError, TypeError, NameError) as exception:
self.logger.error(exception)
widget.text = unicode(getattr(wrapped_widget,
attribute_name))
return _changeAttributeCallback
def _activateCallback(self, widget):
self.logger.debug(
'mouse entered {0}(name={1!r})'.format(
type(widget).__name__,
widget.name
)
)
if len(self.widget_stack) == 0:
self.widget_stack.append(widget)
self.activateWidget(widget)
elif is_child(self.widget_stack[0], widget):
self.widget_stack.append(widget)
else:
parent = self.widget_stack[0]
self.widget_stack.insert(0, widget)
self.deactivateWidget(parent)
self.activateWidget(widget)
def _deactivateCallback(self, widget):
self.logger.debug(
'mouse exited {0}(name={1!r})'.format(
type(widget).__name__,
widget.name
)
)
widget_stack = self.widget_stack
index = widget_stack.index(widget)
self.deactivateWidget(widget)
if index == 0 and len(widget_stack) > 1:
parent = widget_stack[1]
self.activateWidget(parent)
widget_stack.remove(widget)
def activateWidget(self, widget):
self.highlightWidget(widget)
self.logger.debug(
'activated {0}(name={1!r})'.format(
type(widget).__name__,
widget.name
)
)
def deactivateWidget(self, widget):
self.unhighlightWidget(widget)
self.logger.debug(
'deactivated {0}(name={1!r})'.format(
type(widget).__name__,
widget.name
)
)
def highlightWidget(self, widget):
if not hasattr(widget, 'highlighted') or not widget.highlighted:
widget.highlighted = True
widget.old_base_color = widget.base_color
widget.base_color = (255, 0, 0)
widget.old_background_color = widget.background_color
widget.background_color = (255, 0, 0)
widget.old_border_size = widget.border_size
widget.border_size = 1
if hasattr(widget, 'opaque'):
widget.old_opaque = widget.opaque
widget.opaque = 1
widget.adaptLayout()
def unhighlightWidget(self, widget):
if hasattr(widget, 'highlighted') and widget.highlighted:
widget.highlighted = False
widget.base_color = widget.old_base_color
widget.background_color = widget.old_background_color
widget.border_size = widget.old_border_size
if hasattr(widget, 'opaque'):
widget.opaque = widget.old_opaque
widget.adaptLayout()
def saveXml(self):
with file(self.xml_script_path, 'w') as xml_file:
xml_content = self.xml_editor.text
xml_file.write(xml_content)
self.logger.info('saved file {0}'.format(self.xml_script_path))
def applyXml(self):
xml_content = self.xml_editor.text
xml_stream = StringIO(str(xml_content))
xml_stream.seek(0)
self.loadXml(xml_stream)
def reloadXml(self):
with file(self.xml_script_path, 'r') as xml_file:
self.loadXml(xml_file)
def loadXml(self, xml_file):
self.logger.debug(
'loading file {0}'.format(getattr(xml_file, 'name', ''))
)
top_widget = pychan.loadXML(xml_file)
top_widget.deepApply(self._applyActivateEventCapture)
top_widget.deepApply(lambda widget: widget.capture(self.selectWidget,
'mousePressed'))
widget_preview = self.gui.findChild(name='widgetPreview')
widget_preview.content = top_widget
top_widget.adaptLayout()
# FIXME Technomage 2011-01-23: Containers are not displayed with their
# background images when attached to another widget. A workaround
# is to call beforeShow after attaching the container.
if isinstance(top_widget, pychan.Container):
top_widget.beforeShow()
xml_editor = self.xml_editor
xml_file.seek(0)
xml_editor.text = unicode(xml_file.read(), 'utf8')
xml_editor.resizeToContent()
self.logger.info(
'successfully loaded file {0}'.format(
getattr(xml_file, 'name', '')
)
)
def main(argv=sys.argv):
option_parser = OptionParser(
usage=USAGE_MESSAGE,
args=argv[1:]
)
logging_level = logging.WARNING
for option in option_parser:
if option == '-h' or option =='--help':
print(option_parser.usage)
sys.exit(0)
elif option == '-v':
logging_level -= 10
elif option == '-q':
logging_level += 10
else:
print('Error: unknown option {0!r}\n'.format(option),
file=sys.stderr)
print(option_parser.usage, file=sys.stderr)
sys.exit(1)
try:
xml_script_path = os.path.abspath(option_parser.get_next_prog_arg())
except OptionError as exception:
print('Error: {0}\n'.format(exception), file=sys.stderr)
print(option_parser.usage, file=sys.stderr)
sys.exit(1)
application = GuichanDesignerApplication()
application.setupLogging(logging_level)
parpg_root = os.path.abspath(os.path.join('..', '..', 'game'))
os.chdir(parpg_root)
# Load PARPG fonts
fonts_directory = os.path.abspath('fonts')
file_names = os.listdir(fonts_directory)
for file_name in file_names:
base_name, extension = os.path.splitext(file_name)
if extension == '.fontdef':
file_path = os.path.join(fonts_directory, file_name)
pychan.loadFonts(file_path)
application.xml_script_path = xml_script_path
with file(xml_script_path) as xml_file:
application.loadXml(xml_file)
application.run()
if __name__ == '__main__':
main()
|
gpl-3.0
| -9,181,230,754,927,632,000 | 37.570652 | 109 | 0.586586 | false |
gauravjuvekar/debian-python-humanfriendly
|
humanfriendly/sphinx.py
|
1
|
5429
|
# Human friendly input/output in Python.
#
# Author: Peter Odding <[email protected]>
# Last Change: February 17, 2016
# URL: https://humanfriendly.readthedocs.io
"""
Customizations for and integration with the Sphinx_ documentation generator.
The :mod:`humanfriendly.sphinx` module uses the `Sphinx extension API`_ to
customize the process of generating Sphinx based Python documentation.
The most relevant functions to take a look at are :func:`setup()`,
:func:`enable_special_methods()` and :func:`enable_usage_formatting()`.
.. _Sphinx: http://www.sphinx-doc.org/
.. _Sphinx extension API: http://sphinx-doc.org/extdev/appapi.html
"""
# Standard library modules.
import logging
import types
# Modules included in our package.
from humanfriendly.usage import USAGE_MARKER, render_usage
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
def setup(app):
"""
Enable all of the provided Sphinx_ customizations.
:param app: The Sphinx application object.
The :func:`setup()` function makes it easy to enable all of the Sphinx
customizations provided by the :mod:`humanfriendly.sphinx` module with the
least amount of code. All you need to do is to add the module name to the
``extensions`` variable in your ``conf.py`` file:
.. code-block:: python
# Sphinx extension module names.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'humanfriendly.sphinx',
]
When Sphinx sees the :mod:`humanfriendly.sphinx` name it will import the
module and call its :func:`setup()` function.
At the time of writing this just calls :func:`enable_special_methods()` and
:func:`enable_usage_formatting()`, but of course more functionality may be
    added at a later stage. If you don't like that idea you may be better off
calling the individual functions from your own ``setup()`` function.
"""
enable_special_methods(app)
enable_usage_formatting(app)
def enable_special_methods(app):
"""
Enable documenting "special methods" using the autodoc_ extension.
:param app: The Sphinx application object.
This function connects the :func:`special_methods_callback()` function to
``autodoc-skip-member`` events.
.. _autodoc: http://www.sphinx-doc.org/en/stable/ext/autodoc.html
"""
app.connect('autodoc-skip-member', special_methods_callback)
def special_methods_callback(app, what, name, obj, skip, options):
"""
Enable documenting "special methods" using the autodoc_ extension.
Refer to :func:`enable_special_methods()` to enable the use of this
function (you probably don't want to call
:func:`special_methods_callback()` directly).
This function implements a callback for ``autodoc-skip-member`` events to
include documented "special methods" (method names with two leading and two
trailing underscores) in your documentation. The result is similar to the
use of the ``special-members`` flag with one big difference: Special
methods are included but other types of members are ignored. This means
that attributes like ``__weakref__`` will always be ignored (this was my
main annoyance with the ``special-members`` flag).
The parameters expected by this function are those defined for Sphinx event
callback functions (i.e. I'm not going to document them here :-).
"""
if getattr(obj, '__doc__', None) and isinstance(obj, (types.FunctionType, types.MethodType)):
return False
else:
return skip
def enable_usage_formatting(app):
"""
Reformat human friendly usage messages to reStructuredText_.
:param app: The Sphinx application object (as given to ``setup()``).
This function connects the :func:`usage_message_callback()` function to
``autodoc-process-docstring`` events.
.. _reStructuredText: https://en.wikipedia.org/wiki/ReStructuredText
"""
app.connect('autodoc-process-docstring', usage_message_callback)
def usage_message_callback(app, what, name, obj, options, lines):
"""
Reformat human friendly usage messages to reStructuredText_.
Refer to :func:`enable_usage_formatting()` to enable the use of this
function (you probably don't want to call :func:`usage_message_callback()`
directly).
This function implements a callback for ``autodoc-process-docstring`` that
reformats module docstrings using :func:`.render_usage()` so that Sphinx
doesn't mangle usage messages that were written to be human readable
instead of machine readable. Only module docstrings whose first line starts
with :data:`.USAGE_MARKER` are reformatted.
The parameters expected by this function are those defined for Sphinx event
callback functions (i.e. I'm not going to document them here :-).
"""
# Make sure we only modify the docstrings of modules.
if isinstance(obj, types.ModuleType) and lines:
# Make sure we only modify docstrings containing a usage message.
if lines[0].startswith(USAGE_MARKER):
# Convert the usage message to reStructuredText.
text = render_usage('\n'.join(lines))
# Clear the existing line buffer.
while lines:
lines.pop()
# Fill up the buffer with our modified docstring.
lines.extend(text.splitlines())
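# Hypothetical module docstring that usage_message_callback() would reformat,
# assuming its first line starts with humanfriendly.usage.USAGE_MARKER:
#
#     """Usage: my-program [OPTIONS]
#
#     Do something useful with the given OPTIONS.
#
#     Supported options:
#
#       -h, --help  Show this message and exit.
#     """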
|
mit
| 987,001,562,281,309,600 | 36.965035 | 97 | 0.700682 | false |
APSL/kaneda
|
django_kaneda/__init__.py
|
1
|
1695
|
import logging
from django.utils.functional import LazyObject
class LazyMetrics(LazyObject):
def _setup(self):
from kaneda import Metrics
from kaneda.utils import import_class, get_object_from_settings
from kaneda.exceptions import UnexistingKanedaClass, SettingsError
from . import settings
if settings.DEBUG:
backend_class = import_class('kaneda.backends.LoggerBackend')
if settings.LOGGER:
backend = backend_class(logger=logging.getLogger(settings.LOGGER))
elif settings.LOGGER_FILENAME:
backend = backend_class(filename=settings.LOGGER_FILENAME)
else:
backend = backend_class()
_metrics = Metrics(backend=backend)
else:
if not settings.BACKEND and not settings.QUEUE:
raise SettingsError('You need to set KANEDA_BACKEND or KANEDA_QUEUE on settings.py to django_kaneda')
if settings.BACKEND:
try:
backend = get_object_from_settings(settings.BACKEND, settings)
_metrics = Metrics(backend=backend)
except UnexistingKanedaClass:
raise UnexistingKanedaClass('The selected KANEDA_BACKEND class does not exists.')
if settings.QUEUE:
try:
queue = get_object_from_settings(settings.QUEUE, settings)
_metrics = Metrics(queue=queue)
except UnexistingKanedaClass:
raise UnexistingKanedaClass('The selected KANEDA_QUEUE class does not exists.')
self._wrapped = _metrics
metrics = LazyMetrics()
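# Hypothetical configuration sketch (the setting names follow the checks above;
# the metric-reporting API of kaneda.Metrics itself is not shown in this file):
#
#     # settings.py
#     KANEDA_BACKEND = 'kaneda.backends.LoggerBackend'
#
#     # application code
#     from django_kaneda import metrics  # the backend is resolved lazily on first use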
|
mit
| -6,203,067,789,636,577,000 | 41.375 | 117 | 0.616519 | false |
zapcoop/vertex
|
vertex_api/inventory/models/provisioning.py
|
1
|
5543
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from inventory.constants import PO_STATUS_CHOICES, PR_STATUS_CHOICES
class PurchaseRequest(models.Model):
parent = models.ForeignKey(
'self',
related_name='children_pr_set',
null=True,
editable=False
)
user_id = models.CharField(
max_length=32,
blank=True,
verbose_name=_('User defined ID (optional)')
)
issue_date = models.DateField(
auto_now_add=True,
verbose_name=_('Issue date')
)
required_date = models.DateField(
blank=True,
null=True,
verbose_name=_('Date required')
)
supplier_contact = models.ForeignKey(
'contacts.EmailAddress',
limit_choices_to={'organization__groups': 3},
blank=True,
null=True
)
active = models.BooleanField(
default=True,
verbose_name=_('Active')
)
status = models.CharField(
choices=PR_STATUS_CHOICES,
max_length=10,
verbose_name=_('Status')
)
notes = models.TextField(
blank=True,
verbose_name=_('Notes')
)
class Meta:
verbose_name = _('Purchase request')
verbose_name_plural = _('Purchase requests')
app_label = 'inventory'
def __str__(self):
return '#%s (%s)' % (self.user_id if self.user_id else self.id, self.issue_date)
@property
def supplier(self):
return self.supplier_contact.organization if self.supplier_contact else None
class PurchaseRequestItem(models.Model):
purchase_request = models.ForeignKey(PurchaseRequest, related_name='items',
verbose_name=_('Purchase request'))
item = models.ForeignKey('inventory.Item', verbose_name=_('Item'))
qty = models.PositiveIntegerField(verbose_name=_('Quantity'))
notes = models.TextField(null=True, blank=True, verbose_name=_('Notes'))
def __str__(self):
return "{purchase_request}: {item}".format(purchase_request=self.purchase_request,
item=self.item)
class Meta:
verbose_name = _('Purchase request item')
verbose_name_plural = _('Purchase request items')
app_label = 'inventory'
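# Hypothetical ORM usage sketch (field values are invented; 'status' must be
# one of PR_STATUS_CHOICES, which is defined elsewhere, and 'an_item' stands
# for an existing inventory.Item):
#
#     pr = PurchaseRequest.objects.create(user_id='PR-0001', status='...')
#     PurchaseRequestItem.objects.create(purchase_request=pr, item=an_item, qty=3)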
class PurchaseOrder(models.Model):
user_id = models.CharField(max_length=32, null=True, blank=True,
verbose_name=_('User defined ID'))
purchase_request = models.ForeignKey(PurchaseRequest, null=True, blank=True,
verbose_name=_('Purchase request'))
supplier = models.ForeignKey('contacts.HardwareSupplier', related_name='purchase_orders',
verbose_name=_('Supplier'))
issue_date = models.DateField(auto_now_add=True, verbose_name=_('Issue date'))
required_date = models.DateField(null=True, blank=True, verbose_name=_('Date required'))
active = models.BooleanField(default=True, verbose_name=_('Active'))
notes = models.TextField(null=True, blank=True, verbose_name=_('Notes'))
status = models.CharField(choices=PO_STATUS_CHOICES, max_length=10, verbose_name=_('Status'))
def __str__(self):
return '#%s (%s)' % (self.user_id if self.user_id else self.id, self.issue_date)
class Meta:
verbose_name = _('Purchase order')
verbose_name_plural = _('Purchase orders')
app_label = 'inventory'
class PurchaseOrderItemStatus(models.Model):
name = models.CharField(verbose_name=_('Name'), max_length=32)
def __str__(self):
return self.name
class Meta:
verbose_name = _('Purchase order item status')
verbose_name_plural = _('Purchase order item status')
app_label = 'inventory'
class PurchaseOrderItem(models.Model):
purchase_order = models.ForeignKey(PurchaseOrder, related_name='items',
verbose_name=_('Purchase order'))
item = models.ForeignKey('inventory.Item', verbose_name=_('Item'))
agreed_price = models.PositiveIntegerField(null=True, blank=True,
verbose_name=_('Agreed price'))
active = models.BooleanField(default=True, verbose_name=_('Active'))
status = models.ForeignKey(PurchaseOrderItemStatus, null=True, blank=True,
verbose_name=_('Status'))
qty = models.PositiveIntegerField(verbose_name=_('Quantity'))
received_qty = models.PositiveIntegerField(default=0, null=True, blank=True,
verbose_name=_('received'))
def __str__(self):
return str(self.item)
class Meta:
verbose_name = _('Purchase order item')
verbose_name_plural = _('Purchase order items')
app_label = 'inventory'
# register(PurchaseRequestStatus, _('Purchase request status'), ['name'])
# register(PurchaseRequest, _('Purchase request'), ['user_id', 'id', 'budget', 'required_date',
# 'status__name', 'originator'])
# register(PurchaseRequestItem, _('Purchase request item'), ['item_template__description', 'qty',
# 'notes'])
# register(PurchaseOrderStatus, _('Purchase order status'), ['name'])
# register(PurchaseOrderItemStatus, _('Purchase order item status'), ['name'])
# register(PurchaseOrder, _('Purchase order'), ['user_id', 'id', 'required_date', 'status__name',
# 'supplier__name', 'notes'])
# register(PurchaseOrderItem, _('Purchase order item'), ['item_template__description', 'qty'])
|
agpl-3.0
| -5,115,240,644,877,288,000 | 38.035211 | 97 | 0.61501 | false |
TexZK/pywolf
|
pywolf/audio.py
|
1
|
15904
|
import io
import os
import struct
import subprocess
import tempfile
import wave
from pywolf.utils import (
stream_read, stream_write,
stream_pack, stream_unpack, stream_unpack_array,
BinaryResource, ResourceManager
)
ADLIB_CARRIERS = (3, 4, 5, 11, 12, 13, 19, 20, 21)
ADLIB_MODULATORS = (0, 1, 2, 8, 9, 10, 16, 17, 18)
ADLIB_REG_DUMMY = 0x00
ADLIB_REG_SPLIT = 0x08
ADLIB_REG_CHAR = 0x20
ADLIB_REG_SCALE = 0x40
ADLIB_REG_ATTACK = 0x60
ADLIB_REG_SUSTAIN = 0x80
ADLIB_REG_FREQ_L = 0xA0
ADLIB_REG_FREQ_H = 0xB0
ADLIB_REG_FEEDBACK = 0xC0
ADLIB_REG_EFFECTS = 0xBD
ADLIB_REG_WAVE = 0xE0
def samples_expand(chunks_handler, index):
sounds_start = chunks_handler.sounds_start
sounds_infos = chunks_handler.sounds_infos
start, length = sounds_infos[index]
chunk_index = sounds_start + start
remaining = length
while remaining:
chunk = chunks_handler[chunk_index]
if len(chunk) <= remaining:
yield from chunk
remaining -= len(chunk)
else:
yield from memoryview(chunk)[:remaining]
remaining = 0
chunk_index += 1
def samples_upsample(samples, factor):
assert 1 < factor
remainder = 0
for sample in samples:
times = factor + remainder
times_floor = int(times)
yield from (sample for _ in range(times_floor))
remainder = times - times_floor
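# Worked example for samples_upsample(): with factor=1.5 the fractional
# remainder alternates, so the per-sample repeat counts come out as 1, 2, 1, 2:
#
#     >>> list(samples_upsample([10, 20, 30, 40], 1.5))
#     [10, 20, 20, 30, 40, 40]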
def wave_write(file, rate, samples, sample_format='<B'):
with wave.open(file, 'w') as wave_stream:
wave_stream.setnchannels(1)
wave_stream.setsampwidth(struct.calcsize(sample_format))
wave_stream.setframerate(rate)
wave_stream.setnframes(len(samples))
wave_stream.writeframesraw(samples) # FIXME: pack into bytes() if necessary
class SquareWaveGenerator(object):
def __init__(self, sample_rate, high=1, low=-1, silence=0, frequency=0, duty_cycle=0.5, round_period=True):
assert 0 < sample_rate
self.sample_rate = sample_rate
self.low = low
self.high = high
self.silence = silence
self.frequency = 0
self.duty_cycle = duty_cycle
self.round_period = round_period
self.reset()
def reset(self):
self.period_length = 1
self.phase_index = 0
self.threshold_index = self.duty_cycle
self.set_frequency(self.frequency)
def set_frequency(self, frequency):
if frequency != self.frequency:
phase_index = self.phase_index
if frequency:
assert 0 < frequency < 2 * self.sample_rate
period_length = self.sample_rate / frequency
phase_index *= period_length / self.period_length
if self.round_period:
period_length = round(period_length)
phase_index = int(phase_index)
phase_index %= period_length
else:
period_length = 1
phase_index = 0
self.frequency = frequency
self.phase_index = phase_index
self.period_length = period_length
self.set_duty_cycle(self.duty_cycle)
def set_duty_cycle(self, duty_cycle):
assert 0 <= duty_cycle <= 1
threshold_index = self.period_length * duty_cycle
if self.round_period:
threshold_index = round(threshold_index)
self.duty_cycle = duty_cycle
self.threshold_index = threshold_index
def __call__(self, length=1):
if self.frequency:
high = self.high
low = self.low
period_length = self.period_length
phase_index = self.phase_index
threshold_index = self.threshold_index
for _ in range(length):
yield high if phase_index < threshold_index else low
phase_index = (phase_index + 1) % period_length
self.phase_index = phase_index
else:
silence = self.silence
yield from (silence for _ in range(length))
def buzzer_expand(dividers, sample_rate=44100, char_rate=140, buzzer_clock=1193180, round_period=True):
generator = SquareWaveGenerator(sample_rate, high=0xFF, low=0x00, silence=0x80, round_period=round_period)
char_length = sample_rate / char_rate
offset = 0
for divider in dividers:
generator.set_frequency(buzzer_clock / (divider * 60) if divider else 0)
length = offset + char_length
length_floor = round(length)
yield from generator(length_floor)
offset = length - length_floor
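# Rough arithmetic for buzzer_expand() at the defaults: each divider lasts one
# "char", i.e. sample_rate / char_rate = 44100 / 140 = 315 samples, and a
# divider value of 1000 maps to a tone of 1193180 / (1000 * 60), about 19.9 Hz.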
def convert_imf_to_wave(imf_chunk, imf2wav_path, wave_path=None, wave_rate=44100, imf_rate=700, chunk_path=None):
wave_is_temporary = wave_path is None
chunk_is_temporary = chunk_path is None
tempdir_path = tempfile.gettempdir()
PIPE = subprocess.PIPE
try:
if chunk_is_temporary:
with tempfile.NamedTemporaryFile('wb', delete=False) as chunk_file:
chunk_file.write(imf_chunk)
chunk_path = os.path.join(tempdir_path, chunk_file.name)
else:
with open(chunk_path, 'wb') as chunk_file:
chunk_file.write(imf_chunk)
if wave_is_temporary:
with tempfile.NamedTemporaryFile('wb', delete=False) as wave_file:
wave_path = os.path.join(tempdir_path, wave_file.name)
else:
wave_path = os.path.abspath(wave_path)
imf2wav_path = os.path.abspath(imf2wav_path)
args = [imf2wav_path, chunk_path, wave_path, str(imf_rate), str(wave_rate)]
subprocess.Popen(args, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE).communicate()
except:
if wave_is_temporary:
try:
os.unlink(wave_path)
except:
pass
raise
finally:
if chunk_is_temporary:
try:
os.unlink(chunk_path)
except:
pass
return wave_path
def convert_wave_to_ogg(wave_path, oggenc2_path, ogg_path=None):
ogg_is_temporary = ogg_path is None
tempdir_path = tempfile.gettempdir()
PIPE = subprocess.PIPE
try:
if ogg_is_temporary:
with tempfile.NamedTemporaryFile('wb', delete=False) as ogg_file:
ogg_path = os.path.join(tempdir_path, ogg_file.name)
else:
ogg_path = os.path.abspath(ogg_path)
oggenc2_path = os.path.abspath(oggenc2_path)
args = [oggenc2_path, wave_path, '-o', ogg_path]
subprocess.Popen(args, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE).communicate()
except:
if ogg_is_temporary:
try:
os.unlink(ogg_path)
except:
pass
raise
return ogg_path
class BuzzerSound(object):
def __init__(self, dividers):
self.dividers = dividers
def __len__(self):
return len(self.dividers)
def __iter__(self):
yield from self.dividers
def __getitem__(self, key):
return self.dividers[key]
def to_samples(self, rate=44100):
yield from buzzer_expand(self.dividers, rate)
def wave_write(self, file, rate=44100):
samples = bytes(self.to_samples(rate))
wave_write(file, rate, samples)
class BuzzerSoundManager(ResourceManager):
def __init__(self, chunks_handler, start=None, count=None):
super().__init__(chunks_handler, start, count)
def _load_resource(self, index, chunk):
return BuzzerSound(chunk)
class AdLibSoundHeader(BinaryResource):
SIZE = struct.calcsize('<LH13B3sB')
def __init__(self,
length, priority,
modulator_char, carrier_char,
modulator_scale, carrier_scale,
modulator_attack, carrier_attack,
modulator_sustain, carrier_sustain,
modulator_wave, carrier_wave,
conn, voice, mode, block):
self.length = length
self.priority = priority
self.modulator_char = modulator_char
self.carrier_char = carrier_char
self.modulator_scale = modulator_scale
self.carrier_scale = carrier_scale
self.modulator_attack = modulator_attack
self.carrier_attack = carrier_attack
self.modulator_sustain = modulator_sustain
self.carrier_sustain = carrier_sustain
self.modulator_wave = modulator_wave
self.carrier_wave = carrier_wave
self.conn = conn
self.voice = voice
self.mode = mode
self.block = block
@classmethod
def from_stream(cls, stream):
args = list(stream_unpack('<LH13B', stream))
stream_unpack('<3B', stream) # unused
args += stream_unpack('<B', stream)
return cls(*args)
def to_stream(self, stream):
stream_write(stream, self.to_bytes())
@classmethod
def from_bytes(cls, data, offset=0):
args = struct.unpack_from('<LH13B', data, offset)
offset += cls.SIZE - 1
args += stream_unpack('<B', data, offset)
return cls(*args)
def to_bytes(self):
return struct.pack('<LH13B3sB',
self.length,
self.priority,
self.modulator_char,
self.carrier_char,
self.modulator_scale,
self.carrier_scale,
self.modulator_attack,
self.carrier_attack,
self.modulator_sustain,
self.carrier_sustain,
self.modulator_wave,
self.carrier_wave,
self.conn,
self.voice,
self.mode,
b'',
self.block)
def to_imf_chunk(self, length=None, which=0, old_muse_compatibility=False):
modulator = ADLIB_MODULATORS[which]
carrier = ADLIB_CARRIERS[which]
setup_events = [
[ADLIB_REG_DUMMY, 0x00, 0],
[ADLIB_REG_EFFECTS, 0x00, 0],
[ADLIB_REG_SPLIT, 0x00, 0],
[(ADLIB_REG_FREQ_H + modulator), 0x00, 0],
[(ADLIB_REG_CHAR + modulator), self.modulator_char, 0],
[(ADLIB_REG_SCALE + modulator), self.modulator_scale, 0],
[(ADLIB_REG_ATTACK + modulator), self.modulator_attack, 0],
[(ADLIB_REG_SUSTAIN + modulator), self.modulator_sustain, 0],
[(ADLIB_REG_WAVE + modulator), self.modulator_wave, 0],
[(ADLIB_REG_CHAR + carrier), self.carrier_char, 0],
[(ADLIB_REG_SCALE + carrier), self.carrier_scale, 0],
[(ADLIB_REG_ATTACK + carrier), self.carrier_attack, 0],
[(ADLIB_REG_SUSTAIN + carrier), self.carrier_sustain, 0],
[(ADLIB_REG_WAVE + carrier), self.carrier_wave, 0],
[ADLIB_REG_FEEDBACK, (self.conn if old_muse_compatibility else 0x00), 0],
]
if length is None:
length = self.length
length = (len(setup_events) + length) * 4
setup_events_data = [struct.pack('<BBH', *event) for event in setup_events]
return b''.join([struct.pack('<H', length)] + setup_events_data)
class AdLibSound(BinaryResource):
def __init__(self, header, events, metadata=b''):
self.header = header
self.events = events
self.metadata = metadata # TODO: fill from stream/chunk
def __len__(self):
return self.header.length
def __iter__(self):
yield from self.events
def __getitem__(self, key):
return self.events[key]
def to_imf_chunk(self, delay_cycles=5, which=0, old_muse_compatibility=False):
events = self.events
header = self.header
metadata = self.metadata
if events:
modulator = ADLIB_MODULATORS[which]
freq_l_reg = ADLIB_REG_FREQ_L + modulator
freq_h_reg = ADLIB_REG_FREQ_H + modulator
block = ((header.block & 7) << 2) | 0x20
key_on_data = struct.pack('<BBH', freq_h_reg, block, delay_cycles)
key_off_data = struct.pack('<BBH', freq_h_reg, 0x00, delay_cycles)
events_data = []
for event in events:
if event:
events_data.append(struct.pack('<BBH', freq_l_reg, event, 0))
events_data.append(key_on_data)
else:
events_data.append(key_off_data)
events_data.append(key_off_data)
setup_data = header.to_imf_chunk(len(events_data), which, old_muse_compatibility)
imf_chunk = b''.join([setup_data] + events_data + [metadata])
return imf_chunk
else:
setup_data = header.to_imf_chunk(0, which, old_muse_compatibility)
return bytes(setup_data)
@classmethod
def from_stream(cls, stream):
header = AdLibSoundHeader.from_stream(stream)
events = stream_read(stream, header.length)
return cls(header, events)
def to_stream(self, stream):
self.header.to_stream(stream)
stream_write(stream, self.events)
@classmethod
def from_bytes(cls, data):
return cls.from_stream(io.BytesIO(data))
def to_bytes(self):
return b''.join([self.header.to_bytes(), self.events])
class AdLibSoundManager(ResourceManager):
def __init__(self, chunks_handler, start=None, count=None, old_muse_compatibility=False):
super().__init__(chunks_handler, start, count)
self.old_muse_compatibility = old_muse_compatibility
def _load_resource(self, index, chunk):
return AdLibSound.from_bytes(chunk)
class Music(BinaryResource):
def __init__(self, events):
self.events = events
def __len__(self):
return len(self.events)
def __iter__(self):
yield from self.events
def __getitem__(self, key):
return self.events[key]
def to_imf_chunk(self):
length = len(self.events) * 4
events_data = [struct.pack('<BBH', *event) for event in self.events]
return b''.join([struct.pack('<H', length)] + events_data)
@classmethod
def from_stream(cls, stream):
length = stream_unpack('<H', stream)[0]
assert length % 4 == 0
length //= 4
events = list(stream_unpack_array('<BBH', stream, length, scalar=False))
return cls(events)
def to_stream(self, stream):
stream_pack(stream, '<H', len(self.events))
for event in self.events:
stream_pack(stream, '<BBH', event)
@classmethod
def from_bytes(cls, data):
return cls.from_stream(io.BytesIO(data))
def to_bytes(self):
return self.to_imf_chunk()
class MusicManager(ResourceManager):
def __init__(self, chunks_handler, start=None, count=None):
super().__init__(chunks_handler, start, count)
def _load_resource(self, index, chunk):
return Music.from_bytes(chunk)
class SampledSound(object):
def __init__(self, rate, samples):
self.rate = rate
self.samples = samples
def wave_write(self, file):
rate = self.rate
samples = self.samples
wave_write(file, rate, samples)
class SampledSoundManager(ResourceManager):
def __init__(self, chunks_handler, rate, start=None, count=None):
super().__init__(chunks_handler, start, count)
self.rate = rate
def _load_resource(self, index, chunk):
samples = bytes(samples_expand(self._chunks_handler, index))
return SampledSound(self.rate, samples)
|
gpl-3.0
| -5,442,634,940,452,369,000 | 31.259635 | 113 | 0.577842 | false |
Napoleon314/Venus3D
|
build_cfg_default.py
|
1
|
1816
|
project_name = "Venus3D"
binary_path = "Binary"
build_path = "Build"
dependent_path = "Dependent"
document_path = "Document"
external_path = "External"
include_path = "Include"
source_path = "Source"
test_path = "Test"
install_path = "SDK"
dependent_list = [("maker", "https://github.com/Napoleon314/maker"),("vulkan_loader", "https://github.com/Napoleon314/vulkan_loader")]
# Compiler name.
# On Windows desktop, could be "vc140", "vc120", "vc110", "mingw", "auto".
# On Windows store, could be "vc140", "vc120", "vc110", "auto".
# On Windows phone, could be "vc140", "vc120", "vc110", "auto".
# On Android, could be "gcc", "auto".
# On Linux, could be "gcc", "auto".
# On MacOSX, could be "clang", "auto".
# On iOS, could be "clang", "auto".
compiler = "auto"
# Toolset name.
# On Windows desktop, could be "v140", "auto".
# On Windows store, could be "auto".
# On Windows phone, could be "auto".
# On Android, could be "4.6", "4.8", "4.9", "auto".
# On Linux, could be "auto".
# On MacOSX, could be "auto".
# On iOS, could be "auto".
toolset = "auto"
# Target CPU architecture.
# On Windows desktop, could be "x86", "x64".
# On Windows store, could be "arm", "x86", "x64".
# On Windows phone, could be "arm", "x86".
# On Android, could be "armeabi", "armeabi-v7a", "arm64-v8a", "x86", "x86_64".
# On Linux, could be "x86", "x64".
# On MacOSX, could be "x64".
# On iOS, could be "arm", "x86".
arch = ("x86", "x64")
# Configuration. Could be "Debug", "Release", "MinSizeRel", "RelWithDebInfo".
config = ("Debug", "Release", "MinSizeRel", "RelWithDebInfo")
# Target platform for cross compiling. Could be "android" plus version number, "win_store", "win_phone" plus version number, "ios", or "auto".
target = "auto"
intrinsics_level = 1
|
mit
| -8,911,665,374,767,017,000 | 34.607843 | 142 | 0.627203 | false |
3dbug/blender
|
anim_from_csv.py
|
1
|
4165
|
import bpy
action="default"
text=action + ".txt"
obj_map = {}
target_map = { "Root" : 0, "Spine" : 1, "ArmFK.L" : 3, "LegIK.L" : 4, "Fingers.L" : 6,
"Links.L" : 7, "ArmFK.R" : 19, "LegIK.R" : 20, "Fingers.R" :22, "Links.R" : 23 }
macros = { "Hand.L": ["Fingers.L", "Links.L"],
"Hand.R": [ "Fingers.R", "Links.R" ],
"All": ["Spine","ArmFK.L","ArmFK.R","LegIK.L","LegIK.R","Links.R","Fingers.R","Links.L","Fingers.L"],
"Stance": ["Spine","Root","LegIK.L","LegIK.R"],
"Arm.L": ["ArmFK.L","Links.L","Fingers.L"],
"Arm.R": ["ArmFK.R","Links.R","Fingers.R"] }
def get_pose_index(obj, pose_name ):
idx = 0
for pm in obj.pose_library.pose_markers:
if pose_name == pm.name:
return idx
idx += 1
return None
def build_layers(targets):
layers = [False]*32
print( targets )
for t in targets:
if t in target_map:
layers[target_map[ t ] ] = True
return tuple( layers )
def build_layers2(targets):
for i in range(32):
bpy.context.object.data.layers[i] = False
for t in targets:
if t in target_map:
layer_id=target_map[ t ]
bpy.context.object.data.layers[ layer_id ] = True
def apply( obj, targets, pose_name):
idx = get_pose_index( obj, pose_name )
if idx is None:
print("pose %s not found." % pose_name )
return
sel_layers = build_layers( targets )
print("sellayers=" + str(sel_layers))
bpy.ops.armature.armature_layers(layers=sel_layers) # 2.7 api, alternatively build_layers2 can be used
bpy.ops.poselib.apply_pose(pose_index=idx)
bpy.ops.pose.select_all(action='SELECT')
if "Root" in targets:
bpy.ops.anim.keyframe_insert(type='BUILTIN_KSI_LocRot')
else:
bpy.ops.anim.keyframe_insert(type='Rotation')
def remove_dummy():
for key, value in obj_map.items() :
print (key, value)
        obj = bpy.data.objects.get( value )  # look up the armature by the name stored in obj_map
        if obj is None:
            print("obj name=" + value + " not found.")
            continue
        bpy.context.scene.objects.active = obj
        obj.select = True
bpy.ops.armature.armature_layers(layers=[True]*32)
bpy.context.scene.frame_set( 0 )
bpy.ops.anim.keyframe_delete(type='BUILTIN_KSI_LocRot', confirm_success=True)
bpy.ops.armature.armature_layers(layers=[False]*32)
def add_dummy( obj_name ):
# set dummy keyframe to create an action 8.7.2014
sel_layers = build_layers( ['Root'] )
obj = bpy.data.objects.get( obj_name )
if obj == None:
print("obj name=" + obj_name + " not found.")
return
bpy.context.scene.frame_set( 0 )
obj.animation_data.action.name = action + '_' + obj_name
bpy.context.scene.objects.active = obj
obj.select = True
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.anim.keyframe_insert(type='Rotation')
txt = bpy.data.texts[ text ].as_string()
for line in txt.splitlines():
if line.find(" ") != -1 and line.find("#") == -1:
sframe,rig_and_poses= line.split(" ")
rest=rig_and_poses.split(":")
print("frame :%s" % sframe )
obj_name=rest[0]
poses=rest[1]
pose_list=poses.split(",")
if obj_name.upper() == "SET":
var,obj_name = pose_list[0].split('=')
obj_map[var]=obj_name
print( obj_map )
add_dummy( obj_name )
continue
bpy.context.scene.frame_set( int( sframe ))
for assignment in pose_list:
bone_group_name,pose_name=assignment.split("=")
if bone_group_name in macros:
targets = macros[ bone_group_name ]
else:
targets = [ bone_group_name ]
obj = bpy.data.objects.get( obj_map[ obj_name])
if obj == None:
print("obj name=" + obj_name + " not found.")
break
bpy.context.scene.objects.active = obj
obj.select = True
bpy.ops.object.mode_set(mode = 'POSE')
print("assign obj:%s bone_group:%s, pose:%s" % (obj, bone_group_name, pose_name))
apply( obj, targets, pose_name)
remove_dummy()
|
gpl-3.0
| 2,746,421,672,938,407,400 | 34.598291 | 113 | 0.553421 | false |
boxu0001/practice
|
py3/S99_recoverBST.py
|
1
|
1891
|
"""
99. Recover Binary Search Tree
Hard
Two elements of a binary search tree (BST) are swapped by mistake.
Recover the tree without changing its structure.
Example 1:
Input: [1,3,null,null,2]
1
/
3
\
2
Output: [3,1,null,null,2]
3
/
1
\
2
Example 2:
Input: [3,1,4,null,null,2]
3
/ \
1 4
/
2
Output: [2,1,4,null,null,3]
2
/ \
1 4
/
3
Follow up:
A solution using O(n) space is pretty straightforward.
Could you devise a constant space solution?
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
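# A concrete TreeNode matching the stub above, so this file can be run on its own
# (assumption: identical to the definition LeetCode normally provides).
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None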
class Solution:
def recoverTree(self, root: TreeNode) -> None:
#inorder traversal
stack=[root] if root else []
firstBig=None
lastSmall=None
goLeft=True
prevNode=None
curNode=None
while stack:
if goLeft and stack[-1].left:
stack+=[stack[-1].left]
else:
poped = stack.pop()
if poped.right:
stack+=[poped.right]
goLeft=True
else:
goLeft=False #这之前都是inorder traversal的通用代码
prevNode=curNode
curNode=poped
if prevNode and prevNode.val > curNode.val:
if firstBig==None:
firstBig=prevNode
lastSmall=curNode
tmp=lastSmall.val
lastSmall.val=firstBig.val
firstBig.val=tmp
# Uses an inorder traversal.
# firstBig stores the first "too large" node, i.e. the a[k] in ... a[k-1] < a[k] > a[k+1] ...
# lastSmall stores the last "too small" node, i.e. the a[t] in ... a[t-1] > a[t] < a[t+1] ...
# Finally the values of firstBig and lastSmall are swapped.
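# A small self-check built from Example 1 in the problem statement: the tree
# [1,3,null,null,2] should read back in sorted order once the two swapped values
# are restored.  It relies on the TreeNode class defined above.
if __name__ == '__main__':
    root = TreeNode(1)
    root.left = TreeNode(3)
    root.left.right = TreeNode(2)
    Solution().recoverTree(root)

    def inorder(node):
        return inorder(node.left) + [node.val] + inorder(node.right) if node else []

    print(inorder(root))  # expected: [1, 2, 3]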
|
gpl-3.0
| 5,910,172,456,264,468,000 | 18.521739 | 72 | 0.524234 | false |
pombredanne/pyelftools
|
elftools/dwarf/namelut.py
|
3
|
7323
|
#-------------------------------------------------------------------------------
# elftools: dwarf/namelut.py
#
# DWARF pubtypes/pubnames section decoding (.debug_pubtypes, .debug_pubnames)
#
# Vijay Ramasami ([email protected])
# This code is in the public domain
#-------------------------------------------------------------------------------
import os
import collections
from collections import OrderedDict
from ..common.utils import struct_parse
from ..common.py3compat import Mapping
from bisect import bisect_right
import math
from ..construct import CString, Struct, If
NameLUTEntry = collections.namedtuple('NameLUTEntry', 'cu_ofs die_ofs')
class NameLUT(Mapping):
"""
A "Name LUT" holds any of the tables specified by .debug_pubtypes or
.debug_pubnames sections. This is basically a dictionary where the key is
the symbol name (either a public variable, function or a type), and the
    value is the tuple (cu_offset, die_offset) corresponding to that symbol.
The die_offset is an absolute offset (meaning, it can be used to search the
CU by iterating until a match is obtained).
    An ordered dictionary is used to preserve the CU order (i.e., items are
    stored on a per-CU basis, as they were originally in the .debug_* section).
Usage:
The NameLUT walks and talks like a dictionary and hence it can be used as
such. Some examples below:
# get the pubnames (a NameLUT from DWARF info).
pubnames = dwarf_info.get_pubnames()
# lookup a variable.
entry1 = pubnames["var_name1"]
entry2 = pubnames.get("var_name2", default=<default_var>)
print(entry2.cu_ofs)
...
# iterate over items.
for (name, entry) in pubnames.items():
# do stuff with name, entry.cu_ofs, entry.die_ofs
# iterate over items on a per-CU basis.
import itertools
for cu_ofs, item_list in itertools.groupby(pubnames.items(),
key = lambda x: x[1].cu_ofs):
        # items are now grouped by cu_ofs.
        # item_list is an iterator yielding the NameLUTEntry objects belonging
        # to cu_ofs.
# We can parse the CU at cu_offset and use the parsed CU results
# to parse the pubname DIEs in the CU listed by item_list.
for item in item_list:
# work with item which is part of the CU with cu_ofs.
"""
def __init__(self, stream, size, structs):
self._stream = stream
self._size = size
self._structs = structs
# entries are lazily loaded on demand.
self._entries = None
# CU headers (for readelf).
self._cu_headers = None
def get_entries(self):
"""
Returns the parsed NameLUT entries. The returned object is a dictionary
with the symbol name as the key and NameLUTEntry(cu_ofs, die_ofs) as
the value.
This is useful when dealing with very large ELF files with millions of
entries. The returned entries can be pickled to a file and restored by
calling set_entries on subsequent loads.
"""
if self._entries is None:
self._entries, self._cu_headers = self._get_entries()
return self._entries
def set_entries(self, entries, cu_headers):
"""
Set the NameLUT entries from an external source. The input is a
dictionary with the symbol name as the key and NameLUTEntry(cu_ofs,
die_ofs) as the value.
This option is useful when dealing with very large ELF files with
millions of entries. The entries can be parsed once and pickled to a
file and can be restored via this function on subsequent loads.
"""
self._entries = entries
self._cu_headers = cu_headers
def __len__(self):
"""
Returns the number of entries in the NameLUT.
"""
if self._entries is None:
self._entries, self._cu_headers = self._get_entries()
return len(self._entries)
def __getitem__(self, name):
"""
Returns a namedtuple - NameLUTEntry(cu_ofs, die_ofs) - that corresponds
to the given symbol name.
"""
if self._entries is None:
self._entries, self._cu_headers = self._get_entries()
return self._entries.get(name)
def __iter__(self):
"""
Returns an iterator to the NameLUT dictionary.
"""
if self._entries is None:
self._entries, self._cu_headers = self._get_entries()
return iter(self._entries)
def items(self):
"""
Returns the NameLUT dictionary items.
"""
if self._entries is None:
self._entries, self._cu_headers = self._get_entries()
return self._entries.items()
def get(self, name, default=None):
"""
Returns NameLUTEntry(cu_ofs, die_ofs) for the provided symbol name or
None if the symbol does not exist in the corresponding section.
"""
if self._entries is None:
self._entries, self._cu_headers = self._get_entries()
return self._entries.get(name, default)
def get_cu_headers(self):
"""
Returns all CU headers. Mainly required for readelf.
"""
if self._cu_headers is None:
self._entries, self._cu_headers = self._get_entries()
return self._cu_headers
def _get_entries(self):
"""
Parse the (name, cu_ofs, die_ofs) information from this section and
store as a dictionary.
"""
self._stream.seek(0)
entries = OrderedDict()
cu_headers = []
offset = 0
# According to 6.1.1. of DWARFv4, each set of names is terminated by
# an offset field containing zero (and no following string). Because
# of sequential parsing, every next entry may be that terminator.
# So, field "name" is conditional.
entry_struct = Struct("Dwarf_offset_name_pair",
self._structs.Dwarf_offset('die_ofs'),
If(lambda ctx: ctx['die_ofs'], CString('name')))
# each run of this loop will fetch one CU worth of entries.
while offset < self._size:
# read the header for this CU.
namelut_hdr = struct_parse(self._structs.Dwarf_nameLUT_header,
self._stream, offset)
cu_headers.append(namelut_hdr)
# compute the next offset.
offset = (offset + namelut_hdr.unit_length +
self._structs.initial_length_field_size())
# before inner loop, latch data that will be used in the inner
# loop to avoid attribute access and other computation.
hdr_cu_ofs = namelut_hdr.debug_info_offset
            # keep reading entries until a zero die_ofs (the terminating record) is seen.
while True:
entry = struct_parse(entry_struct, self._stream)
# if it is zero, this is the terminating record.
if entry.die_ofs == 0:
break
# add this entry to the look-up dictionary.
entries[entry.name.decode('utf-8')] = NameLUTEntry(
cu_ofs = hdr_cu_ofs,
die_ofs = hdr_cu_ofs + entry.die_ofs)
# return the entries parsed so far.
return (entries, cu_headers)
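# Illustrative sketch of the parse-once / pickle / restore workflow described in
# the get_entries()/set_entries() docstrings above.  The cache file name and the
# surrounding DWARF setup are assumptions, not part of this module:
#
#   import pickle
#   pubnames = dwarf_info.get_pubnames()             # a NameLUT instance
#   with open('pubnames.cache', 'wb') as f:
#       pickle.dump((pubnames.get_entries(), pubnames.get_cu_headers()), f)
#   # ... and on a later run, skip the expensive parse:
#   with open('pubnames.cache', 'rb') as f:
#       entries, cu_headers = pickle.load(f)
#   pubnames.set_entries(entries, cu_headers)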
|
unlicense
| -3,595,565,746,216,191,500 | 35.984848 | 82 | 0.599208 | false |
jberci/resolwe
|
resolwe/flow/management/commands/runlistener.py
|
1
|
1572
|
""".. Ignore pydocstyle D400.
=================================
Standalone Executor Contact Point
=================================
Command to run on local machine::
./manage.py runlistener
"""
import asyncio
from signal import SIGINT, SIGTERM, signal
from django.conf import settings
from django.core.management.base import BaseCommand
from resolwe.flow.managers.listener import ExecutorListener
class Command(BaseCommand):
"""Run the executor listener."""
help = "Run the standalone manager contact point for executors."
def add_arguments(self, parser):
"""Add command arguments."""
super().add_arguments(parser)
parser.add_argument('--clear-queue', action='store_true',
help="Consume and ignore any outstanding messages in the result queue on startup.")
def handle(self, *args, **kwargs):
"""Run the executor listener. This method never returns."""
listener = ExecutorListener(redis_params=getattr(settings, 'FLOW_MANAGER', {}).get('REDIS_CONNECTION', {}))
def _killer(signum, frame):
"""Kill the listener on receipt of a signal."""
listener.terminate()
signal(SIGINT, _killer)
signal(SIGTERM, _killer)
async def _runner():
"""Run the listener instance."""
if kwargs['clear_queue']:
await listener.clear_queue()
async with listener:
pass
loop = asyncio.new_event_loop()
loop.run_until_complete(_runner())
loop.close()
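# Example invocations (assuming the host Django project has this command installed):
#   ./manage.py runlistener
#   ./manage.py runlistener --clear-queue    # drop any stale results before starting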
|
apache-2.0
| -7,991,205,269,413,775,000 | 29.230769 | 115 | 0.604326 | false |
Adamssss/projectEuler
|
Problem 001-150 Python/pb083.py
|
1
|
2021
|
import math
import time
t1 = time.time()
# read the matrix into a 2D list
f = open('pb083_matrix.txt','r')
mr= f.read().split('\n')
f.close()
'''
mr[0] = '131,673,234,103,18'
mr[1] = '201,96,342,965,150'
mr[2] = '630,803,746,422,111'
mr[3] = '537,699,497,121,956'
mr[4] = '805,732,524,37,331'
'''
L = 80
mc = []
for i in range(0,L):
mc.append(mr[i].split(','))
m = []
for i in range(L):
m.append([0]*L)
for i in range(0,L):
for j in range(0,L):
temp = 0
for d in mc[i][j]:
temp = temp*10+ord(d)-48
m[i][j] += temp
pv = []
mc = []
for i in range(L):
pv.append([999999]*L)
mc.append([False]*L)
i = 0
j = 0
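# How the sweep below appears to work: pv[i][j] holds the best path sum found so
# far from the top-left cell to (i, j) (initialised to 999999), and mc[i][j]
# marks cells whose value was just improved and may still improve their left/up
# neighbours.  The loop walks each column top to bottom, relaxing every cell from
# its upper and left neighbours; whenever a marked cell can improve a left or
# upper neighbour it steps back to that neighbour and resumes the scan there, so
# improvements propagate in all four directions until the grid settles, leaving
# the answer in pv[-1][-1].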
while j < L:
#for t in pv:
#print(t)
#print('==============================')
if mc[i][j]:
if j != 0:
if pv[i][j] + m[i][j-1] < pv[i][j-1]:
pv[i][j-1] = pv[i][j] + m[i][j-1]
mc[i][j-1] = True
j -= 1
continue
if i != 0:
if pv[i][j] + m[i-1][j] < pv[i-1][j]:
pv[i-1][j] = pv[i][j] + m[i-1][j]
mc[i-1][j] = True
i -= 1
continue
if j == 0:
if i == 0:
pv[i][j] = m[i][j]
else:
temp = m[i][j] + pv[i-1][j]
if temp < pv[i][j]:
pv[i][j] = temp
mc[i][j] = True
continue
else:
if i == 0:
temp = m[i][j] + pv[i][j-1]
if temp < pv[i][j]:
pv[i][j] = temp
mc[i][j] = True
continue
else:
if pv[i-1][j] > pv[i][j-1]:
temp = pv[i][j-1]
else:
temp = pv[i-1][j]
temp += m[i][j]
if temp < pv[i][j]:
pv[i][j] = temp
mc[i][j] = True
continue
i += 1
if i == L:
i = 0
j += 1
print(pv[-1][-1])
print("time:",time.time()-t1)
|
mit
| 8,534,431,882,672,108,000 | 19.835052 | 49 | 0.355764 | false |