max_stars_repo_path (string, 4–182 chars) | max_stars_repo_name (string, 6–116 chars) | max_stars_count (int64, 0–191k) | id (string, 7 chars) | content (string, 100–10k chars) | size (int64, 100–10k)
---|---|---|---|---|---|
model_to_local.py
|
EricBoittier/fdcm_project
| 0 |
2171994
|
import pandas as pd
from ase import Atoms
from dscribe.descriptors import SOAP
import numpy as np
import sys
from ARS import *
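# The fitting functions below model one local charge coordinate as a function of the
# geometry descriptors passed in fit_to_local ([dihedral, angle, angle]): func uses a
# sinusoid in the dihedral plus cubic polynomials in both angles, func2 keeps only the
# quadratic angle terms, and func3 keeps only the sinusoidal dihedral term.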
def func(data, a, b,c,d,e,f,g,h,i,j):
return a*np.sin(b*data[0]+c)+ d + e*data[1]**3 + f*data[1]**2 + g*data[1] + h*data[2]**3 + i*data[2]**2 + j*data[2]
def func2(data, a, b,c,d,g,j):
return a*np.sin(b*data[0]+c)+ d + g*data[1]**2 + j*data[2]**2
def func3(data, a, b,c,d):
return a*np.sin(b*data[0]+c)+ d #+ g*data[1] + j*data[2]
def do_transformation(old_global_xyz, cube, frame_file):
ars_obj = ARS(old_global_xyz, cube, frame_file, pcube_2=cube)
f = open("frame_33_local.xyz").readlines()
fitted_local = []
for x in f[2:]:
fitted_local.append([float(x.split()[1]), float(x.split()[2]), float(x.split()[3])])
ars_obj.set_local_charge_positions(np.array(fitted_local))
cp = ars_obj.local_to_global()
ars_obj.set_charge_positions_plus(cp)
ars_obj.save_charges_global("kernel.xyz")
BOHR_TO_ANGSTROM = 0.529177
model = pd.read_pickle(r"kernel_data/best_model.pkl")
def z_to_char(z):
if z == 1:
return "H"
elif z == 6:
return "C"
elif z == 7:
return "N"
elif z == 8:
return "O"
header = """24
s x[A] y[A] z[A] q[e]\n"""
line_format = "N {} {} {} {} \n"
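# cube_to_xyz reads the atom block of a Gaussian .cube file: the atom count sits on the
# third line, the per-atom records start on line 7 (the code takes fields 2-5: nuclear
# charge, then x, y, z in Bohr), and coordinates are converted to Angstrom on the way out.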
def cube_to_xyz(path):
lines = open(path).readlines()
n_atoms = int(lines[2].split()[0])
atoms = []
for line in lines[7 - 1:7 + n_atoms - 1]:
Z, x, y, z = line.split()[1:]
atoms.append([float(Z), float(x) * BOHR_TO_ANGSTROM, float(y) * BOHR_TO_ANGSTROM, float(z) * BOHR_TO_ANGSTROM])
return atoms
path = "/home/boittier/Documents/PhD/data/ester/frame_33.chk.d.cube"
def model_to_local(cube_path, local_output):
atoms = cube_to_xyz(cube_path)
print(atoms)
RCUT = 5
NMAX = 4
LMAX = 5
# SOAP descriptors
soap_desc = SOAP(species=["C", "H", "O"], rcut=RCUT, nmax=NMAX, lmax=LMAX, crossover=True)
atom_types = [z_to_char(x[0]) for x in atoms]
string = [x[1:] for x in atoms]
atoms = Atoms(atom_types, string)
samples = [atoms]
der, des = soap_desc.derivatives(samples, method="analytical", return_descriptor=True, n_jobs=8)
print(des.shape)
a_, b_, c_ = des.shape
X = des[0].reshape((1, b_ * c_))
pred = model.predict(X)
pred = pred[0].reshape(24, 3)
f = open(local_output, "w")
f.write(header)
for i, xyz in enumerate(pred):
f.write(line_format.format(*xyz, q[i]))
def fit_to_local(cube_path, fit_path, old_mdcm_path, local_output, frame_file):
# Load the fit
per_charge_params = pd.read_pickle(fit_path)
# Create an ARS object for atom positions, charges, angles, dih, etc
ars_obj = ARS(old_mdcm_path, cube_path, frame_file, pcube_2=cube_path)
q = ars_obj.c_charges
n_charges = len(q)
a1 = ars_obj.get_angle(0, 1, 7)
print(a1)
a2 = ars_obj.get_angle(1, 7, 9)
print(a2)
d1 = ars_obj.get_dih( 0, 1, 7, 9)
print(d1)
# Calculate the parameters from the cube file
f = open(local_output, "w")
f.write(header)
fitted_local = []
for i in range(n_charges):
x = func([d1, a1, a2], *per_charge_params[i])
y = func([d1, a1, a2], *per_charge_params[i+n_charges])
z = func([d1, a1, a2], *per_charge_params[i+n_charges*2])
f.write(line_format.format(x, y, z, q[i]))
fitted_local.append([x, y, z])
ars_obj.set_local_charge_positions(np.array(fitted_local))
cp = ars_obj.local_to_global()
ars_obj.set_charge_positions_plus(cp)
ars_obj.save_charges_global("fit.xyz")
pass
# frame_file = "/home/boittier/ester_traj/frames.txt"
# cube = "/home/boittier/ester_traj/t0/frame_33.chk.p.cube"
# old_global_xyz = "/home/boittier/FDCM/ester_t1_100/frame_33/refined.xyz"
# local_output = "frame_33_local.xyz"
def main():
# frame_file = sys.argv[1]
# cube = sys.argv[2]
# old_global_xyz = sys.argv[3]
# local_output = sys.argv[4]
# model_to_local(cube, local_output)
# do_transformation(old_global_xyz, cube, frame_file)
frame_file = "/home/unibas/boittier/fdcm_project/mdcms/amide/model1/frames.txt"
cube_path = "/data/unibas/boittier/fdcm/amide/scan-large/SCAN_amide1.pdb-0.xyz.chk.d.cube"
local_output = "test_fit.xyz"
fit_path = "/home/unibas/boittier/fdcm_project/fdcm_notebooks/loop-model.pkl"
old_mdcm_path = "/home/unibas/boittier/fdcm_project/mdcms/amide/model1/24_charges_refined.xyz"
fit_to_local(cube_path, fit_path, old_mdcm_path, local_output, frame_file)
if __name__ == "__main__":
main()
| 4,718 |
utility/bulkAssembler.py
|
nlapier2/multiInstanceLearning
| 4 |
2172205
|
# Author: <NAME>
# Date: June 29, 2016
import argparse
import os
import shlex
import subprocess
import sys
def write_conf(config, path):
conf = open(config, 'w')
conf.write('max_rd_len=180\n')
conf.write('[LIB]\n')
conf.write('avg_ins=700\n')
conf.write('reverse_seq=0\n')
conf.write('asm_flags=3\n')
conf.write('rank=1\n')
conf.write('rd_len_cutoff=100\n')
conf.write('\nq=' + path+'\n')
conf.close()
def assemble(args, kmer):
start = int(args.start.split('SRR')[1])
if args.end == 'NONE':
end = start
else:
end = int(args.end.split('SRR')[1])
config = args.out + 'config-' + args.start + '-' + args.end + '.config' # location of config file
contigs = args.out + 'contigs-' + args.start + '-' + args.end + '.fasta' # the file that will hold all contigs
if args.verbose:
print 'Attempting to open: ' + config + ' and ' + contigs
conf = open(config, 'w')
conf.close()
contig = open(contigs, 'w')
for cur in range(start, end + 1):
directory = args.out + 'SRR' + str(cur) + '/' # directory to output assembly of this file to
path = args.path + 'SRR' + str(cur) + args.extension # path to current fastx file
if args.combine == 'NO':
if args.verbose:
print 'Calling: mkdir ' + directory
os.mkdir(directory)
os.chdir(directory)
if args.verbose:
print 'Making config file for SRR' + str(cur)
write_conf(config, path)
if args.verbose:
print 'Calling: ' + args.location + ' all -s ' + config + ' -K ' + str(kmer) + \
' -R -V -o graph_prefix 1>all.log 2>all.err'
subprocess.call(shlex.split(args.location + ' all -s ' + config + ' -K ' + str(kmer) +
' -R -V -o graph_prefix 1>all.log 2>all.err'))
if args.verbose:
print 'Dumping scaffolds into bulk contig file...'
scaffolds = open(directory+'graph_prefix.scafSeq', 'r')
for line in scaffolds:
if line.startswith('>'): # defline, not a read
contig.write('>SRR' + str(cur) + '.0 ' + line.split('>')[1])
elif len(line) > 3: # non-empty line (contains read)
contig.write(''.join(line.split()) + '\n')
contig.close()
os.remove(config)
def parseargs(): # handle user arguments
parser = argparse.ArgumentParser(description='Script that performs assembly on a number of patient files.')
parser.add_argument('--start', default='NONE', help='File number to start on. Required.')
parser.add_argument('--end', default='NONE', help='File number to end on. Default: only download start run')
parser.add_argument('--extension', default='.fasta', help='File extension (ie .fasta, .fastq, ...) Default: .fasta')
parser.add_argument('--path', default='./', help='Path to files to be assembled. Default: current directory.')
parser.add_argument('--location', default='NONE', help='Path to assembler. Required.')
parser.add_argument('--assembler', default='soap', help='Which assembler to use. Default: "soap" (soapDenovo2).')
parser.add_argument('--kmer', default=63, help='Kmer value to use for the assembler. Default: 63')
parser.add_argument('--out', default='./', help='Output directory. Default is current directory.')
parser.add_argument('--combine', default='NO',help='Do not perform assembly, just combine assembled files.')
parser.add_argument('-v', '--verbose', default=False, action='store_true', help='Verbose output')
args = parser.parse_args()
return args
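# A hypothetical invocation (paths and run accessions are made up), assuming SOAPdenovo2
# is installed at the given --location:
#   python bulkAssembler.py --start SRR1000001 --end SRR1000005 --path /data/reads/ \
#       --extension .fastq --location /opt/SOAPdenovo2/SOAPdenovo-63mer --out /data/assemblies/ -v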
def main():
args = parseargs()
if args.start == 'NONE':
print "Error: File number to start on must be specified with --start"
sys.exit()
if args.location == 'NONE':
print "Error: Location of assembler executable must be specified with --location"
sys.exit()
if args.assembler != 'soap':
print "Error: Only soapDenovo2 is supported at this time (use default for --assembler)."
sys.exit()
try:
kmer = int(args.kmer)
except ValueError:
print "Error: --kmer must be an odd integer between 13 and 127."
sys.exit()
if kmer < 13 or kmer > 127 or kmer % 2 != 1:
print "Error: --kmer must be an odd integer between 13 and 127."
sys.exit()
if not args.path.endswith('/'):
args.path += '/'
if not args.out.endswith('/'):
args.out += '/'
if not args.extension.startswith('.'):
args.extension = '.' + args.extension
if args.verbose:
print "Verbose output requested."
assemble(args, kmer)
if __name__ == "__main__":
main()
| 4,787 |
telemetry_websocket.py
|
cjackie/drone-tello-experiments
| 0 |
2172015
|
import logging
from threading import *
from websocket_server import WebsocketServer
from threaded_telemetry_streamer import ThreadedTelemetryStreamer
streamer_lock = Semaphore(1)
streamer = None
clients = set()
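# A single ThreadedTelemetryStreamer is shared by all websocket clients: the first client
# to connect starts it, later clients are only added to the `clients` set, and the streamer
# is stopped once the last client leaves. `streamer_lock` serialises these transitions.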
def new_client(client, server):
print("new_client: " + str(client))
global streamer, streamer_lock, clients
streamer_lock.acquire()
if streamer == None:
try:
streamer = ThreadedTelemetryStreamer(server)
streamer.start()
clients.add(client["id"])
except Exception as e:
print("Something went wrong in new_client: " + str(e))
streamer = None
else:
clients.add(client["id"])
streamer_lock.release()
def client_left(client, server):
print("client_left: " + str(client))
global streamer, streamer_lock, clients
streamer_lock.acquire()
clients.remove(client["id"])
if len(clients) == 0:
streamer.stop()
streamer = None
streamer_lock.release()
def message_received(client, server, message):
print(str(client) + '---->' + message)
server = WebsocketServer(13254, host='localhost', loglevel=logging.INFO)
server.set_fn_new_client(new_client)
server.set_fn_client_left(client_left)
server.set_fn_message_received(message_received)
server.run_forever()
| 1,311 |
text2emospch/src/model/tweet_sentiment_classification_model.py
|
jlim262/text2emotional-speech
| 0 |
2171312
|
import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from transformers import AutoModel, AutoConfig
from built.registry import Registry
@Registry.register(category="model")
class TweetSentimentClassificationModel(nn.Module):
"""Tweet Sentiment Classification Model.
Parameters
----------
transformer_type:
transformer_path:
drop_out_rate:
num_classes:
Returns
-------
classifier_logits : torch.Tensor with shape (batch_size, num_classes).
Unnormalised scores (logits) for each label.
"""
def __init__(self, transformer_type, transformer_path, drop_out_rate, num_classes):
super().__init__()
self.transformer_type = transformer_type
self.transformer_path = transformer_path
self.config = AutoConfig.from_pretrained(transformer_type)
self.transformer = AutoModel.from_pretrained(
transformer_type, output_hidden_states=True)
self.drop_out = nn.Dropout(drop_out_rate)
self.classifier = nn.Linear(self.config.hidden_size, num_classes)
def forward(self, input_ids, attention_mask=None, token_type_ids=None):
outputs = self.transformer(
input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
# classification
pooled_output = outputs['pooler_output']
pooled_output = self.drop_out(pooled_output)
classifier_logits = self.classifier(pooled_output)
return classifier_logits
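# A minimal smoke-test sketch (not part of the original module): it assumes the
# "bert-base-uncased" checkpoint can be downloaded and feeds random token ids in place of
# a tokenized tweet, just to show the expected input/output shapes.
if __name__ == "__main__":
    model = TweetSentimentClassificationModel(
        transformer_type="bert-base-uncased",
        transformer_path=None,
        drop_out_rate=0.1,
        num_classes=3,
    )
    dummy_ids = torch.randint(0, model.config.vocab_size, (2, 16))  # batch of 2, 16 tokens
    dummy_mask = torch.ones_like(dummy_ids)
    logits = model(dummy_ids, attention_mask=dummy_mask)
    print(logits.shape)  # -> torch.Size([2, 3])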
| 1,566 |
tests/test_matlab_funcs.py
|
GuckLab/ggf
| 0 |
2172420
|
import numpy as np
from ggf import matlab_funcs
def test_besselj():
# https://de.mathworks.com/help/matlab/ref/besselj.html
z = np.arange(6) * .2
req = matlab_funcs.besselj(1, z)
ans = [0,
0.0995,
0.1960,
0.2867,
0.3688,
0.4401,
]
assert np.allclose(req, ans, rtol=0, atol=5e-5)
def test_besselh():
z = np.arange(1, 6) * .2
req = matlab_funcs.besselh(2, z)
ansr = [0.0050,
0.0197,
0.0437,
0.0758,
0.1149,
]
ansi = [-32.1571,
-8.2983,
-3.8928,
-2.3586,
-1.6507,
]
assert np.allclose(req.real, ansr, rtol=0, atol=5e-5)
assert np.allclose(req.imag, ansi, rtol=0, atol=5e-5)
def test_gammaln():
x = np.array([1/5, 1/2, 2/3, 8/7, 3])
req = matlab_funcs.gammaln(x)
ans = [1.5241, 0.5724, 0.3032, -0.0667, 0.6931]
assert np.allclose(req, ans, rtol=0, atol=5e-5)
def test_legendre():
z = np.arange(6) * .2
req = matlab_funcs.legendre(3, z)
ans = [[0.000, -.2800, -.440, -.3600, .0800, 1.0000],
[1.5000, 1.1758, 0.2750, -0.9600, -1.980, 0.0000],
[0.0000, 2.8800, 5.0400, 5.7600, 4.3200, 0.0000],
[-15.0000, -14.1091, -11.5481, -7.6800, -3.2400, 0.0000]
]
assert np.allclose(req, ans, rtol=0, atol=5e-5)
def test_lscov():
# https://de.mathworks.com/help/matlab/ref/lscov.html
x1 = np.array([.2, .5, .6, .8, 1.0, 1.1])
x2 = np.array([.1, .3, .4, .9, 1.1, 1.4])
X = np.array([np.ones(x1.size), x1, x2]).T
y = np.array([.17, .26, .28, .23, .27, .34])
w = np.ones_like(x1)
req = matlab_funcs.lscov(X, y, w)
ans = [0.1203, 0.3284, -0.1312]
assert np.allclose(req, ans, rtol=0, atol=5e-5)
w2 = np.array([1, 1, 1, 1, 1, .1])
req2 = matlab_funcs.lscov(X, y, w2)
ans2 = [0.1046, 0.4614, -0.2621]
assert np.allclose(req2, ans2, rtol=0, atol=5e-5)
def test_quadl():
def myfunc(x): return 1./(x**3-2*x-5)
Q = matlab_funcs.quadl(myfunc, 0, 2)
assert np.allclose(Q, -0.4605, rtol=0, atol=5e-5)
if __name__ == "__main__":
# Run all tests
loc = locals()
for key in list(loc.keys()):
if key.startswith("test_") and hasattr(loc[key], "__call__"):
loc[key]()
| 2,349 |
tests/unit/test_engines.py
|
authoritylabs/py_al_papi
| 0 |
2172380
|
from tests.test_helper import *
class TestEngines(unittest.TestCase):
def test_all_engines(self):
assert_equal(Engines.all(), ["google", "yahoo", "bing"])
| 167 |
src/config/database.py
|
egor-muindor/yandere_bot
| 0 |
2172568
|
from orator import DatabaseManager, Model
from db import DATABASES
__all__ = ['db', 'DATABASES']
db = DatabaseManager(DATABASES)
Model.set_connection_resolver(db)
| 165 |
tests/Category_tests.py
|
AndrewIndeche/News-App
| 0 |
2172490
|
import unittest
from app.models import Category
class ArticleTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Category class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_Category = Category('<NAME>','Cointelegraph','El Salvador’s Bitcoin detractors: Opposition mounts despite crypto rollout - Cointelegraph','2021-09-11T05:07:00Z','https://images.cointelegraph.com/images/1200_aHR0cHM6Ly9zMy5jb2ludGVsZWdyYXBoLmNvbS91cGxvYWRzLzIwMjEtMDkvMzczZjNkMjMtMTExMS00NTEwLWEwNDgtZWQyNmMwN2JhMjhmLmpwZw==.jpg','https://cointelegraph.com/news/el-salvador-s-bitcoin-detractors-opposition-mounts-despite-crypto-rollout')
def test_instance(self):
self.assertTrue(isinstance(self.new_Category,Category))
if __name__ == '__main__':
unittest.main()
| 859 |
_code/police-militarization/pie.py
|
jeffreyshen19/blog
| 5 |
2172318
|
import csv
import get_category
# Pull data for pie chart
categories = {
"grenade-launchers": {
"cost": 0,
"quantity": 0,
},
"night-vision": {
"cost": 0,
"quantity": 0,
},
"assault-rifles": {
"cost": 0,
"quantity": 0,
},
"armored-vehicles" : {
"cost": 0,
"quantity": 0,
},
"aircraft": {
"cost": 0,
"quantity": 0,
},
"body-armor": {
"cost": 0,
"quantity": 0,
},
"other": {
"cost": 0,
"quantity": 0,
},
}
with open('1033.csv') as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=',')
for row in csv_reader:
# Add category breakdown
category = get_category.get_category(row["item-name"])
categories[category]["cost"] += float(row["cost"])
categories[category]["quantity"] += int(row["quantity"])
fieldnames = ["category", "quantity", "cost"]
with open("../../data/police-militarization/1033-by-category.csv", 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for key, value in categories.items():
value["category"] = key
writer.writerow(value)
| 1,233 |
kmerexpr/multinomial_model.py
|
bob-carpenter/kmers
| 1 |
2170331
|
# Sample code automatically generated on 2021-12-11 17:57:52
# by www.matrixcalculus.org from input:
# d/dtheta y' * log(x * exp(theta) / sum(exp(theta))) - 1/18 * theta' * theta
# = 1 / sum(exp(theta))
# * (x' * (y ./ (1 / sum(exp(theta)) * x * exp(theta))))
# .* exp(theta)
# - 1 / sum(exp(theta)).^2
# * exp(theta)' * x'
# * (y ./ (1 / sum(exp(theta)) * x * exp(theta)))
# * exp(theta)
# - 2/18 * theta
# where
# theta is a vector
# x is a matrix
# y is a vector
# The generated code is provided "as is" without warranty of any kind.
# The code here refactors the auto-generated code into a class and
# pulls the testing out.
import numpy as np
from scipy.sparse import load_npz
from scipy.special import softmax as softmax
from scipy import optimize
import time
# BMW: Class names are usually done in CamelCase style
class multinomial_model:
"""Multinomial model of k-mer reads.
The likelihood and prior are
y ~ multinomial(x * softmax(theta))
theta ~ normal(0, 3)
where
* y: M x 1 array of integer counts
* x: M x T left stochastic matrix of kmer probabilities for isoform
* theta: T x 1 vector of expression values
with size constants
* K: size of k-mer
* M = 4^K: number of distinct k-mers
* T: number of target isoforms
All operations are on the log scale, with target log posterior
log p(theta | y) = y' * log(x * softmax(theta)) - 1/18 * theta' * theta
where
softmax(theta) = exp(theta) / sum(exp(theta))
Because theta is T x 1, the likelihood itself is not identified, as
theta + c yields the same density as theta for any constant c. The
parameters are identified through the prior/penalty.
The log posterior could be considered a penalized maximum likelihood with
a scaled L2 penalty
penalty(theta) = beta * ||theta||_2^2
The penalty will shrink estimates of theta toward zero, which has the effect
of making softmax(theta) more uniform.
The constructor instantiates a model based on two arguments
corresponding to x and y. Because x is so large, it is loaded from
a file to which it has been serialized in csr .npz format, which is
described in the scipy docs
https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.save_npz.html
:param x_file: file in csr .npz format from scipy.sparse
containing the x matrix
:param y: vector of read counts
"""
def __init__(self, x_file=None, y_file=None, beta =1/18):
"""Construct a multinomial model.
Keyword arguments:
x_file -- path to file containing a serialized sparse,
left-stochastic matrix in .npz format from scipy.sparse
y -- vector of k-mer counts
beta -- parameter for prior
"""
x = load_npz(x_file)
if(isinstance(y_file, np.ndarray)):
y = y_file
else:
y = np.load(y_file)
self.ymask = y.nonzero()  # Only need self.ynnz and self.xnnz. Throw away the rest?
self.ynnz = y[self.ymask]
self.xnnz = x[self.ymask]
self.N = np.sum(y)
self.name = "softmax"
x_dim = x.shape
self.M = x_dim[0]
self.T = x_dim[1]
self.beta =beta
# dimension checking
assert len(x_dim) == 2
x_rows = x_dim[0]
x_cols = x_dim[1]
dim = y.shape
assert len(dim) == 1
y_rows = dim[0]
assert y_rows == x_rows
def logp_grad_old(self, theta=None):
"""Return log density and its gradient evaluated at the
specified simplex.
Keyword arguments:
theta -- simplex of expected isoform proportions
"""
x = self.x
y = self.y
dim = theta.shape
assert len(dim) == 1
theta_rows = dim[0]
dim = x.shape
assert len(dim) == 2
x_rows = dim[0]
x_cols = dim[1]
dim = y.shape
assert len(dim) == 1
y_rows = dim[0]
assert y_rows == x_rows
assert theta_rows == x_cols
# import pdb; pdb.set_trace()
ymask = y.nonzero()
ynnz = y[ymask]
sig = softmax(theta)
xTsig = x.dot(sig)
xTsignnz = xTsig[ymask]
t_3 = (x[ymask].T).dot(ynnz / xTsignnz)
functionValue = ynnz.dot(np.log(xTsignnz)) - (theta.dot(theta) *self.beta)
gradient = t_3 * sig - self.N * sig - (2 *self.beta) * theta
# Double check: Think ((sig).dot(t_3)*sig )) = sum(y)*sig = N*sig
return functionValue, gradient
def logp_grad(self, theta=None):
"""Return log density and its gradient evaluated at the
specified simplex.
Keyword arguments:
theta -- simplex of expected isoform proportions
"""
sig = softmax(theta)
# xTsig = x.dot(sig)
# xTsignnz = xTsig[ymask]
xTsignnz= self.xnnz.dot(sig)
t_3 = (self.xnnz.T).dot(self.ynnz / xTsignnz)
functionValue = self.ynnz.dot(np.log(xTsignnz)) - (theta.dot(theta) *self.beta)
gradient = t_3 * sig - self.N * sig - (2 *self.beta) * theta
return functionValue, gradient
def fit(self, theta0=None, factr=1.0, gtol=1e-12, n_iters = 50000):
if theta0 is None: #initialize to normal 0 1
theta0 = np.random.normal(0, 1, self.T)
func = lambda theta: -self.logp_grad(theta)[0]
fprime = lambda theta: -self.logp_grad(theta)[1]
start = time.time()
theta_sol, f_sol, dict_flags_convergence = optimize.fmin_l_bfgs_b(func, theta0, fprime, pgtol = gtol, factr = factr, maxiter=n_iters, maxfun = 10*n_iters)
end = time.time()
print("softmax model took ", end - start, " time to fit")
if dict_flags_convergence['warnflag'] == 1:
print("WARNING: softmax model did not converge. too many function evaluations or too many iterations. Print d[task]:", dict_flags_convergence["task"])
print("Total iterations: ", str(dict_flags_convergence['nit']))
elif dict_flags_convergence['warnflag'] == 2:
print("WARNING: softmax model did not converge due to: ", dict_flags_convergence["task"])
# dict_sol["grad"] = -dict_sol["grad"]
dict_opt = {'x' : softmax(theta_sol), 'loss_records' : -f_sol, 'iteration_counts' : dict_flags_convergence['nit'], 'grad' : -dict_flags_convergence["grad"]}
return dict_opt
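# A self-contained sanity-check sketch (not part of the original module): it builds a tiny
# synthetic left-stochastic x and a random count vector y, then compares the analytic
# gradient from logp_grad against central finite differences. All sizes and the temporary
# file path are made up for illustration.
if __name__ == "__main__":
    import os
    import tempfile
    from scipy.sparse import csr_matrix, save_npz
    rng = np.random.default_rng(0)
    M, T = 20, 5
    x_dense = rng.random((M, T))
    x_dense /= x_dense.sum(axis=0, keepdims=True)  # columns sum to one (left stochastic)
    y = rng.integers(0, 10, size=M).astype(float)
    with tempfile.TemporaryDirectory() as tmp:
        x_path = os.path.join(tmp, "x.npz")
        save_npz(x_path, csr_matrix(x_dense))
        model = multinomial_model(x_file=x_path, y_file=y)
    theta = rng.normal(size=T)
    _, grad = model.logp_grad(theta)
    eps = 1e-6
    grad_fd = np.array([
        (model.logp_grad(theta + eps * e)[0] - model.logp_grad(theta - eps * e)[0]) / (2 * eps)
        for e in np.eye(T)
    ])
    print("max |analytic - finite-difference| =", np.max(np.abs(grad - grad_fd)))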
| 6,517 |
tests/weather_api_unmarhsalling_test.py
|
meandnano/weatherbot
| 0 |
2170583
|
import json
import pytest
from app import weather
"""Taken from https://openweathermap.org/current"""
resp_json = """{
"coord": {
"lon": -122.08,
"lat": 37.39
},
"weather": [
{
"id": 800,
"main": "Clear",
"description": "clear sky",
"icon": "01d"
}
],
"base": "stations",
"main": {
"temp": 282.55,
"feels_like": 281.86,
"temp_min": 280.37,
"temp_max": 284.26,
"pressure": 1023,
"humidity": 100
},
"visibility": 16093,
"wind": {
"speed": 1.5,
"deg": 350
},
"clouds": {
"all": 1
},
"dt": 1560350645,
"sys": {
"type": 1,
"id": 5122,
"message": 0.0139,
"country": "US",
"sunrise": 1560343627,
"sunset": 1560396563
},
"timezone": -25200,
"id": 420006353,
"name": "Mountain View",
"cod": 200
}"""
def test_from_api_succeeds_when_correct_json():
expected: weather.WeatherState = {
"place_name": "Mountain View",
"weather_desc": "clear sky",
"temp": 282.55,
"temp_feels_like": 281.86,
"pressure": 1023,
"humidity": 100,
"cloud": 1,
"wind_speed": 1.5,
"when": 1560350645.0,
}
json_dict: dict = json.loads(resp_json)
assert expected == weather.from_api(json_dict)
def test_from_api_raises_when_insufficient_json():
json_dict: dict = json.loads("{}")
with pytest.raises(KeyError):
weather.from_api(json_dict)
| 1,452 |
home/pedrosenarego/clean.py
|
rv8flyboy/pyrobotlab
| 63 |
2172737
|
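# Strip every '0' and '1' character from each line of Index.csv, then rewrite the file in place.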
f = open('Index.csv','r')
a = ['0','1']
lst = []
for line in f:
for word in a:
if word in line:
line = line.replace(word,'')
lst.append(line)
f.close()
f = open('Index.csv','w')
for line in lst:
f.write(line)
f.close()
| 252 |
ir_config_parameter_multi_company/__init__.py
|
ShaheenHossain/itpp-labs-misc-addons13
| 0 |
2172573
|
# -*- coding: utf-8 -*-
from . import models
def uninstall_hook(cr, registry):
from odoo import api, SUPERUSER_ID
env = api.Environment(cr, SUPERUSER_ID, {})
# remove properties
field_id = env.ref("base.field_ir_config_parameter_value").id
env["ir.property"].search([("fields_id", "=", field_id)]).unlink()
| 331 |
plenum/test/consensus/message_req/message_req_3pc_service/test_incorrect_message_req_processing.py
|
jandayanan/indy-plenum
| 148 |
2172695
|
from unittest.mock import Mock
import pytest
from plenum.common.constants import PREPREPARE, COMMIT, PREPARE, LEDGER_STATUS
from plenum.common.exceptions import IncorrectMessageForHandlingException
from plenum.common.messages.internal_messages import MissingMessage
from plenum.common.messages.node_messages import MessageReq, MessageRep, PrePrepare, Prepare, Commit
from plenum.common.types import f
from plenum.server.consensus.message_request.message_req_service import MessageReqService
from plenum.test.helper import create_pre_prepare_no_bls, generate_state_root, logger
def raise_ex():
raise IncorrectMessageForHandlingException(msg="", reason="", log_method=logger.info)
def test_process_message_req_incorrect_inst_id(message_req_service: MessageReqService, external_bus, data):
key = (data.view_no, 1)
message_req = MessageReq(**{
f.MSG_TYPE.nm: PREPREPARE,
f.PARAMS.nm: {f.INST_ID.nm: data.inst_id + 1,
f.VIEW_NO.nm: key[0],
f.PP_SEQ_NO.nm: key[1]},
})
message_req_service.process_message_req(message_req, "frm")
assert len(external_bus.sent_messages) == 0
def test_process_message_req_handler_raise_ex(message_req_service: MessageReqService, external_bus, data):
msg_type = PREPREPARE
key = (data.view_no, 1)
message_req = MessageReq(**{
f.MSG_TYPE.nm: msg_type,
f.PARAMS.nm: {f.INST_ID.nm: data.inst_id,
f.VIEW_NO.nm: key[0],
f.PP_SEQ_NO.nm: key[1]},
})
message_req_service.handlers[msg_type].process_message_req = lambda msg: raise_ex()
message_req_service.process_message_req(message_req, "frm")
assert len(external_bus.sent_messages) == 0
def test_process_missing_message_incorrect_inst_id(message_req_service: MessageReqService, external_bus, data):
frm = "frm"
missing_msg = MissingMessage(msg_type=PREPREPARE,
key=data.last_ordered_3pc,
inst_id=data.inst_id + 1,
dst=[frm],
stash_data=None)
message_req_service.process_missing_message(missing_msg)
assert len(external_bus.sent_messages) == 0
def test_process_missing_message_raise_ex(message_req_service: MessageReqService, external_bus, data):
frm = "frm"
msg_type = PREPREPARE
missing_msg = MissingMessage(msg_type=msg_type,
key=data.last_ordered_3pc,
inst_id=data.inst_id + 1,
dst=[frm],
stash_data=None)
message_req_service.handlers[msg_type].prepare_msg_to_request = lambda msg: raise_ex()
message_req_service.process_missing_message(missing_msg)
assert len(external_bus.sent_messages) == 0
def test_process_message_rep_without_msg(message_req_service: MessageReqService, external_bus, data, pp):
key = (pp.viewNo, pp.ppSeqNo)
message_req_service.handlers[PREPREPARE].requested_messages[key] = None
message_rep = MessageRep(**{
f.MSG_TYPE.nm: PREPREPARE,
f.PARAMS.nm: {f.INST_ID.nm: data.inst_id,
f.VIEW_NO.nm: key[0],
f.PP_SEQ_NO.nm: key[1]},
f.MSG.nm: None
})
frm = "frm"
network_handler = Mock()
external_bus.subscribe(PrePrepare, network_handler)
message_req_service.process_message_rep(message_rep, frm)
network_handler.assert_not_called()
def test_process_message_rep_invalid_preprepare(message_req_service: MessageReqService, external_bus, data, pp):
key = (pp.viewNo, pp.ppSeqNo)
msg_type = PREPREPARE
message_req_service.handlers[PREPREPARE].requested_messages[key] = None
message_rep = MessageRep(**{
f.MSG_TYPE.nm: msg_type,
f.PARAMS.nm: {f.INST_ID.nm: data.inst_id,
f.VIEW_NO.nm: key[0],
f.PP_SEQ_NO.nm: key[1]},
f.MSG.nm: dict(pp.items())
})
frm = "frm"
network_handler = Mock()
external_bus.subscribe(PrePrepare, network_handler)
message_req_service.handlers[msg_type].extract_message = lambda msg, frm: raise_ex()
message_req_service.process_message_rep(message_rep, frm)
network_handler.assert_not_called()
def test_process_message_rep_invalid_inst_id(message_req_service: MessageReqService, external_bus, data, pp):
key = (pp.viewNo, pp.ppSeqNo)
msg_type = PREPREPARE
message_req_service.handlers[PREPREPARE].requested_messages[key] = None
message_rep = MessageRep(**{
f.MSG_TYPE.nm: msg_type,
f.PARAMS.nm: {f.INST_ID.nm: data.inst_id + 1,
f.VIEW_NO.nm: key[0],
f.PP_SEQ_NO.nm: key[1]},
f.MSG.nm: dict(pp.items())
})
frm = "frm"
network_handler = Mock()
external_bus.subscribe(PrePrepare, network_handler)
message_req_service.process_message_rep(message_rep, frm)
network_handler.assert_not_called()
def test_process_message_rep_with_incorrect_type(message_req_service: MessageReqService, external_bus, data, pp):
key = (pp.viewNo, pp.ppSeqNo)
message_rep = MessageRep(**{
f.MSG_TYPE.nm: LEDGER_STATUS,
f.PARAMS.nm: {f.INST_ID.nm: data.inst_id,
f.VIEW_NO.nm: key[0],
f.PP_SEQ_NO.nm: key[1]},
f.MSG.nm: dict(pp.items())
})
frm = "frm"
network_handler = Mock()
external_bus.subscribe(PrePrepare, network_handler)
external_bus.subscribe(Prepare, network_handler)
external_bus.subscribe(Commit, network_handler)
message_req_service.process_message_rep(message_rep, frm)
network_handler.assert_not_called()
def test_process_message_req_with_incorrect_type(message_req_service: MessageReqService, external_bus, data, pp):
msg_type = LEDGER_STATUS
key = (data.view_no, 1)
message_req = MessageReq(**{
f.MSG_TYPE.nm: msg_type,
f.PARAMS.nm: {f.INST_ID.nm: data.inst_id,
f.VIEW_NO.nm: key[0],
f.PP_SEQ_NO.nm: key[1]},
})
message_req_service.process_message_req(message_req, "frm")
assert len(external_bus.sent_messages) == 0
| 6,222 |
tests/test_socket_transport.py
|
pthomaid/constellations
| 1 |
2172666
|
import unittest
import json
import time
from constellations import socket_transport
def callback(message):
print("Received " + message)
class TestSocketTransport(unittest.TestCase):
# TODO Cleanup the server and ports properly in order to run multiple independent tests
def setUp(self):
pass
def tearDown(self):
pass
def test_socket_transport_server_start_stop(self):
addr = {'host':'', 'port':5000}
st = socket_transport.SocketTransport(addr)
st.receive(callback)
time.sleep(5)
st.close()
# TODO assert that everything is closed, if possible
def test_socket_transport_server_port_busy(self):
addr1 = {'host':'', 'port':5001}
addr2 = {'host':'', 'port':5001}
st1 = socket_transport.SocketTransport(addr1)
st1.receive(callback)
st2 = socket_transport.SocketTransport(addr2)
st2.receive(callback)
print(st1.host + ":" + str(st1.port))
print(st2.host + ":" + str(st2.port))
time.sleep(5)
st1.close()
st2.close()
def test_socket_transport_server_client(self):
addr = {'host':'', 'port':5010}
st = socket_transport.SocketTransport(addr)
actual_addr = {'host':st.host, 'port':st.port}
st.receive(callback)
time.sleep(5)
message = "I am not a message"
st.send(actual_addr, message)
st.close()
if __name__ == "__main__":
unittest.main()
| 1,504 |
api/weather.py
|
AkhilVinayakp/WeatherAPI
| 0 |
2169412
|
from flask import Flask
from flask_restful import Resource, Api
# creating flask app
app = Flask(__name__)
api = Api(app)
class Weather(Resource):
def get(self, service_name: str) -> str:
return "service {}".format(service_name)
| 245 |
src/utils/mapgenes.py
|
JArgasinska/git
| 126 |
2172388
|
import mygene
import csv
syms = []
with open('geneset.txt', newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter='\t', quotechar='|')
for row in spamreader:
syms.append(row[0])
mg = mygene.MyGeneInfo()
#xli = ['DDX26B','CCDC83', 'MAST3', 'RPL11', 'ZDHHC20', 'LUC7L3', 'SNORD49A', 'CTSH', 'ACOT8']
#xli = ['CCDC83', 'MAST3', 'RPL11']
rows = mg.querymany(syms, species=10090, scopes="symbol", fields=["ensembl.gene"], returnall=True)['out']
for r in rows:
    hit = r.get('ensembl')  # may be missing (no hit) or a list (ambiguous symbol)
    if hit:
        print(hit[0]['gene'] if isinstance(hit, list) else hit['gene'])
| 521 |
evtstrd/http.py
|
srittau/eventstreamd
| 0 |
2170445
|
import logging
from asyncio.streams import StreamWriter, StreamReader
from http import HTTPStatus
from typing import Tuple, Iterable, List, Dict
Header = Tuple[str, str]
class HTTPError(Exception):
def __init__(
self,
status: HTTPStatus,
message: str,
*,
headers: Iterable[Header] = [],
) -> None:
super().__init__(message)
self.status = status
self.headers: List[Header] = list(headers)
class BadRequestError(HTTPError):
def __init__(self, message: str) -> None:
super().__init__(HTTPStatus.BAD_REQUEST, message)
class CGIArgumentError(BadRequestError):
def __init__(self, argument_name: str, message: str) -> None:
full_message = f"{argument_name}: {message}"
super().__init__(full_message)
self.argument_name = argument_name
class NotFoundError(HTTPError):
def __init__(self, path: str) -> None:
message = f"'{path}' not found"
super().__init__(HTTPStatus.NOT_FOUND, message)
class MethodNotAllowedError(HTTPError):
def __init__(self, method: str) -> None:
message = f"method {method} not allowed"
super().__init__(HTTPStatus.METHOD_NOT_ALLOWED, message)
self.method = method
async def read_http_head(
reader: StreamReader
) -> Tuple[str, str, Dict[str, str]]:
async def read_line() -> str:
line_ = await reader.readline()
try:
return line_.decode("ascii").strip()
except UnicodeDecodeError:
raise BadRequestError("non-ASCII characters in header")
async def read_request_line() -> Tuple[str, str]:
line_ = await read_line()
try:
m, p, http_tag = line_.split(" ")
except ValueError:
raise BadRequestError("invalid request line")
if http_tag != "HTTP/1.1":
raise BadRequestError("unsupported HTTP version")
if m not in ["HEAD", "GET", "POST", "PUT"]:
raise NotImplementedError()
return m, p
def parse_header_line(li: str) -> Tuple[str, ...]:
try:
return tuple(li.split(": ", maxsplit=1))
except ValueError:
raise BadRequestError("invalid header line")
method, path = await read_request_line()
headers = {}
while True:
line = await read_line()
if not line:
break
he, va = parse_header_line(line)
headers[he.lower()] = va
return method, path, headers
def write_http_head(
writer: StreamWriter, code: HTTPStatus, headers: Iterable[Header]
) -> None:
status_line = "HTTP/1.1 {} {}\r\n".format(code.value, code.phrase)
writer.write(status_line.encode("ascii"))
for h, v in headers:
line = h.encode("ascii") + b": " + v.encode("ascii") + b"\r\n"
writer.write(line)
writer.write(b"\r\n")
def write_response(
writer: StreamWriter,
status: HTTPStatus,
headers: Iterable[Header],
body: str,
) -> None:
write_http_head(writer, status, headers)
writer.write(body.encode("utf-8"))
def write_http_error(writer: StreamWriter, exc: HTTPError) -> None:
body = str(exc) + "\r\n"
write_response(writer, exc.status, exc.headers, body)
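# HTTP/1.1 chunked transfer encoding: each chunk is its payload length in hexadecimal,
# CRLF, the payload itself, CRLF; a zero-length chunk (write_last_chunk) terminates the body.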
def write_chunk(writer: StreamWriter, data: bytes) -> None:
writer.write(bytes(hex(len(data))[2:], "ascii"))
writer.write(b"\r\n")
writer.write(data)
writer.write(b"\r\n")
encoded = (
data.decode("utf-8", errors="ignore")
.replace("\r", "\\r")
.replace("\n", "\\n")
)
logging.debug(f"wrote chunk to listener: {encoded}")
def write_last_chunk(writer: StreamWriter) -> None:
write_chunk(writer, b"")
| 3,685 |
src/modules/forms/recipes/addToGroceryList.py
|
kevin-funderburg/recipe-manager
| 0 |
2171936
|
import sys
import sqlite3
from sqlite3 import Error
# dbpath = r"../../../../server/db/database.sqlite" #this is the path when executing directly from command line
dbpath = r"server/db/database.sqlite" #this is the path when executing from npm
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
except Error as e:
print(e)
return conn
def main():
id = str(sys.argv[1])
print("id: ", id)
sql = "SELECT ingredients FROM recipes WHERE id = " + id
conn = create_connection(dbpath)
with conn:
# execute_sql(conn, sql)
cur = conn.cursor()
cur.execute(sql)
rows = cur.fetchall()
for row in rows:
ingredients = row[0]
ingredients = ingredients.split("\n")
for i in ingredients:
sql = "INSERT INTO grocerylist (name) VALUES('" + i + "')"
with conn:
# execute_sql(conn, sql)
cur = conn.cursor()
cur.execute(sql)
print(i + " added to grocerylist")
if __name__ == "__main__":
main()
| 1,270 |
15.py
|
christi-john/hackerrank-algorithms
| 0 |
2172624
|
# https://www.hackerrank.com/challenges/breaking-best-and-worst-records/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'breakingRecords' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts INTEGER_ARRAY scores as parameter.
#
def breakingRecords(scores):
# Write your code here
list1=[]
ans1=0
ans2=0
for i in range(len(scores)):
list1.append(scores[i])
if(max(list1)==scores[i] and i!=0):
if(list1[-1]!=list1[-2]):
ans1+=1
elif((min(list1)==scores[i] and i!=0)):
if(list1[-1]!=list1[-2]):
ans2+=1
return[ans1,ans2]
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
scores = list(map(int, input().rstrip().split()))
result = breakingRecords(scores)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
| 1,004 |
common/algos/algos.py
|
ValRat/SummerSnake2021
| 0 |
2172226
|
from typing import Callable, Dict, Tuple, List
from heapq import heapify, heappush, heappop
from functools import total_ordering
from common.point_utils import get_all_valid_neighbours
# aka gScore, saves the cost of path from start to known universe
class StoredScores(dict):
def __missing__(self, key):
return float('inf')
# For ease of storing within the minheap
@total_ordering
class PointWithCost:
def __init__(self, point: Tuple[int, int], cost: int):
self.point = point
self.cost = cost
def __eq__(self, other):
return self.cost == other.cost
def __ne__(self, other):
return self.cost != other.cost
def __gt__(self, other):
return self.cost > other.cost
def __str__(self):
return f'Point: {self.point} | Cost: {self.cost}'
# Reconstructs the path from goal to initial location
def reconstruct_path(cameFrom: Dict[Tuple[int, int], Tuple[int, int]], current: Tuple[int, int]) -> List[Tuple[int, int]]:
total_path: List[Tuple[int, int]] = []
total_path.append(current)
while current in cameFrom.keys():
current = cameFrom[current]
total_path.append(current)
return list(reversed(total_path))
# This is dumb, optimize if needed, required since we overrode __eq__ I think
def minheap_contains(heap: List[PointWithCost], point: Tuple[int, int]) -> bool:
return point in map(lambda p : p.point, heap)
# Stolen from wikipedia
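# Note: with the default heuristic h(x) = 0 and the unit edge weights returned by d(),
# this A* reduces to Dijkstra's algorithm (uniform-cost search) over the grid.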
def a_star(start: Tuple[int,int], goal: Tuple[int, int], hazards: List[Tuple[int,int]] = [], height: int = 5, width: int = 5, h: Callable = lambda x : 0) -> List[Tuple[int, int]]:
# TODO: make this a little more defined?
# Weight function between adjacent nodes
def d(node_from: Tuple[int, int], node_to: Tuple[int, int]) -> int:
return 1
# print(f'Start: {start}, Goal: {goal}')
openSet: List[PointWithCost] = []
heapify(openSet)
heappush(openSet, PointWithCost(start, 0))
cameFrom: Dict[Tuple[int, int], Tuple[int, int]] = {}
gScore = StoredScores()
gScore[start] = 0
fScore = StoredScores()
fScore[start] = h(start)
while len(openSet) > 0:
current = heappop(openSet).point
# print(f'Current: {current}')
if (current == goal):
# print(f'Reached goal')
return reconstruct_path(cameFrom, current)
# This list shouldn't include actual hazards, no
neighbours = get_all_valid_neighbours(current, hazards, height, width)
# print(f'Neighbours: {neighbours} ')
for neighbour in neighbours:
# print(f'Neighbour: {neighbour} ')
tentative_gScore = gScore[current] + d(current, neighbour)
if tentative_gScore < gScore[neighbour]:
cameFrom[neighbour] = current
gScore[neighbour] = tentative_gScore
fScore[neighbour] = gScore[neighbour] + h(neighbour)
if not minheap_contains(openSet, neighbour):
neighbour_pwc = PointWithCost(neighbour,fScore[neighbour])
# print(f'Adding Neighbour: {neighbour_pwc} ')
heappush(openSet, PointWithCost(neighbour, fScore[neighbour]))
# TODO: better exception/error handling
raise Exception('No valid path found')
# Testing purposes
if __name__ == '__main__':
openSet: List[PointWithCost] = []
randopoint = (1, 1)
heapify(openSet)
heappush(openSet, PointWithCost(randopoint, 0))
heappush(openSet, PointWithCost(randopoint, 50))
heappush(openSet, PointWithCost(randopoint, 99))
heappush(openSet, PointWithCost(randopoint, 30))
heappush(openSet, PointWithCost(randopoint, 40))
heappush(openSet, PointWithCost(randopoint, 33))
heappush(openSet, PointWithCost(randopoint, 10))
should_contain = minheap_contains(openSet, (1, 1))
should_not_contain = minheap_contains(openSet, (1, 2))
print(f'should_contain: {should_contain}, should_not_contain: {should_not_contain}')
| 4,049 |
S3_DataStructures/C10_ElementaryDataStructures/queue.py
|
JasonVann/CLRS
| 1 |
2172473
|
class Queue():
def __init__(self, n):
# n is the # of elements the queue can hold
self.n = n
self.data = [None] * n
self.head = 0 # the head of the queue
self.tail = 0 # the index to the next available element
self.free = n
def empty(self):
return self.free == self.n
def full(self):
return self.free == 0
def enqueue(self, i):
if self.full():
raise Exception("Queue overflow!")
self.data[self.tail] = i
self.free -= 1
self.tail = (self.tail + 1) % self.n
def dequeue(self):
if self.empty():
raise Exception("No element in the queue")
num = self.data[self.head]
self.free += 1
self.head = (self.head + 1) % self.n
return num
def __str__(self):
res = ''
i = 0
while i < self.n - self.free:
res += str(self.data[(self.head + i)%self.n])
res += ', '
i += 1
return 'Queue data is: ' + res[:-2]
def test():
queue = Queue(12)
queue.head = 6
queue.tail = 6
queue.enqueue(15)
queue.enqueue(6)
queue.enqueue(9)
queue.enqueue(8)
queue.enqueue(4)
print(queue.head, queue.tail, queue.free)
print(queue)
queue.enqueue(17)
queue.enqueue(3)
queue.enqueue(5)
print(queue.head, queue.tail, queue.free)
print(queue)
queue.dequeue()
print(queue.head, queue.tail, queue.free)
print(queue)
def Ex10_1_3():
queue = Queue(6)
queue.enqueue(4)
queue.enqueue(1)
queue.enqueue(3)
queue.dequeue()
queue.enqueue(8)
queue.dequeue()
print(queue.head, queue.tail, queue.free)
print(queue)
#test()
Ex10_1_3()
| 1,745 |
user/migrations/0008_delete_access_verifycode.py
|
ThePokerFaCcCe/messenger
| 0 |
2172791
|
# Generated by Django 3.2.10 on 2022-01-10 01:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user', '0007_add_is_token_expired'),
]
operations = [
migrations.DeleteModel(
name='Access',
),
migrations.DeleteModel(
name='VerifyCode',
),
]
| 369 |
cap2/extensions/experimental/tcems/mixcr.py
|
nanusefue/CAP2-1
| 9 |
2171470
|
import luigi
import logging
import subprocess
import logging
from os.path import join, dirname, basename
from ....pipeline.utils.cap_task import CapTask
from ....pipeline.config import PipelineConfig
from ....pipeline.utils.conda import CondaPackage
from ....pipeline.preprocessing import BaseReads
logger = logging.getLogger('tcems')
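# The three tasks below chain the MiXCR workflow: MixcrAlign maps reads to V(D)J reference
# segments, MixcrAssemble rescues partial alignments, extends them and assembles clonotypes
# with full-length contigs, and MixcrClones exports the assembled IGH clones to a text table.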
class MixcrAlign(CapTask):
module_description = """
This module aligns reads to VDJ genome regions
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pkg = CondaPackage(
package="mixcr",
executable="mixcr",
channel="imperial-college-research-computing",
config_filename=self.config_filename,
)
self.config = PipelineConfig(self.config_filename)
self.reads = BaseReads.from_cap_task(self)
def requires(self):
return self.pkg, self.reads
@classmethod
def version(cls):
return 'v0.1.0'
def tool_version(self):
version = self.run_cmd(f'{self.pkg.bin} --version').stdout.decode('utf-8')
return version
@classmethod
def dependencies(cls):
return ["mixcr", BaseReads]
@classmethod
def _module_name(cls):
return 'tcems::mixcr_align'
def output(self):
out = {
'alignments': self.get_target(f'alignments', 'vdjca'),
}
return out
@property
def alignments_path(self):
return self.output()[f'alignments'].path
def _run(self):
align_cmd = f'{self.pkg.bin} align -p rna-seq -s hsa -OallowPartialAlignments=true {self.reads.read_1}'
if self.paired:
align_cmd += f' {self.reads.read_1} {self.alignments_path}'
else:
align_cmd += f' {self.alignments_path}'
self.run_cmd(align_cmd)
class MixcrAssemble(CapTask):
module_description = """
This module builds assemblies of T/B Cell immune receptors.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.align = MixcrAlign.from_cap_task(self)
self.pkg = self.align.pkg
def requires(self):
return self.pkg, self.align
@classmethod
def version(cls):
return 'v0.1.0'
def tool_version(self):
version = self.run_cmd(f'{self.pkg.bin} --version').stdout.decode('utf-8')
return version
@classmethod
def dependencies(cls):
return ["mixcr", MixcrAlign]
@classmethod
def _module_name(cls):
return 'tcems::mixcr_assemble'
def output(self):
out = {
'partial_1': self.get_target(f'partial_1', 'vdjca'),
'partial_2': self.get_target(f'partial_2', 'vdjca'),
'extended': self.get_target(f'extended', 'vdjca'),
'assembled': self.get_target(f'assembled_clones', 'clna'),
'report': self.get_target(f'assembly_report', 'txt'),
'contigs': self.get_target(f'full_clones', 'clns'),
}
return out
@property
def partial_1_path(self):
return self.output()[f'partial_1'].path
@property
def partial_2_path(self):
return self.output()[f'partial_2'].path
@property
def extended_path(self):
return self.output()[f'extended'].path
@property
def assembled_path(self):
return self.output()[f'assembled'].path
@property
def report_path(self):
return self.output()[f'report'].path
@property
def contigs_path(self):
return self.output()[f'contigs'].path
def _run(self):
cmds = [
f'assemblePartial {self.align.alignments_path} {self.partial_1_path}',
f'assemblePartial {self.partial_1_path} {self.partial_2_path}',
f'extend {self.partial_2_path} {self.extended_path}',
f'assemble --write-alignments --report {self.report_path} {self.extended_path} {self.assembled_path}',
f'assembleContigs --report {self.report_path} {self.assembled_path} {self.contigs_path}',
]
for cmd in cmds:
cmd = f'{self.pkg.bin} ' + cmd
self.run_cmd(cmd)
class MixcrClones(CapTask):
module_description = """
This module identifies clonal sequences in VDJ sequences.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.assemble = MixcrAssemble.from_cap_task(self)
self.pkg = self.assemble.pkg
def requires(self):
return self.pkg, self.assemble
@classmethod
def version(cls):
return 'v0.1.0'
def tool_version(self):
version = self.run_cmd(f'{self.pkg.bin} --version').stdout.decode('utf-8')
return version
@classmethod
def dependencies(cls):
return ["mixcr", MixcrAssemble]
@classmethod
def _module_name(cls):
return 'tcems::mixcr_clones'
def output(self):
out = {
'igh': self.get_target(f'full_clones_IGH', 'txt'),
}
return out
@property
def igh_path(self):
return self.output()[f'igh'].path
def _run(self):
cmds = [
f'exportClones -c IGH -p fullImputed {self.assemble.contigs_path} {self.igh_path}',
]
for cmd in cmds:
cmd = f'{self.pkg.bin} ' + cmd
self.run_cmd(cmd)
| 5,358 |
h2o-py/tests/testdir_misc/pyunit_frame_from_pandas.py
|
ahmedengu/h2o-3
| 6,098 |
2169606
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import h2o
import pandas as pd
from tests import pyunit_utils
def test_pandas_to_h2oframe():
def compare_frames(h2ofr, pdfr, colnames=None):
if not colnames:
colnames = list(pdfr.columns)
assert h2ofr.shape == pdfr.shape
assert h2ofr.columns == colnames, "Columns differ: %r vs %r" % (h2ofr.columns, colnames)
for i in range(len(h2ofr.columns)):
s1 = pdfr[pdfr.columns[i]].tolist()
s2 = h2ofr[colnames[i]].as_data_frame()[colnames[i]].tolist()
assert s1 == s2, ("The columns are different: h2oframe[%d] = %r, pdframe[%d] = %r"
% (i, s1, i, s2))
pddf = pd.DataFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"], "three": [0, 5.2, 14]})
h2odf1 = h2o.H2OFrame.from_python(pddf)
h2odf2 = h2o.H2OFrame.from_python(pddf, column_names=["A", "B", "C"])
h2odf3 = h2o.H2OFrame(pddf)
compare_frames(h2odf1, pddf)
compare_frames(h2odf2, pddf, ["A", "B", "C"])
compare_frames(h2odf3, pddf)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_pandas_to_h2oframe)
else:
test_pandas_to_h2oframe()
| 1,195 |
ogb_lsc/pcq/datasets.py
|
kawa-work/deepmind-research
| 10,110 |
2172622
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PCQM4M-LSC datasets."""
import functools
import pickle
from typing import Dict, List, Tuple, Union
import numpy as np
from ogb import lsc
NUM_VALID_SAMPLES = 380_670
NUM_TEST_SAMPLES = 377_423
NORMALIZE_TARGET_MEAN = 5.690944545356371
NORMALIZE_TARGET_STD = 1.1561347795107815
def load_splits() -> Dict[str, List[int]]:
"""Loads dataset splits."""
dataset = _get_pcq_dataset(only_smiles=True)
return dataset.get_idx_split()
def load_kth_fold_indices(data_root: str, k_fold_split_id: int) -> List[int]:
"""Loads k-th fold indices."""
fname = f"{data_root}/k_fold_splits/{k_fold_split_id}.pkl"
return list(map(int, _load_pickle(fname)))
def load_all_except_kth_fold_indices(data_root: str, k_fold_split_id: int,
num_k_fold_splits: int) -> List[int]:
"""Loads indices except for the kth fold."""
if k_fold_split_id is None:
raise ValueError("Expected integer value for `k_fold_split_id`.")
indices = []
for index in range(num_k_fold_splits):
if index != k_fold_split_id:
indices += load_kth_fold_indices(data_root, index)
return indices
def load_smile_strings(
with_labels=False) -> List[Union[str, Tuple[str, np.ndarray]]]:
"""Loads the smile strings in the PCQ dataset."""
dataset = _get_pcq_dataset(only_smiles=True)
smiles = []
for i in range(len(dataset)):
smile, label = dataset[i]
if with_labels:
smiles.append((smile, label))
else:
smiles.append(smile)
return smiles
@functools.lru_cache()
def load_cached_conformers(cached_fname: str) -> Dict[str, np.ndarray]:
"""Returns cached dict mapping smile strings to conformer features."""
return _load_pickle(cached_fname)
@functools.lru_cache()
def _get_pcq_dataset(only_smiles: bool):
return lsc.PCQM4MDataset(only_smiles=only_smiles)
def _load_pickle(fname: str):
with open(fname, "rb") as f:
return pickle.load(f)
| 2,504 |
callbacks_routers/risk_calculators_infection.py
|
jchudb93/website
| 0 |
2172140
|
import os
import base64
from io import BytesIO
import pickle
import dash_html_components as html
from dash.dependencies import Input, Output, State, ALL
from risk_calculator.infection.calculator import predict_risk_infec, get_languages
from risk_calculator.features import build_feature_cards, build_feature_importance_graph, oxygen_options
from risk_calculator.utils import build_lab_ques_card, labs_ques, valid_input, switch_oxygen, get_oxygen_info
def register_callbacks(app):
with open('assets/risk_calculators/infection/model_with_lab.pkl', 'rb') as labs, \
open('assets/risk_calculators/infection/model_without_lab.pkl', 'rb') as no_labs:
labs = pickle.load(labs)
no_labs = pickle.load(no_labs)
labs_model = labs["model"]
labs_features = labs["json"]
labs_imputer = labs["imputer"]
labs_explainer = labs["explainer"]
labs_cols = labs["columns"]
labs_auc = labs["AUC"]
labs_population = [labs["Size Training"],labs["Size Test"]]
labs_positive = [labs["Percentage Training"],labs["Percentage Test"]]
no_labs_model = no_labs["model"]
no_labs_features = no_labs["json"]
no_labs_imputer = no_labs["imputer"]
no_labs_explainer = no_labs["explainer"]
no_labs_cols = no_labs["columns"]
no_labs_auc = no_labs["AUC"]
no_labs_population = [no_labs["Size Training"],no_labs["Size Test"]]
no_labs_positive = [no_labs["Percentage Training"],no_labs["Percentage Test"]]
languages = get_languages(
labs_auc,
labs_population,
labs_positive,
no_labs_auc,
no_labs_population,
no_labs_positive,
)
oxygen_in_infec, oxygen_infec_ind = get_oxygen_info(no_labs_cols,no_labs_features["numeric"])
#displaying shap image
@app.server.route("/")
def display_fig_infec(img, close_all=True):
imgByteArr = BytesIO()
img.savefig(imgByteArr, format='PNG')
imgByteArr = imgByteArr.getvalue()
encoded=base64.b64encode(imgByteArr)
return 'data:image/png;base64,{}'.format(encoded.decode())
@app.callback(
Output('page-desc-infection', 'children'),
[Input('language-calc-infection', 'value')])
def infection_page_desc(language):
return languages["page_desc_infection"][language]
@app.callback(
Output('lab_values_indicator_infection_text', 'children'),
[Input('language-calc-infection', 'value')])
def infection_labs_card(language):
return build_lab_ques_card(language)
@app.callback(
Output('lab_values_indicator_infection', 'options'),
[Input('language-calc-infection', 'value')])
def infection_labs_card_options(language):
return [{'label': labs_ques(x,language), 'value': x} for x in [1,0]]
@app.callback(
Output('features-infection-text', 'children'),
[Input('language-calc-infection', 'value')])
def infection_labs_card_text(language):
return html.H5(languages["insert_feat_text"][language])
@app.callback(
Output('infection-model-desc', 'children'),
[Input('lab_values_indicator_infection', 'value'),
Input('language-calc-infection', 'value')])
def get_infection_model_desc(labs,language):
return languages["technical_details_infection_labs"][language] if labs else languages["technical_details_infection_no_labs"][language]
if oxygen_in_infec:
@app.callback(
Output("calc-numeric-{}-wrapper-infection-nolabs".format(oxygen_infec_ind), 'children'),
[Input('oxygen-answer-infection', 'value'),
Input('language-calc-infection', 'value')])
def get_oxygen_infection(have_val,language):
return oxygen_options(
oxygen_infec_ind,
False,
have_val,
languages["oxygen"][language],
language
)
@app.callback(
Output('feature-importance-bar-graph-infection', 'children'),
[Input('lab_values_indicator_infection', 'value')])
def get_infection_model_feat_importance(labs):
return build_feature_importance_graph(False,labs)
@app.callback(
Output('features-infection', 'children'),
[Input('lab_values_indicator_infection', 'value'),
Input('language-calc-infection', 'value')])
def get_infection_model_feat_cards(labs,language):
if labs:
return build_feature_cards(labs_features,False,labs,language)
return build_feature_cards(no_labs_features,False,labs,language)
@app.callback(
Output('submit-features-calc-infection', 'n_clicks'),
[Input('lab_values_indicator_infection', 'value')])
def reset_submit_button_infection(labs):
return 0
@app.callback(
Output('submit-features-calc-infection', 'children'),
[Input('language-calc-infection', 'value')])
def set_submit_button_infection(language):
return languages["submit"][language],
@app.callback(
[Output('score-calculator-card-body-infection', 'children'),
Output('calc-input-error-infection', 'children'),
Output('imputed-text-infection', 'children'),
Output('visual-1-infection', 'src'),
Output('visual-1-infection', 'style'),
Output('visual-1-infection-explanation', 'children')],
[Input('language-calc-infection', 'value'),
Input('submit-features-calc-infection', 'n_clicks'),
Input('lab_values_indicator_infection', 'value')],
[State({'type': 'infection', 'index': ALL}, 'value'),
State({'type': 'temperature', 'index': ALL}, 'value')]
)
def calc_risk_score_infection(*argv):
language = argv[0]
default = html.H4(languages["results_card_infection"][language][0],className="score-calculator-card-content-infection"),
submit = argv[1]
labs = argv[2]
feats = argv[3:-1]
temp_unit = argv[-1]
if not labs and oxygen_in_infec:
feats = switch_oxygen(feats,oxygen_infec_ind)
#if submit button was clicked
if submit > 0:
x = feats
if labs:
valid, err, x = valid_input(labs_features["numeric"],x[0],len(labs_features["numeric"]),language)
else:
valid, err, x = valid_input(no_labs_features["numeric"],x[0],len(no_labs_features["numeric"]),language)
if valid:
if labs:
score, imputed, fig = predict_risk_infec(labs_cols,labs_model,labs_features,labs_imputer,labs_explainer,x,temp_unit,languages["results_card_infection"][language],language)
else:
score, imputed, fig = predict_risk_infec(no_labs_cols,no_labs_model,no_labs_features,no_labs_imputer,no_labs_explainer,x,temp_unit,languages["results_card_infection"][language],language)
if fig:
image = display_fig_infec(fig)
else:
image = ''
return score,'',imputed,image,{"height":200},languages["visual_1"][language]
else:
return default,err,'','',{},''
#user has not clicked submit
return default,'','','',{},''
| 7,320 |
git_tests/third_file.py
|
dmchu/selenium_gr_5
| 0 |
2171768
|
# 2. Round 5.23222 to two decimal places
x = 5.23225
y = round(x,2)
z = round(x,3)
print(round(z + y))
| 107 |
processing/network.py
|
FlorianPix/Collaboration_Network
| 1 |
2172774
|
"""network functions"""
import itertools
from typing import Optional
import networkx as nx
from processing.model import Coordinates, Location, Paper
from processing.util import progressbar
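# Both builders treat a paper's affiliation list as a clique: build_city_graph adds one unit
# of edge weight for every unordered pair of distinct, geocoded locations on the same paper,
# while build_country_graph first collapses locations to their country and also counts
# same-country pairs (via combinations_with_replacement), so single-country papers contribute
# self-loop weight.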
def build_city_graph(papers: list[Paper], coords: dict[Location, Optional[Coordinates]]) \
-> nx.Graph:
"""
create graph of locations that have coordinates with number of co-occurrences as edge weights
"""
graph = nx.Graph()
for paper in progressbar(papers, "building graph: "):
valid_locations = filter(lambda l: l in coords and coords[l] is not None, list(set(paper.locations)))
for (l_1, l_2) in itertools.combinations(valid_locations, 2):
if graph.has_edge(l_1, l_2):
graph[l_1][l_2]['weight'] += 1
else:
graph.add_edge(l_1, l_2, weight=1)
return graph
def build_country_graph(papers: list[Paper], coords: dict[Location, Optional[Coordinates]]) -> nx.Graph:
"""
create graph of locations with number of co-occurrences as edge weights
"""
graph = nx.Graph()
for paper in progressbar(papers, "building graph: "):
valid_locations = filter(lambda l: l in coords and coords[l] is not None, list(set(paper.locations)))
valid_locations = {Location(city=None, state=None, country=l.country) for l in valid_locations}
for (l_1, l_2) in itertools.combinations_with_replacement(valid_locations, 2):
l_1: Location
l_2: Location
if graph.has_edge(l_1, l_2):
graph[l_1][l_2]['weight'] += 1
else:
graph.add_edge(l_1, l_2, weight=1)
return graph
| 1,656 |
test/test_container.py
|
wiryonolau/python-easydi
| 1 |
2172334
|
import os
import sys
import unittest
import traceback
import inspect
from easydi import *
class ProviderA:
def __init__(self, value):
self._value = "{}.{}".format("A", value)
@property
def value(self):
return self._value
class ProviderB:
def __init__(self, value):
self._value = "{}.{}".format("B", value)
@property
def value(self):
return self._value
class ServiceA:
def __init__(self, provider):
if not isinstance(provider, (ProviderA, ProviderB)):
raise Exception("Invalid Provider")
self._provider = provider
@property
def provider(self):
return self._provider
class ServiceB:
def __init__(self, config_value):
self._config_value = config_value
def get(self):
return self._config_value
class ServiceC:
def __init__(self, providers):
self._providers = providers
@property
def providers(self):
return self._providers
def has(self, provider_class):
for p in self._providers:
if isinstance(p, provider_class):
return True
return False
class Config:
def __init__(self):
self._config = {
"section1" : {
"key1" : "1",
"key2" : 2
}
}
def get(self, name, placeholder=None, value_format=None):
section, key = name.split(".")
value = None
try:
value = self._config[section][key]
except:
value = placeholder
if value_format is not None:
return value_format(value)
return value
def set(self, section, key, value):
if section not in self._config:
self._config[section] = {}
self._config[section][key] = value
class EasyDiTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._container = Container()
self._container.register(Config, _config=True)
self._config = self.retrieve_instance(Config).instance()
def retrieve_class_path(self, obj):
if not inspect.isclass(obj):
raise Exception("Object must be a class")
paths = obj.__module__.split(".")
paths.append(obj.__qualname__)
return paths
def retrieve_instance(self, obj):
class_path = self.retrieve_class_path(obj)
for path in class_path:
if isinstance(obj, ObjectFactoryMap):
obj = getattr(obj, path)
else:
obj = getattr(self._container, path)
return obj
class TestDependency(EasyDiTest):
def test(self):
self._container.register(ProviderA, DependencyConfig("section1.key1"))
self._container.register(ServiceA, ProviderA)
s1 = self.retrieve_instance(ServiceA).instance()
s2 = self.retrieve_instance(ServiceA).instance()
s3 = self.retrieve_instance(ServiceA).build()
self.assertTrue(isinstance(s1, ServiceA))
self.assertTrue(s1 == s2)
self.assertTrue(s1 != s3)
class TestDependencyConfig(EasyDiTest):
def test(self):
self._container.register(ServiceB, DependencyConfig("section1.key1", None))
self.assertTrue(isinstance(self._config, Config))
self.assertTrue(self._config.get("section1.key1") == "1")
self.assertTrue(self._config.get("section1.key3", "3") == "3")
self.assertTrue(self._config.get("section1.key1", value_format=int) == 1)
s1 = self.retrieve_instance(ServiceB).instance()
self.assertTrue(isinstance(s1, ServiceB))
self.assertTrue(s1.get() == "1")
class TestDependencyPath(EasyDiTest):
def test(self):
self._container.register(ProviderA, DependencyConfig("section1.key2"))
self._container.register(ServiceA, DependencyPath("test.test_container.ProviderA"))
s1 = self.retrieve_instance(ServiceA).instance()
self.assertTrue(isinstance(s1, ServiceA))
class TestDependencyCallback(EasyDiTest):
def test(self):
# Test not registering ProviderB
self._container.register(ServiceA, DependencyCallback(self.callback))
self._container.register(ProviderA)
s1 = self.retrieve_instance(ServiceA).instance()
self.assertTrue(isinstance(s1, ServiceA))
self.assertTrue(isinstance(s1.provider, ProviderA))
self.assertTrue(s1.provider.value == "A.2")
# Update config and check if instance change
self._config.set("section1", "key1", 2)
s1 = self.retrieve_instance(ServiceA).instance()
self.assertFalse(isinstance(s1.provider, ProviderB))
# Check if new instance change
s1 = self.retrieve_instance(ServiceA).build()
self.assertTrue(isinstance(s1.provider, ProviderB))
self.assertTrue(s1.provider.value == "B.test")
def callback(self, container):
config = container["_config"].instance()
if config.get("section1.key1") == "1":
return ProviderA(config.get("section1.key2"))
else:
return ProviderB(config.get("section1.key3", "test"))
class TestDependencyGroup(EasyDiTest):
def test(self):
self._container.register(ProviderA, DependencyConfig("section1.key2"), _group="providers")
self._container.register(ProviderB, DependencyConfig("section1.key2"), _group="providers")
self._container.register(ServiceC, DependencyGroup("providers"))
s1 = self.retrieve_instance(ServiceC).instance()
self.assertTrue((s1.has(ProviderA) and s1.has(ProviderB)))
| 5,646 |
metaopt/objective/integer/failing/__init__.py
|
cigroup-ol/metaopt
| 8 |
2171721
|
# -*- coding: utf-8 -*-
"""
Package of failing integer functions.
"""
from metaopt.objective.integer.failing.f import f as f
from metaopt.objective.integer.failing.g import f as g
FUNCTIONS_FAILING = [f, g]
| 209 |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/program_enrollments/models.py
|
osoco/better-ways-of-thinking-about-software
| 3 |
2170800
|
"""
Django model specifications for the Program Enrollments API
"""
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField
from simple_history.models import HistoricalRecords
from user_util import user_util
from common.djangoapps.student.models import CourseEnrollment
from .constants import ProgramCourseEnrollmentRoles, ProgramCourseEnrollmentStatuses, ProgramEnrollmentStatuses
class ProgramEnrollment(TimeStampedModel):
"""
This is a model for Program Enrollments from the registrar service
.. pii: PII is found in the external key for a program enrollment
.. pii_types: other
.. pii_retirement: local_api
"""
STATUS_CHOICES = ProgramEnrollmentStatuses.__MODEL_CHOICES__
class Meta:
app_label = "program_enrollments"
# A student enrolled in a given (program, curriculum) should always
# have a non-null ``user`` or ``external_user_key`` field (or both).
unique_together = (
('user', 'program_uuid', 'curriculum_uuid'),
('external_user_key', 'program_uuid', 'curriculum_uuid'),
)
user = models.ForeignKey(
User,
null=True,
blank=True, on_delete=models.CASCADE
)
external_user_key = models.CharField(
db_index=True,
max_length=255,
null=True
)
program_uuid = models.UUIDField(db_index=True, null=False)
curriculum_uuid = models.UUIDField(db_index=True, null=False)
status = models.CharField(max_length=9, choices=STATUS_CHOICES)
historical_records = HistoricalRecords()
def clean(self):
if not (self.user or self.external_user_key):
raise ValidationError(_('One of user or external_user_key must not be null.'))
@classmethod
def retire_user(cls, user_id):
"""
With the parameter user_id, retire the external_user_key field
Return True if there is data that was retired
Return False if there is no matching data
"""
enrollments = cls.objects.filter(user=user_id)
if not enrollments:
return False
for enrollment in enrollments:
retired_external_key = user_util.get_retired_external_key(
enrollment.external_user_key,
settings.RETIRED_USER_SALTS,
)
enrollment.historical_records.update(external_user_key=retired_external_key)
enrollment.external_user_key = retired_external_key
enrollment.save()
return True
def __str__(self):
return f'[ProgramEnrollment id={self.id}]'
def __repr__(self):
return ( # lint-amnesty, pylint: disable=missing-format-attribute
"<ProgramEnrollment" # pylint: disable=missing-format-attribute
" id={self.id}"
" user={self.user!r}"
" external_user_key={self.external_user_key!r}"
" program_uuid={self.program_uuid!r}"
" curriculum_uuid={self.curriculum_uuid!r}"
" status={self.status!r}"
">"
).format(self=self)
class ProgramCourseEnrollment(TimeStampedModel):
"""
This is a model to represent a learner's enrollment in a course
in the context of a program from the registrar service
.. no_pii:
"""
STATUS_CHOICES = ProgramCourseEnrollmentStatuses.__MODEL_CHOICES__
class Meta:
app_label = "program_enrollments"
# For each program enrollment, there may be only one
# waiting program-course enrollment per course key.
unique_together = (
('program_enrollment', 'course_key'),
)
program_enrollment = models.ForeignKey(
ProgramEnrollment,
on_delete=models.CASCADE,
related_name="program_course_enrollments"
)
# In Django 2.x, we should add a conditional unique constraint to this field so
# no duplicated tuple of (course_enrollment_id, status=active) exists
# MST-168 is the Jira ticket to accomplish this once Django is upgraded
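    # A hedged sketch of that conditional constraint (assumes Django >= 2.2, where
    # UniqueConstraint accepts a `condition`; the constraint name is illustrative only):
    #
    #     class Meta:
    #         constraints = [
    #             models.UniqueConstraint(
    #                 fields=['course_enrollment'],
    #                 condition=models.Q(status='active'),
    #                 name='unique_active_course_enrollment',
    #             ),
    #         ]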
course_enrollment = models.ForeignKey(
CourseEnrollment,
null=True,
blank=True,
on_delete=models.CASCADE
)
course_key = CourseKeyField(max_length=255)
status = models.CharField(max_length=9, choices=STATUS_CHOICES)
historical_records = HistoricalRecords()
@property
def is_active(self):
return self.status == ProgramCourseEnrollmentStatuses.ACTIVE
def __str__(self):
return f'[ProgramCourseEnrollment id={self.id}]'
def __repr__(self):
return ( # lint-amnesty, pylint: disable=missing-format-attribute
"<ProgramCourseEnrollment" # pylint: disable=missing-format-attribute
" id={self.id}"
" program_enrollment={self.program_enrollment!r}"
" course_enrollment=<{self.course_enrollment}>"
" course_key={self.course_key}"
" status={self.status!r}"
">"
).format(self=self)
class CourseAccessRoleAssignment(TimeStampedModel):
"""
This model represents a role that should be assigned to the eventual user of a pending enrollment.
.. no_pii:
"""
class Meta:
unique_together = ('role', 'enrollment')
role = models.CharField(max_length=64, choices=ProgramCourseEnrollmentRoles.__MODEL_CHOICES__)
enrollment = models.ForeignKey(ProgramCourseEnrollment, on_delete=models.CASCADE)
def __str__(self):
return f'[CourseAccessRoleAssignment id={self.id}]'
def __repr__(self):
return ( # lint-amnesty, pylint: disable=missing-format-attribute
"<CourseAccessRoleAssignment" # pylint: disable=missing-format-attribute
" id={self.id}"
" role={self.role!r}"
" enrollment={self.enrollment!r}"
">"
).format(self=self)
| 6,150 |
kslurm/style/__init__.py
|
pvandyken/cluster_utils
| 1 |
2172410
|
__submodules__ = ["console"]
# <AUTOGEN_INIT>
from kslurm.style.console import (
console,
)
__all__ = ["console"]
# </AUTOGEN_INIT>
| 139 |
model/_3d/grad_accum.py
|
innat/BraTS-MGMT-Classification
| 2 |
2172413
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 17 14:05:53 2021
@author: innat
"""
import tensorflow as tf
class BrainTumorModel3D(tf.keras.Model):
def __init__(self, model, n_gradients=1):
super(BrainTumorModel3D, self).__init__()
self.model = model
self.n_gradients = tf.constant(n_gradients, dtype=tf.int32)
self.n_acum_step = tf.Variable(0, dtype=tf.int32, trainable=False)
self.gradient_accumulation = [tf.Variable(tf.zeros_like(v, dtype=tf.float32),
trainable=False) for v in self.model.trainable_variables]
def train_step(self, data):
self.n_acum_step.assign_add(1)
images, labels = data
with tf.GradientTape() as tape:
predictions = self.model(images, training=True)
loss = self.compiled_loss(labels,
predictions,
regularization_losses=[self.reg_l2_loss()])
gradients = tape.gradient(loss, self.model.trainable_variables)
# Accumulate batch gradients
for i in range(len(self.gradient_accumulation)):
self.gradient_accumulation[i].assign_add(gradients[i])
# If n_acum_step reach the n_gradients then we apply accumulated gradients
# to update the variables otherwise do nothing
tf.cond(tf.equal(self.n_acum_step, self.n_gradients), self.apply_accu_gradients, lambda: None)
self.compiled_metrics.update_state(labels, predictions)
return {m.name: m.result() for m in self.metrics}
def test_step(self, data):
images, labels = data
predictions = self.model(images, training=False)
loss = self.compiled_loss(labels, predictions,
regularization_losses=[self.reg_l2_loss()])
self.compiled_metrics.update_state(labels, predictions)
return {m.name: m.result() for m in self.metrics}
def call(self, inputs, *args, **kwargs):
return self.model(inputs)
def reg_l2_loss(self, weight_decay = 1e-5):
return weight_decay * tf.add_n([
tf.nn.l2_loss(v)
for v in self.model.trainable_variables
])
def apply_accu_gradients(self):
# apply accumulated gradients
self.optimizer.apply_gradients(zip(self.gradient_accumulation,
self.model.trainable_variables))
# reset
self.n_acum_step.assign(0)
for i in range(len(self.gradient_accumulation)):
self.gradient_accumulation[i].assign(
tf.zeros_like(
self.model.trainable_variables[i], dtype=tf.float32)
)
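if __name__ == '__main__':
    # Minimal usage sketch (layer choices and shapes are assumptions, not taken from the
    # original repo): gradients of `n_gradients` mini-batches are summed before a single
    # optimizer step, so the effective batch size is batch_size * n_gradients.
    inp = tf.keras.Input(shape=(32, 32, 32, 1))
    x = tf.keras.layers.Conv3D(4, 3, activation='relu')(inp)
    x = tf.keras.layers.GlobalAveragePooling3D()(x)
    out = tf.keras.layers.Dense(1, activation='sigmoid')(x)
    base = tf.keras.Model(inp, out)
    model = BrainTumorModel3D(base, n_gradients=4)
    model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    xs = tf.random.normal((8, 32, 32, 32, 1))
    ys = tf.cast(tf.random.uniform((8, 1), maxval=2, dtype=tf.int32), tf.float32)
    model.fit(xs, ys, batch_size=2, epochs=1, verbose=0)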
| 2,772 |
code/pgms/fib-num.py
|
souradeepta/PythonPractice
| 0 |
2171171
|
def fib_series(N):
first = 0
second = 1
ans = []
i = 0
ans.append(first)
ans.append(second)
while i < N-2:
temp = second
second = first + second
first = temp
ans.append( second)
i+=1
return ans
def fib_recur(N):
    # recursive definition: fib(0)=0, fib(1)=1, fib(N)=fib(N-1)+fib(N-2)
    if N < 2:
        return N
    return fib_recur(N - 1) + fib_recur(N - 2)
print(fib_series(10))
| 382 |
pinterest.py
|
shaikhsajid1111/social-media-profile-scrapers
| 139 |
2171578
|
try:
import argparse
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FirefoxOptions
import json
from fake_headers import Headers
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
except ModuleNotFoundError:
print("Please download dependencies from requirement.txt")
except Exception as ex:
print(ex)
class Pinterest:
    '''This class scrapes Pinterest and returns a dict containing all user data'''
@staticmethod
def init_driver(browser_name:str):
"""Initialize webdriver"""
def set_properties(browser_option):
"""Set Properties of webdriver"""
ua = Headers().generate() #fake user agent
browser_option.add_argument('--headless')
browser_option.add_argument('--disable-extensions')
browser_option.add_argument('--incognito')
browser_option.add_argument('--disable-gpu')
browser_option.add_argument('--log-level=3')
browser_option.add_argument(f'user-agent={ua}')
browser_option.add_argument('--disable-notifications')
browser_option.add_argument('--disable-popup-blocking')
return browser_option
try:
browser_name = browser_name.strip().title()
ua = Headers().generate() #fake user agent
#automating and opening URL in headless browser
if browser_name.lower() == "chrome":
browser_option = ChromeOptions()
browser_option = set_properties(browser_option)
driver = webdriver.Chrome(ChromeDriverManager().install(),options=browser_option) #chromedriver's path in first argument
elif browser_name.lower() == "firefox":
browser_option = FirefoxOptions()
browser_option = set_properties(browser_option)
driver = webdriver.Firefox(executable_path=GeckoDriverManager().install(),options=browser_option)
else:
driver = "Browser Not Supported!"
return driver
except Exception as ex:
print(ex)
@staticmethod
def scrap(username,browser_name):
try:
URL = 'https://in.pinterest.com/{}'.format(username)
try:
driver = Pinterest.init_driver(browser_name)
driver.get(URL)
except AttributeError:
print("Driver is not set")
exit()
wait = WebDriverWait(driver, 10)
element = wait.until(EC.title_contains("Pinterest"))
            script = driver.find_element(By.ID, "initial-state").get_attribute("innerHTML")  # By locator; find_element_by_id was removed in newer Selenium releases
json_data = json.loads(script)
data = json_data['resourceResponses'][0]['response']['data']
user_data = data['user']
is_verified_merchant = user_data['is_verified_merchant']
full_name = user_data['full_name']
impressum_url = user_data['impressum_url']
pin_count = user_data['pin_count']
domain_url = user_data['domain_url']
profile_image = user_data['image_xlarge_url']
bio = user_data['about']
board_count = user_data['board_count']
is_indexed = user_data['indexed']
follower = user_data['follower_count']
following = user_data['following_count']
country = user_data['country']
location = user_data['location']
profile_data = {
'full_name' : full_name,
'profile_image' : profile_image,
'followers' : follower,
'followings' : following,
'bio' : bio,
'country' : country,
'impressum_url' : impressum_url,
'website' : domain_url,
'board_count' : board_count,
'location' : location,
'pin_count' : pin_count,
'is_verified' : is_verified_merchant,
}
driver.close()
driver.quit()
return profile_data
except Exception as ex:
driver.close()
driver.quit()
print(ex)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("username",help="username to search")
parser.add_argument("--browser",help="What browser your PC have?")
args = parser.parse_args()
browser_name = args.browser if args.browser is not None else "chrome"
print(Pinterest.scrap(args.username,browser_name))
#last updated - 11th September,2020
| 5,077 |
Error.py
|
lindseymardona/msd-exploration
| 0 |
2171809
|
class Error(Exception):
"""Base class for other exceptions"""
pass
class EmptyListError(Error):
"""Raised when the column is empty"""
pass
| 155 |
Chapter14/r2_paths_auth/my_library/models/sample_auth_http.py
|
a17juanbl/exercicios
| 125 |
2172081
|
# -*- coding: utf-8 -*-
# /!\/!\/!\/!\/!\/!\/!\/!\
# Note that this is just a sample code
# You need to add this file in __init__.py
# /!\/!\/!\/!\/!\/!\/!\/!\
from odoo import exceptions, models
from odoo.http import request
class IrHttp(models.AbstractModel):
_inherit = 'ir.http'
@classmethod
def _auth_method_base_group_user(cls):
cls._auth_method_user()
if not request.env.user.has_group('base.group_user'):
raise exceptions.AccessDenied()
# this is for the exercise
@classmethod
def _auth_method_groups(cls, group_xmlids=None):
cls._auth_method_user()
if not any(map(request.env.user.has_group, group_xmlids or [])):
raise exceptions.AccessDenied()
    # the controller will look like this; add it to main.py (it also needs "from odoo import http")
@http.route('/my_module/all-books/group_user', type='http',
auth='base_group_user')
def all_books_mine_base_group_user(self):
# your code
return ...
# this is for the exercise
@http.route('/my_module/all-books/groups', type='http',
auth='groups(base.group_no_one)')
def all_books_mine_groups(self):
# your code
return ...
| 1,210 |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/portal/models/res_groups.py
|
gtfarng/Odoo_migrade
| 1 |
2172481
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class PortalGroup(models.Model):
""" A portal is simply a group of users with the flag 'is_portal' set to True.
The flag 'is_portal' makes a user group usable as a portal.
"""
_inherit = 'res.groups'
is_portal = fields.Boolean('Portal', help="If checked, this group is usable as a portal.")
| 450 |
asyncheventloop.py
|
hicke/Training
| 0 |
2171862
|
import asyncio
import requests
import bs4 as bs
url_1 = 'https://stackoverflow.com/questions/33357233/when-to-use-and-when-not-to-use-python-3-5-await/33399896#33399896'
url_2 = 'https://docs.python.org/3/library/asyncio-task.html'
def download(url):
    # plain synchronous fetch + parse, kept for comparison with the async version
    sauce = requests.get(url)
    return bs.BeautifulSoup(sauce.text, 'lxml')
async def asyncdownloader(url):
    # note: requests is blocking, so this mainly demonstrates the asyncio syntax
    for a in range(10):
        a_sauce = requests.get(url)
        a_soup = bs.BeautifulSoup(a_sauce.text, 'lxml')
        print(a)
#download('https://www.nytimes.com/books/best-sellers/')
#asyncdownloader('https://www.nytimes.com/books/best-sellers/')
async def main():
    # 'await' is only valid inside a coroutine, so gather the downloads here
    await asyncio.gather(
        asyncdownloader(url_1),
        asyncdownloader(url_2)
    )
asyncio.run(main())
| 647 |
src/python/nn/training.py
|
PeterJackNaylor/NucSeg
| 0 |
2171395
|
from google.protobuf.descriptor import Error
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
import segmentation_models as sm
from augmentation import setup_datahandler
from metric.dist_metrics import AccuracyDistance, F1_ScoreDistance
def options():
parser = argparse.ArgumentParser(description="setting up training")
parser.add_argument("--path_train", type=str)
parser.add_argument("--path_validation", type=str)
parser.add_argument("--backbone", type=str, default="resnet50")
parser.add_argument("--model", type=str, default="Unet")
parser.add_argument("--encoder", type=str, default="imagenet")
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--learning_rate", type=float, default=1e-3)
parser.add_argument("--weight_decay", type=float, default=5e-3)
parser.add_argument("--loss", type=str, default="CE")
args = parser.parse_args()
args.encoder = args.encoder if args.encoder == "imagenet" else None
if args.loss == "CE":
loss = "binary_crossentropy"
activation = "sigmoid"
metrics = [
sm.metrics.IOUScore(threshold=0.5),
sm.metrics.FScore(threshold=0.5),
"binary_accuracy",
tf.keras.metrics.AUC()
]
early_stopping_var = "val_f1-score"
elif args.loss == "focal":
loss = sm.losses.CategoricalFocalLoss()
activation = "sigmoid"
metrics = [
sm.metrics.IOUScore(threshold=0.5),
sm.metrics.FScore(threshold=0.5),
"binary_accuracy",
tf.keras.metrics.AUC(),
]
early_stopping_var = "val_f1-score"
elif args.loss == "mse":
loss = "mse"
activation = "relu"
metrics = ["mse", AccuracyDistance(), F1_ScoreDistance()]
early_stopping_var = "val_f1_score_d"
else:
raise Error("unknown loss, not implemented")
args.k_loss = loss
args.classes = 1
args.activation = activation
args.metrics = metrics
args.early_stopping_var = early_stopping_var
if args.model == "Unet":
model_f = sm.Unet
elif args.model == "FPN":
model_f = sm.FPN
elif args.model == "Linknet":
model_f = sm.Linknet
elif args.model == "PSPNet":
model_f = sm.PSPNet
else:
raise Error(f"unknown model: {args.model}, not implemented")
args.model_f = model_f
return args
def get_Xy(path):
data = np.load(path)
x, y = data["x"], data["y"]
# y = tf.keras.utils.to_categorical(y, num_classes=2)
x = x.astype("uint8")
y = y.astype("uint8")
return x, y
def load_data(opts):
x_t, y_t = get_Xy(opts.path_train)
x_v, y_v = get_Xy(opts.path_validation)
return x_t, y_t, x_v, y_v
def main():
opt = options()
x_train, y_train, x_val, y_val = load_data(opt)
image_size = 224
epochs = opt.epochs
ds_train, ds_val = setup_datahandler(
x_train,
y_train,
x_val,
y_val,
opt.batch_size,
opt.backbone,
image_size,
)
# define model
model = opt.model_f(
opt.backbone,
classes=opt.classes,
activation=opt.activation,
encoder_weights=opt.encoder,
)
model = sm.utils.set_regularization(
model,
kernel_regularizer=tf.keras.regularizers.l2(opt.weight_decay),
bias_regularizer=tf.keras.regularizers.l2(opt.weight_decay),
)
optimizer = tf.keras.optimizers.Adam(learning_rate=opt.learning_rate)
model.compile(
optimizer,
loss=opt.k_loss,
metrics=opt.metrics,
)
callbacks = [
tf.keras.callbacks.EarlyStopping(
monitor="val_loss",
min_delta=0,
patience=epochs / 5,
verbose=0,
mode="auto",
baseline=None,
restore_best_weights=True,
),
tf.keras.callbacks.ReduceLROnPlateau(
monitor="val_loss", factor=0.2, patience=epochs / 10, min_lr=1e-6
),
tf.keras.callbacks.ModelCheckpoint(
'./model_weights.h5', save_weights_only=True,
save_best_only=True, mode='min'
),
]
# fit model
history = model.fit(
ds_train,
batch_size=opt.batch_size,
epochs=epochs,
validation_data=ds_val,
callbacks=callbacks,
max_queue_size=10,
workers=10,
use_multiprocessing=True,
verbose=1,
)
hist_df = pd.DataFrame(history.history)
with open("history.csv", mode="w") as f:
hist_df.to_csv(f)
if __name__ == "__main__":
main()
| 4,731 |
adventofcode/2020/07/b.py
|
nevivurn/cp
| 0 |
2172787
|
#!/usr/bin/env python3
import sys
rules = {}
for line in sys.stdin:
line = line.rstrip()[:-1]
name, rule = line.split(' contain ')
name = ' '.join(name.split()[:-1])
if rule == 'no other bags':
rules[name] = []
continue
contains = []
for r in rule.split(', '):
r = r.split()[:-1]
cnt = int(r[0])
rname = ' '.join(r[1:])
contains.append((rname, cnt))
rules[name] = contains
ans = {name: None for name in rules}
def dfs(name):
if ans[name]:
return ans[name]
cnt = 1
for (n, c) in rules[name]:
cnt += c*dfs(n)
ans[name] = cnt
return cnt
dfs('shiny gold')
print(ans['shiny gold'] - 1)
| 706 |
neptune/internal/cli/tracking.py
|
jiji-online/neptune-cli
| 0 |
2172656
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import platform
import time
import requests
from neptune import version
from neptune.internal.cli.commands.utils.git_utils import get_git_version
logger = logging.getLogger()
_TRACKING_URL = u'https://heapanalytics.com/api/track'
_TRACKING_APP_ID = u'3102000718'
class Timer(object):
    ''' Measure time elapsed in milliseconds. '''
def __init__(self):
self.start = self.end = self.elapsed = None
def __enter__(self):
self.start = time.clock()
return self
def __exit__(self, *args):
self.end = time.clock()
self.elapsed = (self.end - self.start) * 1000.0
def report_tracking_metrics(event, identity, timeout=1.0, **properties):
properties.update(
python_version=platform.python_version(),
platform=platform.platform(),
cli_version=version.__version__,
git=(get_git_version() or u'not available')
)
body = {
u'app_id': _TRACKING_APP_ID,
u'identity': identity,
u'event': event,
u'properties': properties
}
try:
requests.post(_TRACKING_URL, json=body, timeout=timeout)
logger.debug('Track sent (%s)', body)
except requests.exceptions.RequestException:
logger.debug('Track request failed.')
def provide_default_user_identity_function(offline_token_storage_service):
def get_user_identity_from_token():
token = offline_token_storage_service.load()
if token:
return token.access_token.preferred_username or 'unknown'
else:
return 'unknown'
return get_user_identity_from_token
| 2,238 |
Projects/Online Workouts/w3resource/Basic - Part-II/program-12.py
|
ivenpoker/Python-Projects
| 1 |
2172592
|
#!/usr/bin/env python3
#######################################################################
# #
# Program purpose: Find all permutations of a list. #
# Program Author : <NAME> <<EMAIL>> #
# Creation Date : September 5, 2019 #
# #
#######################################################################
import random
def random_integer_list(list_size=10):
data = []
for i in range(list_size):
data.append(random.choice(seq=range(list_size)))
return data
def find_permutations(nums=None):
if nums is None:
return []
result_perms = [[]]
for n in nums:
new_perms = []
for perm in result_perms:
for i in range(len(perm) + 1):
new_perms.append(perm[:i] + [n] + perm[i:])
result_perms = new_perms
return result_perms
if __name__ == "__main__":
random_list = random_integer_list(list_size=5)
perms = find_permutations(nums=random_list)
print(f"List: {random_list}")
print(f"Permutations:\n{perms}")
| 1,224 |
community/migrations/0003_auto_20200823_2259.py
|
freewarelovers/CommunityLovers
| 3 |
2172127
|
# Generated by Django 3.1 on 2020-08-23 21:59
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('community', '0002_communityowner'),
]
operations = [
migrations.AlterModelOptions(
name='community',
options={'verbose_name': 'Community', 'verbose_name_plural': 'Communitys'},
),
migrations.AlterModelOptions(
name='communityowner',
options={'verbose_name': 'Community owner', 'verbose_name_plural': 'Community owners'},
),
]
| 593 |
Suits.py
|
Milon34/Python_Learning
| 0 |
2172770
|
a=int(input())
b=int(input())
c=int(input())
d=int(input())
e=int(input())
f=int(input())
Mnf=0
Mns=0
Mnf=min(a,d)
Mns=min(b,c)
if (d > Mns):
Mns=abs(Mns-d)
d=abs(Mns-d)
Mns=e*Mns
d=f*d
print(Mns+d)
else:
print(Mnf*f)
| 243 |
build/lib/ImageLibrary/animations.py
|
oeick/robotframework-imagelibrary
| 0 |
2172615
|
from __future__ import absolute_import
from ImageLibrary.image_processor import ImageProcessor
from ImageLibrary import utils
class Animations(object):
def __init__(self, error_handler, output_dir):
self.error_handler = error_handler
self.output_dir = output_dir
#### ANIMATIONS ####
@utils.add_error_info
def wait_for_animation_stops(self, zone=None, timeout=15, threshold=0.9, step=0.1):
"""Wait until animation stops in the given zone or in the whole active window if zone is not provided.
        Pass _zone_, _timeout_, _step_, _threshold_ as arguments. All are optional.
Default values are given in the example.
Examples:
| Wait For Animation Stops | zone=zone_coordinates | timeout=15 | threshold=0.95 | step=0.1
"""
return ImageProcessor(self.error_handler, self.output_dir)._wait_for_animation_stops(zone, timeout, threshold, step)
@utils.add_error_info
def wait_for_animation_starts(self, zone=None, timeout=15, threshold=0.9, step=0.1):
"""Same as `Wait For Animation Stops` but on the contrary.
Pass _zone_, _timeout_, _step_, _thrreshold_ as arguments. All are optional.
Default values are given in the example.
Examples:
| Wait For Animation Starts | zone=zone_coordinates | timeout=15 | threshold=0.95 | step=0.1
"""
return ImageProcessor(self.error_handler, self.output_dir)._wait_for_animation_starts(zone, timeout, threshold, step)
@utils.add_error_info
def is_zone_animating(self, zone=None, threshold=0.9, step=0.1):
"""Checks if the given zone is animating. Returns bool.
Pass _zone_, _threshold_, _step_ as arguments. All are optional. If zone is not provided
the whole active area is taken.
Default values are given in the example.
Examples:
| ${is_animating} = | Is Zone Animating | zone=game_zone | threshold=0.9 | step=0.1
"""
return ImageProcessor(self.error_handler, self.output_dir)._is_animating(zone, threshold, step)
| 2,146 |
betl/chernoff_bounds.py
|
avrohr/betl
| 0 |
2171346
|
import numpy as np
from scipy.linalg import block_diag
from numpy.linalg import matrix_power
from scipy.optimize import minimize
import picos as pic
def state_covariance(A, B, K, V):
A_cl = A + B @ K
d = A.shape
X_V = pic.SymmetricVariable('X_V', shape=d)
F = pic.Problem()
F.set_objective('min', pic.trace(X_V))
F.add_constraint(A_cl.T * X_V * A_cl - X_V + V == 0)
F.add_constraint(X_V >> 0)
F.solve(verbosity=0, primals=None)
# Unstable, so expected variance is infinite
if F.status != 'optimal':
return np.Inf
X_V = np.atleast_2d(X_V.value)
return X_V
# E is the steady state state covariance
def bounds(E, Q, R, A, B, K, n, p):
Q_ = Q + K.T @ R @ K
A_ = A + B @ K
omega = block_diag(*list(Q_ for i in range(n)))
column_list = list()
for i in range(n):
entry_list = list()
for j in range(n):
exp = np.abs(j-i)
if j >= i:
entry = matrix_power(A_, exp) @ E
else:
entry = E @ matrix_power(A_, exp).T
entry_list.append(entry)
column = np.vstack(entry_list)
column_list.append(column)
cov = np.hstack(column_list)
assert np.allclose(cov, cov.T)
M = omega @ cov
eig = np.linalg.eigvals(M + np.eye(M.shape[0]) * 1e-9)
assert 0 < p < 1
beta = p
# assert np.alltrue(0 < eig < 1)
def x(eta):
return - 1/eta * np.log(beta/2) - 1/(2*eta) * np.sum(np.log(1 - 2 * eta * eig))
test = np.linspace(-1000., 0., 10000, endpoint=False)
f = lambda eta: -x(eta)
xs = list(f(eta) for eta in test)
# import matplotlib.pyplot as plt
# plt.plot(test, xs)
# plt.show()
# lower Bound
bnds = ((None, -1e-6),)
res = minimize(fun=lambda eta: -x(eta), x0=test[np.argmin(xs)], bounds=bnds)
k_m = x(res.x)
max = 1/(2 * np.max(eig))
test = np.linspace(0.0001, max, 1000, endpoint=False)
f = lambda eta: x(eta)
xs = list(f(eta) for eta in test)
bnds = ((1e-4, max),)
res = minimize(x, x0=test[np.argmin(xs)], bounds=bnds)
k_p = x(res.x)
return k_m, k_p
# A = np.random.rand(3, 3)
# B = np.random.rand(3, 2)
#
# Q = np.eye(3)
# R = np.eye(3) * 10
#
# from scipy.linalg import solve_discrete_are
#
# P = np.array(np.array(solve_discrete_are(A, B, Q, R)))
# K = - np.linalg.inv(R + B.T @ P @ B) @ (B.T @ P @ A)
#
#
# bounds(np.eye(3) * 0.01, Q, R, A, B, K, 3)
# A = np.array([[1.01, 0.01, 0. ],
# [0.01, 1.01, 0.01],
# [0. , 0.01, 1.01]])
#
# B = np.array([[1., 0., 0.],
# [0., 1., 0.],
# [0., 0., 1.]])
#
# Q = np.eye(3)
# R = np.eye(3) * 10
# cov = np.array([[ 0.09312158, -0.00558783, 0.03694939],
# [-0.00558783, 0.0911873 , -0.00599917],
# [ 0.03694939, -0.00599917, 0.1346676 ]])
#
# K = np.array([[-0.3364908 , -0.02618322, -0.00035088],
# [-0.0383956 , -0.33996468, -0.01508168],
# [ 0.01147953, -0.01979958, -0.33529024]])
#
# V = np.array([[0.05, 0. , 0. ],
# [0. , 0.05, 0. ],
# [0. , 0. , 0.05]])
#
# cov_ = state_covariance(A, B, K, V)
# print(bounds(cov_, Q, R, A, B, K, 10, 0.01))
| 3,170 |
May/Week3/LeftMostColumnWithOne.py
|
nimishbongale/leetcode-30days-solutions-scratchpad
| 0 |
2172664
|
# """
# This is BinaryMatrix's API interface.
# You should not implement it, or speculate about its implementation
# """
#class BinaryMatrix(object):
# def get(self, x: int, y: int) -> int:
# def dimensions(self) -> list[]:
class Solution:
def leftMostColumnWithOne(self, binaryMatrix: 'BinaryMatrix') -> int:
dim=binaryMatrix.dimensions()
p={0:0,1:dim[1]-1}
while p[0]!=dim[0] and p[1]!=-1:
if binaryMatrix.get(p[0],p[1])==0:
p[0]=p[0]+1
else:
p[1]=p[1]-1
k=p[1]+1
        return -1 if k==dim[1] else k  # column pointer never moved left -> no 1 in the matrix
| 609 |
examples/test_IsothermInflectionWorkChain_graphite_Ar.py
|
mbercx/aiida-lsmo
| 2 |
2171573
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Run example IsothermInflection for Ar in Graphite."""
from pathlib import Path
import os
import click
import pytest
from aiida import engine
from aiida.plugins import DataFactory, WorkflowFactory
from aiida.orm import Dict
from aiida import cmdline
THIS_DIR = Path(__file__).resolve().parent
DATA_DIR = THIS_DIR / 'data'
# Workchain objects
IsothermInflectionWorkChain = WorkflowFactory('lsmo.isotherm_inflection')
# Data objects
CifData = DataFactory('cif')
NetworkParameters = DataFactory('zeopp.parameters')
@pytest.fixture(scope='function')
def graphite_20a():
"""CifData for graphite."""
with open(os.path.join(DATA_DIR, 'Graphite_20A.cif'), 'rb') as handle:
cif = CifData(file=handle, label='Graphite_20A')
return cif
def run_isotherm_inflection_ar_graphite(raspa_code, zeopp_code, graphite_20a): # pylint: disable=redefined-outer-name
"""Prepare inputs and submit the workchain.
Usage: verdi run run_thisworkchainexample.py raspa@localhost zeopp@localhost"""
builder = IsothermInflectionWorkChain.get_builder()
builder.metadata.label = 'test'
builder.raspa_base.raspa.code = raspa_code
builder.zeopp.code = zeopp_code
options = {
'resources': {
'num_machines': 1,
'tot_num_mpiprocs': 1,
},
'max_wallclock_seconds': 1 * 60 * 60,
'withmpi': False,
}
builder.raspa_base.raspa.metadata.options = options
builder.zeopp.metadata.options = options
builder.structure = graphite_20a
builder.molecule = Dict(
dict={
'name': 'Ar',
'forcefield': 'HIRSCHFELDER',
'ff_cutoff': 8,
'molsatdens': 35.4,
'proberad': 1.7,
'singlebead': True,
'charged': False,
'pressure_zero': 1,
})
builder.parameters = Dict(
dict={
'ff_framework': 'DREIDING',
'temperature': 87, # T_sat Ar
'ff_cutoff': 8.0, # NOTE: Low to have cheap testing
'box_length': 16.0,
'zeopp_probe_scaling': 1.0,
'zeopp_volpo_samples': 10000,
'zeopp_block_samples': 100,
'raspa_widom_cycles': 1000,
'raspa_gcmc_init_cycles': 300,
'raspa_gcmc_prod_cycles': 300,
'pressure_num': 4,
'raspa_verbosity': 10
})
results, node = engine.run_get_node(builder)
assert node.is_finished_ok, results
params = results['output_parameters'].get_dict()
assert 'loading_absolute_dev_from_dil' in params['isotherm']
@click.command()
@cmdline.utils.decorators.with_dbenv()
@click.option('--raspa-code', type=cmdline.params.types.CodeParamType())
@click.option('--zeopp-code', type=cmdline.params.types.CodeParamType())
def cli(raspa_code, zeopp_code):
"""Run example.
Example usage: $ ./test_isotherm_inflection_workchain_graphite_ar.py --raspa-code ... --zeopp-code ...
Help: $ ./test_isotherm_inflection_workchain_graphite.py --help
"""
with open(os.path.join(DATA_DIR, 'Graphite_20A.cif'), 'rb') as handle:
cif = CifData(file=handle)
run_isotherm_inflection_ar_graphite(raspa_code, zeopp_code, cif)
if __name__ == '__main__':
cli() # pylint: disable=no-value-for-parameter
| 3,336 |
scripts/slave/recipe_modules/commit_position/tests/chromium_hash_from_commit_position.py
|
bopopescu/chromium-build
| 0 |
2172162
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'commit_position',
'recipe_engine/raw_io',
'recipe_engine/step',
]
def RunSteps(api):
result = api.commit_position.chromium_hash_from_commit_position(464748)
api.step('result', [])
api.step.active_result.presentation.logs['details'] = [
'result: %r' % (result,),
]
def GenTests(api):
yield (
api.test('basic') +
api.step_data(
'resolving commit_pos 464748',
stdout=api.raw_io.output_text(
'hash:2c8b8311c920b0a7beef28c09a11a6f14abfbabc'))
)
| 694 |
CRISPResso2/CRISPRessoPooledWGSCompareCORE.py
|
astroboi-SH-KWON/CRISPR_toolkit_with_comments
| 0 |
2171565
|
# -*- coding: utf-8 -*-
'''
CRISPResso2 - <NAME> and <NAME> 2018
Software pipeline for the analysis of genome editing outcomes from deep sequencing data
(c) 2018 The General Hospital Corporation. All Rights Reserved.
'''
import os
import errno
import sys
import subprocess as sb
import glob
import argparse
import re
from CRISPResso2 import CRISPRessoShared
from CRISPResso2 import CRISPRessoMultiProcessing
import traceback
import logging
logging.basicConfig(level=logging.INFO,
format='%(levelname)-5s @ %(asctime)s:\n\t %(message)s \n',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr,
filemode="w"
)
error = logging.critical
warn = logging.warning
debug = logging.debug
info = logging.info
def check_library(library_name):
try:
return __import__(library_name)
except:
error('You need to install %s module to use CRISPRessoPooledWGSCompare!' % library_name)
sys.exit(1)
def check_PooledWGS_output_folder(output_folder):
quantification_summary_file=os.path.join(output_folder,'SAMPLES_QUANTIFICATION_SUMMARY.txt')
if os.path.exists(quantification_summary_file):
return quantification_summary_file
else:
raise PooledWGSOutputFolderIncompleteException('The folder %s is not a valid CRISPRessoPooled or CRISPRessoWGS output folder.' % output_folder)
pd=check_library('pandas')
###EXCEPTIONS############################
class PooledWGSOutputFolderIncompleteException(Exception):
pass
_ROOT = os.path.abspath(os.path.dirname(__file__))
CRISPResso_compare_to_call = os.path.join(os.path.dirname(_ROOT),'CRISPRessoCompare.py')
def main():
try:
description = ['~~~CRISPRessoPooledWGSCompare~~~','-Comparison of two CRISPRessoPooled or CRISPRessoWGS analyses-']
compare_header = r'''
____________________________________
| __ __ __ __ __ __ __ |
||__)/ \/ \| |_ | \ /| |/ _ (_ |
|| \__/\__/|__|__|__// |/\|\__)__) |
| __ __ __ __ __ |
| / / \|\/||__) /\ |__)|_ |
| \__\__/| || /--\| \ |__ |
|____________________________________|
'''
compare_header = CRISPRessoShared.get_crispresso_header(description,compare_header)
print(compare_header)
parser = argparse.ArgumentParser(description='CRISPRessoPooledWGSCompare Parameters',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('crispresso_pooled_wgs_output_folder_1', type=str, help='First output folder with CRISPRessoPooled or CRISPRessoWGS analysis')
parser.add_argument('crispresso_pooled_wgs_output_folder_2', type=str, help='Second output folder with CRISPRessoPooled or CRISPRessoWGS analysis')
#OPTIONALS
parser.add_argument('-n','--name', help='Output name', default='')
parser.add_argument('-n1','--sample_1_name', help='Sample 1 name', default='Sample_1')
parser.add_argument('-n2','--sample_2_name', help='Sample 2 name', default='Sample_2')
parser.add_argument('-o','--output_folder', help='', default='')
parser.add_argument('-p','--n_processes',type=int, help='Number of processes to use for CRISPResso comparison',default=1)
parser.add_argument('--save_also_png',help='Save also .png images additionally to .pdf files',action='store_true')
parser.add_argument('--debug', help='Show debug messages', action='store_true')
args = parser.parse_args()
debug_flag = args.debug
crispresso_compare_options=['save_also_png',]
#check that the CRISPRessoPooled output is present
quantification_summary_file_1=check_PooledWGS_output_folder(args.crispresso_pooled_wgs_output_folder_1)
quantification_summary_file_2=check_PooledWGS_output_folder(args.crispresso_pooled_wgs_output_folder_2)
#create outputfolder and initialize the log
get_name_from_folder=lambda x: os.path.basename(os.path.abspath(x)).replace('CRISPRessoPooled_on_','').replace('CRISPRessoWGS_on_','')
if not args.name:
database_id='%s_VS_%s' % (get_name_from_folder(args.crispresso_pooled_wgs_output_folder_1),get_name_from_folder(args.crispresso_pooled_wgs_output_folder_2))
else:
database_id=args.name
OUTPUT_DIRECTORY='CRISPRessoPooledWGSCompare_on_%s' % database_id
if args.output_folder:
OUTPUT_DIRECTORY=os.path.join(os.path.abspath(args.output_folder),OUTPUT_DIRECTORY)
_jp=lambda filename: os.path.join(OUTPUT_DIRECTORY,filename) #handy function to put a file in the output directory
log_filename=_jp('CRISPRessoPooledWGSCompare_RUNNING_LOG.txt')
try:
info('Creating Folder %s' % OUTPUT_DIRECTORY)
os.makedirs(OUTPUT_DIRECTORY)
info('Done!')
except:
warn('Folder %s already exists.' % OUTPUT_DIRECTORY)
log_filename=_jp('CRISPRessoPooledWGSCompare_RUNNING_LOG.txt')
logging.getLogger().addHandler(logging.FileHandler(log_filename))
with open(log_filename,'w+') as outfile:
outfile.write('[Command used]:\nCRISPRessoPooledWGSCompare %s\n\n[Execution log]:\n' % ' '.join(sys.argv))
#load data and calculate the difference
df_quant_1=pd.read_table(quantification_summary_file_1)
df_quant_2=pd.read_table(quantification_summary_file_2)
df_comp=df_quant_1.set_index(['Name','Amplicon']).join(df_quant_2.set_index(['Name','Amplicon']),lsuffix='_%s' % args.sample_1_name,rsuffix='_%s' % args.sample_2_name)
#df_comp=df_quant_1.set_index('Name').join(df_quant_2.set_index('Name'),lsuffix='_%s' % args.sample_1_name,rsuffix='_%s' % args.sample_2_name)
df_comp['(%s-%s)_Unmodified%%' % (args.sample_1_name,args.sample_2_name)]=df_comp['Unmodified%%_%s' % args.sample_1_name]-df_comp['Unmodified%%_%s' % args.sample_2_name]
df_comp.fillna('NA').to_csv(_jp('COMPARISON_SAMPLES_QUANTIFICATION_SUMMARIES.txt'),sep='\t')
        #now run CRISPRessoCompare for the pairs for which we have data in both folders
crispresso_cmds = []
processed_regions = set([])
for idx,row in df_comp.iterrows():
if idx[0] in processed_regions:
continue
if row.isnull().any():
warn('Skipping sample %s since it was not processed in one or both conditions' % idx[0])
else:
processed_regions.add(idx[0])
#crispresso_output_folder_1=os.path.join(args.crispresso_pooled_wgs_output_folder_1,'CRISPResso_on_%s' % idx)
#crispresso_output_folder_2=os.path.join(args.crispresso_pooled_wgs_output_folder_2,'CRISPResso_on_%s' % idx)
crispresso_output_folder_1=os.path.join(args.crispresso_pooled_wgs_output_folder_1,'CRISPResso_on_%s' % idx[0])
crispresso_output_folder_2=os.path.join(args.crispresso_pooled_wgs_output_folder_2,'CRISPResso_on_%s' % idx[0])
crispresso_compare_cmd=CRISPResso_compare_to_call +' "%s" "%s" -o "%s" -n1 "%s" -n2 "%s" ' % (crispresso_output_folder_1,
crispresso_output_folder_2,
OUTPUT_DIRECTORY,
args.sample_1_name+'_%s' % idx[0],
args.sample_2_name+'_%s' % idx[0],
)
crispresso_compare_cmd=CRISPRessoShared.propagate_crispresso_options(crispresso_compare_cmd,crispresso_compare_options,args)
info('Running CRISPRessoCompare:%s' % crispresso_compare_cmd)
crispresso_cmds.append(crispresso_compare_cmd)
CRISPRessoMultiProcessing.run_crispresso_cmds(crispresso_cmds,args.n_processes,'Comparison')
info('All Done!')
print(CRISPRessoShared.get_crispresso_footer())
sys.exit(0)
except Exception as e:
debug_flag = False
if 'args' in vars() and 'debug' in args:
debug_flag = args.debug
if debug_flag:
traceback.print_exc(file=sys.stdout)
error('\n\nERROR: %s' % e)
sys.exit(-1)
| 8,476 |
RecoTauTag/TauTagTools/python/HLTPFTauSelector_cfi.py
|
nistefan/cmssw
| 3 |
2172495
|
import FWCore.ParameterSet.Config as cms
from RecoTauTag.TauTagTools.PFTauSelector_cfi import pfTauSelector
hltPFTauSelector = pfTauSelector.clone(
src = cms.InputTag("fixedConePFTauProducer"),
discriminators = cms.VPSet(
cms.PSet( discriminator=cms.InputTag("fixedConePFTauDiscriminationByIsolation"),selectionCut=cms.double(0.5))
),
cut = cms.string("pt > 0"),
)
| 391 |
demo/inference_demo.py
|
Fei-dong/SOLO_SORT
| 2 |
2172648
|
from mmdet.apis import init_detector, inference_detector, show_result_pyplot, show_result_ins
import mmcv
import time
import logging
logging.basicConfig(level = logging.DEBUG,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# config_file = '../configs/solo/decoupled_solo_r50_fpn_8gpu_3x.py'
config_file = 'configs/solov2/solov2_r50_fpn_8gpu_3x.py'
# download the checkpoint from model zoo and put it in `checkpoints/`
# checkpoint_file = '../checkpoints/DECOUPLED_SOLO_R50_3x.pth' # SOLOv2_R50_3x.pth
checkpoint_file = 'SOLOv2_R50_3x.pth'
# build the model from a config file and a checkpoint file
model = init_detector(config_file, checkpoint_file, device='cuda:0')
# test a single image
# img = 'demo.jpg'
img = 'input.jpg'
prev_time = time.time()
result = inference_detector(model, img)
logger.info('process frame time:'+str(time.time()-prev_time))
# print(len(result[0]))
# print(len(result[0][0]))
# print(result[0][0][0])
# print(result[0][1])
# print(result[0][2])
prev_time = time.time()  # reset the timer so the next log covers only postprocessing
show_result_ins(img, result, model.CLASSES, score_thr=0.25, out_file="demo_out.jpg")
logger.info('postprocessing frame time:'+str(time.time()-prev_time))
| 1,211 |
scope_injected_contextmanager/__init__.py
|
sloev/scope_injected_contextmanager
| 3 |
2172711
|
__author__ = "sloev"
__email__ = "<EMAIL>"
__version__ = "0.0.2"
import inspect
import sys
from contextlib import contextmanager
def _create_key_error_stack_trace_formatter(frame, func):
def format_keyerror_stacktrace(kind, key_error, traceback):
first_missing_key = str(key_error).strip("'")
caller_traceback = inspect.getframeinfo(frame)
exc = AttributeError(
f'Function "{func.__name__}" is missing the variable "{first_missing_key}". '
+ f'It should have been declared in File "{caller_traceback.filename}", '
+ f"line {caller_traceback.lineno}, in {caller_traceback.function}"
)
sys.__excepthook__(AttributeError, exc, caller_traceback)
@contextmanager
def handle_keyerror():
sys.excepthook = format_keyerror_stacktrace
yield
sys.excepthook = sys.__excepthook__
return handle_keyerror()
def scope_injected_contextmanager(func):
"""
scope_injected_contextmanager takes a function and returns context manager
simple example:
from scope_injected_contextmanager import scope_injected_contextmanager
fetch = lambda request: ('ok', 200)
@scope_injected_contextmanager
def log_request(request, response):
print(f"request: {request} response: {response}")
with log_request:
request = {
'query_args': {
'foo': 10
}
}
response = fetch(request)
# prints
# request: {'query_args': {'foo': 10}} response: ('ok', 200)
"""
signature = inspect.getfullargspec(func)
func_arg_names = signature.args
defaults = signature.defaults or tuple()
func_arg_names = func_arg_names[: len(func_arg_names) - len(defaults)]
class ScopeInjectedContextManager:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
return ScopeInjectedContextManager(*args, **kwargs).__enter__()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
frame = inspect.currentframe().f_back
inner_locals_copy = frame.f_locals.copy()
exception = None
args = tuple(self.args)
kwargs = dict(**self.kwargs)
with _create_key_error_stack_trace_formatter(frame, func):
kwargs.update({k: inner_locals_copy[k] for k in func_arg_names})
func(*args, **kwargs)
return ScopeInjectedContextManager()
| 2,641 |
main/evaluation.py
|
MarcSerraPeralta/rec-flows
| 0 |
2171845
|
import torch
import numpy as np
import matplotlib.pyplot as plt
import sys
plt.ioff()
##############################################################################################################
# MAP
def average_precision(ytrue,ypred,k=None,eps=1e-10,reduce_mean=True):
if k is None:
k=ypred.size(1)
_,spred=torch.topk(ypred,k,dim=1)
found=torch.gather(ytrue,1,spred)
pos=torch.arange(1,spred.size(1)+1).unsqueeze(0).to(ypred.device)
prec=torch.cumsum(found,1)/pos.float()
mask=(found>0).float()
ap=torch.sum(prec*mask,1)/(torch.sum(ytrue,1)+eps)
if reduce_mean:
return ap.mean()
return ap
##############################################################################################################
# NDCG
def NDCG_F(ypred, reli, k=None, reduce_mean=True, normalized=True):
if k is None:
k = ypred.size(1)
_, idx = torch.topk(ypred, k, dim=1)
ideal, _ = torch.topk(reli, k, dim=1)
relik = torch.gather(reli, 1, idx)
pos = torch.arange(1, relik.size(1) + 1).float().unsqueeze(0).to(ypred.device)
DCG = torch.sum( (torch.pow(2,relik).float() - 1)/torch.log2(pos+1) , 1)
if not normalized:
return DCG
IDCG = torch.sum( (torch.pow(2,ideal).float() - 1)/torch.log2(pos+1) , 1)
    # elementwise guard: a plain `if IDCG != 0` is ambiguous for batched tensors
    NDCG = torch.where(IDCG > 0, DCG / IDCG, torch.zeros_like(DCG))
if reduce_mean:
return NDCG.mean()
return NDCG
class NDCG_CLASS():
def __init__(self, params):
self.NDCG = None
self.default = {"mymodel":None, "Rtype":None, "outpath":"results/NDCG",
"meta_name":"opt_tags", "meta_path":"results/metadata",
"bias_top":1, "bias_normal":1, "alpha":None, "reli":[0.5],
"minNclass":1, "topN":1000, "Z_TYPE":"out", "tunpost_factor":1}
self.params = self.default.copy()
self.set(params)
return
def set(self, params):
for p in params.keys():
self.params[p] = params[p]
self.update()
return
def update(self):
#"NDCG_all_(opt_tags)_(t1n1)_(z-out)_(min1)_(top1000)_(mean-y)_alpha-0-2_(reli-0.5)_model-name"
outname = "NDCG_" + "{}_".format("-".join(map(str, self.params['Rtype'])))
if len(self.params['alpha']) == 1:
outname += "{}_".format(self.params['alpha'])
else:
outname += "{}-{}_".format(self.params['alpha'][0], self.params['alpha'][-1])
for p in self.params.keys():
if p in ["Rtype", "alpha", "mymodel"]: continue
if (self.params[p] != self.default[p]) and (type(self.params[p])==list):
outname += "-".join(map(str, self.params[p])) + "_"
if (self.params[p] != self.default[p]) and (type(self.params[p])!=list):
outname += "{}_".format(self.params[p])
outname += "{}".format(self.params["mymodel"].name)
self.outname = self.params["outpath"] + "/" + outname
return
def get(self):
self.NDCG = torch.load(self.outname)
return
def save(self, NDCG):
torch.save(NDCG, self.outname)
return
def plot(self, legend=False):
"""
Assuming only one reli is used
"""
# CHECK
if self.NDCG == None: return "ERROR: No NDCG loaded"
method2linetype = {"tunning":"-", "postfiltering":"--", "tun+post":":"}
NDCG = self.NDCG
# TRANSFORM DATA
indexes = NDCG.keys()
alpha = []
reli = 0
method = []
for a, t, r in indexes:
if a not in alpha: alpha += [a]
if t not in method: method += [t]
reli = r
alpha = sorted(alpha)
data_types = list(NDCG[a,t,r].keys())
classes = list(NDCG[a,t,r][data_types[1]].keys())
if len(alpha)==1: print("ERROR: alpha must be a list (not int) for plotting"); sys.exit(0)
fig1 = plt.figure(1, figsize=[25,10])
axes = []
if "NDCG" in data_types:
axes += [fig1.add_subplot(1, len(data_types), 1)]
axes[-1].set_xlim(alpha[0], alpha[-1])
axes[-1].set_ylabel("NDCG_tag")
for t in method:
for class_ in classes:
axes[-1].plot(alpha, [NDCG[a, t, reli]["NDCG"][class_] for a in alpha], method2linetype[t], label=class_)
if legend and ("COUNTS" not in data_types) and ("DIST" not in data_types): axes[-1].legend(loc='center left', bbox_to_anchor=(1.1, 0.5)); legend=False #plot only one legent
axes[-1].set_xlabel("alpha")
if "COUNTS" in data_types:
idx2reli = {0:0, 1:reli, 2:1}
if "NDCG" in data_types: start = 2
for counts_i in range(3):
axes += [fig1.add_subplot(3, len(data_types), start + len(data_types)*counts_i)]
axes[-1].set_xlim(alpha[0], alpha[-1])
axes[-1].set_ylabel("#reli {}".format(idx2reli[counts_i]))
for t in method:
for class_ in classes:
axes[-1].plot(alpha, [NDCG[a, t, reli]["COUNTS"][class_][counts_i] for a in alpha], method2linetype[t], label=class_)
if legend and ("DIST" not in data_types): axes[-1].legend(loc='center left', bbox_to_anchor=(1.1, -0.5)); legend=False #plot only one legent
axes[-1].set_xlabel("alpha")
if "DIST" in data_types:
start = 1
if "NDCG" in data_types: start += 1
if "COUNTS" in data_types: start += 1
idx2dist = {0:"dist(z_tunning, z_mean_tag)", 1:"dist(z_tunning, z_out)"}
for counts_i in range(2):
axes += [fig1.add_subplot(2, len(data_types), start + len(data_types)*counts_i)]
axes[-1].set_xlim(alpha[0], alpha[-1])
axes[-1].set_ylabel("{}".format(idx2dist[counts_i]))
for t in method:
for class_ in classes:
axes[-1].plot(alpha, [NDCG[a, t, reli]["DIST"][class_][counts_i] for a in alpha], method2linetype[t], label=class_)
if legend: axes[-1].legend(loc='center left', bbox_to_anchor=(1.1, -0.1)); legend=False #plot only one legent
axes[-1].set_xlabel("alpha")
#fig1.tight_layout()
plt.subplots_adjust()
fig1.savefig(self.outname + ".pdf", format="pdf", bbox_inches='tight')
fig1.clf()
del axes, fig1
return None
def plot_average(self):
"""
Assuming only one reli is used
"""
# CHECK
if self.NDCG == None: return "ERROR: No NDCG loaded"
method2linetype = {"tunning":"-", "postfiltering":"--", "tun+post":":"}
NDCG = self.NDCG
# TRANSFORM DATA
indexes = NDCG.keys()
alpha = []
reli = 0
method = []
for a, t, r in indexes:
if a not in alpha: alpha += [a]
if t not in method: method += [t]
reli = r
alpha = sorted(alpha)
data_types = list(NDCG[a,t,r].keys())
classes = list(NDCG[a,t,r][data_types[1]].keys())
if len(alpha)==1: print("ERROR: alpha must be a list (not int) for plotting"); sys.exit(0)
# CALCULATE MEAN
for a, t, r in indexes:
if "NDCG" in data_types:
NDCG[a,t,r]["NDCG"] = torch.tensor([i for i in NDCG[a,t,r]["NDCG"].values()]).mean().data.tolist()
if "COUNTS" in data_types:
NDCG[a,t,r]["COUNTS"] = torch.tensor([i.data.tolist() for i in NDCG[a,t,r]["COUNTS"].values()]).mean(0).data.tolist()
if "DIST" in data_types:
NDCG[a,t,r]["DIST"] = torch.tensor([i.data.tolist() for i in NDCG[a,t,r]["DIST"].values()]).mean(0).data.tolist()
fig1 = plt.figure(1, figsize=[25,10])
axes = []
if "NDCG" in data_types:
axes += [fig1.add_subplot(1, len(data_types), 1)]
axes[-1].set_xlim(alpha[0], alpha[-1])
axes[-1].set_ylabel("NDCG_tag")
for t in method:
axes[-1].plot(alpha, [NDCG[a, t, reli]["NDCG"] for a in alpha], method2linetype[t])
axes[-1].set_xlabel("alpha")
if "COUNTS" in data_types:
idx2reli = {0:0, 1:reli, 2:1}
if "NDCG" in data_types: start = 2
for counts_i in range(3):
axes += [fig1.add_subplot(3, len(data_types), start + len(data_types)*counts_i)]
axes[-1].set_xlim(alpha[0], alpha[-1])
axes[-1].set_ylabel("#reli {}".format(idx2reli[counts_i]))
for t in method:
axes[-1].plot(alpha, [NDCG[a, t, reli]["COUNTS"][counts_i] for a in alpha], method2linetype[t])
axes[-1].set_xlabel("alpha")
if "DIST" in data_types:
start = 1
if "NDCG" in data_types: start += 1
if "COUNTS" in data_types: start += 1
idx2dist = {0:"dist(z_tunning, z_mean_tag)", 1:"dist(z_tunning, z_out)"}
for counts_i in range(2):
axes += [fig1.add_subplot(2, len(data_types), start + len(data_types)*counts_i)]
axes[-1].set_xlim(alpha[0], alpha[-1])
axes[-1].set_ylabel("{}".format(idx2dist[counts_i]))
for t in method:
axes[-1].plot(alpha, [NDCG[a, t, reli]["DIST"][counts_i] for a in alpha], method2linetype[t])
axes[-1].set_xlabel("alpha")
#fig1.tight_layout()
plt.subplots_adjust()
fig1.savefig(self.outname + "_average.pdf", format="pdf", bbox_inches='tight')
fig1.clf()
del axes, fig1
return None
def worst_NDCG(topN, ones, relis, rel_i, Nsongs=180198):
print("topN", topN, "ones", ones, "relis", relis, "reli", rel_i)
inp_v1 = torch.tensor(range(topN)).view(1,topN)
reli_v1 = torch.zeros(1, topN)
reli_v1[0,:ones] = 1
reli_v1[0,ones:relis+ones] = rel_i
inp_v2 = torch.tensor(range(Nsongs)).view(1,Nsongs)
reli_v2 = torch.zeros(1,Nsongs)
reli_v2[0,:ones] = 1
reli_v2[0,ones:relis+ones] = rel_i
inp_v3 = torch.rand(100,Nsongs)
print("worst NDCG (topN, reli)=", NDCG_F(inp_v1, reli_v1, k=topN).data.tolist())
print("worst DCG =", NDCG_F(inp_v1, reli_v1, k=topN, normalized=False).data.tolist())
print("worst NDCG (reli, topN)=", NDCG_F(inp_v2, reli_v2, k=topN).data.tolist())
print("worst DCG =", NDCG_F(inp_v2, reli_v2, k=topN, normalized=False).data.tolist())
print("rand NDCG (reli, topN)=", sum([NDCG_F(inp_v3[i].view(1,Nsongs), reli_v2, k=topN).data.tolist() for i in range(100)])/100)
print("rand DCG =", sum([NDCG_F(inp_v3[i].view(1,Nsongs), reli_v2, k=topN, normalized=False).data.tolist() for i in range(100)])/100)
return
| 9,297 |
src/Pandas_Wrapper_pcg/atomic_counter.py
|
Gal-Tch/Pandas_Wrapper
| 0 |
2171528
|
import threading
class AtomicCounter:
def __init__(self):
self._lock = threading.Lock()
self._counter = 0
def increment(self):
with self._lock:
self._counter += 1
return self._counter
| 241 |
examples/LUDOX_protocol.py
|
Bioprotocols/paml
| 6 |
2172321
|
import json
import logging
import os
from typing import Tuple
import rdflib as rdfl
import sbol3
import tyto
from sbol3 import Document
import paml
logger: logging.Logger = logging.Logger("LUDOX_protocol")
CONT_NS = rdfl.Namespace('https://sift.net/container-ontology/container-ontology#')
OM_NS = rdfl.Namespace('http://www.ontology-of-units-of-measure.org/resource/om-2/')
def prepare_document() -> Document:
logger.info('Setting up document')
doc = sbol3.Document()
sbol3.set_namespace('https://bbn.com/scratch/')
return doc
def import_paml_libraries() -> None:
logger.info('Importing libraries')
paml.import_library('liquid_handling')
logger.info('... Imported liquid handling')
paml.import_library('plate_handling')
logger.info('... Imported plate handling')
paml.import_library('spectrophotometry')
logger.info('... Imported spectrophotometry')
paml.import_library('sample_arrays')
logger.info('... Imported sample arrays')
DOCSTRING = \
'''
With this protocol you will use LUDOX CL-X (a 45% colloidal silica suspension) as a single point reference to
obtain a conversion factor to transform absorbance (OD600) data from your plate reader into a comparable
OD600 measurement as would be obtained in a spectrophotometer. This conversion is necessary because plate
reader measurements of absorbance are volume dependent; the depth of the fluid in the well defines the path
length of the light passing through the sample, which can vary slightly from well to well. In a standard
spectrophotometer, the path length is fixed and is defined by the width of the cuvette, which is constant.
Therefore this conversion calculation can transform OD600 measurements from a plate reader (i.e. absorbance
at 600 nm, the basic output of most instruments) into comparable OD600 measurements. The LUDOX solution
is only weakly scattering and so will give a low absorbance value.
'''
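# The conversion described in the docstring above can be illustrated with a small
# helper that is not part of the generated protocol. The helper name and all
# numbers below are hypothetical placeholders: the conversion factor is the
# reference OD600 of LUDOX divided by its measured plate-reader absorbance, and
# every other plate-reader reading is rescaled by that factor.
def _illustrate_od600_conversion(reference_od600: float, ludox_abs600: float, sample_abs600: float) -> float:
    """Return a spectrophotometer-equivalent OD600 for one plate-reader reading (illustration only)."""
    conversion_factor = reference_od600 / ludox_abs600
    return sample_abs600 * conversion_factor
# Example with made-up values: _illustrate_od600_conversion(0.063, 0.036, 0.20) ~= 0.35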
def create_protocol() -> paml.Protocol:
logger.info('Creating protocol')
protocol: paml.Protocol = paml.Protocol('iGEM_LUDOX_OD_calibration_2018')
protocol.name = "iGEM 2018 LUDOX OD calibration protocol"
protocol.description = DOCSTRING
return protocol
def create_h2o() -> sbol3.Component:
ddh2o = sbol3.Component('ddH2O', 'https://identifiers.org/pubchem.substance:24901740')
ddh2o.name = 'Water, sterile-filtered, BioReagent, suitable for cell culture' # TODO get via tyto
return ddh2o
def create_ludox() -> sbol3.Component:
ludox = sbol3.Component('LUDOX', 'https://identifiers.org/pubchem.substance:24866361')
ludox.name = 'LUDOX(R) CL-X colloidal silica, 45 wt. % suspension in H2O'
return ludox
PLATE_SPECIFICATION = \
"""cont:ClearPlate and
cont:SLAS-4-2004 and
(cont:wellVolume some
((om:hasUnit value om:microlitre) and
(om:hasNumericalValue only xsd:decimal[>= "200"^^xsd:decimal])))"""
PREFIX_MAP = json.dumps({"cont": CONT_NS, "om": OM_NS})
def create_plate(protocol: paml.Protocol):
# graph: rdfl.Graph = protocol._other_rdf
# plate_spec_uri = \
# "https://bbn.com/scratch/iGEM_LUDOX_OD_calibration_2018/container_requirement#RequiredPlate"
# graph.add((plate_spec_uri, CONT_NS.containerOntologyQuery, PLATE_SPECIFICATION))
# plate_spec = sbol3.Identified(plate_spec_uri,
# "foo", name="RequiredPlate")
spec = paml.ContainerSpec(queryString=PLATE_SPECIFICATION, prefixMap=PREFIX_MAP, name='plateRequirement')
plate = protocol.primitive_step('EmptyContainer',
specification=spec)
plate.name = 'calibration plate'
return plate
def provision_h2o(protocol: paml.Protocol, plate, ddh2o) -> None:
c_ddh2o = protocol.primitive_step('PlateCoordinates', source=plate.output_pin('samples'), coordinates='A1:D1')
protocol.primitive_step('Provision', resource=ddh2o, destination=c_ddh2o.output_pin('samples'),
amount=sbol3.Measure(100, tyto.OM.microliter))
def provision_ludox(protocol: paml.Protocol, plate, ludox) -> None:
c_ludox = protocol.primitive_step('PlateCoordinates', source=plate.output_pin('samples'), coordinates='A2:D2')
protocol.primitive_step('Provision', resource=ludox, destination=c_ludox.output_pin('samples'),
amount=sbol3.Measure(100, tyto.OM.microliter))
def measure_absorbance(protocol: paml.Protocol, plate, wavelength_param):
c_measure = protocol.primitive_step('PlateCoordinates', source=plate.output_pin('samples'), coordinates='A1:D2')
return protocol.primitive_step(
'MeasureAbsorbance',
samples=c_measure.output_pin('samples'),
wavelength=wavelength_param,
)
def ludox_protocol() -> Tuple[paml.Protocol, Document]:
#############################################
# set up the document
doc: Document = prepare_document()
#############################################
# Import the primitive libraries
import_paml_libraries()
#############################################
# Create the protocol
protocol: paml.Protocol = create_protocol()
doc.add(protocol)
# create the materials to be provisioned
ddh2o = create_h2o()
doc.add(ddh2o)
ludox = create_ludox()
doc.add(ludox)
# add an optional parameter for specifying the wavelength
wavelength_param = protocol.input_value('wavelength', sbol3.OM_MEASURE, optional=True,
default_value=sbol3.Measure(600, tyto.OM.nanometer))
# actual steps of the protocol
# get a plate
plate = create_plate(protocol)
# put ludox and water in selected wells
provision_h2o(protocol, plate, ddh2o)
provision_ludox(protocol, plate, ludox)
# measure the absorbance
measure = measure_absorbance(protocol, plate, wavelength_param)
output = protocol.designate_output('absorbance', sbol3.OM_MEASURE,
measure.output_pin('measurements'))
protocol.order(protocol.get_last_step(), output)
return protocol, doc
if __name__ == '__main__':
new_protocol: paml.Protocol
new_protocol, doc = ludox_protocol()
print('Validating and writing protocol')
v = doc.validate()
assert len(v) == 0, "".join(f'\n {e}' for e in v)
rdf_filename = os.path.join(os.path.dirname(__file__), 'iGEM 2018 LUDOX OD calibration protocol.nt')
doc.write(rdf_filename, sbol3.SORTED_NTRIPLES)
print(f'Wrote file as {rdf_filename}')
# render and view the dot
dot = new_protocol.to_dot()
dot.render(f'{new_protocol.name}.gv')
dot.view()
| 6,645 |
tests/models.py
|
tbrlpld/wagtail-generic-chooser
| 66 |
2171776
|
from django.db import models
class Person(models.Model):
first_name = models.CharField("First name", max_length=255)
last_name = models.CharField("Last name", max_length=255)
job_title = models.CharField("Job title", max_length=255)
| 247 |
user_client.py
|
MagnumPoryaIsfahani/COMP4000
| 0 |
2171471
|
from __future__ import print_function
import getpass
import logging
import sys
import grpc
from fuse import FUSE, FuseOSError, Operations
import users_pb2
import users_pb2_grpc
from passthrough import Passthrough
REMOTE_DIRECTORY = "/home/student/fuse"
# This method registers a new user
def registerUser(stub):
while True:
# input username
new_username = input("\nEnter a username for your account: ")
# input password
new_password = getpass.getpass("Enter a password for your account: ")
confirm_password = getpass.getpass("Please confirm your password by retyping: ")
if new_password != confirm_password:
print("\nError: your passwords didn't match.")
continue
# send request
        response = stub.createUserAccount(users_pb2.CreateUserRequest(username=new_username, password=new_password, confirmation=confirm_password))
if not response.success:
print("\nError: the username you have chosen already exists, please choose a different one.")
continue
print("\nSuccessfully created account for user", new_username)
return
# Deletes a user
# Returns true if success
def deleteUser(stub, uname, tok):
response = stub.deleteUserAccount(users_pb2.DeleteUserRequest(username=uname,token=tok))
if response.success:
print("\nYour account has been removed successfully.")
else:
print("\nError: your account has not been removed.")
def updateUser(stub, username, token):
while True:
new_password = getpass.getpass("Enter a new password for your account: ")
confirm_password = getpass.getpass("Please confirm your password by retyping: ")
if new_password != confirm_password:
print("\nError: your passwords don't match, please try again")
continue
        response = stub.updateUserAccount(users_pb2.UpdateUserRequest(password=new_password, token=token, username=username))
if response.code == grpc.StatusCode.OK.value[0]:
print("\nPassword updated!")
elif response.code == grpc.StatusCode.UNAUTHENTICATED.value[0]:
print("\nError: unauthorized. Token is invalid.")
elif response.code == grpc.StatusCode.ALREADY_EXISTS.value[0]:
print("\nError: new password must differ from old password.")
continue
elif response.code == grpc.StatusCode.DEADLINE_EXCEEDED.value[0]:
print("\nError: login timed out...")
elif response.code == grpc.StatusCode.NOT_FOUND.value[0]:
print("\nError: database not found.")
else:
print("Unknown Error.")
# return to userSelection
return
# menu once the user has logged in
def userSelection(stub, username, token):
while True:
operation = input("""
- YOU ARE LOGGED IN -
[1] Update Password
[2] Delete Account
[3] Mount remote filesystem
[q] Logout
Please choose an operation: """)
if operation == '1':
updateUser(stub, username, token)
elif operation == '2':
deleteUser(stub, username, token)
elif operation == '3':
# display file structure
print('\n- FILE STRUCTURE -')
reply = stub.displayTree(users_pb2.DisplayTreeRequest())
print(reply.tree)
# input mountpoint
mountpoint = input('Enter the mountpoint: ')
print("[ctrl+c] to unmount...")
# mount remote fs
FUSE(Passthrough(REMOTE_DIRECTORY, stub), mountpoint, nothreads=True, foreground=True)
print('\nFilesystem was unmounted...\n')
continue
elif operation != 'q':
print('Error: invalid input.')
continue
# logout of account
print("\nLogging out...")
return
# Client sends credentials to server via RPC call
# Server compares received credentials with locally stored credentials, and replies with authentication token to client if credentials match. The token must be a
# random 64 bit string
# Server stores authentication token assigned to user and assigns expiry (arbitrary time)
# Server responds with authentication failure if username does not exist, or password is invalid
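# A minimal sketch of the token generation described above. This is server-side
# behaviour and not part of this client; the helper below is illustrative only
# and is never called here.
def _example_generate_token() -> str:
    import secrets
    return secrets.token_hex(8)  # 8 random bytes == 64 random bits, hex-encoded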
# the login screen
def menuSelect(stub):
# User Menu:
while True:
operation = input("""
- WELCOME TO THE COMP4000 REMOTE FILESYSTEM -
[1] Login to an existing account
[2] Register an account
[q] Quit
Please choose an operation: """)
if operation == "1" :
username = input("Enter your username: ")
            password = getpass.getpass("Enter your password: ")
response = stub.loginUserAccount(users_pb2.LoginUserRequest(username=username, password=password))
if response.success:
userSelection(stub, username, response.token)
else:
print('Error: incorrect username/password combo.')
elif operation == "2" :
registerUser(stub)
elif operation == "q":
quit()
else:
print("Error: invalid input.")
def run():
ip_address = "localhost"
if(len(sys.argv) > 1):
ip_address = sys.argv[1]
with grpc.insecure_channel(ip_address+':10001') as channel:
stub = users_pb2_grpc.UsersStub(channel)
menuSelect(stub)
quit()
if __name__ == "__main__":
logging.basicConfig()
run()
| 5,578 |
InterAutoTest_W/run.py
|
xuguoyan/pytest_api3
| 7 |
2172522
|
# coding=utf-8
import os
import pytest
from config import Conf
if __name__ == "__main__":
report_path = Conf._report_path()
pytest.main(['-s', '--alluredir', report_path+'/reslut'])
| 190 |
cadmus/pre_retrieval/key_fields.py
|
biomedicalinformaticsgroup/cadmus
| 5 |
2172279
|
import bs4
from bs4 import BeautifulSoup
import datetime
import pandas as pd
def key_fields(crossref_dict, doi_list, pmid_doi, pmcid, is_list):
# now we want to parse out the critical data for the crossref records
# we'll store the output in a parse dictionary
parse_d = {}
pmid = str()
pmcid_loop = str()
# we'll loop through the crossref_dict
for index, r_dict in crossref_dict.items():
message = r_dict['message']
#lets start parsing out the key variables we want from the metadata
doi = message.get('DOI')
# once we have the doi we can cross check that against the lists of pmids and pmcids from the NCBI id converter to fill in those ids.
if doi.lower() in [x.lower() for x in doi_list]:
for i in range(len(doi_list)):
if doi.lower() == doi_list[i].lower():
doi = doi_list[i]
pmid = pmid_doi[i]
pmcid_loop = pmcid[i]
else:
pass
else:
pmid = None
pmcid_loop = None
#lets start parsing out the key variables we want from the metadata
licenses = message.get('license')
# now the full text links
links = message.get('link')
# we use the is_list variable to say whether we are using the list of dois to generate missing metadata
# when false we can skip this stage and just take the licenses and full text links
# when true we can try to populate more fields from the crossref metadata
if is_list == True:
pub_type = message.get('type')
title = message.get('title')[0]
issn= message.get("ISSN")
            # abstracts are presented as xml and can need a simple bit of cleaning
abstract = message.get('abstract')
if abstract:
soup = BeautifulSoup(abstract)
abstract = soup.get_text(' ')
# author lists need to be unpacked
author = message.get('author')
# use list comprehension on the author dictionary to parse out the names
author_list = [f"{author_dict.get('family')}, {author_dict.get('given')}" for author_dict in author]
# the journal will be absent for these preprints so we can set it to biorxiv
journal = message.get('container-title')
# the pubdate is dependent on when it was uploaded
date = message.get('created')
y,m,d = date['date-parts'][0]
# convert the date into a datetime object
date = datetime.date(int(y),int(m),int(d))
# now add each record to the parse d
parse_d.update({index:{'doi':doi,
'pmid':pmid,
'pmcid': pmcid_loop,
'issn':issn,
'pub_type':pub_type,
'title':title,
'abstract':abstract,
'journal':journal,
'authors':author_list,
'pub_date':date,
'licenses': licenses,
'links':links}})
else:
parse_d.update({index:{'doi':doi,
'pmcid': pmcid_loop, # ? needed
'licenses': licenses,
'links':links}})
# finally we'll store the parse dictionary as our crossref metadata retrieved dataframe
cr_df = pd.DataFrame.from_dict(parse_d, orient = 'index')
return cr_df
| 4,079 |
test/quora/admin.py
|
xiaoxiaolulu/cookieStyle
| 5 |
2171956
|
from django.contrib import admin
from markdownx.admin import MarkdownxModelAdmin
from test.quora.models import Vote, Question, Answer
class VoteAdmin(admin.ModelAdmin):
list_display = ["uuid_id", "user", "value", "content_type", "object_id"]
class QuestionAdmin(MarkdownxModelAdmin):
list_display = ["id", 'user', "title", "slug", "status", "tags", "has_correct"]
class AnswerAdmin(admin.ModelAdmin):
list_display = ["uuid_id", "user", "question", "is_accepted"]
admin.site.register(Vote, VoteAdmin)
admin.site.register(Question, QuestionAdmin)
admin.site.register(Answer, AnswerAdmin)
| 616 |
ChessboardCalibration_master/calibration/chessboard_detection/prototypes.py
|
itachi176/MutilModelAI
| 0 |
2170291
|
import numpy as np
import scipy
import cv2
def gkern(l=11, sig=1.):
"""\
creates gaussian kernel with side length l and a sigma of sig
"""
ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)
xx, yy = np.meshgrid(ax, ax)
kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))
return kernel
class Prototype:
def __init__ (self, kernel_size = 11):
assert kernel_size % 2 == 1
zkernel = np.zeros((kernel_size, kernel_size))
self.kernel_size = kernel_size
self.sub_size = kernel_size // 2
self.kernelA = zkernel.copy()
self.kernelB = zkernel.copy()
self.kernelC = zkernel.copy()
self.kernelD = zkernel.copy()
def _conv(self, img):
assert img.shape == (self.kernel_size, self.kernel_size)
outputA = np.multiply(img, self.kernelA).sum()
outputB = np.multiply(img, self.kernelB).sum()
outputC = np.multiply(img, self.kernelC).sum()
outputD = np.multiply(img, self.kernelD).sum()
return outputA, outputB, outputC, outputD
class Prototype1(Prototype):
def __init__(self, kernel_size = 11):
super().__init__(kernel_size=kernel_size)
#print(kernelA)
gkernel = gkern(l = kernel_size)
self.kernelA[:self.sub_size,self.sub_size+1:] = gkernel[:self.sub_size, self.sub_size+1:]
self.kernelA = self.kernelA/self.kernelA.sum()
self.kernelB[self.sub_size+1:, :self.sub_size] = gkernel[self.sub_size+1:, :self.sub_size]
self.kernelB = self.kernelB/self.kernelB.sum()
self.kernelC[: self.sub_size, : self.sub_size] = gkernel[:self.sub_size, :self.sub_size]
self.kernelC = self.kernelC/self.kernelC.sum()
self.kernelD[self.sub_size+1:, self.sub_size+1:] = gkernel[self.sub_size+1:, self.sub_size+1:]
self.kernelD = self.kernelD/self.kernelD.sum()
class Prototype2(Prototype):
def __init__(self, kernel_size = 11):
super().__init__(kernel_size=kernel_size)
gkernel = gkern(l = kernel_size)
for u in range(kernel_size):
for v in range(kernel_size):
if u < v and u+v < kernel_size -1:
self.kernelA[u,v] = gkernel[u,v]
elif u > v and u+v > kernel_size -1:
self.kernelB[u,v] = gkernel[u,v]
elif u < v and u+v > kernel_size -1:
self.kernelC[u,v] = gkernel[u,v]
elif u> v and u+ v <kernel_size -1:
self.kernelD[u,v] = gkernel[u,v]
self.kernelA = self.kernelA/self.kernelA.sum()
self.kernelB = self.kernelB/self.kernelB.sum()
self.kernelC = self.kernelC/self.kernelC.sum()
        self.kernelD = self.kernelD/self.kernelD.sum()
| 2,782 |
catalog/bindings/gmd/grid_function.py
|
NIVANorge/s-enda-playground
| 0 |
2172264
|
from dataclasses import dataclass
from bindings.gmd.grid_function_type import GridFunctionType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class GridFunction(GridFunctionType):
"""gml:GridFunction provides an explicit mapping rule for grid geometries,
i.e. the domain shall be a geometry of type grid.
It describes the mapping of grid posts (discrete point grid
coverage) or grid cells (discrete surface coverage) to the values in
the range set. The gml:startPoint is the index position of a point
in the grid that is mapped to the first point in the range set (this
is also the index position of the first grid post). If the
gml:startPoint property is omitted the gml:startPoint is assumed to
be equal to the value of gml:low in the gml:Grid geometry.
Subsequent points in the mapping are determined by the value of the
gml:sequenceRule.
"""
class Meta:
namespace = "http://www.opengis.net/gml"
| 974 |
tests/python/xgboost/04_PlanetKaggle_GPU.py
|
cclauss/h2o4gpu
| 0 |
2169540
|
# coding: utf-8
"""
:copyright: 2017-2018 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
# # Experiment 04: Amazon Planet (GPU version)
#
# This experiment uses the data from the Kaggle competition [Planet: Understanding the Amazon from Space](https://www.kaggle.com/c/planet-understanding-the-amazon-from-space/leaderboard). Here we use a pretrained ResNet50 model to generate the features from the dataset.
#
# The details of the machine we used and the version of the libraries can be found in [experiment 01](01_airline.ipynb).
# In[1]:
import sys, os
import json
from collections import defaultdict
import numpy as np
import pkg_resources
from libs.loaders import load_planet_kaggle
from libs.planet_kaggle import threshold_prediction
from libs.timer import Timer
import lightgbm as lgb
import xgboost as xgb
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from tqdm import tqdm
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session, get_session
print("System version: {}".format(sys.version))
print("XGBoost version: {}".format(pkg_resources.get_distribution('xgboost').version))
print("LightGBM version: {}".format(pkg_resources.get_distribution('lightgbm').version))
# In[2]:
#get_ipython().magic('env MOUNT_POINT=/datadrive')
# In[3]:
#Configure TF to use only one GPU, by default TF allocates memory in all GPUs
config = tf.ConfigProto(device_count = {'GPU': 1})
#Configure TF to limit the amount of GPU memory, by default TF takes all of them.
config.gpu_options.per_process_gpu_memory_fraction = 0.3
set_session(tf.Session(config=config))
# The images are loaded and featurised using a pretrained ResNet50 model available from Keras
# In[4]:
X_train, y_train, X_test, y_test = load_planet_kaggle()
# In[5]:
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
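# In[ ]:
# `load_planet_kaggle` is defined in libs.loaders and is not shown in this notebook.
# A minimal sketch of the ResNet50 featurisation it is described as performing
# (illustrative only; the exact preprocessing and pooling in libs.loaders may differ):
def _example_resnet50_featuriser():
    from keras.applications.resnet50 import ResNet50
    # include_top=False with global average pooling yields the 2048-dim feature vectors used below
    return ResNet50(weights='imagenet', include_top=False, pooling='avg')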
# ## XGBoost
# We will use a one-v-rest. So each classifier will be responsible for determining whether the assigned tag applies to the image
# In[6]:
def train_and_validate_xgboost(params, train_features, train_labels, validation_features, num_boost_round):
n_classes = train_labels.shape[1]
y_val_pred = np.zeros((validation_features.shape[0], n_classes))
time_results = defaultdict(list)
for class_i in tqdm(range(n_classes)):
dtrain = xgb.DMatrix(data=train_features, label=train_labels[:, class_i], nthread=-1)
dtest = xgb.DMatrix(data=validation_features, nthread=-1)
with Timer() as t:
model = xgb.train(params, dtrain, num_boost_round=num_boost_round)
time_results['train_time'].append(t.interval)
with Timer() as t:
y_val_pred[:, class_i] = model.predict(dtest)
time_results['test_time'].append(t.interval)
return y_val_pred, time_results
# In[7]:
def train_and_validate_lightgbm(params, train_features, train_labels, validation_features, num_boost_round):
n_classes = train_labels.shape[1]
y_val_pred = np.zeros((validation_features.shape[0], n_classes))
time_results = defaultdict(list)
for class_i in tqdm(range(n_classes)):
lgb_train = lgb.Dataset(train_features, train_labels[:, class_i], free_raw_data=False)
with Timer() as t:
model = lgb.train(params, lgb_train, num_boost_round = num_boost_round)
time_results['train_time'].append(t.interval)
with Timer() as t:
y_val_pred[:, class_i] = model.predict(validation_features)
time_results['test_time'].append(t.interval)
return y_val_pred, time_results
# In[8]:
metrics_dict = {
'Accuracy': accuracy_score,
'Precision': lambda y_true, y_pred: precision_score(y_true, y_pred, average='samples'),
'Recall': lambda y_true, y_pred: recall_score(y_true, y_pred, average='samples'),
'F1': lambda y_true, y_pred: f1_score(y_true, y_pred, average='samples'),
}
def classification_metrics(metrics, y_true, y_pred):
return {metric_name:metric(y_true, y_pred) for metric_name, metric in metrics.items()}
# In[9]:
results_dict = dict()
num_rounds = 50
# Now we are going to define the different models.
# In[10]:
xgb_params = {'max_depth':2, #'max_depth':6
'objective':'binary:logistic',
'min_child_weight':1,
'learning_rate':0.1,
'scale_pos_weight':2,
'gamma':0.1,
              'reg_lambda':1,
'subsample':1,
'tree_method':'gpu_exact'
}
# *NOTE: We got an out of memory error with xgb. Please see the comments at the end of the notebook.*
# In[ ]:
y_pred, timing_results = train_and_validate_xgboost(xgb_params, X_train, y_train, X_test, num_boost_round=num_rounds)
# In[ ]:
results_dict['xgb']={
'train_time': np.sum(timing_results['train_time']),
'test_time': np.sum(timing_results['test_time']),
'performance': classification_metrics(metrics_dict,
y_test,
threshold_prediction(y_pred, threshold=0.1))
}
#
#
# Now let's try with XGBoost histogram.
#
# In[12]:
xgb_hist_params = {'max_depth':2,
'objective':'binary:logistic',
'min_child_weight':1,
'learning_rate':0.1,
'scale_pos_weight':2,
'gamma':0.1,
                   'reg_lambda':1,
'subsample':1,
'tree_method':'gpu_hist',
'max_bins': 63
}
# In[ ]:
y_pred, timing_results = train_and_validate_xgboost(xgb_hist_params, X_train, y_train, X_test, num_boost_round=num_rounds)
# In[ ]:
results_dict['xgb_hist']={
'train_time': np.sum(timing_results['train_time']),
'test_time': np.sum(timing_results['test_time']),
'performance': classification_metrics(metrics_dict,
y_test,
threshold_prediction(y_pred, threshold=0.1))
}
# ## LightGBM
#
#
# In[21]:
lgb_params = {'num_leaves': 2**6,
'learning_rate': 0.1,
'scale_pos_weight': 2,
'min_split_gain': 0.1,
'min_child_weight': 1,
'reg_lambda': 1,
'subsample': 1,
'objective':'binary',
'device': 'gpu',
'task': 'train',
'max_bin': 63
}
# In[22]:
y_pred, timing_results = train_and_validate_lightgbm(lgb_params, X_train, y_train, X_test, num_boost_round=num_rounds)
# In[23]:
results_dict['lgbm']={
'train_time': np.sum(timing_results['train_time']),
'test_time': np.sum(timing_results['test_time']),
'performance': classification_metrics(metrics_dict,
y_test,
threshold_prediction(y_pred, threshold=0.1))
}
# Finally, we show the results.
# In[24]:
# Results
print(json.dumps(results_dict, indent=4, sort_keys=True))
# In this dataset we have a big feature size, 2048. When using the standard version of XGBoost, xgb, we get an out of memory using a NVIDIA M60 GPU, even if we reduce the max depth of the tree to 2. A solution to this issue would be to reduce the feature size. One option could be using PCA and another could be to use a different featurizer, instead of ResNet whose last hidden layer has 2048 units, we could use VGG, [also provided by Keras](https://github.com/fchollet/keras/blob/master/keras/applications/vgg16.py), whose last hidden layer has 512 units.
#
# As it can be seen, LightGBM is faster than XGBoost, but in this case the speed is lower than in the CPU version. The GPU implementation cannot always speed up the training, since it has some additional cost of memory copy between CPU and GPU. So when the data size is small and the number of features is large, the GPU version will be slower.
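# In[ ]:
# A minimal sketch of the PCA-based feature reduction suggested above. This is not
# part of the original experiment; the component count of 512 is an arbitrary
# illustration (chosen to match the VGG feature size mentioned above).
def reduce_features_with_pca(train_features, test_features, n_components=512):
    from sklearn.decomposition import PCA
    pca = PCA(n_components=n_components).fit(train_features)
    return pca.transform(train_features), pca.transform(test_features)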
| 8,016 |
cfgov/v1/migrations/0269_home_page_2021.py
|
adebisi-aden/consumerfinance.gov
| 37 |
2172545
|
# Generated by Django 2.2.24 on 2021-08-10 15:10
from django.db import migrations
import v1.atomic_elements.atoms
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('v1', '0268_remove_category_sidebar'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='answer_page_links',
field=wagtail.core.fields.StreamField([('page', wagtail.core.blocks.PageChooserBlock(page_type=['ask_cfpb.AnswerPage']))], blank=True),
),
migrations.AddField(
model_name='homepage',
name='highlight_cards',
field=wagtail.core.fields.StreamField([('highlight', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.TextBlock()), ('link_url', v1.atomic_elements.atoms.URLOrRelativeURLBlock())]))], blank=True),
),
]
| 952 |
tests/test_direct.py
|
ARMmbed/snippet
| 4 |
2171990
|
#
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
import unittest
from snippet import config as snippet_config, api
from pathlib import Path
import shutil
import filecmp
from tests import tmp_test_dir
from tests import sample_input_dir
class Test(unittest.TestCase):
def tearDown(self):
shutil.rmtree(tmp_test_dir)
def test_run(self):
# writing two different languages sequentially to the same file
config = snippet_config.Config()
config.output_dir = tmp_test_dir
config.output_append = True
# only detect the python file
config.input_glob = Path(sample_input_dir).joinpath("example.py")
api.extract_code_snippets(config=config)
# only detect the java file
config.input_glob = Path(sample_input_dir).joinpath("example.java")
config.language_name = "java"
config.comment_prefix = "// "
api.extract_code_snippets(config=config)
self.assertTrue(
filecmp.cmp(
Path(tmp_test_dir).joinpath("number_1.md"), Path(sample_input_dir).joinpath("fixture.md"), shallow=False
)
)
| 1,186 |
accounts/adapter.py
|
establishment/django-establishment
| 1 |
2171981
|
def login(request, user):
from django.contrib.auth import login
if not hasattr(user, "backend"):
user.backend = "establishment.accounts.auth_backends.AuthenticationBackend"
login(request, user)
def perform_login(request, user):
if not user.is_active:
# TODO: raise a custom ErrorMessage here
raise RuntimeError("Inactive account")
login(request, user)
| 398 |
record_model.py
|
LupusAnay/wishlist
| 1 |
2171449
|
from PyQt5.QtCore import pyqtSlot, QModelIndex
from sqlalchemy.orm import Session
from db_table_model import DBTableModel
from record import Record
class RecordModel(DBTableModel):
def __init__(self, session: Session):
super().__init__(Record, session)
@pyqtSlot(int, name='remove')
def remove(self, index: int):
item = self._items[index]
self.beginRemoveRows(QModelIndex(), index, index)
self._items.remove(item)
self._session.delete(item)
self._session.commit()
self.endRemoveRows()
@pyqtSlot(str, str, str, float, name='add')
def add(self, name: str, note: str, link: str, price: float) -> int:
rows_before_insert = self.rowCount()
record = Record(name, note, link, price)
self._session.add(record)
self._session.commit()
self.beginInsertRows(QModelIndex(), self.rowCount(), self.rowCount())
self._items.append(record)
if rows_before_insert == 0:
self._generate_role_names()
self.endInsertRows()
return record.id
@pyqtSlot(int, str, str, str, int, name='update')
def update(self, index: int, name, note, link, price) -> None:
item = self._items[index]
item.update(name=name, note=note, link=link, price=price)
self._session.commit()
self.dataChanged.emit(self.index(index),
self.index(index),
self.roleNames())
| 1,483 |
rl/fall/duel.py
|
danieloconell/louis
| 0 |
2172073
|
import time
from env import Falling
import rl
import pygame
rl.load_q()
env = Falling(duel=True)
ai_won = ai_lost = pl_won = pl_lost = 0
while True:
    env.reset()
    action = "stay"  # default player action until the first key event arrives
while not env.done:
env.render()
env.update_score(ai_won, ai_lost, pl_stats=[pl_won, pl_lost])
# why does making random move drastically increase performance
ai_action = rl.choose_action(env.agent, env.square, train=True)
env.make_action(ai_action, ai=True)
time.sleep(0.05)
# quit if the user wants and get the human players action
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
action = "left"
elif event.key == pygame.K_RIGHT:
action = "right"
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
action = "stay"
else:
action = "stay"
# move human player
env.make_action(action, player=True)
env.update()
# keep track of how many won and lost
if env.agent == env.square[0]:
ai_won += 1
else:
ai_lost += 1
if env.human == env.square[0]:
pl_won += 1
else:
pl_lost += 1
| 1,406 |
charcad/draw/lines.py
|
vczoni/charcad
| 0 |
2172284
|
from charcad.draw.coordinates import Coordinates
from charcad.draw.graph import GraphicObject, GraphicObjectArray, Graph
from charcad.draw.point import Point
from charcad.draw.utils import calc_angle, calc_distance, chrs as ch
class Lines(GraphicObject):
def __init__(self, x=0, y=0, transparent=True, formatter=None):
super(Lines, self).__init__(x, y, transparent)
self.formatter = formatter
self.objects = GraphicObjectArray()
class Line(Lines):
def __init__(self, x, y, formatter=None):
super(Line, self).__init__(x, y, formatter=formatter)
def __repr__(self):
return 'Line at <%s>' % id(self)
def __len__(self):
return len(self.objects)
@property
def origin(self):
self.coord = Coordinates(self.x, self.y)
return self.coord
@property
def end(self):
x = self.objects[-1].coord.x
y = self.objects[-1].coord.y
self.coord = Coordinates(x, y)
return self.coord
@property
def length(self):
return len(self)
def add_point(self, p):
self.objects.add(p)
def update_graph(self, w=1, h=1):
self.graph = Graph(w=w, h=h)
origin = self.origin
for obj in self.objects:
obj.coord -= origin
self.graph.add_graph(obj)
class Horizontal(Line):
def __init__(self, x, y, length, marker=None, formatter=None):
super(Horizontal, self).__init__(x, y, formatter)
if marker is None:
marker = ch.lr
self.marker = marker
self.create(length)
def create(self, length):
for i in range(length):
p = Point(self.x+i, self.y, marker=self.marker,
formatter=self.formatter)
self.add_point(p)
self.update_graph(w=length)
class Vertical(Line):
def __init__(self, x, y, length, marker=None, formatter=None):
super(Vertical, self).__init__(x, y, formatter)
if marker is None:
marker = ch.ud
self.marker = marker
self.create(length)
def create(self, length):
for i in range(length):
p = Point(self.x, self.y+i, marker=self.marker,
formatter=self.formatter)
self.add_point(p)
self.update_graph(h=length)
class Route(Lines):
def __init__(self, transparent=True):
super(Route, self).__init__(transparent=transparent)
self.objects = GraphicObjectArray()
def __repr__(self):
return 'Route at <%s>' % id(self)
def __len__(self):
return len(self.objects)
@property
def origin(self):
x = min([obj.coord.x for obj in self.objects])
y = min([obj.coord.y for obj in self.objects])
return Coordinates(x, y)
@property
def h(self):
return (max([obj.coord.y for obj in self.objects])
- min([obj.coord.y for obj in self.objects])
+ 1)
@property
def w(self):
return (max([obj.coord.x for obj in self.objects])
- min([obj.coord.x for obj in self.objects])
+ 1)
def add_point(self, p):
self.objects.add(p)
def create_route(self, *points, marker='.', origin_marker='.',
formatter=None, origin_formatter=None):
for point, next_point in zip(points[0:-1], points[1:]):
point.set_marker(marker)
point.set_formatter(formatter)
self.connect(point, next_point)
# edit origins
self.objects[0].set_marker(origin_marker)
self.objects[0].set_formatter(origin_formatter)
self.objects[-1].set_marker(origin_marker)
self.objects[-1].set_formatter(origin_formatter)
self.coord = self.origin
self.update()
def connect(self, p1, p2):
current_point = p1
target_point = p2
arrived = current_point == target_point
self.add_point(current_point)
while not arrived:
next_points = [current_point + m for m in movements]
distances = [calc_distance(p, target_point)
for p in next_points]
idx = distances.index(min(distances))
current_point += movements[idx]
self.add_point(current_point)
arrived = current_point == target_point
def update(self):
self.graph = Graph(w=self.w, h=self.h)
origin = self.origin
for obj in self.objects:
obj.coord -= origin
self.graph.add_graph(obj)
movements = [
Coordinates(0, 1),
Coordinates(1, 1),
Coordinates(1, 0),
Coordinates(1, -1),
Coordinates(0, -1),
Coordinates(-1, -1),
Coordinates(-1, 0),
Coordinates(-1, 1),
]
| 4,767 |
timebandits_app/tests/tests.py
|
alex-shen1/TimeBanditsApp
| 0 |
2171537
|
"""Tests for the site"""
# pylint: disable=imported-auth-user
from django.test import TestCase
from django.contrib.auth.models import User
from timebandits_app.forms.task_form import TaskForm
from ..templatetags import formatters
class TrivialTest(TestCase):
"""Contains trivial tests"""
def test_trivial_math(self):
"""Most trivial possible test to verify testing works."""
self.assertTrue(1 + 1 == 2)
class CurrencyFormatterTest(TestCase):
"""Contains test cases for the formatter"""
    def test_short_decimal(self):
        """Test one decimal place"""
        self.assertEqual(formatters.currency(200.0), "$200.00")
    def test_no_decimal(self):
        """Test no decimal points"""
        self.assertEqual(formatters.currency(20), "$20.00")
    def test_long_decimal(self):
        """Test 3 decimal places"""
        self.assertEqual(formatters.currency(2.0000), "$2.00")
    def test_zero(self):
        """Test 0"""
        self.assertEqual(formatters.currency(0), "$0.00")
class VolunteerLevelFormatter(TestCase):
"""Contains test cases for the formatter"""
    def test_vl_formatter1(self):
        """Test when volunteer level is 12"""
        self.assertEqual(formatters.volunteer_level(12), "2")
    def test_vl_formatter2(self):
        """Test when volunteer level is -1"""
        self.assertEqual(formatters.volunteer_level(-1), "0")
    def test_vl_formatter3(self):
        """Test when volunteer level is 0"""
        self.assertEqual(formatters.volunteer_level(0), "1")
class TaskTest(TestCase):
"""Tests TaskForm Validators"""
form_data_template = {
"task_title": "Testing!",
"task_description": "Test Description",
"task_capacity": "1",
"event_date": "2020-12-12",
"time_to_complete": "1",
"donation_amount": "0",
"event_address": "Down the road"}
def test_invalid_capacity(self):
"""Tests that tasks with negative capacity fails."""
user = User.objects.create_user(
username='testuser', password='<PASSWORD>')
form_data = self.form_data_template.copy()
form_data["task_capacity"] = "-1"
form_data["owner"] = user.account
form = TaskForm(form_data)
self.assertEqual(
form.errors['task_capacity'],
[u"Task capacity cannot be negative."])
self.assertFalse(form.is_valid())
def test_invalid_date(self):
"""Tests that tasks with date in the past fail."""
user = User.objects.create_user(
username='testuser', password='<PASSWORD>')
form_data = self.form_data_template.copy()
form_data["event_date"] = "2020-08-08"
form_data["owner"] = user.account
form = TaskForm(form_data)
self.assertEqual(
form.errors['event_date'],
[u"Event cannot be in the past!"])
def test_valid_date(self):
"""Tests that tasks with valid dates pass."""
user = User.objects.create_user(
username='testuser', password='<PASSWORD>')
form_data = self.form_data_template.copy()
form_data["event_date"] = "2021-09-09"
form_data["owner"] = user.account
form = TaskForm(form_data)
self.assertTrue(form.is_valid())
def test_invalid_time_to_complete(self):
"""Tests that tasks with negative time_to_complete fail."""
user = User.objects.create_user(
username='testuser', password='<PASSWORD>')
form_data = self.form_data_template.copy()
form_data["time_to_complete"] = "-1"
form_data["owner"] = user.account
form = TaskForm(form_data)
self.assertEqual(
form.errors['time_to_complete'],
[u"Time to complete must be a positive number."])
def test_too_high_donation(self):
"""Tests that tasks with donations over $200 maximum fail."""
user = User.objects.create_user(
username='testuser', password='<PASSWORD>')
form_data = self.form_data_template.copy()
form_data["donation_amount"] = "1001"
form_data["owner"] = user.account
form = TaskForm(form_data)
self.assertEqual(
form.errors['donation_amount'],
[u"1001.0 is not within donation range of 0 - 200"])
def test_too_low_donation(self):
"""Tests that tasks with donations over $200 maximum fail."""
user = User.objects.create_user(
username='testuser', password='<PASSWORD>')
form_data = self.form_data_template.copy()
form_data["donation_amount"] = "-5"
form_data["owner"] = user.account
form = TaskForm(form_data)
self.assertEqual(
form.errors['donation_amount'],
[u"-5.0 is not within donation range of 0 - 200"])
| 4,829 |
DjangoWeb/core/models.py
|
LUIGHI1308/CAOS-NEWS
| 0 |
2172860
|
from django.db import models
class Categoria(models.Model):
idCategoria= models.IntegerField(primary_key=True, verbose_name='Id de categoria')
nombreCategoria= models.CharField(max_length=50, verbose_name='Nombre de la categoria')
def __str__(self):
return self.nombreCategoria
class registro_usuario(models.Model):
username= models.CharField(max_length=20,primary_key=True, verbose_name='Username')
mail= models.CharField(max_length=100,verbose_name='Correo')
password= models.CharField(max_length=20, verbose_name='Password')
def __str__(self):
return self.username
| 618 |
books_api/core/tests.py
|
kennyaires/olist-challenge
| 1 |
2172111
|
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from .models import Author, Book
from .serializers import AuthorSerializer, BookSerializer
AUTHORS_URL = reverse('core:author-list')
BOOKS_URL = reverse('core:book-list')
class PublicApiTests(TestCase):
def setUp(self):
self.client = APIClient()
# Creates test authors
Author.objects.bulk_create([
Author(name="<NAME>"),
Author(name="<NAME>"),
Author(name="<NAME>")
])
def test_retrieve_authors_list(self):
"""Test retrieving the list of authors"""
res = self.client.get(AUTHORS_URL)
authors = Author.objects.all()
serializer = AuthorSerializer(authors, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_create_and_get_book_successful(self):
"""Test creating a new book and getting info of it"""
payload = dict(name='<NAME>', edition='5',
publication_year=1999, authors=[3])
self.client.post(BOOKS_URL, payload)
exists = Book.objects.filter(name=payload['name']).exists()
self.assertTrue(exists)
res = self.client.get(BOOKS_URL)
serializer = BookSerializer(Book.objects.filter(
name=payload['name'])
.first())
self.assertIn(serializer.data, res.data)
| 1,518 |
project_python-4.4.py
|
geromahony/da2019-applied-databases
| 0 |
2172873
|
import project_pythonDB
import pymysql
def menu_select_1():
# User is shown first
# 15 cities in world database
print('='*60)
print('| ' + '{:3s}'.format('ID') + ' | ' +
'{:16s}'.format('Name') + ' | ' +
'{:16s}'.format('District') + ' | ' +
'{:12s}'.format('Population') + ' |')
print('='*60)
cities = project_pythonDB.get_15_cities()
for city in cities:
print('| ' + '{:3d}'.format(city['ID']) + ' | ' +
'{:16s}'.format(city['Name']) + ' | ' +
'{:16s}'.format(city['District']) + ' | ' +
'{:12d}'.format(city['Population']) + ' |' )
print('='*60)
def menu_select_2():
equal_syms = ['<','>','=']
print('Cities by Population')
print('--------------------')
while True:
equality = input("Enter < > or = : ")
if equality in equal_syms:
break
else:
print('Incorrect Symbol, Try Again ')
while True:
try:
population = int(input("Enter Population: "))
break
except:
print('Please Enter an Integer:')
cities = project_pythonDB.get_cities_by_population(equality,population)
print('='*42)
print('| ' + '{:4s}'.format('ID') + ' | ' +
'{:16s}'.format('Name') + ' | ' +
'{:12s}'.format('Population') + ' |')
print('='*42)
for city in cities:
print('| ' + '{:4d}'.format(city['ID']) + ' | ' +
'{:16s}'.format(city['Name']) + ' | ' +
'{:12d}'.format(city['Population']) + ' |')
print('='*42)
def check_code(codes,code):
for item in codes:
if item['countrycode'] == code:
return True
return False
def menu_select_3():
print('Add New City')
print('------------')
codes = project_pythonDB.get_country_codes()
while True:
country_code = input("Enter Country Code:")
if check_code(codes,country_code):
break
else:
print('Invalid Country Code, Try Again')
city_name = input("Enter City Name:")
district = input("Enter District:")
while True:
try:
population = int(input("Enter Population: "))
break
except:
print('Please Enter an Integer:')
try:
rows = project_pythonDB.add_city(country_code, city_name, district, population)
except pymysql.err.ProgrammingError as e:
print('Programming Error:', e)
except pymysql.err.IntegrityError as e:
print('Insertion Error:',e)
except Exception as e:
print('Error Adding City:',e)
def menu_select_4():
print("Menu 4:")
def menu_select_5():
print("Menu 5:")
def menu_select_6():
print('Countries by Name')
print('-----------------')
name = input('Enter Country Name:')
countries = project_pythonDB.view_country_by_name(name)
print('='*156)
print('| ' + '{:55s}'.format('Name') + ' | ' +
'{:16s}'.format('Continent') + ' | ' +
'{:12s}'.format('Population') + ' | ' +
'{:60s}'.format('Head of State') + ' |')
print('='*156)
for country in countries:
print('| ' + '{:55s}'.format(country['name']) + ' | ' +
'{:16s}'.format(country['continent']) + ' | ' +
'{:12d}'.format(country['population']) + ' | ' +
'{:60s}'.format(country['headofstate']) + ' |')
print('='*156)
def get_pop(country,equality,popu_req):
country_pop = []
if equality == ">":
for item in country:
if item['population'] > popu_req:
country_pop.append(item)
return country_pop
elif equality == "<":
for item in country:
if item['population'] < popu_req:
country_pop.append(item)
return country_pop
elif equality == "=":
for item in country:
if item['population'] == popu_req:
country_pop.append(item)
return country_pop
def menu_select_7():
equal_syms = ['<', '>', '=']
print('Countries by Population')
print('-----------------------')
while True:
equality = input("Enter < > or = : ")
if equality in equal_syms:
break
else:
print('Incorrect Symbol, Try Again ')
while True:
try:
population = int(input("Enter Population: "))
break
except:
print('Please Enter an Integer:')
countries = project_pythonDB.get_country_details()
pop_count = get_pop(countries,equality,population)
print('='*100)
print('| ' + '{:4s}'.format('Code') + ' | ' +
'{:55s}'.format('Name') + ' | ' +
'{:16s}'.format('Continent') + ' | ' +
'{:12s}'.format('Population') + ' |')
print('='*100)
for country in pop_count:
print('| ' + '{:4s}'.format(country['code']) + ' | ' +
'{:55s}'.format(country['name']) + ' | ' +
'{:16s}'.format(country['continent']) + ' | ' +
'{:12d}'.format(country['population']) + ' |')
print('='*100)
def display_menu():
print("MENU")
print("====")
print("1 - View 15 Cities \n" \
"2 - View Cities by Population \n" \
"3 - Add New City \n" \
"4 - Find Car by Engine Size \n" \
"5 - Add New Car \n" \
"6 - View Countries by Name \n" \
"7 - View Countries by Population \n" \
"x - Exit Application")
def main():
while True:
display_menu()
inp = input("Choice:")
if inp == "x":
break
elif inp == "1":
menu_select_1()
elif inp == "2":
menu_select_2()
elif inp == "3":
menu_select_3()
elif inp == "4":
menu_select_4()
elif inp == "5":
menu_select_5()
elif inp == "6":
menu_select_6()
elif inp == "7":
menu_select_7()
if __name__ == "__main__":
main()
| 6,014 |
System/infinit-drive.10s.py
|
uberfastman/bitbar-plugins
| 0 |
2172104
|
#!/usr/bin/env python3
# <bitbar.title>Infinit Drive Journal Stats</bitbar.title>
# <bitbar.version>v1.0</bitbar.version>
# <bitbar.author><NAME></bitbar.author>
# <bitbar.author.github>strages</bitbar.author.github>
# <bitbar.desc>Shows the combined size of all Infinit drive journals in the menu bar.</bitbar.desc>
# <bitbar.image>https://raw.githubusercontent.com/strages/infinit-drive-journal-stats-bitbar-plugin/master/screenshot.png</bitbar.image>
# <bitbar.dependencies>python3</bitbar.dependencies>
# <bitbar.abouturl>https://github.com/strages/infinit-drive-journal-stats-bitbar-plugin</bitbar.abouturl>
import json
import os
import subprocess
p = subprocess.Popen(
['infinit-journal', '--stat', '--script'],
stdout = subprocess.PIPE,
env = {'PATH': '/usr/local/bin:%s' % os.environ['PATH']},
)
out, err = p.communicate()
res = json.loads(out.decode('utf-8'))
total_size = 0
for k, v in res.items():
total_size += v['size']
def GetHumanReadable(size,precision=0):
suffixes=[' B',' KB',' MB',' GB',' TB']
suffixIndex = 0
while size > 1024 and suffixIndex < 4:
suffixIndex += 1 #increment the index of the suffix
size = size/1024.0 #apply the division
return "%.*f%s"%(precision,size,suffixes[suffixIndex])
total_size_readable = GetHumanReadable(total_size)
print(total_size_readable)
| 1,351 |
itelegram/migrations/0001_initial.py
|
YazdanRa/django-itelegram
| 3 |
2172354
|
# Generated by Django 3.1.2 on 2020-10-24 15:47
import django.contrib.postgres.fields
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="TelegramUpdate",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
(
"update_id",
models.CharField(
help_text="This is a unique ID for each Update from Telegram",
max_length=128,
verbose_name="Telegram Update ID",
),
),
(
"bot",
models.CharField(
help_text="Displays the bot which has participated is in this update.",
max_length=32,
verbose_name="Telegram bot",
),
),
("message", models.JSONField(blank=True, null=True, verbose_name="Telegram Update message")),
(
"edited_message",
models.JSONField(blank=True, null=True, verbose_name="Telegram Update edited message"),
),
("channel_post", models.JSONField(blank=True, null=True, verbose_name="Telegram Update channel post")),
(
"edited_channel_post",
models.JSONField(blank=True, null=True, verbose_name="Telegram Update edited channel post"),
),
("inline_query", models.JSONField(blank=True, null=True, verbose_name="Telegram Update inline query")),
(
"chosen_inline_result",
models.JSONField(blank=True, null=True, verbose_name="Telegram Update chosen inline result"),
),
(
"callback_query",
models.JSONField(blank=True, null=True, verbose_name="Telegram Update callback query"),
),
],
options={
"verbose_name": "Telegram Update",
"verbose_name_plural": "Telegram Updates",
},
),
migrations.CreateModel(
name="TelegramUser",
fields=[
(
"id",
models.BigIntegerField(
help_text="Telegram ID is a unique ID for each user which is help us tp identify users.",
primary_key=True,
serialize=False,
verbose_name="Telegram User ID",
),
),
(
"first_name",
models.CharField(blank=True, max_length=255, null=True, verbose_name="Telegram User First name"),
),
(
"last_name",
models.CharField(blank=True, max_length=255, null=True, verbose_name="Telegram User Last name"),
),
(
"username",
models.CharField(blank=True, max_length=255, null=True, verbose_name="Telegram User Username"),
),
(
"is_bot",
models.BooleanField(
default=False,
help_text="Designates whether the account is a telegram bot or a human user.",
verbose_name="Telegram User IsBot",
),
),
(
"language_code",
models.CharField(blank=True, max_length=8, null=True, verbose_name="Telegram User Language code"),
),
(
"bots",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(
help_text="Displays the bots which have been interacted with this user.",
max_length=32,
verbose_name="Telegram bot",
),
size=None,
),
),
("date_met", models.DateTimeField(auto_now_add=True)),
("last_seen", models.DateTimeField(blank=True, null=True)),
("phone_number", models.CharField(max_length=16, unique=True, verbose_name="Phone number")),
(
"site_user",
models.OneToOneField(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="telegram",
to=settings.AUTH_USER_MODEL,
verbose_name="Connected Site User",
),
),
],
options={
"verbose_name": "Telegram User",
"verbose_name_plural": "Telegram Users",
},
),
]
| 5,374 |
mon-estimator/generate/table_comparisons_kneepoint.py
|
prise-3d/figures-generator
| 0 |
2171258
|
import os
import argparse
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import make_interp_spline
M = [5, 11, 15, 21, 25]
scenes = ['p3d_bidir', 'p3d_contemporary-bathroom', 'p3d_crown', 'p3d_villa-lights-on']
scenes_label = ['Bidir', 'Bathroom', 'Crown', 'Villa']
labels = [r'Jung et al.', r'$G$-MoN$_b$', r'$G$-MoN', r'$GG$-MoN', r'$D$-MoN$_p$', r'$G$-MoN$_p$', r'Mean', r'MoN']
row_labels = ['djung', 'gini-binary-mon', 'gini-dmon', 'gini-mon', 'gini-partial-dmon', 'gini-partial-mon', 'mean', 'mon']
order = [6, 7, 0, 1, 2]
mean_index = 6
def main():
parser = argparse.ArgumentParser(description="Create a fully table comparisons")
parser.add_argument('--input', type=str, help='input csv file', required=True)
parser.add_argument('--output', type=str, help='output table file', required=True)
args = parser.parse_args()
p_input = args.input
p_output = args.output
k_ssim_values = [0.6, 0.7, 0.8]
df = pd.read_csv(p_input, sep=";", header=None)
f = open(p_output, 'w')
# start writing table
table_begin = "\\begin{table*}[ht]\n\\centering\n\\tiny\n"
f.write(table_begin)
# # first multicolumns (M, SSIM and estimator)
columns = '|c|l|'
for i in range(len(scenes)):
for k in k_ssim_values:
columns += 'r|'
tabular_begin = "\\begin{tabular}{" + columns + "}\n\\hline\n"
f.write(tabular_begin)
# first header line
first_header_line = f"\\multicolumn{{2}}{{|c|}}{{Scene}}"
second_header_line = f"\\multicolumn{{2}}{{|c|}}{{SSIM}}"
for s_index, scene in enumerate(scenes):
first_header_line += f' & \\multicolumn{{{len(k_ssim_values)}}}{{|c|}}{{{scenes_label[s_index]}}}'
for k in k_ssim_values:
second_header_line += f' & \\multicolumn{{1}}{{|c|}}{{{k}}}'
first_header_line += "\\\\\n"
second_header_line += "\\\\\n"
f.write(first_header_line)
f.write('\\hline\n')
f.write(second_header_line)
prevous_m_mean = None
# display for each M and estimator a specific line
for m in M:
ssim_values = {}
scene_spp_kneepoints = {}
for i in order:
if i not in ssim_values:
ssim_values[i] = []
current_df = df[df.iloc[:, 0].str.contains(f'comparisons-M{m}-{row_labels[i]}')]
for s_index, scene in enumerate(scenes):
scene_df = current_df[df.iloc[:, 1] == scene]
row = scene_df.iloc[0]
                # spline model evaluated over a dense linspace grid
y = np.append(0, row[2:201])
x = np.arange(len(y))
model = make_interp_spline(x, y)
xs = np.linspace(np.min(x), np.max(x), 100000)
ys = model(xs)
ssim_values[i].append(ys)
for i in order:
scene_spp_kneepoints[i] = {}
for s_index, scene in enumerate(scenes):
scene_spp_kneepoints[i][scene] = []
for ssim in k_ssim_values:
spp_index = None
for spp_i, v_ssim in enumerate(ssim_values[i][s_index]):
if v_ssim > ssim:
spp_index = spp_i + 1
break
scene_spp_kneepoints[i][scene].append(spp_index)
# set constant mean values
if prevous_m_mean is None:
prevous_m_mean = scene_spp_kneepoints[mean_index]
# get max expected value
# write lines
counter = 0
for key, arr in scene_spp_kneepoints.items():
line = ""
if counter == 0:
f.write('\\hline\n')
line += f"\\multirow{{{len(order)}}}*{{\\rotatebox{{90}}{{$M = {m}$}}}} & "
else:
line += " & "
line += labels[key]
for s_index, scene in enumerate(scenes):
# if reduced_max == reduced_current:
for k_i, k in enumerate(k_ssim_values):
# get order of values
values = []
for i in order:
if row_labels[i] == 'mean':
current_v = prevous_m_mean[scene][k_i]
else:
current_v = scene_spp_kneepoints[i][scene][k_i]
if current_v == None:
current_v = 1000000
values.append(current_v)
seq = sorted(values)
index = [seq.index(v) for v in values]
if row_labels[key] == 'mean':
spp_text = prevous_m_mean[scene][k_i]
else:
spp_text = scene_spp_kneepoints[key][scene][k_i]
if spp_text == None:
line += f" & NR ({index[counter]+1})"
elif min(values) == spp_text:
line += f" & \\textbf{{{spp_text}}} ({index[counter]+1})"
else:
line += f" & {spp_text} ({index[counter]+1})"
# else:
# line += f" & {reduced_current:.5f} ({index[counter]+1})"
line += " \\\\\n"
f.write(line)
counter += 1
f.write("\\hline\n\\end{tabular}\n\\caption{SSIM comparison for each scene and different $M$ values for $100,000$ samples}\n")
f.write("\\label{table:ssim_kneepoint}\n")
f.write("\\end{table*}\n")
if __name__ == "__main__":
main()
| 5,838 |
fetchCountries.py
|
mvind/nytimes_country_mentions
| 0 |
2170983
|
countries = []
def getCountries():
global countries
with open('countries.txt', 'r') as f:
for line in f:
countries.append(line.replace('\n',''))
return countries
| 197 |
merge_assemblies_by_tiling_path.py
|
huddlej/fasta_tools
| 0 |
2170827
|
#!/bin/env python
"""
"""
import argparse
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import csv
import pysam
import re
# Zero-based index of PSL array output corresponding to the size of the target
# sequence (i.e., the output sequence).
PSL_TSIZE_INDEX=10
def write_records(sequence_name, new_sequences, output, chain):
# Save the new record with all sequences joined by the requested gaps and
# write out all records to the given output file.
new_record = SeqRecord(Seq("".join(new_sequences).rstrip("N")), id=sequence_name, description="")
SeqIO.write((new_record,), output, "fasta")
# Update all chain items with the final target sequence length and output the chain.
new_record_length = len(new_record)
for item in chain:
item[PSL_TSIZE_INDEX] = new_record_length
print "\t".join(map(str, item))
def merge_fasta_by_gaps(assemblies, tiling_path, output_filename, chromosome_suffix, default_gap_size):
"""
Merge all FASTA records from the same organismal chromosome into a single
sequence.
"""
current_chromosome = None
assemblies = pysam.FastaFile(assemblies)
tiling_path_fh = open(tiling_path, "r")
tiling_path_reader = csv.reader(tiling_path_fh, delimiter="\t")
output = open(output_filename, "w")
for path in tiling_path_reader:
chromosome, start, end, contig, contig_start, contig_end = path
start, end, contig_start, contig_end = map(int, (start, end, contig_start, contig_end))
contig_length = assemblies.get_reference_length(contig)
path_length = contig_end - contig_start
if current_chromosome != chromosome:
if current_chromosome is not None:
write_records(new_chromosome, new_sequences, output, chain)
new_chromosome = "%s%s" % (chromosome, chromosome_suffix)
new_sequences = []
chain = []
output_position = 0
current_chromosome = chromosome
previous_end = None
# If the end of the last interval is equal to the start of this
# interval, set the gap size to zero. Otherwise use the default gap
# size.
if previous_end == start:
gap_size = 0
else:
gap_size = default_gap_size
if path_length > 0:
new_sequences.append(assemblies.fetch(contig, contig_start, contig_end) + gap_size * "N")
chain.append([0, 0, 0, 0, 0, 0, 0, 0, "+", new_chromosome, None, output_position, output_position + path_length, contig, contig_length, contig_start, contig_end, 1, path_length, 0, 0])
output_position = output_position + path_length + gap_size
# Set the previous end for the next iteration.
previous_end = end
# Write out the final batch of sequences.
if len(new_sequences) > 0:
write_records(new_chromosome, new_sequences, output, chain)
tiling_path_fh.close()
output.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("assemblies", help="FASTA file of assemblies with a tiling path")
parser.add_argument("tiling_path", help="tiling path of assemblies in BED 3+3 format with reference coordinates in the first three columns and corresponding assembly coordinates in the last three columns")
parser.add_argument("output_file", help="FASTA file of assemblies merged by gaps across the given tiling path")
parser.add_argument("chromosome_suffix", help="suffix to add to each chromosome name in the output sequence to distinguish it from the standard reference")
parser.add_argument("--gap_size", type=int, default=1000, help="size of gaps in bases to insert between each input FASTA")
args = parser.parse_args()
merge_fasta_by_gaps(args.assemblies, args.tiling_path, args.output_file, args.chromosome_suffix, args.gap_size)
| 3,920 |
src/birdland/__init__.py
|
wrwetzel/Birdland
| 0 |
2171517
|
# ----------------------------------------------------------------------------
# __init__.py - Indicates that a directory is a package.
# Executed when package containing it is imported.
# i.e.
# from birdland import birdland
# allows you to define any variable at the package level.
# ----------------------------------------------------------------------------
import sys
import os
from pathlib import Path
# ------------------------------------------------------------
# WRW 16 Mar 2022 - Another approach. No guessing. Packaging places
# a file, 'Package_Type.txt', describing the packaging type.
program_directory = Path( __file__ ).parent.resolve().as_posix()
# print( "/// __init__.py __file__", __file__ )
# print( "/// __init__.py program_directory", program_directory )
# print( "/// __init__.py at top, sys.path", sys.path )
# print( "/// __init__.py at top, cwd", os.getcwd() )
if Path( program_directory, 'Package_Type_Development.txt' ).is_file():
Package_Type = 'Development'
elif Path( program_directory, 'Package_Type_Setuptools.txt' ).is_file():
Package_Type = 'Setuptools'
elif Path( program_directory, 'Package_Type_PyInstaller.txt' ).is_file():
Package_Type = 'PyInstaller'
elif Path( program_directory, 'Package_Type_Nuitka.txt' ).is_file():
Package_Type = 'Nuitka'
elif Path( program_directory, 'Package_Type_Tar.txt' ).is_file():
Package_Type = 'Tar'
else:
print( f"ERROR-DEV: 'Package_Type_*.txt' file not found at '__init__.py' in {program_directory}", file=sys.stderr )
sys.exit(1) # Doesn't do anything
# print( f"/// __init__.py: Package_Type: {Package_Type}" )
# ------------------------------------------------------------
if Package_Type == 'Setuptools':
sys.path.append( program_directory )
os.chdir( program_directory )
# print()
# print( "/// __init__.py before import, sys.path", sys.path )
# print( "/// __init__.py before import, cwd", os.getcwd() )
# print()
from birdland import birdland
from birdland import build_tables
from birdland import diff_index
def start_birdland():
sys.exit( birdland.main() )
def start_build_tables():
sys.exit( build_tables.main() )
def start_diff_index():
sys.exit( diff_index.main() )
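# These wrappers are presumably referenced as setuptools console_scripts entry
# points, e.g. (hypothetical): birdland = birdland:start_birdland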
| 2,345 |
Source/util/ModelUtil.py
|
lanmaoxinqing/python-markdown-to-model-generator
| 0 |
2172484
|
# -*- coding: UTF-8 -*-
import re
import os
import sys
def enum(**enums):
return type('Enum', (), enums)
PropertyTypes = enum(
Bool = 1,
Int = 2,
Long = 3,
Point = 4,
String = 5,
List = 900,
Custom = 999,
)
typeDict = {
# names to convert            # resulting property type            # expression needed for replacement
('bool', 'boolean') : PropertyTypes.Bool,
('int', 'integer') : PropertyTypes.Int,
('long', 'long long') : PropertyTypes.Long,
('float', 'cgfloat', 'double') : PropertyTypes.Point,
('string',) : PropertyTypes.String,
('list', 'array') : PropertyTypes.List,
}
def getFiles(path) :
if os.path.isfile(path) :
return [path]
paths = []
for (dirPath, dirNames, fileNames) in os.walk(path) :
for fileName in fileNames :
absolutePath = os.path.join(dirPath, fileName)
if os.path.splitext(absolutePath)[1] == '.md' :
paths.append(absolutePath)
return paths
def readFile(filePath) :
file = open(filePath)
lines = file.readlines()
return lines
tableState = 0  # parser state: 0 = outside table, 1 = header row seen, 2 = separator row seen
def isTableLine(line) :
global tableState
pattern = re.compile('^((.+)\|)+((.+))$')
if not pattern.match(line) :
tableState = 0
return False
# print line
    #first match: the table header row
if tableState == 0 :
tableState = 1
return False
    #second match: the table separator/formatting row
if tableState == 1 :
tableState = 2
return False
if tableState == 2 :
return True
def titleParser(line) :
title = re.search('^# (.+)$', line)
if not title:
return None
titleStr = title.group(1).strip()
if titleStr == 'Title' :
return None
return titleStr
def descParser(line) :
pass
def lineParser(line) :
params = line.split('|')
if params[0].strip() == "" :
params.pop(0)
if params[-1].strip() == "" :
params.pop(-1)
# print len(params)
    #not 4 fields, so not a property-table row
if len(params) != 4 :
return
name = params[0].strip()
    #name contains no word characters
if not re.search('\w+', name) :
return
type = params[2].strip()
comment = params[3].strip()
return (name, type, comment)
'''
Returns (property type, type name, list nesting level).
'''
def typeParser(originType, listLevel = 0) :
typeStr = originType.strip().lower()
    #no type given, default to String
if not typeStr :
return (PropertyTypes.String, None, listLevel)
    #match list written as [xxx]
match = re.search('\[(.+)\]', typeStr)
if match:
subOriginType = match.group(1)
subTypeResult = typeParser(subOriginType, listLevel + 1)
subType = subTypeResult[1]
currentListLevel = subTypeResult[2]
return (PropertyTypes.List, subType, currentListLevel)
    #match list written as list<xxx>
match = re.search('list\<(.+)\>', typeStr)
if match:
subOriginType = match.group(1)
subTypeResult = typeParser(subOriginType, listLevel + 1)
subType = subTypeResult[1]
currentListLevel = subTypeResult[2]
return (PropertyTypes.List, subType, currentListLevel)
    #match list written as array<xxx>
match = re.search('array\<(.+)\>', typeStr)
if match:
subOriginType = match.group(1)
subTypeResult = typeParser(subOriginType, listLevel + 1)
subType = subTypeResult[1]
currentListLevel = subTypeResult[2]
return (PropertyTypes.List, subType, currentListLevel)
    #match against the predefined type dictionary
for eles in typeDict.keys() :
if typeStr in eles :
return (typeDict[eles], None, listLevel)
    #custom object type
    resultType = originType.strip()  #keep the original capitalization by default
if not re.search('[A-Z]+', resultType) :
        resultType = typeStr.title()  #title-case it if it is all lowercase
return (PropertyTypes.Custom, resultType, listLevel)
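# Hypothetical usage sketch, not part of the original module: illustrates the
# (type, type name, list level) tuples produced by typeParser under the rules
# above; PropertyTypes values are plain ints created by enum().
if __name__ == '__main__' :
    assert typeParser('int') == (PropertyTypes.Int, None, 0)
    assert typeParser('UserInfo') == (PropertyTypes.Custom, 'UserInfo', 0)
    assert typeParser('[Address]') == (PropertyTypes.List, 'Address', 1)
    assert typeParser('list<int>') == (PropertyTypes.List, None, 1)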
| 3,637 |
game_play/flappy_bird/base_v0/utility.py
|
FrankTianTT/AI-Play-Game
| 1 |
2172329
|
# by <NAME>, 2021-1-14
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
from stable_baselines3 import DQN
import torch
import gym
import torch.nn as nn
import os
import gym_flappy_bird
class CnnEvalCallback(EvalCallback):
"""
    An EvalCallback for CnnPolicy that wraps the eval env in VecTransposeImage so its image-observation layout matches the training env (avoiding the "type error" between them).
"""
def __init__(self,
eval_env,
callback_on_new_best=None,
n_eval_episodes: int = 5,
eval_freq: int = 10000,
log_path: str = None,
best_model_save_path: str = None,
deterministic: bool = True,
render: bool = False,
verbose: int = 1):
super().__init__(eval_env=eval_env,
callback_on_new_best=callback_on_new_best,
n_eval_episodes=n_eval_episodes,
eval_freq=eval_freq,
log_path=log_path,
best_model_save_path=best_model_save_path,
deterministic=deterministic,
render=render,
verbose=verbose)
self.eval_env = VecTransposeImage(self.eval_env)
class CustomCNN(BaseFeaturesExtractor):
"""
:param observation_space: (gym.Space)
:param features_dim: (int) Number of features extracted.
        This corresponds to the number of units in the last layer.
"""
def __init__(self, observation_space: gym.spaces.Box, features_dim: int = 256):
super(CustomCNN, self).__init__(observation_space, features_dim)
# We assume CxHxW images (channels first)
# Re-ordering will be done by pre-preprocessing or wrapper
n_input_channels = observation_space.shape[0]
self.Conv2d1 = nn.Conv2d(n_input_channels, 16, kernel_size=8, stride=4, padding=0)
self.relu1 = nn.ReLU()
self.Conv2d2 = nn.Conv2d(16, 32, kernel_size=8, stride=4, padding=0)
self.relu2 = nn.ReLU()
self.Conv2d3 = nn.Conv2d(32, 32, kernel_size=6, stride=3, padding=0)
self.relu3 = nn.ReLU()
self.Conv2d4 = nn.Conv2d(32, 32, kernel_size=4, stride=2, padding=0)
self.relu4 = nn.ReLU()
self.flatten = nn.Flatten()
# Compute shape by doing one forward pass
with torch.no_grad():
n_flatten = self.forward_cnn(
torch.as_tensor(observation_space.sample()[None]).float()
).shape[1]
self.linear1 = nn.Linear(n_flatten, features_dim)
self.relu5 = nn.ReLU()
def forward_cnn(self, x):
x = self.Conv2d1(x)
x = self.relu1(x)
x = self.Conv2d2(x)
x = self.relu2(x)
x = self.Conv2d3(x)
x = self.relu3(x)
x = self.Conv2d4(x)
x = self.relu4(x)
x = self.flatten(x)
return x
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_cnn(x)
x = self.linear1(x)
x = self.relu5(x)
return x
if __name__ == "__main__":
# env = gym.make("FlappyBird-v0")
# policy_kwargs = dict(
# features_extractor_class=CustomCNN,
# features_extractor_kwargs=dict(features_dim=128),
# )
# model = DQN(policy="CnnPolicy", env=env,)
#
# model_customized = DQN(policy="CnnPolicy", env=env, policy_kwargs=policy_kwargs)
#
# print(model_customized.policy)
#
# total_params = sum(p.numel() for p in model.policy.parameters())
# total_trainable_params = sum(p.numel() for p in model.policy.parameters() if p.requires_grad)
# print('model:\ntotal parameters: {}, training parameters: {}'.format(total_params, total_trainable_params))
# # total parameters: 125984068, training parameters: 125984068
#
# total_params = sum(p.numel() for p in model_customized.policy.parameters())
# total_trainable_params = sum(p.numel() for p in model_customized.policy.parameters() if p.requires_grad)
# print('customized model:\ntotal parameters: {}, training parameters: {}'.format(total_params, total_trainable_params))
# # total parameters: 203748, training parameters: 203748
activation = {}
def get_activation(name):
def hook(model, input, output):
activation[name] = output.detach()
return hook
env = gym.make("FlappyBird-v0")
model = DQN.load(os.path.join(os.path.dirname(__file__), 'logs/best_model.zip'))
model.policy.q_net.features_extractor.Conv2d1.register_forward_hook(
get_activation('Conv2d1'))
model.policy.q_net.features_extractor.relu2.register_forward_hook(
get_activation('relu2'))
model.policy.q_net.features_extractor.relu3.register_forward_hook(
get_activation('relu3'))
model.policy.q_net.features_extractor.relu4.register_forward_hook(
get_activation('relu4'))
model.policy.q_net.features_extractor.relu5.register_forward_hook(
get_activation('relu5'))
obs = env.reset()
action, _ = model.predict(obs)
print(activation['Conv2d1'])
| 5,252 |
lime/ast.py
|
ComedicChimera/lime
| 0 |
2172852
|
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, List, Text
@dataclass
class TextPosition:
line: int
start_col: int
end_col: int
class AST(ABC):
@abstractmethod
def position(self) -> TextPosition:
pass
@dataclass
class LimeIdentifier(AST):
name: str
line: int
col: int
def position(self) -> TextPosition:
return TextPosition(self.line, self.col, self.col + len(self.name))
def __repr__(self):
return self.name
@dataclass
class LimeValue(AST):
value: Any
pos: TextPosition
def position(self) -> TextPosition:
return self.pos
def __repr__(self):
if not self.value:
return "()"
return self.value.__repr__()
@dataclass
class LimeList(AST):
exprs: List[AST]
pos: TextPosition
def position(self) -> TextPosition:
return self.pos
def __repr__(self):
return self.exprs.__repr__()
@dataclass
class LimeFuncApp(AST):
func: AST
arg: AST
def position(self) -> TextPosition:
return TextPosition(
self.func.position().line,
self.func.position().start_col,
self.arg.position().end_col
)
def __repr__(self):
return f'({self.func} {self.arg})'
@dataclass
class LimeFuncAbs(AST):
args: List[str]
expr: AST
args_start_col: int
def position(self) -> TextPosition:
return TextPosition(
self.expr.position().line,
self.args_start_col,
self.expr.position().end_col
)
def __repr__(self):
return "".join("\\{arg}." for arg in self.args) + self.expr
@dataclass
class LimeBind(AST):
var_name: str
expr: AST
var_start_col: int
def position(self) -> TextPosition:
return TextPosition(
self.expr.position().line,
            self.var_start_col,
self.expr.position().end_col
)
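# Hypothetical usage sketch, not part of the original module: builds the AST for
# the application (f x) and exercises the position()/__repr__ methods defined above.
if __name__ == '__main__':
    f = LimeIdentifier('f', line=1, col=0)
    x = LimeIdentifier('x', line=1, col=2)
    app = LimeFuncApp(f, x)
    print(app)             # -> (f x)
    print(app.position())  # -> TextPosition(line=1, start_col=0, end_col=3)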
| 1,979 |
Q309.py
|
Linchin/python_leetcode_git
| 0 |
2172601
|
"""
Q309
Best Time to Buy and Sell Stock with Cooldown
Medium
Say you have an array for which the ith element is the
price of a given stock on day i.
Design an algorithm to find the maximum profit. You may
complete as many transactions as you like (ie, buy one and
sell one share of the stock multiple times) with the
following restrictions:
You may not engage in multiple transactions at the same
time (ie, you must sell the stock before you buy again).
After you sell your stock, you cannot buy stock on next
day. (ie, cooldown 1 day)
"""
from typing import List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
n = len(prices)
if n == 0:
return 0
        # hold[i]: max profit after day i while holding a share
hold = [0] * n
hold[0] = -prices[0]
        # empty[i]: max profit after day i holding no share
empty = [0] * n
for i in range(1, n-1):
hold[i] = max(hold[i-1], empty[i-2]-prices[i])
empty[i] = max(empty[i-1], hold[i-1]+prices[i])
return max(empty[n-2], hold[n-2]+prices[n-1])
sol = Solution()
prices = [1,2,3,0,2]
print(sol.maxProfit(prices))
| 1,132 |
research/object_detection/frames_inspector.py
|
agonzgarc/ADL
| 0 |
2172402
|
import pdb
import random
import numpy as np
import functools
import json
import os
import tensorflow as tf
import imp
import pickle
from object_detection import trainer
from object_detection import selection_funcs as sel
from object_detection import evaluator_al as evaluator
from object_detection.builders import dataset_builder
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.utils import config_util
from object_detection.save_subset_imagenetvid_tf_record import save_tf_record
from object_detection.utils import label_map_util
from object_detection.utils import np_box_ops
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
from pycocotools import mask
from PIL import Image
from object_detection.utils import visualization_utils as vis_utils
tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.set_verbosity(tf.logging.INFO)
#tf.logging.set_verbosity(tf.logging.WARN)
flags = tf.app.flags
flags.DEFINE_string('data_dir', '/datatmp/Experiments/Javad/tf/data/ILSVRC/',
'Directory that contains data.')
FLAGS = flags.FLAGS
False_PN='FP'
data_dir='/datatmp/Experiments/Javad/tf/data/ILSVRC/'
#train_dir='/datatmp/Experiments/Javad/tf/model/'+False_PN+'_gtR1cycle1/'
eval_train_dir='/datatmp/Experiments/Javad/tf/model/R1cycle0/'+False_PN+'_gtR1cycle1eval_train/'
current_cycle_path='/datatmp/Experiments/Javad/tf/model/R1cycle0/'
next_cycle_path='/datatmp/Experiments/Javad/tf/model/'+False_PN+'_gtR1cycle1/'
#eval_train_dir='/datatmp/Experiments/Javad/tf/model/'+False_PN+'_gtR1cycle1/'+False_PN+'_gtR1cycle2eval_train/'
#current_cycle_path='/datatmp/Experiments/Javad/tf/model/'+False_PN+'_gtR1cycle1/'
#next_cycle_path='/datatmp/Experiments/Javad/tf/model/'+False_PN+'_gtR1cycle2/'
current_active_set=[]
next_active_set=[]
with open(current_cycle_path + 'active_set.txt', 'r') as f:
for line in f:
current_active_set.append(int(line))
with open(next_cycle_path + 'active_set.txt', 'r') as f:
for line in f:
next_active_set.append(int(line))
newly_added_frames=[f for f in next_active_set if f not in current_active_set]
data_info = {'data_dir': FLAGS.data_dir,
'annotations_dir':'Annotations',
'label_map_path': './data/imagenetvid_label_map.pbtxt',
'set': 'train_150K_clean'}
def get_dataset(data_info):
""" Gathers information about the dataset given and stores it in a
structure at the frame level.
Args:
data_info: dictionary with information about the dataset
Returns:
      dataset: structure in the form of a list; each element corresponds to a
        frame and is a dictionary with multiple keys
videos: list of videos
"""
dataset = []
path_file = os.path.join(data_info['data_dir'],'AL', data_info['set'] + '.txt')
with open(path_file,'r') as pF:
idx = 0
for line in pF:
# Separate frame path and clean annotation flag
split_line = line.split(' ')
# Remove trailing \n
verified = True if split_line[1][:-1] == '1' else False
path = split_line[0]
split_path = path.split('/')
filename = split_path[-1]
video = split_path[-3]+'/'+split_path[-2]
dataset.append({'idx':idx,'filename':filename,'video':video,'verified':verified})
idx+=1
videos = set([d['video'] for d in dataset])
return dataset,videos
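# Hypothetical example of one input line parsed above (frame path + verified flag):
#   "ILSVRC2015_VID_train_0000/ILSVRC2015_train_00000000/000010.JPEG 1"
# which yields video="ILSVRC2015_VID_train_0000/ILSVRC2015_train_00000000",
# filename="000010.JPEG" and verified=True.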
dataset,videos = get_dataset(data_info)
def normalize_box(box,w,h):
""" Input: [ymin, xmin,ymax,xmax]
Output: normalized by width and height of image
"""
nbox = box.copy()
nbox[:,0] = nbox[:,0]/h
nbox[:,1] = nbox[:,1]/w
nbox[:,2] = nbox[:,2]/h
nbox[:,3] = nbox[:,3]/w
return nbox
def augment_active_set(dataset,videos,active_set,num_neighbors=5):
""" Augment set of indices in active_set by adding a given number of neighbors
Arg:
dataset: structure with information about each frames
videos: list of video names
active_set: list of indices of active_set
num_neighbors: number of neighbors to include
Returns:
aug_active_set: augmented list of indices with neighbors
"""
aug_active_set = []
# We need to do this per video to keep limits in check
for v in videos:
frames_video = [f['idx'] for f in dataset if f['video'] == v]
max_frame = np.max(frames_video)
idx_videos_active_set = [idx for idx in frames_video if idx in active_set]
idx_with_neighbors = [i for idx in idx_videos_active_set for i in range(idx-num_neighbors,idx+num_neighbors+1) if i >= 0 and i
<= max_frame ]
aug_active_set.extend(idx_with_neighbors)
return aug_active_set
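# Hypothetical illustration, not part of the original script: for a single
# 10-frame video 'v0' (a toy_dataset of dicts with 'idx' and 'video' keys) and
# active_set=[4], two neighbors on each side are added, clipped to the video's
# frame range:
#   augment_active_set(toy_dataset, {'v0'}, [4], num_neighbors=2) -> [2, 3, 4, 5, 6]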
# loading detected boxes
if os.path.exists(eval_train_dir + 'detections.dat'):
with open(eval_train_dir + 'detections.dat','rb') as infile:
###### pdb remove latinq
detections = pickle.load(infile)
#detected_boxes = pickle.load(infile,encoding='latin1')
aug_active_set = augment_active_set(dataset,videos,current_active_set,num_neighbors=5)
unlabeled_set = [f['idx'] for f in dataset if f['idx'] not in aug_active_set and f['verified']]
save_tf_record(data_info,unlabeled_set)
BOXES = detections['boxes']
SCORES= detections['scores']
score_thresh=0.5
j=1
pdb.set_trace()
for f in newly_added_frames:
anno_ind=unlabeled_set.index(f)
ind=SCORES[anno_ind] > score_thresh # Extracting boxes with score greater than threshold
boxes=np.array(BOXES[anno_ind])[ind,:]
v=dataset[f]['video']
video_dir = os.path.join(data_dir,'Data','VID','train',v)
curr_im = Image.open(os.path.join(video_dir,dataset[f]['filename']))
im_w,im_h = curr_im.size
vis_utils.draw_bounding_boxes_on_image(curr_im,normalize_box(boxes,im_w,im_h))
curr_im.save(data_dir+False_PN+'_samples'+'/'+str(j)+'_'+dataset[f]['filename'])
print(v)
print(dataset[f]['filename'])
j+=1
| 6,066 |
ietf/group/urls_info_details.py
|
MatheusProla/Codestand
| 2 |
2172861
|
from django.views.generic import RedirectView
from ietf.community import views as community_views
from ietf.doc import views_material as material_views
from ietf.group import views, views_edit, views_review, milestones as milestone_views
from ietf.utils.urls import url
urlpatterns = [
url(r'^$', views.group_home),
url(r'^documents/txt/$', views.group_documents_txt),
url(r'^documents/$', views.group_documents),
url(r'^documents/manage/$', community_views.manage_list),
url(r'^documents/csv/$', community_views.export_to_csv),
url(r'^documents/feed/$', community_views.feed),
url(r'^documents/subscription/$', community_views.subscription),
url(r'^charter/$', views.group_about),
url(r'^about/$', views.group_about),
url(r'^about/status/$', views.group_about_status),
url(r'^about/status/edit/$', views.group_about_status_edit),
url(r'^about/status/meeting/(?P<num>\d+)/$', views.group_about_status_meeting),
url(r'^history/$',views.history),
url(r'^email/$', views.email),
url(r'^deps/(?P<output_type>[\w-]+)/$', views.dependencies),
url(r'^meetings/$', views.meetings),
url(r'^edit/$', views_edit.edit, {'action': "edit"}),
url(r'^edit/(?P<field>\w+)/?$', views_edit.edit, {'action': "edit"}),
url(r'^conclude/$', views_edit.conclude),
url(r'^milestones/$', milestone_views.edit_milestones, {'milestone_set': "current"}, name='ietf.group.milestones.edit_milestones;current'),
url(r'^milestones/charter/$', milestone_views.edit_milestones, {'milestone_set': "charter"}, name='ietf.group.milestones.edit_milestones;charter'),
url(r'^milestones/charter/reset/$', milestone_views.reset_charter_milestones, None, 'ietf.group.milestones.reset_charter_milestones'),
url(r'^workflow/$', views_edit.customize_workflow),
url(r'^materials/$', views.materials),
url(r'^materials/new/$', material_views.choose_material_type),
url(r'^materials/new/(?P<doc_type>[\w-]+)/$', material_views.edit_material, { 'action': "new" }, 'ietf.doc.views_material.edit_material'),
url(r'^archives/$', views.derived_archives),
url(r'^photos/$', views.group_photos),
url(r'^reviews/$', views_review.review_requests),
url(r'^reviews/manage/(?P<assignment_status>assigned|unassigned)/$', views_review.manage_review_requests),
url(r'^reviews/email-assignments/$', views_review.email_open_review_assignments),
url(r'^reviewers/$', views_review.reviewer_overview),
url(r'^reviewers/(?P<reviewer_email>[\w%+-.@]+)/settings/$', views_review.change_reviewer_settings),
url(r'^secretarysettings/$', views_review.change_review_secretary_settings),
url(r'^email-aliases/$', RedirectView.as_view(pattern_name=views.email,permanent=False),name='ietf.group.urls_info_details.redirect.email'),
]
| 2,796 |
SMS-Automation.py
|
Rakesheshwaraiah/Python_Scripts
| 0 |
2172241
|
'''This script lets the user send an SMS to a phone number via the textbelt API.
Please enter the phone number with its country code; for example, an Indian phone number is +910000000000.'''
import requests
rsep=requests.post('https://textbelt.com/text',{
'phone': input("Enter phone number"),
'message': input('Enter message'),
'key': 'textbelt'
})
if rsep.json()['success']:
print("Message is sent")
else:
print("Something went wrong. Please try again")
print(rsep.json())
| 491 |
alipay/aop/api/domain/AlipayCommercePoiPowerbankUploadModel.py
|
antopen/alipay-sdk-python-all
| 213 |
2172391
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommercePoiPowerbankUploadModel(object):
def __init__(self):
self._address_desc = None
self._can_borrow = None
self._can_borrow_cnt = None
self._contact_number = None
self._entity_code = None
self._entity_name = None
self._ext_properties = None
self._latitude = None
self._longitude = None
self._office_hours_desc = None
self._upload_time = None
@property
def address_desc(self):
return self._address_desc
@address_desc.setter
def address_desc(self, value):
self._address_desc = value
@property
def can_borrow(self):
return self._can_borrow
@can_borrow.setter
def can_borrow(self, value):
self._can_borrow = value
@property
def can_borrow_cnt(self):
return self._can_borrow_cnt
@can_borrow_cnt.setter
def can_borrow_cnt(self, value):
self._can_borrow_cnt = value
@property
def contact_number(self):
return self._contact_number
@contact_number.setter
def contact_number(self, value):
self._contact_number = value
@property
def entity_code(self):
return self._entity_code
@entity_code.setter
def entity_code(self, value):
self._entity_code = value
@property
def entity_name(self):
return self._entity_name
@entity_name.setter
def entity_name(self, value):
self._entity_name = value
@property
def ext_properties(self):
return self._ext_properties
@ext_properties.setter
def ext_properties(self, value):
self._ext_properties = value
@property
def latitude(self):
return self._latitude
@latitude.setter
def latitude(self, value):
self._latitude = value
@property
def longitude(self):
return self._longitude
@longitude.setter
def longitude(self, value):
self._longitude = value
@property
def office_hours_desc(self):
return self._office_hours_desc
@office_hours_desc.setter
def office_hours_desc(self, value):
self._office_hours_desc = value
@property
def upload_time(self):
return self._upload_time
@upload_time.setter
def upload_time(self, value):
self._upload_time = value
def to_alipay_dict(self):
params = dict()
if self.address_desc:
if hasattr(self.address_desc, 'to_alipay_dict'):
params['address_desc'] = self.address_desc.to_alipay_dict()
else:
params['address_desc'] = self.address_desc
if self.can_borrow:
if hasattr(self.can_borrow, 'to_alipay_dict'):
params['can_borrow'] = self.can_borrow.to_alipay_dict()
else:
params['can_borrow'] = self.can_borrow
if self.can_borrow_cnt:
if hasattr(self.can_borrow_cnt, 'to_alipay_dict'):
params['can_borrow_cnt'] = self.can_borrow_cnt.to_alipay_dict()
else:
params['can_borrow_cnt'] = self.can_borrow_cnt
if self.contact_number:
if hasattr(self.contact_number, 'to_alipay_dict'):
params['contact_number'] = self.contact_number.to_alipay_dict()
else:
params['contact_number'] = self.contact_number
if self.entity_code:
if hasattr(self.entity_code, 'to_alipay_dict'):
params['entity_code'] = self.entity_code.to_alipay_dict()
else:
params['entity_code'] = self.entity_code
if self.entity_name:
if hasattr(self.entity_name, 'to_alipay_dict'):
params['entity_name'] = self.entity_name.to_alipay_dict()
else:
params['entity_name'] = self.entity_name
if self.ext_properties:
if hasattr(self.ext_properties, 'to_alipay_dict'):
params['ext_properties'] = self.ext_properties.to_alipay_dict()
else:
params['ext_properties'] = self.ext_properties
if self.latitude:
if hasattr(self.latitude, 'to_alipay_dict'):
params['latitude'] = self.latitude.to_alipay_dict()
else:
params['latitude'] = self.latitude
if self.longitude:
if hasattr(self.longitude, 'to_alipay_dict'):
params['longitude'] = self.longitude.to_alipay_dict()
else:
params['longitude'] = self.longitude
if self.office_hours_desc:
if hasattr(self.office_hours_desc, 'to_alipay_dict'):
params['office_hours_desc'] = self.office_hours_desc.to_alipay_dict()
else:
params['office_hours_desc'] = self.office_hours_desc
if self.upload_time:
if hasattr(self.upload_time, 'to_alipay_dict'):
params['upload_time'] = self.upload_time.to_alipay_dict()
else:
params['upload_time'] = self.upload_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommercePoiPowerbankUploadModel()
if 'address_desc' in d:
o.address_desc = d['address_desc']
if 'can_borrow' in d:
o.can_borrow = d['can_borrow']
if 'can_borrow_cnt' in d:
o.can_borrow_cnt = d['can_borrow_cnt']
if 'contact_number' in d:
o.contact_number = d['contact_number']
if 'entity_code' in d:
o.entity_code = d['entity_code']
if 'entity_name' in d:
o.entity_name = d['entity_name']
if 'ext_properties' in d:
o.ext_properties = d['ext_properties']
if 'latitude' in d:
o.latitude = d['latitude']
if 'longitude' in d:
o.longitude = d['longitude']
if 'office_hours_desc' in d:
o.office_hours_desc = d['office_hours_desc']
if 'upload_time' in d:
o.upload_time = d['upload_time']
return o
| 6,236 |
assignments/12_csv_grad/csvfilter.py
|
cvk1988/biosystems-analytics-2020
| 0 |
2169881
|
#!/usr/bin/env python3
"""
Author : cory
Date : 2020-05-07
Purpose: Rock the Casbah
"""
import argparse
import os
import sys
import csv
from tabulate import tabulate
import re
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Filter delimited records',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-f',
'--file',
help='Input file',
metavar='FILE',
type=argparse.FileType('r'),
required=True)
parser.add_argument('-v',
'--val',
help='Value for filter',
metavar='val',
type=str,
required=True,
default=None)
parser.add_argument('-c',
'--col',
help='Column for filter',
metavar='col',
type=str,
default='')
parser.add_argument('-o',
'--outfile',
help='Output filename',
metavar='OUTFILE',
type=argparse.FileType('wt'),
default='out.csv')
parser.add_argument('-d',
'--delimiter',
help='Input delimiter',
metavar='delim',
type=str,
default=',')
args = parser.parse_args()
return args
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
col = args.col
val = args.val
reader = csv.DictReader(args.file, delimiter=args.delimiter)
val_col = ",".join(reader.fieldnames)
seqs_wr = 0
writer = csv.DictWriter(args.outfile, fieldnames=reader.fieldnames)
writer.writeheader()
if re.search(col, val_col, re.IGNORECASE):
for rec in reader:
if col and val:
text = rec[col]
if re.search(col, str(rec.keys()), re.IGNORECASE):
if re.search(val, text, re.IGNORECASE):
writer.writerow(rec)
seqs_wr += 1
elif val:
if re.search(val, str(rec.values()), re.IGNORECASE):
writer.writerow(rec)
seqs_wr += 1
else:
sys.exit(f'--col \"{col}\" not a valid column! \n Choose from {",".join(reader.fieldnames)}')
print(f'Done, wrote {seqs_wr} to \"{args.outfile.name}\".')
# --------------------------------------------------
if __name__ == '__main__':
main()
| 2,862 |
libraries/uFire_SHT20/python/RaspberryPi/shell.py
|
joaopedrovbs/arduino-support-test
| 17 |
2172808
|
import cmd
from SHT20 import SHT20
sht20 = SHT20()
class SHT20Shell(cmd.Cmd):
prompt = '> '
def do_temperature(self, a):
print("%.2f" %sht20.temperature())
def do_humidity(self, a):
print("%.2f" %sht20.humidity())
SHT20Shell().cmdloop()
| 298 |
backend/cli.py
|
hmatalonga/restaurant-recommender-system
| 0 |
2171753
|
#!/usr/bin/env python3
import sys
import pandas as pd
from prepare import gen_sim_matrix
from recommender import params, simple_search, keyword_search
def main():
try:
if len(sys.argv) < 2:
raise IOError('Dataset missing!')
print('Loading dataset.')
df = pd.read_pickle(sys.argv[1])
print(df.head(10).to_string())
print('Generating sim matrix.')
cosine_func = gen_sim_matrix(df)
mode = int(input('Search mode[1-2]: '))
if (mode == 1):
city = input('Enter city: ') or params['city']
cuisine = input('Enter cuisine: ') or params['cuisine']
price = input('Enter price: ') or params['price']
results = simple_search(df, cuisine=cuisine,
city=city, price=price, as_json=False)
print(results.to_string())
elif mode == 2:
results = keyword_search(
df, cosine_func, input('Restaurant name: '), as_json=False)
print(results.to_string())
except Exception as e:
print(e)
if __name__ == '__main__':
main()
| 1,145 |
tests/test_base.py
|
hubbub-tech/blubber-orm
| 1 |
2172744
|
#import pytest
import unittest
import src.blubber_orm.base as base
class TestAbstractModels(unittest.TestCase):
def test_abstract_attributes(self):
#TODO: test that table_name and database exist in namespace
pass
def test_database_connection(self):
#TODO: get the class AbstractModels and make sure database is of type
#DatabaseConnection.
pass
def test_abstract_methods(self):
#TODO: try to instantiate the class, it should fail.
#Try to run functions, should similarly fail.
pass
class TestModels(unittest.TestCase):
def prepare_for_tests(self):
#TODO: create a new table for test attributes? or create a test db?
pass
def test_abstract_inheritance(self):
#TODO: test Models is of type AbstractModels. then make sure that all
#the methods defined in AbstractModels have been defined in Models. also
pass
def test_insert(self):
#TODO: test data insertion works and handles errors as expected
#also rotate through table names to see if any of them fail
pass
def test_set(self):
#TODO: test data updates and handles errors if there is an non-attribute key 'test'
#also rotate through table names to see if any of them fail
pass
def test_get(self):
#TODO: test that function throws error at non-attributes or non-specific keys
#with appropriate errors. also rotate through table names to see if any of them fail
pass
def test_delete(self):
#TODO: delete a data row then check to make sure it's deleted by calling it back
#also rotate through table names to see if any of them fail
pass
def test_filter(self):
#TODO: test filter does not accept non-attributes. test it always returns type list
#also rotate through table names to see if any of them fail
pass
def test_get_columns(self):
#TODO: test to be sure get_columns gets the correct columns for all the
#models, this is important to verification at every other class method
#also rotate through table names to see if any of them fail
pass
def test_refresh(self):
#TODO: create two instances of a model class with the same data
#then make a change to one and refresh it
#assert that the two instances are different
#also rotate through table names to see if any of them fail
pass
if __name__ == '__main__':
unittest.main()
| 2,538 |
tests/test_atoms.py
|
ulissigroup/aflow
| 0 |
2172232
|
"""Test if reading atoms object can work, especially for POSCAR format vasp4.xx"""
"""Test the fetching for AflowFile works
"""
import pytest
import json
from pathlib import Path
from random import shuffle
from aflow.entries import Entry
curdir = Path(__file__).parent
# Load big json query
with open(curdir / "data_big.json", "r") as fd:
raw_entries = json.load(fd)
# convert raw_entries to list and do a random shuffle
raw_entries = list(raw_entries.values())
def test_atoms_read(batch=50):
"""test on randomly sampled entries"""
shuffle(raw_entries)
for e in raw_entries[:batch]:
print(e["aurl"])
entry = Entry(**e)
# Read the CONTCAR.relax, which should always present
atoms = entry.atoms()
assert atoms is not None
def test_noatom_entries():
"""Corner cases for some entries in LIB5 and LIB6 (AIMD runs)"""
pass
# def test_aurl_with_colon():
# """Test if aurl with colon can be read."""
# # Series with aurl that contain 0 ~ 3 colons after the edu domain name
# for ncolon in range(4):
# shuffle(raw_entries)
# for entry in raw_entries:
# aurl = entry["aurl"]
# # edu:xx --> 2
# if len(aurl.split(":")) == ncolon + 2:
# afile = AflowFile(aurl, "CONTCAR.relax")
# assert "CONTCAR.relax" in afile.filename
# content = afile()
# print(aurl, content)
# break
| 1,484 |
awscli_as_session/__init__.py
|
transposit/awscli-as-session
| 1 |
2172757
|
#
# Copyright 2018 Transposit Corporation. All Rights Reserved.
#
from .assession import AsSession
from .mfacredentials import MFACredentials
def awscli_initialize(cli):
cli.register('building-command-table.main', inject_commands)
def inject_commands(command_table, session, **kwargs):
command_table['as-session'] = AsSession(session)
command_table['mfa-credentials'] = MFACredentials(session)
| 411 |
OUCC/2021/comparing.py
|
eddiegz/Personal-C
| 3 |
2172875
|
line1=[int(i) for i in input().split()]
line2=[int(i) for i in input().split()]
line1.sort()
line2.sort()
st=True
for i in range(min(len(line1),len(line2))):
if line1[i]!=line2[i]:
st=False
if st==False:
print('False')
else:
print('True')
| 271 |
blatann/nrf/nrf_events/gap_events.py
|
eriknyquist/blatann
| 1 |
2172169
|
from enum import IntEnum
from blatann.nrf.nrf_types import *
from blatann.nrf.nrf_dll_load import driver
import blatann.nrf.nrf_driver_types as util
from blatann.nrf.nrf_events.generic_events import BLEEvent
class GapEvt(BLEEvent):
pass
class GapEvtAdvReport(GapEvt):
evt_id = driver.BLE_GAP_EVT_ADV_REPORT
def __init__(self, conn_handle, peer_addr, rssi, adv_type, adv_data):
# TODO: What? Adv event has conn_handle? Does not compute
super(GapEvtAdvReport, self).__init__(conn_handle)
self.peer_addr = peer_addr
self.rssi = rssi
self.adv_type = adv_type
self.adv_data = adv_data
def get_device_name(self):
dev_name_list = []
if BLEAdvData.Types.complete_local_name in self.adv_data.records:
dev_name_list = self.adv_data.records[BLEAdvData.Types.complete_local_name]
elif BLEAdvData.Types.short_local_name in self.adv_data.records:
dev_name_list = self.adv_data.records[BLEAdvData.Types.short_local_name]
return "".join(map(chr, dev_name_list))
@classmethod
def from_c(cls, event):
adv_report_evt = event.evt.gap_evt.params.adv_report
if not adv_report_evt.scan_rsp:
adv_type = BLEGapAdvType(adv_report_evt.type)
else:
adv_type = BLEGapAdvType.scan_response
return cls(conn_handle=event.evt.gap_evt.conn_handle,
peer_addr=BLEGapAddr.from_c(adv_report_evt.peer_addr),
rssi=adv_report_evt.rssi,
adv_type=adv_type,
adv_data=BLEAdvData.from_c(adv_report_evt))
def __repr__(self):
return "{}(conn_handle={!r}, peer_addr={!r}, rssi={!r}, adv_type={!r}, adv_data={!r})".format(
self.__class__.__name__, self.conn_handle,
self.peer_addr, self.rssi, self.adv_type, self.adv_data)
class GapEvtTimeout(GapEvt):
evt_id = driver.BLE_GAP_EVT_TIMEOUT
def __init__(self, conn_handle, src):
super(GapEvtTimeout, self).__init__(conn_handle)
self.src = src
@classmethod
def from_c(cls, event):
timeout_evt = event.evt.gap_evt.params.timeout
return cls(conn_handle=event.evt.gap_evt.conn_handle,
src=BLEGapTimeoutSrc(timeout_evt.src))
def __repr__(self):
return "{}(conn_handle={!r}, src={!r})".format(self.__class__.__name__, self.conn_handle, self.src)
class GapEvtConnParamUpdateRequest(GapEvt):
evt_id = driver.BLE_GAP_EVT_CONN_PARAM_UPDATE_REQUEST
def __init__(self, conn_handle, conn_params):
super(GapEvtConnParamUpdateRequest, self).__init__(conn_handle)
self.conn_params = conn_params
@classmethod
def from_c(cls, event):
conn_params = event.evt.gap_evt.params.conn_param_update_request.conn_params
return cls(conn_handle=event.evt.gap_evt.conn_handle,
conn_params=BLEGapConnParams.from_c(conn_params))
def __repr__(self):
return "{}(conn_handle={!r}, conn_params={!r})".format(self.__class__.__name__, self.conn_handle,
self.conn_params)
class GapEvtConnParamUpdate(GapEvt):
evt_id = driver.BLE_GAP_EVT_CONN_PARAM_UPDATE
def __init__(self, conn_handle, conn_params):
super(GapEvtConnParamUpdate, self).__init__(conn_handle)
self.conn_params = conn_params
@classmethod
def from_c(cls, event):
conn_params = event.evt.gap_evt.params.conn_param_update.conn_params
return cls(conn_handle=event.evt.gap_evt.conn_handle,
conn_params=BLEGapConnParams.from_c(conn_params))
def __repr__(self):
return "{}(conn_handle={!r}, conn_params={})".format(self.__class__.__name__, self.conn_handle,
self.conn_params)
class GapEvtConnected(GapEvt):
evt_id = driver.BLE_GAP_EVT_CONNECTED
def __init__(self, conn_handle, peer_addr, role, conn_params):
super(GapEvtConnected, self).__init__(conn_handle)
self.peer_addr = peer_addr
self.role = role
self.conn_params = conn_params
@classmethod
def from_c(cls, event):
connected_evt = event.evt.gap_evt.params.connected
return cls(conn_handle=event.evt.gap_evt.conn_handle,
peer_addr=BLEGapAddr.from_c(connected_evt.peer_addr),
role=BLEGapRoles(connected_evt.role),
conn_params=BLEGapConnParams.from_c(connected_evt.conn_params))
def __repr__(self):
return "{}(conn_handle={!r}, peer_addr={!r}, role={!r}, conn_params={})".format(self.__class__.__name__,
self.conn_handle,
self.peer_addr, self.role,
self.conn_params)
class GapEvtDisconnected(GapEvt):
evt_id = driver.BLE_GAP_EVT_DISCONNECTED
def __init__(self, conn_handle, reason):
super(GapEvtDisconnected, self).__init__(conn_handle)
self.reason = reason
@classmethod
def from_c(cls, event):
disconnected_evt = event.evt.gap_evt.params.disconnected
return cls(conn_handle=event.evt.gap_evt.conn_handle,
reason=BLEHci(disconnected_evt.reason))
def __repr__(self):
return "{}(conn_handle={!r}, reason={!r})".format(self.__class__.__name__, self.conn_handle, self.reason)
| 5,752 |
tests/test_cli.py
|
pingali/d
| 19 |
2172724
|
import os, sys, shutil, tempfile, json, stat
from nose import with_setup
from nose.tools import assert_raises
from unittest import TestCase
from click.testing import CliRunner
import imp
runner = CliRunner()
####################################################
# This file has (an incomplete) list of tests for the command line. In
# particular it tests whether repos are being created correctly, and
# when we add files they do show up in all the right places. The list
# is not complete. This needs more work.
#####################################################
##########################################################
# Load the command line and set the config file..
##########################################################
thisdir = os.path.abspath(os.path.dirname(__file__))
os.environ['DGIT_INI'] = os.path.join(thisdir,
'assets',
'dgit-cli.ini')
# Load the dgit command file...
dgitfile = os.path.join(thisdir, "..", "bin", "dgit")
dgitmod = imp.load_source('dgit', dgitfile)
##########################################################
# Setup and clean workspace
##########################################################
def clean_workspace():
"""
Clean the working space
"""
# Run the dgit config show to the workspace directory
result = runner.invoke(dgitmod.profile, ['show'])
output = result.output
output = output.split("\n")
workspaces = [o.strip() for o in output if "workspace :" in o]
if len(workspaces) > 0:
workspace = workspaces[0]
workspace = workspace.replace("workspace : ","")
else:
workspace = os.path.join(os.getcwd(), 'workspace')
default_workspace = os.path.expanduser("~/.dgit")
if ((workspace != default_workspace) and
os.path.exists(workspace)):
print("Removing tree", workspace)
shutil.rmtree(workspace)
def workspace_setup():
print("Setup")
dgitmod.setup()
clean_workspace()
def workspace_teardown():
print("teardown")
clean_workspace()
dgitmod.teardown()
repo_configurations = {
'simple1': ['test1', 'testrepo1', None],
'simple2': ['test2', 'testrepo2', None],
's3': ['test_s3', 'testrepo_s3', 'git+s3']
}
@with_setup(workspace_setup, workspace_teardown)
def test_list():
"""
List repos
"""
result = runner.invoke(dgitmod.list_repos)
assert "Found 0 repos" in result.output
@with_setup(workspace_setup, workspace_teardown)
def test_init():
"""
Init repo
"""
result = runner.invoke(dgitmod.init,
[ "{}/{}".format(repo_configurations['simple1'][0],
repo_configurations['simple1'][1]),
'--setup', 'git',
],
input="Hello\ntest")
result = runner.invoke(dgitmod.list_repos)
assert "Found 1 repos" in result.output
| 2,967 |
tests/graph/test_topsort.py
|
goraniliev/algorithms-1
| 2 |
2172626
|
from algorithms.sort import (
top_sort, top_sort_recursive
)
import unittest
class TestSuite(unittest.TestCase):
def setUp(self):
self.depGraph = {
"a" : [ "b" ],
"b" : [ "c" ],
"c" : [ 'e'],
'e' : [ 'g' ],
"d" : [ ],
"f" : ["e" , "d"],
"g" : [ ]
}
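        # Adjacency lists hold each node's prerequisites, so a valid
        # topological order places 'g' before 'e' (and 'e' before 'c', etc.).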
def test_topsort(self):
res = top_sort_recursive(self.depGraph)
#print(res)
self.assertTrue(res.index('g') < res.index('e'))
res = top_sort(self.depGraph)
self.assertTrue(res.index('g') < res.index('e'))
if __name__ == '__main__':
unittest.main()
| 793 |