max_stars_repo_path (string, 4–182) | max_stars_repo_name (string, 6–116) | max_stars_count (int64, 0–191k) | id (string, 7) | content (string, 100–10k) | size (int64, 100–10k)
---|---|---|---|---|---
zpylib/grammar/builtin.py
|
louisyoungx/zpycli
| 0 |
2023743
|
# Python built-in functions
builtInFunctions = {
'符': 'chr',
'二进制': 'bin',
'串': 'str',
'八进制': 'oct',
'符值': 'ord',
'十六进制': 'hex',
'元组': 'tuple',
'长': 'len',
'集合': 'set',
'全为真': 'all',
'字典': 'dict',
'任一为真': 'any',
'列表': 'list',
'迭代': 'iter',
'冻结集合': 'frozenset',
'超类': 'super',
'切片': 'slice',
'乘方': 'pow',
'字节': 'bytes',
'排序': 'sort',
'已排序': 'sorted',
'全局字典': 'globals',
'字节数组': 'bytearray',
'局部字典': 'locals',
'属性': 'property',
'对象': 'object',
'删属性': 'delattr',
'变量字典': 'vars',
'取属性': 'getattr',
'可调用': 'callable',
'有属性': 'hasattr',
'内存视图': 'memoryview',
'设属性': 'setattr',
'哈希': 'hash',
'复数': 'complex',
'商余': 'divmod',
'整数': 'int',
'评估': 'eval',
'浮点数': 'float',
'长整数': 'long',
'范围': 'range',
'布尔': 'bool',
'表示': 'repr',
'输入': 'input',
'打包': 'zip',
'打印': 'print',
'打开': 'open',
'执行': 'exec',
'编译': 'compile',
'反转': 'reversed',
'映射': 'map',
'和': 'sum',
'是实例': 'isinstance',
'枚举': 'enumerate',
'最大值': 'max',
'断点': 'breakpoint',
'最小值': 'min',
'是子类': 'issubclass',
'绝对值': 'abs',
'下一个': 'next',
'类型': 'type',
'筛选': 'filter',
'格式化': 'format',
'静态方法': 'staticmethod',
'舍入': 'round',
'类方法': 'classmethod',
'退出': 'exit',
'帮助': 'help',
'应用': 'apply',
'基字符串': 'basestring',
'缓存': 'buffer',
'比较': 'cmp',
'强制': 'coerce',
'执行文件': 'execfile',
'文件': 'file',
'简化': 'reduce',
'重载': 'reload',
'repr': 'repr',
'intern': 'intern',
'unicode': 'unicode',
'ascii': 'ascii',
'id': 'id',
'dir': 'dir',
'__字典__': '__dict__',
'__方法__': '__methods__',
'__成员__': '__members__',
'__对象__': '__class__',
'__基类__': '__bases__',
'__名字__': '__name__',
'__继承__': '__mro__',
'__子类__': '__subclasses__',
'__初始化__': '__init__',
'__导入__': '__import__',
'__主函数__': '__main__',
}
# Invert the dictionary (Python name -> Chinese name)
def invert_dict(d):
return dict(zip(d.values(), d.keys()))
invertBuiltInFunctions = invert_dict(builtInFunctions)
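# A minimal usage sketch (added, not part of the original file; names are taken
# from the mappings above): builtInFunctions maps a Chinese identifier to its
# Python builtin, and invertBuiltInFunctions maps back, e.g.
#   builtInFunctions['打印']          # -> 'print'
#   invertBuiltInFunctions['print']  # -> '打印'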
| 2,122 |
src/process_data.py
|
Boson-data-solution/pneumonia_detector
| 0 |
2023544
|
import numpy as np
import cv2
import os
import pickle
import config as config
# Label encoding derived from the filename: 'virus' -> 1, 'bacteria' -> 2, otherwise -> 0
def get_data(data_dir, labels, img_size):
data = []
for label in labels:
path = os.path.join(data_dir, label)
for img in os.listdir(path):
if 'virus' in img:
try:
img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
resized_arr = cv2.resize(img_arr, (img_size, img_size))
data.append([resized_arr, 1])
except Exception as e:
print(e)
elif 'bacteria' in img:
try:
img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
resized_arr = cv2.resize(img_arr, (img_size, img_size))
data.append([resized_arr, 2])
except Exception as e:
print(e)
else:
try:
img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
resized_arr = cv2.resize(img_arr, (img_size, img_size))
data.append([resized_arr, 0])
except Exception as e:
print(e)
labelled_data = np.array(data, dtype=object)  # ragged (image, label) pairs
X = []
y = []
for feature, label in labelled_data:
X.append(feature)
y.append(label)
return X, y
X_train, y_train = get_data('../data/train', config.LABELS, config.IMG_SIZE)
X_val, y_val = get_data('../data/val', config.LABELS, config.IMG_SIZE)
X_test, y_test = get_data('../data/test', config.LABELS, config.IMG_SIZE)
if not os.path.exists('../data/processed_data'):
os.makedirs('../data/processed_data')
with open('../data/processed_data/X_train.pkl', 'wb') as f:
pickle.dump(X_train, f)
with open('../data/processed_data/y_train.pkl', 'wb') as f:
pickle.dump(y_train, f)
with open('../data/processed_data/X_val.pkl', 'wb') as f:
pickle.dump(X_val, f)
with open('../data/processed_data/y_val.pkl', 'wb') as f:
pickle.dump(y_val, f)
with open('../data/processed_data/X_test.pkl', 'wb') as f:
pickle.dump(X_test, f)
with open('../data/processed_data/y_test.pkl', 'wb') as f:
pickle.dump(y_test, f)
| 2,241 |
src/meltano/core/compiler/m5oc_file.py
|
code-watch/meltano
| 8 |
2022882
|
import json
from typing import Dict
from pathlib import Path
class M5ocFile:
def __init__(self, path: Path, content: Dict):
self.path = path
self.content = content
@classmethod
def load(cls, file):
return cls(file.name, json.load(file))
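# Usage sketch (added; the path is a placeholder and the m5oc file is assumed to contain JSON):
# with open("models/model.m5oc") as f:
#     m5oc = M5ocFile.load(f)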
| 276 |
target/classes/com/QAsystem/LightQA/questiontypeanalysis/split.py
|
eligah/LightQA
| 0 |
2023109
|
#!/usr/bin/python3
# coding: utf-8
import random
if __name__ == "__main__":
idxSet = set()
while 1:
a = random.randint(1, 6000)
if a not in idxSet:
idxSet.add(a)
if len(idxSet) == 100:
break
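# Note (added): the loop above collects 100 distinct indices in [1, 6000];
# random.sample(range(1, 6001), 100) would be an equivalent one-liner.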
| 255 |
minimum_cost_permutation.py
|
LPRowe/miscellaneous
| 0 |
2023456
|
import functools
import math
import string
import random
def min_cost_perm(s, cost):
"""
Find the minimum cost permutation of s.
The cost of placing letter i after j is cost[i][j] where 'a' maps to 0, 'b' to 1, ..., 'z' to 25
"""
@functools.lru_cache(None)
def helper(i, used):
nonlocal s, cost
if used == target:
return 0
best = math.inf
v = set()
for k in range(len(s)):
if not (bitmask[k] & used) and (s[k] not in v):
v.add(s[k])
j = ord(s[k]) - 97
best = min(best, cost[i][j] + helper(j, used | bitmask[k]))
return best
bitmask = [1 << i for i in range(len(s))]
target = (1 << len(s)) - 1
return min(helper(i, bitmask[i]) for i in range(len(s)))
if __name__ == "__main__":
# Example: expect 8
s = "abcde"
cost = [[0, 5, 1, 5, 3],
[4, 0, 9, 4, 2],
[7, 9, 0, 10, 7],
[1, 2, 8, 0, 2],
[3, 9, 7, 7, 0]]
print(min_cost_perm(s, cost))
# Edge Case:
N = 20
letters = string.ascii_lowercase[:N]
s = ''.join([random.choice(letters) for _ in range(N)])
cost = [[0 for _ in range(N)] for _ in range(N)]
for i in range(N):
for j in range(N):
if i != j:
cost[i][j] = random.randint(1, 10**9)
print(min_cost_perm(s, cost))
| 1,426 |
hifi_gan/checkpoint.py
|
mbarnig/larynx
| 540 |
2023239
|
"""Methods for saving/loading checkpoints"""
import logging
import typing
from dataclasses import dataclass
from pathlib import Path
import torch
from .config import TrainingConfig
from .models import TrainingModel, setup_model
_LOGGER = logging.getLogger("hifi_gan.checkpoint")
# -----------------------------------------------------------------------------
@dataclass
class Checkpoint:
training_model: TrainingModel
epoch: int
global_step: int
version: int
def get_state_dict(model):
"""Return model state dictionary whether or not distributed training was used"""
if hasattr(model, "module"):
return model.module.state_dict()
return model.state_dict()
# -----------------------------------------------------------------------------
def load_checkpoint(
generator_path: typing.Union[str, Path],
config: TrainingConfig,
training_model: typing.Optional[TrainingModel] = None,
use_cuda: bool = False,
) -> Checkpoint:
"""Load models and training state from a directory of Torch checkpoints"""
# Generator
generator_path = Path(generator_path)
_LOGGER.debug("Loading generator from %s", generator_path)
generator_dict = torch.load(generator_path, map_location="cpu")
assert "generator" in generator_dict, "Missing 'generator' in state dict"
version = int(generator_dict.get("version", 1))
global_step = int(generator_dict.get("global_step", 1))
epoch = int(generator_dict.get("epoch", -1))
# Set up the generator first
training_model = setup_model(
config=config,
training_model=training_model,
last_epoch=epoch,
use_cuda=use_cuda,
)
assert training_model.generator, "No generator"
set_state_dict(training_model.generator, generator_dict["generator"])
return Checkpoint(
training_model=training_model,
epoch=epoch,
global_step=global_step,
version=version,
)
def set_state_dict(model, state_dict):
"""Load state dictionary whether or not distributed training was used"""
if hasattr(model, "module"):
return model.module.load_state_dict(state_dict)
return model.load_state_dict(state_dict)
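# Usage sketch (added; the checkpoint path is a placeholder and TrainingConfig is
# assumed to be constructible from the project's own config module):
# checkpoint = load_checkpoint("generator.pth", config=TrainingConfig())
# generator = checkpoint.training_model.generator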
| 2,208 |
aiida_optimade/cli/cmd_aiida_optimade.py
|
csadorf/aiida-optimade
| 0 |
2023850
|
import click
from aiida.cmdline.params.options import PROFILE as VERDI_PROFILE
from aiida.cmdline.params.types import ProfileParamType as VerdiProfileParamType
from aiida.manage.configuration import Profile
from aiida_optimade.cli.options import AIIDA_PROFILES
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
@click.version_option(
None, "-v", "--version", message="AiiDA-OPTIMADE version %(version)s"
)
@VERDI_PROFILE(
type=VerdiProfileParamType(),
default="optimade_sqla",
show_default=True,
help="AiiDA profile to use and serve. Configured profiles: "
f"{', '.join([repr(name) for name in AIIDA_PROFILES])}.",
)
@click.pass_context
def cli(ctx, profile: Profile):
"""AiiDA-OPTIMADE command line interface (CLI)."""
if ctx.obj is None:
ctx.obj = {}
ctx.obj["profile"] = profile
| 853 |
examples/networking/create_dx_resources.py
|
j--wong/lib-stax-python-sdk
| 8 |
2023674
|
import json
import os
from staxapp.config import Config
from staxapp.openapi import StaxClient
Config.access_key = os.getenv("STAX_ACCESS_KEY")
Config.secret_key = os.getenv("STAX_SECRET_KEY")
# refer to the Stax API Schema for valid body properties
networks = StaxClient("networking")
# example 1: create a Stax Direct Connect (DX) Gateway
body = {
"Gateway": {
"AccountId": "<stax_account_uuid>",
"Asn": 64512,
"GatewayType": "TRANSIT",
"Name": "Prod Gateway",
}
}
response = networks.CreateDxResource(**body)
print(json.dumps(response, indent=4, sort_keys=True))
# example 2: create a Stax DX Gateway and associated DX Vif
body = {
"Gateway": {
"AccountId": "<stax_account_uuid>",
"Asn": 64512,
"GatewayType": "TRANSIT",
"Name": "Prod Gateway",
},
"Vif": {
"Asn": 64513,
"AwsConnectionId": "dx-con-xxxxxx",
"AwsRouterIp": "192.168.0.2/30",
"BgpAuthKey": "secret",
"JumboMtu": True,
"Name": "Prod VIF",
"RouterIp": "192.168.0.1/30",
"Tags": {
"CostCode": "12345"
},
"Vlan": 4000,
}
}
response = networks.CreateDxResource(**body)
print(json.dumps(response, indent=4, sort_keys=True))
# example 3: create a Stax DX Vif and attach it to an existing DX Gateway by providing the DX Gateway Id
body = {
"Vif": {
"Asn": 64513,
"AwsConnectionId": "dx-con-xxxxxx",
"AwsRouterIp": "192.168.0.2/30",
"DxGatewayId": "<dx_gateway_uuid>",
"BgpAuthKey": "secret",
"JumboMtu": True,
"Name": "Prod VIF",
"RouterIp": "192.168.0.1/30",
"Tags": {
"CostCode": "12345"
},
"Vlan": 4000,
}
}
response = networks.CreateDxResource(**body)
print(json.dumps(response, indent=4, sort_keys=True))
| 1,870 |
kiqpo/core/padding.py
|
bionic-py/Bionic
| 9 |
2022959
|
def Padding(child, padding="1.5%", top="0%", bottom="0%", left="0%", right="0%"):
if(top != "0%" or bottom != "0%" or left != "0%" or right != "0%"):
return f"<div style='padding-top:{top};padding-bottom:{bottom};padding-left:{left};padding-right:{right};' >{child}</div>"
else:
return f"<div style='padding:{padding};' >{child}</div>"
| 360 |
TelegramAPI.py
|
pedromartinez079/qhatu
| 0 |
2022841
|
# importing libraries
import requests
def getUpdates(token):
#Input parameters
#Telegram Bot Token
token=token
url_base='https://api.telegram.org/bot'+token+'/getUpdates'
# defining a params dict for the parameters to be sent to the API
PARAMS = {}
myheaders = {}
# sending get request and saving the response as response object
myrequest = requests.get(url = url_base, headers = myheaders, params = PARAMS)
#print myrequest.url
# extracting data in json format
data = myrequest.json()
return data
def TelegramSendTxt(token, chat_id, text):
#Input parameters
chat_id=chat_id
text=text
#Telegram Bot Token
token=token
url_base='https://api.telegram.org/bot'+token+'/sendMessage'
# defining a params dict for the parameters to be sent to the API
PARAMS = {'chat_id':chat_id, 'text':text}
myheaders = {}
# sending get request and saving the response as response object
try:
myrequest = requests.post(url = url_base, headers = myheaders, params = PARAMS)
data = myrequest.json()
return data
except requests.exceptions.RequestException as e:
data=e
return data
def TelegramSendPhoto(token, chat_id, filepath, caption):
#Input parameters
chat_id=chat_id
filepath=filepath
caption=caption
#Telegram Bot Token
token=token
url_base='https://api.telegram.org/bot'+token+'/sendPhoto'
# defining a params dict for the parameters to be sent to the API
files = {'chat_id': (None, chat_id), 'photo': (filepath, open(filepath, 'rb')), 'caption': (None, caption)}
myheaders = {}
# sending get request and saving the response as response object
try:
myrequest = requests.post(url = url_base, headers = myheaders, files = files)
data = myrequest.json()
return data
except requests.exceptions.RequestException as e:
data=e
return data
def TelegramSendDocument(token, chat_id, filepath, caption):
#Input parameters
chat_id=chat_id
filepath=filepath
caption=caption
#Telegram Bot Token
token=token
url_base='https://api.telegram.org/bot'+token+'/sendDocument'
# defining a params dict for the parameters to be sent to the API
files = {'chat_id': (None, chat_id), 'document': (filepath, open(filepath, 'rb')), 'caption': (None, caption)}
myheaders = {}
# sending get request and saving the response as response object
try:
myrequest = requests.post(url = url_base, headers = myheaders, files = files)
data = myrequest.json()
return data
except requests.exceptions.RequestException as e:
data=e
return data
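# Usage sketch (added; token and chat id are placeholders):
# updates = getUpdates("<bot_token>")
# TelegramSendTxt("<bot_token>", "<chat_id>", "hello from qhatu")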
| 2,760 |
cudnn_samples_v7/RNN/compare.py
|
SaltedFishLZ/cuDNN-sample
| 27 |
2023045
|
# This script compares the result files with the golden files and reports the status: pass or failed
#Usage: python compare_result.py results.txt golden.txt
import os, sys, re
patterns = ['{key1}\s+checksum\s+([.eE+0-9]+)\s+{key2}\s+checksum\s+([.eE+0-9]+)\s+{key3}\s+checksum\s+([.eE+0-9]+)', #3 similar keys as below each line
'{key1}\s+checksum\s+([.eE+0-9]+)\s+{key2}\s+checksum\s+([.eE+0-9]+)', #2 similar keys as below each line
'{key}\s+checksum\s+([.eE+0-9]+)', #one key each line: di checksum 6.676003E+01
'{key}[: ]+([0-9]+)\s+GFLOPS[, ]+\\(([0-9]+)\s+GFLOPS\\)[, ]+\\(([0-9]+)\s+GFLOPS\\)', #1 key each line with more returns
'{key}[: ]+([0-9]+)\s+GFLOPS'] #one key each line: Forward: 673 GFLOPS
#keys = [('i', 'c', 'h'), ('di', 'dc', 'dh'), ('i', 'h'), ('di', 'dh'), 'dw', 'Backward', 'Forward']
keys = [('i', 'c', 'h'), ('di', 'dc', 'dh'), ('i', 'h'), ('di', 'dh'), 'dw'] # skip the last 2 targets
pats = [0,0,1,1,2,3,4]
datnum = [len(k) if isinstance(k, tuple) else (3 if k == 'Backward' else 1) for k in keys]
#tol = 1.0e-3
def compare_results(ftarget, fgolden):
assert ftarget and fgolden, 'Not enough input files given!'
print ftarget, fgolden
targ, _ = get_results_from_file(ftarget)
golden, tol = get_results_from_file(fgolden, golden=True)
ret = 0
assert targ and golden, 'targets or golden results not generated!'
for k, vals in golden.iteritems():
if not isinstance(vals, list):
vals = [vals]
targ[k] = [targ[k]]
for idx, v in enumerate(vals):
tval = float(targ[k][idx])
gval = float(v)
err = None
if tol[k]['type'] == 'rel':
err = abs((tval-gval)/max(gval,tval)) # clamp rel_err <= 1
elif tol[k]['type'] == 'abs':
err = abs(tval-gval)
assert err is not None, 'Error is Empty!'
tol_i = tol[k]['val']
#print 'k,t,g,err',k,tval, gval, err
if err > tol_i:
print 'FAILED %s=%s Error: %.2e vs. golden (%s) with tol (%.2e)'%(k, targ[k][idx], err, v, tol_i)
ret = 1
else:
print 'PASSED %s=%s Error: %.2e vs. golden (%s) with tol (%.2e)'%(k, targ[k][idx], err, v, tol_i)
if ret == 0:
print 'ALL PASSED'
return ret
def _get_tolerance_line(line):
"""get a data item for a tolerance line with format (each line only one item):
i: type=rel, 1e-3
"""
assert line, 'Empty line!'
line = line.strip().replace(' ','')
stmp = line.split(':')
key = stmp[0]
_type, _val = stmp[1].split(',')
_type = _type.split('=')[-1]
tol={key:{'type':_type, 'val':float(_val)}}
return tol
def get_results_from_file(fname, golden=False):
assert fname, 'No file name given!'
ret = {}
tol = {}
is_tolerance = False
with open(fname, 'r') as fin:
lines = fin.readlines()
if len(lines) == 1:
lines = lines[0].split('\r')
for idx, line in enumerate(lines):
line = line.strip()
if not line:
continue
val = get_valpat_line(line)
if val:
ret = dict(ret, **val)
if golden:
if 'TOLERANCE' in line: # the next line is the tol value
is_tolerance = True
elif is_tolerance:
_tol = _get_tolerance_line(line)
tol = dict(tol, **_tol)
return ret, tol
def get_valpat_line(line):
for idx, key in enumerate(keys):
Ndat = datnum[idx]
if isinstance(key, tuple):
format_expr = {}
for j in range(Ndat):
format_expr['key%d'%(j+1)] = keys[idx][j]
ret = re.search(patterns[pats[idx]].format(**format_expr), line)
if ret:
vals = {}
for j in range(Ndat):
vals[key[j]] = ret.group(j+1)
return vals
else:
ret = re.search(patterns[pats[idx]].format(key=key), line)
if ret:
if Ndat >1:
#print Ndat, key, datnum, idx
return {key:[ret.group(j+1) for j in range(Ndat)]}
else:
return {key:ret.group(1)}
return None
def str_test():
s='Forward: 673 GFLOPS'
s1='Backward: 835 GFLOPS, (654 GFLOPS), (1155 GFLOPS)'
s2='i checksum 1.315793E+06 h checksum 1.315212E+05'
s3='di checksum 6.676003E+01 dh checksum 6.425050E+01'
s4='dw checksum 1.453750E+09'
print get_valpat_line(s1)
print get_valpat_line(s)
print get_valpat_line(s2)
print get_valpat_line(s3)
print get_valpat_line(s4)
if __name__ == '__main__':
#str_test()
#print get_results_from_file('results.txt')
#print get_results_from_file('golden.txt', golden=True)
sys.exit(compare_results(sys.argv[1], sys.argv[2]))
| 4,930 |
dde/data_test.py
|
KEHANG/DataDrivenEstimator
| 2 |
2023488
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import shutil
import unittest
import dde
from dde.data import (get_data_from_db, prepare_folded_data, split_inner_val_from_train_data,
prepare_data_one_fold, prepare_folded_data_from_multiple_datasets,
prepare_full_train_data_from_multiple_datasets, split_test_from_train_and_val,
prepare_full_train_data_from_file, prepare_folded_data_from_file)
class TestData(unittest.TestCase):
def setUp(self):
X, y, _ = get_data_from_db('rmg', 'rmg_internal', 'kh_tricyclics_table')
self.X = X
self.y = y
def test_get_HC_polycyclics_data_from_db(self):
self.assertEqual(len(self.X), 180)
self.assertEqual(len(self.y), 180)
def test_get_data_from_db_using_Cp_data(self):
X, y, _ = get_data_from_db('rmg', 'rmg_internal',
'kh_tricyclics_table',
prediction_task='Cp(cal/mol/K)')
self.assertEqual(len(X), 180)
self.assertEqual(len(y), 180)
def test_prepare_folded_data(self):
folds = 5
shuffle_seed = 4
X, y = self.X[:], self.y[:] # Make copies because they get shuffled in place
import numpy as np
np.random.seed(shuffle_seed)
(folded_Xs, folded_ys) = prepare_folded_data(X, y, folds)
self.assertEqual(len(folded_Xs), folds)
self.assertEqual(len(folded_ys), folds)
# test shuffle is expected
all_indices = range(len(self.X))
np.random.seed(shuffle_seed)
np.random.shuffle(all_indices)
first_X = folded_Xs[0][0]
expected_first_X = self.X[all_indices[0]]
self.assertTrue(np.all(np.equal(first_X, expected_first_X)))
def test_split_inner_val_from_train_data(self):
import numpy as np
shuffle_seed = 4
training_ratio = 0.9
X, y = self.X[:], self.y[:] # Make copies because they get shuffled in place
np.random.seed(shuffle_seed)
data = split_inner_val_from_train_data(X, y, training_ratio)
X_train = data[0]
X_inner_val = data[1]
self.assertAlmostEqual(len(X_train)*.1/len(X_inner_val), training_ratio, 1)
# test shuffle is expected
training_indices = range(len(self.X))
np.random.seed(shuffle_seed)
np.random.shuffle(training_indices)
first_X_in_train = X_train[0]
expected_first_X_in_train = self.X[training_indices[0]]
self.assertTrue(np.all(np.equal(first_X_in_train, expected_first_X_in_train)))
def test_prepare_data_one_fold(self):
import numpy as np
folds = 5
training_ratio = 0.9
n = len(self.X)
target_fold_size = int(np.ceil(float(n) / folds))
folded_Xs = [self.X[i:i+target_fold_size] for i in range(0, n, target_fold_size)]
folded_ys = [self.y[i:i+target_fold_size] for i in range(0, n, target_fold_size)]
shuffle_seed = 4 # seed for method `prepare_data_one_fold()`
np.random.seed(shuffle_seed)
data = prepare_data_one_fold(folded_Xs, folded_ys, current_fold=0, training_ratio=training_ratio)
self.assertEqual(len(data), 6)
X_train = data[0]
X_val = data[1]
X_test = data[2]
self.assertAlmostEqual(len(X_train)/10.0,
training_ratio*int(np.ceil(1.0*len(self.X)/folds))*(folds - 1)/10.0,
0)
self.assertAlmostEqual(len(X_val)/10.0,
(1-training_ratio)*int(np.ceil(1.0*len(self.X)/folds))*(folds - 1)/10.0,
0)
self.assertAlmostEqual(len(X_test)/10.0,
int(np.ceil(1.0*len(self.X)/folds))/10.0,
0)
# test shuffle is expected
testset_size = len(folded_Xs[0])
training_val_indices = range(testset_size, len(self.X))
np.random.seed(shuffle_seed)
np.random.shuffle(training_val_indices)
first_X_in_train = X_train[0]
expected_first_X_in_train = self.X[training_val_indices[0]]
self.assertTrue(np.all(np.equal(first_X_in_train, expected_first_X_in_train)))
def test_prepare_folded_data_from_multiple_datasets(self):
datasets = [
('rmg','rmg_internal', 'kh_tricyclics_table', 0.1),
('rmg','rmg_internal', 'kh_tricyclics_table', 0.1),
('rmg','rmg_internal', 'kh_tricyclics_table', 0.1)
]
X_test, y_test, folded_Xs, folded_ys = prepare_folded_data_from_multiple_datasets(
datasets=datasets,
folds=5, add_extra_atom_attribute=True,
add_extra_bond_attribute=True,
differentiate_atom_type=True,
differentiate_bond_type=True,
prediction_task="Cp(cal/mol/K)")
self.assertEqual(len(folded_Xs), 5)
self.assertEqual(len(folded_ys), 5)
self.assertEqual(len(X_test), 54)
self.assertEqual(len(y_test), 54)
self.assertEqual(len(folded_Xs[0]), 99)
self.assertEqual(len(folded_Xs[0]), 99)
def test_prepare_full_train_data_from_multiple_datasets(self):
datasets = [
('rmg','rmg_internal', 'kh_tricyclics_table', 0.1),
('rmg','rmg_internal', 'kh_tricyclics_table', 0.1),
('rmg','rmg_internal', 'kh_tricyclics_table', 0.1)
]
X_test, y_test, X_train, y_train = prepare_full_train_data_from_multiple_datasets(
datasets=datasets,
add_extra_atom_attribute=True,
add_extra_bond_attribute=True,
differentiate_atom_type=True,
differentiate_bond_type=True,
save_meta=False)
self.assertEqual(len(X_train), 486)
self.assertEqual(len(y_train), 486)
self.assertEqual(len(X_test), 54)
self.assertEqual(len(y_test), 54)
def test_split_test_from_train_and_val(self):
X_test, y_test, X_train_and_val, y_train_and_val = split_test_from_train_and_val(
self.X, self.y,
testing_ratio=0.1)
self.assertEqual(len(X_test), 18)
self.assertEqual(len(y_test), 18)
self.assertEqual(len(X_train_and_val), 162)
self.assertEqual(len(y_train_and_val), 162)
def test_prepare_full_train_data_from_file(self):
datafile = os.path.join(os.path.dirname(dde.__file__),
'test_data',
'datafile.csv')
tensors_dir = os.path.join(os.path.dirname(dde.__file__),
'test_data',
'tensors')
X_test, y_test, X_train, y_train = prepare_full_train_data_from_file(
datafile,
add_extra_atom_attribute=True,
add_extra_bond_attribute=True,
differentiate_atom_type=True,
differentiate_bond_type=True,
save_meta=False,
save_tensors_dir=tensors_dir,
testing_ratio=0.0
)
self.assertTrue(os.path.exists(tensors_dir))
self.assertTrue(all(os.path.exists(os.path.join(tensors_dir, '{}.npy'.format(i))) for i in range(10)))
self.assertEqual(len(X_test), 0)
self.assertEqual(len(y_test), 0)
self.assertEqual(len(X_train), 10)
self.assertEqual(len(y_train), 10)
shutil.rmtree(tensors_dir)
def test_prepare_folded_data_from_file(self):
datafile = os.path.join(os.path.dirname(dde.__file__),
'test_data',
'datafile.csv')
X_test, y_test, folded_Xs, folded_ys = prepare_folded_data_from_file(
datafile, 5,
add_extra_atom_attribute=True,
add_extra_bond_attribute=True,
differentiate_atom_type=True,
differentiate_bond_type=True,
testing_ratio=0.0
)
self.assertEqual(len(folded_Xs), 5)
self.assertEqual(len(folded_ys), 5)
self.assertEqual(len(X_test), 0)
self.assertEqual(len(y_test), 0)
self.assertEqual(len(folded_Xs[0]), 2)
self.assertEqual(len(folded_ys[0]), 2)
| 8,766 |
vsa-2018-master/proj02_loops/proj02_02.py
|
ett274/vsa-2018
| 0 |
2022756
|
# Name: <NAME>
# Date: 7-10-18
# proj02_02: Fibonacci Sequence
"""
Asks a user how many Fibonacci numbers to generate and generates them. The Fibonacci
sequence is a sequence of numbers where the next number in the sequence is the sum of the
previous two numbers in the sequence. The sequence looks like this:
1, 1, 2, 3, 5, 8, 13...
"""
x = int(raw_input("How many Fibonacci numbers do you want to generate? "))
if x <= 0:
print "Okay, fine then, smart alec."
elif x == 1:
lister = [1]
print lister
else:
lister = [1, 1]
count = 1
while count < x - 1:
lister.append(lister[count] + lister[count - 1])
count += 1
else:
print lister
dos = int(raw_input("How many powers of 2 do you want to see? "))
listerine = []
if dos <= 0:
print "Okay, fine then, smart alec."
else:
count = 1
while count <= dos:
listerine.append(2**count)
count += 1
print listerine
mmm = int(raw_input("Give me a number and I'll tell you all its divisors: "))
blister = []
if mmm <= 0:
print "Sorry! I can only do whole numbers."
else:
cont = 1
while cont <= mmm:
if mmm % cont == 0:
blister.append(cont)
cont += 1
print blister
| 1,236 |
scanner/checks/ssl.py
|
kotnik/scanner
| 1 |
2023177
|
from gevent import ssl, socket, select
from yapsy.IPlugin import IPlugin
class SSL(IPlugin):
def description(self):
return "SSL missing"
def check(self, url, host_info):
host_info[self.description()] = None
host_ip = socket.getaddrinfo(url, 443)[0][4][0]
try:
sock = socket.socket()
sock.connect((host_ip, 443))
except socket.error:
host_info["ssl checked"].set(True)
host_info[self.description()] = True
return True
try:
sock = ssl.wrap_socket(sock,
ca_certs="/etc/ssl/certs/ca-certificates.crt",
cert_reqs=ssl.CERT_NONE,
)
cert = sock.getpeercert()
except ssl.SSLError:
host_info["ssl checked"].set(True)
host_info[self.description()] = True
return True
host_info["ssl checked"].set(False)
host_info[self.description()] = False
return False
| 1,001 |
alana_pepper/src/alana_node_classes/config.py
|
cdondrup/inaugural_pepper
| 0 |
2023376
|
import yaml
class Config(object):
def __init__(self, file_name):
self.conf = Config.load_file(file_name)
@staticmethod
def load_file(file_name):
with open(file_name, 'r') as f:
return yaml.load(f)
@property
def concepts(self):
return self.conf["concepts"]
@property
def applications(self):
return self.conf["applications"]
@property
def precanned_text(self):
return self.conf["precanned_text"]
| 488 |
tests/zquantum/core/interfaces/functions_test.py
|
alexjuda2/z-quantum-core
| 24 |
2022873
|
from zquantum.core.gradients import finite_differences_gradient
from zquantum.core.interfaces.functions import (
function_with_gradient,
has_store_artifact_param,
)
class TestAddingGradient:
class TestFunctionStoringArtifacts:
def test_makes_a_callable_that_stores_artifacts(self):
def _test_function(params, store_artifact=None):
if store_artifact:
store_artifact("x", params[0])
return (params ** 2).sum()
function = function_with_gradient(
_test_function, finite_differences_gradient(_test_function)
)
assert has_store_artifact_param(function)
class TestFunctionNotStoringArtifacts:
def test_makes_a_callable_not_storing_artifacts(self):
def _test_function(params):
return (params ** 2).sum()
function = function_with_gradient(
_test_function, finite_differences_gradient(_test_function)
)
assert not has_store_artifact_param(function)
| 1,072 |
BluePrint/ComplexBlue/manage.py
|
bestwishfang/FlaskFrameWork
| 0 |
2022887
|
from flask import Flask
from Buyer import buyer
from Seller import seller
from Manger import manager
app = Flask(__name__)
app.register_blueprint(buyer)
app.register_blueprint(seller)
app.register_blueprint(manager)
if __name__ == '__main__':
print(app.url_map)
app.run(port=9080, debug=True)
"""
Map([
<Rule '/seller/' (GET, HEAD, OPTIONS) -> seller.index>,
<Rule '/buyer/' (GET, HEAD, OPTIONS) -> buyer.index>,
<Rule '/static/<filename>' (GET, HEAD, OPTIONS) -> static>
])
"""
| 479 |
financeAPI/main.py
|
StengerP/StockScreenerGui
| 0 |
2022856
|
# Main script
from financeAPI import *
#import financeAPI as fapi
# Other libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
from urllib.request import urlopen
from financeAPI.financeAPI_lib import FinanceAPI
with open('Secret_Key.txt') as f:
key = f.read()
f = FinanceAPI()
#
print(f)
#
# f.build_dict('AAPL')
#
# f.registerKey_(key)
#
# apple_dict=f.build_dict('AAPL')
#
# for k,v in apple_dict.items():
# print("{}: {}".format(k,v))
#
# df=f.build_dataframe(['TWTR','FB','MSFT','NVDA','AAPL','CRM'])
#
# f.available_data('profile')
#
# f.available_data('metrics')
#
# f.available_data('ratios')
#
# f.bar_chart('Book Value per Share',color='orange',edgecolor='k')
#
# f.bar_chart('debtEquityRatio')
#
# f.scatter('quickRatio','Book Value per Share',color='blue')
#
# f.scatter(varX='debtEquityRatio',
# varY='Enterprise Value over EBITDA',
# sizeZ='price',
# color='red',alpha=0.6)
#
# # Only companies with market cap > 200 billion USD
# df = f.df
# df_large_cap = df[df['Market Cap']>200e9]
# df_large_cap[['companyName','Market Cap']]
#
#
# # A fresh class declaration
# f2 = FinanceAPI()
# # Assigning the custom DataFrame to the `df` attribute of this new class object
# # Note we did not need to request data from the API again.
# f2.df = df_large_cap
#
# f2.bar_chart('Enterprise Value over EBITDA',color='red',edgecolor='k')
| 1,476 |
kwargs 195.py
|
blulady/python
| 0 |
2022824
|
"""kwargs is short for 'keyword arguments', a special syntax just like *args, written as
**kwargs where the asterisks** represent a key value pair
can be used to pass any number of undefined arguments into a function
acts as a dictionary: maps out the value to the corresponding variable key
https://www.youtube.com/watch?v=kB829ciAXo4"""
def myFun(**kwargs):
print("kwargs", kwargs)
myFun(first = "1", second = "2", third = "3")
def person(name, **data):
print(name)
print(data)
person('navin', age=28, city='mumbai', mob=9865432)
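# Expected output (added): the first call prints
#   kwargs {'first': '1', 'second': '2', 'third': '3'}
# and person() prints the name followed by a dict of the remaining keyword arguments.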
| 569 |
esrally/exceptions.py
|
sstults/rally
| 0 |
2022833
|
class RallyError(Exception):
"""
Base class for all Rally exceptions
"""
pass
class LaunchError(RallyError):
"""
Thrown whenever there was a problem launching the benchmark candidate
"""
pass
class SystemSetupError(RallyError):
"""
Thrown when a user did something wrong, e.g. the metrics store is not started or required software is not installed
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class RallyAssertionError(RallyError):
"""
Thrown when a (precondition) check has been violated.
"""
class DataError(RallyError):
"""
Thrown when something is wrong with the benchmark data
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class SupplyError(RallyError):
pass
class BuildError(RallyError):
pass
class InvalidSyntax(RallyError):
pass
| 982 |
fantasyf1/config.py
|
Haelmorn/fantasy_f1
| 0 |
2023658
|
import os
# Use the local URL for local testing and the environment variable for deployment
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
#SQLALCHEMY_DATABASE_URI = "postgresql:///f1db"
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('EMAIL')
MAIL_PASSWORD = os.environ.get("EMAIL_PASS")
SQLALCHEMY_TRACK_MODIFICATIONS = False
CSRF_ENABLED = True
| 488 |
TREEPLOT/makecdf.py
|
willo12/NextGen-GP
| 0 |
2023393
|
import os.path
import spacegrids as sg
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
def get_report(name,qsubs=400):
HOME = os.environ['HOME']
path = os.path.join(HOME,'DATA',name,'report%d')
L=[]
for i in xrange(qsubs):
item = np.loadtxt(path%i)
L.append(item)
LL = [len(item) for item in L]
M=max(LL)
m=min(LL)
shpe = L[0].shape
data = np.nan*np.ones((M,qsubs,shpe[1]))
for i in xrange(qsubs):
data[:LL[i],i,:] = np.array(L[i])
return data
data = get_report('M_NOSCAPE_b441',441)
W=data[:,:,0].reshape((data.shape[0],21,21)).astype(np.float64)
X=sg.Ax('X')
Y=sg.Ax('Y')
T=sg.Ax('T')
t=sg.Coord(name='t',value=np.arange(W.shape[0]).astype(np.float64),axis=T)
y=sg.Coord(name='y',value=np.arange(W.shape[1]).astype(np.float64),axis=Y)
x=sg.Coord(name='x',value=np.arange(W.shape[2]).astype(np.float64),axis=X)
F = sg.Field(name='score',value=W,grid=t*y*x)
mF = F/(F/(Y*X))
mF.write()
plt.contourf(mF.value[60,:,:])
plt.colorbar()
plt.show()
| 1,022 |
intro-to-rnns/stock-price-predictor.py
|
iratao/deep-learning
| 0 |
2022903
|
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import lstm, time # helper libraries
| 169 |
python/gstgva/audio/audio_event.py
|
WilliamWright-JCI/dlstreamer_gst
| 1 |
2023820
|
# ==============================================================================
# Copyright (C) 2018-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
# ==============================================================================
## @file audio_event.py
# @brief This file contains gstgva.audio_event.AudioEvent class to control audio events for particular gstgva.audio_frame.AudioFrame with gstgva.tensor.Tensor instances attached
import ctypes
import numpy
from typing import List
from collections import namedtuple
from ..tensor import Tensor
from ..util import libgst, libgobject, GLIST_POINTER
from .audio_event_meta import AudioEventMeta
import gi
gi.require_version('GstAudio', '1.0')
gi.require_version('GLib', '2.0')
gi.require_version('Gst', '1.0')
from gi.repository import GstAudio, GLib, GObject, Gst
Segment = namedtuple("Segment", "start_time end_time")
## @brief This class represents an audio event - an object describing a detection result (audio segment) and containing multiple
# Tensor objects (inference results) attached by multiple models. For example, it can be an audio event with detected
# speech that has been converted to text. It can be produced by a pipeline with a gvaaudiodetect element (detection model) and a
# gvaspeechtotext element (speechtotext model). Such an AudioEvent will have its start and end timestamps filled and will
# have 2 Tensor objects attached - 1 Tensor object with the detection result, the other with the speech-to-text result
class AudioEvent(object):
## @brief Get clip of AudioEvent as start and end time stamps
# @return Start and end time of AudioEvent
def segment(self):
return Segment(start_time = self.__event_meta.start_timestamp,
end_time = self.__event_meta.end_timestamp)
## @brief Get AudioEvent label
# @return AudioEvent label
def label(self) -> str:
return GLib.quark_to_string(self.__event_meta.event_type)
## @brief Get AudioEvent detection confidence (set by gvaaudiodetect)
# @return last added detection Tensor confidence if exists, otherwise None
def confidence(self) -> float:
detection = self.detection()
return detection.confidence() if detection else None
## @brief Get all Tensor instances added to this AudioEvent
# @return vector of Tensor instances added to this AudioEvent
def tensors(self):
param = self.meta()._params
while param:
tensor_structure = param.contents.data
yield Tensor(tensor_structure)
param = param.contents.next
## @brief Returns detection Tensor, last added to this AudioEvent. As any other Tensor, returned detection
# Tensor can contain arbitrary information. If you use AudioEvent based on GstGVAAudioEventMeta
# attached by gvaaudiodetect by default, then this Tensor will contain "label_id", "confidence", "start_timestamp",
# "end_timestamp" fields.
# If AudioEvent doesn't have detection Tensor, it will be created in-place.
# @return detection Tensor, empty if there were no detection Tensor objects added to this AudioEvent when
# this method was called
def detection(self) -> Tensor:
for tensor in self.tensors():
if tensor.is_detection():
return tensor
return None
## @brief Get label_id from detection Tensor, last added to this AudioEvent
# @return last added detection Tensor label_id if exists, otherwise None
def label_id(self) -> int:
detection = self.detection()
return detection.label_id() if detection else None
## @brief Get AudioEventMeta containing start, end time information and tensors (inference results).
# Tensors are represented as GstStructures added to GstGVAAudioEventMeta.params
# @return AudioEventMeta containing start, end time information and tensors (inference results)
def meta(self) -> AudioEventMeta:
return self.__event_meta
## @brief Iterate by AudioEventMeta instances attached to buffer
# @param buffer buffer with GstGVAAudioEventMeta instances attached
# @return generator for AudioEventMeta instances attached to buffer
@classmethod
def _iterate(self, buffer: Gst.Buffer):
try:
meta_api = hash(GObject.GType.from_name("GstGVAAudioEventMetaAPI"))
except:
return
gpointer = ctypes.c_void_p()
while True:
try:
value = libgst.gst_buffer_iterate_meta_filtered(hash(buffer), ctypes.byref(gpointer), meta_api)
except:
value = None
if not value:
return
event_meta = ctypes.cast(value, ctypes.POINTER(AudioEventMeta)).contents
yield AudioEvent(event_meta)
## @brief Construct AudioEvent instance from AudioEventMeta. After this, AudioEvent will
# obtain all tensors (detection & inference results) from AudioEventMeta
# @param event_meta AudioEventMeta containing start, end time information and tensors
def __init__(self, event_meta: AudioEventMeta):
self.__event_meta = event_meta
| 5,235 |
blender-add-on/xrs/log.py
|
MikeFesta/3xr
| 7 |
2023201
|
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
""" Log related functions
"""
def debug(message):
""" Print a color coded debug message to the console """
print("\033[94m" + message + "\033[0m")
def error(message):
""" Print a color coded error to the console """
print("\033[91m" + "ERROR: " + message + "\033[0m")
def info(message):
""" Print a color coded info message to the console """
print("\033[92m" + message + "\033[0m")
def silly(message):
""" Print a color coded silly message to the console """
print("\033[95m" + message + "\033[0m")
def verbose(message):
""" Print a color coded verbose message to the console """
print("\033[96m" + message + "\033[0m")
def warn(message):
""" Print a color coded warning to the console """
print("\033[93m" + "Warning: " + message + "\033[0m")
| 837 |
src/exportToJson.py
|
mayuanyang/doggy
| 3 |
2023473
|
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import numpy as np
import os, sys
# Load model 1
model1 = load_model('trained/family/model.hdf5')
with open('model.json', 'w') as f:
f.write(model1.to_json())
| 294 |
PythonDAdata/3358OS_12_Code/code12/bn_demo.py
|
shijiale0609/Python_Data_Analysis
| 1 |
2023641
|
import bottleneck as bn
import numpy as np
import timeit
setup = '''
import numpy as np
import bottleneck as bn
from scipy.stats import rankdata
np.random.seed(42)
a = np.random.randn(30)
'''
def time(code, setup, n):
return timeit.Timer(code, setup=setup).repeat(3, n)
if __name__ == '__main__':
n = 10**3
print n, "pass", max(time("pass", "", n))
print n, "min np.median", min(time('np.median(a)', setup, n))
print n, "min bn.median", min(time('bn.median(a)', setup, n))
a = np.arange(7)
print "Median diff", np.median(a) - bn.median(a)
func, _ = bn.func.median_selector(a, axis=0)
print "Bottleneck median func name", func
print n, "min scipy.stats.rankdata", min(time('rankdata(a)', setup, n))
print n, "min bn.rankdata", min(time('bn.rankdata(a)', setup, n))
func, _ = bn.func.rankdata_selector(a, axis=0)
print "Bottleneck rankdata func name", func
| 912 |
utilities/mcver_updater.py
|
bennettdc/MCEdit-Unified
| 237 |
2023665
|
import urllib2
import json
import os
from logging import getLogger
def run():
log = getLogger(__name__)
num = False
def download(_gamePlatform, _gameVersionNumber, url):
_download = False
dir_path = os.path.join(base, "mcver", _gamePlatform, _gameVersionNumber)
file_path = os.path.join(dir_path, os.path.basename(url))
if not (os.path.exists(dir_path) or os.path.isdir(dir_path)):
os.makedirs(dir_path)
_download = True
if not os.path.exists(file_path):
_download = True
else:
conn = urllib2.urlopen(url, timeout=7.5)
new_data = conn.read().strip()
current = open(file_path, 'rb')
current_data = current.read().strip()
conn.close()
current.close()
if new_data != current_data:
fp = open(file_path, 'wb')
fp.write(new_data.strip())
fp.close()
log.info("Updated {} {}::{}".format(_gamePlatform, _gameVersionNumber, os.path.basename(file_path)))
return True
if _download:
conn = urllib2.urlopen(url, timeout=7.5)
fp = open(file_path, 'wb')
fp.write(conn.read())
conn.close()
fp.close()
log.info("Downloaded {} {}::{}".format(_gamePlatform, _gameVersionNumber, os.path.basename(file_path)))
return True
return False
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ver = []
try:
manifest = urllib2.urlopen("https://raw.githubusercontent.com/Podshot/MCEdit-Unified/master/mcver/mcver.json")
data = json.loads(manifest.read())
manifest.close()
except:
return
for gamePlatform in data.iterkeys():
for gameVersionNumber in data[gamePlatform].iterkeys():
for f in data[gamePlatform][gameVersionNumber]:
if download(gamePlatform, gameVersionNumber, f):
num = True
return num
| 2,062 |
search/first-search.py
|
rajeshb/SelfDrivingCar-Term3
| 0 |
2023424
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 12 13:53:14 2019
@author: rajesh
"""
# ----------
# User Instructions:
#
# Define a function, search() that returns a list
# in the form of [optimal path length, row, col]. For
# the grid shown below, your function should output
# [11, 4, 5].
#
# If there is no valid path from the start point
# to the goal, your function should return the string
# 'fail'
# ----------
# Grid format:
# 0 = Navigable space
# 1 = Occupied space
import numpy as np
grid = [[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0]]
init = [0, 0]
goal = [len(grid)-1, len(grid[0])-1]
cost = 1
delta = [[-1, 0], # go up
[ 0,-1], # go left
[ 1, 0], # go down
[ 0, 1]] # go right
delta_name = ['^', '<', 'v', '>']
def search(grid,init,goal,cost):
# ----------------------------------------
# insert code here
# ----------------------------------------
rows = len(grid)
cols = len(grid[0])
used = []
queue = []
queue.append([0, init[0], init[1]])
path = "fail"
while len(queue) > 0:
item = queue.pop(0)
g = item[0]
x = item[1]
y = item[2]
#print("item : ", item)
if x == goal[0] and y == goal[1]:
path = item
break
used.append([x, y])
for direction in delta:
next_pos = np.add([x,y], direction)
if next_pos[0] < 0 or next_pos[0] > rows-1:
continue
if next_pos[1] < 0 or next_pos[1] > cols-1:
continue
if grid[next_pos[0]][next_pos[1]] != 0:
continue
if [next_pos[0],next_pos[1]] in used:
continue
queue.append([g+1, next_pos[0], next_pos[1]])
#print("queue: ", queue)
return path
search(grid, init, goal, cost)
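# Note (added): search() returns its result rather than printing it; for the grid
# above, print(search(grid, init, goal, cost)) should output [11, 4, 5] per the
# instructions in the header comment.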
| 1,946 |
nipype/interfaces/slicer/legacy/registration.py
|
sebastientourbier/nipype_lts5
| 1 |
2022676
|
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
from nipype.interfaces.slicer.base import SlicerCommandLine
class BSplineDeformableRegistrationInputSpec(CommandLineInputSpec):
iterations = traits.Int(desc="Number of iterations", argstr="--iterations %d")
gridSize = traits.Int(desc="Number of grid points on interior of the fixed image. Larger grid sizes allow for finer registrations.", argstr="--gridSize %d")
histogrambins = traits.Int(desc="Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a deformable registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", argstr="--histogrambins %d")
spatialsamples = traits.Int(desc="Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", argstr="--spatialsamples %d")
constrain = traits.Bool(desc="Constrain the deformation to the amount specified in Maximum Deformation", argstr="--constrain ")
maximumDeformation = traits.Float(desc="If Constrain Deformation is checked, limit the deformation to this amount.", argstr="--maximumDeformation %f")
default = traits.Int(desc="Default pixel value used if resampling a pixel outside of the volume.", argstr="--default %d")
initialtransform = File(desc="Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. This transform should be an affine or rigid transform. It is used an a bulk transform for the BSpline. Optional.", exists=True, argstr="--initialtransform %s")
FixedImageFileName = File(position=-2, desc="Fixed image to which to register", exists=True, argstr="%s")
MovingImageFileName = File(position=-1, desc="Moving image", exists=True, argstr="%s")
outputtransform = traits.Either(traits.Bool, File(), hash_files=False, desc="Transform calculated that aligns the fixed and moving image. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--outputtransform %s")
outputwarp = traits.Either(traits.Bool, File(), hash_files=False, desc="Vector field that applies an equivalent warp as the BSpline. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional.", argstr="--outputwarp %s")
resampledmovingfilename = traits.Either(traits.Bool, File(), hash_files=False, desc="Resampled moving image to fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--resampledmovingfilename %s")
class BSplineDeformableRegistrationOutputSpec(TraitedSpec):
outputtransform = File(desc="Transform calculated that aligns the fixed and moving image. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True)
outputwarp = File(desc="Vector field that applies an equivalent warp as the BSpline. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional.", exists=True)
resampledmovingfilename = File(desc="Resampled moving image to fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True)
class BSplineDeformableRegistration(SlicerCommandLine):
"""title: Fast Nonrigid BSpline registration
category: Legacy.Registration
description: Registers two images together using BSpline transform and mutual information.
version: 0.1.0.$Revision: 18864 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.0/Modules/BSplineDeformableRegistration
contributor: <NAME>
acknowledgements:
This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = BSplineDeformableRegistrationInputSpec
output_spec = BSplineDeformableRegistrationOutputSpec
_cmd = " BSplineDeformableRegistration "
_outputs_filenames = {'resampledmovingfilename':'resampledmovingfilename.nii','outputtransform':'outputtransform.txt','outputwarp':'outputwarp.nrrd'}
class AffineRegistrationInputSpec(CommandLineInputSpec):
fixedsmoothingfactor = traits.Int(desc="Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", argstr="--fixedsmoothingfactor %d")
movingsmoothingfactor = traits.Int(desc="Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", argstr="--movingsmoothingfactor %d")
histogrambins = traits.Int(desc="Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", argstr="--histogrambins %d")
spatialsamples = traits.Int(desc="Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", argstr="--spatialsamples %d")
iterations = traits.Int(desc="Number of iterations", argstr="--iterations %d")
translationscale = traits.Float(desc="Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used is 1/(TranslationScale^2)). This parameter is used to \"weight\" or \"standardized\" the transform parameters and their effect on the registration objective function.", argstr="--translationscale %f")
initialtransform = File(desc="Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. Optional.", exists=True, argstr="--initialtransform %s")
FixedImageFileName = File(position=-2, desc="Fixed image to which to register", exists=True, argstr="%s")
MovingImageFileName = File(position=-1, desc="Moving image", exists=True, argstr="%s")
outputtransform = traits.Either(traits.Bool, File(), hash_files=False, desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--outputtransform %s")
resampledmovingfilename = traits.Either(traits.Bool, File(), hash_files=False, desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--resampledmovingfilename %s")
class AffineRegistrationOutputSpec(TraitedSpec):
outputtransform = File(desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True)
resampledmovingfilename = File(desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True)
class AffineRegistration(SlicerCommandLine):
"""title: Fast Affine registration
category: Legacy.Registration
description: Registers two images together using an affine transform and mutual information. This module is often used to align images of different subjects or images of the same subject from different modalities.
This module can smooth images prior to registration to mitigate noise and improve convergence. Many of the registration parameters require a working knowledge of the algorithm although the default parameters are sufficient for many registration tasks.
version: 0.1.0.$Revision: 18864 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.0/Modules/AffineRegistration
contributor: <NAME>
acknowledgements:
This module was developed by <NAME> while at GE Research with contributions from <NAME>.
This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = AffineRegistrationInputSpec
output_spec = AffineRegistrationOutputSpec
_cmd = " AffineRegistration "
_outputs_filenames = {'resampledmovingfilename':'resampledmovingfilename.nii','outputtransform':'outputtransform.txt'}
| 9,160 |
Task1D.py
|
cmr67/IA-Flood-Warning-System-68
| 1 |
2023175
|
from floodsystem.stationdata import build_station_list
from floodsystem.geo import rivers_with_station
from floodsystem.geo import stations_by_river
def run():
stations = build_station_list()
x=rivers_with_station(stations)
x_first10=x[:10]
print("the rivers thathave monitoring stations are:", x_first10)
print("the number of stations are:", len(x))
y= stations_by_river(stations)
river_aire = y["River Aire"]
sorted_ra = sorted(river_aire)
river_cam = y["River Cam"]
sorted_rc = sorted(river_cam)
river_thames=y["River Thames"]
sorted_rt = sorted(river_thames)
print("River Aire", sorted_ra)
print("River Cam", sorted_rc)
print("River Thames", sorted_rt)
if __name__ == "__main__":
print("")
print("*** Task 1D: CUED Part IA Flood Warning System ***")
print("")
run()
| 873 |
phd_courses/theoretical_high_energy_astroparticle/figures/cosmic_ray_diffusion.py
|
jacopok/notes
| 6 |
2023069
|
import numpy as np
import matplotlib.pyplot as plt
from astropy.constants import codata2018 as ac
import astropy.units as u
from tqdm import tqdm
from scipy.integrate import solve_ivp
from matplotlib.colors import Normalize, LogNorm, FuncNorm
from matplotlib.cm import ScalarMappable
from astropy.visualization import quantity_support
from matplotlib.ticker import MultipleLocator
from scipy.integrate import trapezoid
import cmasher as cmr
gamma = 10
v0 = ac.c * np.sqrt(1 - 1/gamma**2)
# jesus christ who invented Gaussian units
B0 = 2e-6 * u.cm**(-1/2) * u.g**(1/2) / u.s
q = 1 * ac.e.gauss
m = ac.m_p
omega_larmor = (q * B0 / (m * gamma * ac.c)).si
delta_B = B0 / 20_000
# set to 1 or -1
sign = 1
# change if needed
phi0 = 0
theta0 = 1.
def derivative_func(k):
o = omega_larmor.to(1/u.s).value
kv0 = (sign * k * v0).to(1/u.s).value
dB_over_B = (delta_B / B0).to(u.dimensionless_unscaled).value
v_over_c = (v0 / ac.c).to(u.dimensionless_unscaled).value
def theta_derivative(t, theta):
return (
- o
* dB_over_B
* v_over_c
* np.cos(
(o - kv0 * np.cos(theta)) * t
+ phi0
)
)
return theta_derivative
k_resonance = (sign * omega_larmor / v0 / np.cos(theta0)).to(1 / u.AU)
N_plotted = 250
N_plotted_margins = 100
lim_k_dex = 1
def cubic(x, center=0, width=2*lim_k_dex):
return (width/2)**(-1.5) * (abs(x - center))**1.5 * np.sign(x-center)
def inverse_cubic(y, center=0, width=2*lim_k_dex):
return center + (abs(y))**(1/1.5) * (width/2) * np.sign(y)
logs = cubic(np.linspace(-lim_k_dex, lim_k_dex, num=N_plotted))
k_range = 10**logs * k_resonance
# k_range = np.concatenate(
# (
# np.logspace(-.8, -.3, num=N_plotted_margins),
# np.logspace(-.3, .3, num=N_plotted),
# np.logspace(+.3, .8, num=N_plotted_margins),
# )
# ) * k_resonance
# k_range = np.logspace(-.3, .3, num=N_plotted) * k_resonance
n_periods = 100
larmor_period = 2 * np.pi / omega_larmor
global_pulsations = omega_larmor - k_range * v0 * np.cos(theta0)
integration_oom = 2 * np.pi / (abs(global_pulsations) + omega_larmor)
max_steps = (integration_oom / 4).si.value
t_span = (0, (n_periods * larmor_period).si.value)
t_eval = np.linspace(*t_span, num=4 * n_periods)
def solver():
for k, dt in tqdm(zip(k_range, max_steps), total=len(k_range)):
func = derivative_func(k)
sol = solve_ivp(func, t_span, y0=[theta0], max_step=dt, t_eval=t_eval)
sol.k = k
yield sol
def diffusion_over_time():
cmap = cmr.iceburn
norm = LogNorm(k_range[0].value, k_range[-1].value)
# norm = FuncNorm((
# lambda x: cubic(np.log10(x/k_resonance.value)),
# lambda x: 10**inverse_cubic(x) * k_resonance.value),
# vmin=min(k_range).value,
# vmax=max(k_range).value
# )
for sol in solver():
plt.plot(
sol.t/larmor_period.si.value,
sol.y[0],
c=cmap(norm(sol.k.value)),
alpha=2**(-1.*np.log10(N_plotted)),
lw=.5
)
mappable = ScalarMappable(norm=norm, cmap=cmap)
cbar = plt.colorbar(mappable=mappable, label=f'k [{k_resonance.unit}]')
cbar.ax.hlines(k_resonance.value, 0, 2, color='white')
# plt.gca().set_facecolor('black')
plt.xlabel('Time [Larmor periods]')
plt.ylabel('Angle [radians]')
plt.title('Diffusion varying $k$')
def final_point_variation():
theta_final = [sol.y[0][-1] for sol in solver()]
theta_diffs = (np.array(theta_final) - theta0) * u.rad
# with quantity_support():
plt.semilogx(k_range.value, theta_diffs.value, lw=.8)
plt.axvline(k_resonance.value, ls=':',
label='Resonance wavenumber', c='black')
plt.xlabel(f'$k$ [{k_range.unit}]')
plt.ylabel(f'$\\Delta \\theta$ [{theta_diffs.unit}]')
plt.grid('on')
plt.legend()
plt.title(f'$\\Delta \\theta$ over {n_periods} Larmor periods')
def final_point_integral():
theta_final = [sol.y[0][-1] for sol in solver()]
theta_diffs = (np.array(theta_final) - theta0)
theta_vars = theta_diffs**2
integral = trapezoid(y=theta_vars, x=k_range.value) * k_range.unit
th_value = (omega_larmor**2 * (delta_B / B0)**2 * np.pi / v0 / np.cos(theta0)
* t_span[1] * u.s
)
print(f'theoretical = {th_value.si}')
print(f'computed = {integral.si}')
# # with quantity_support():
# plt.semilogx(k_range.value, theta_vars, lw=.8)
# plt.axvline(k_resonance.value, ls=':',
# label='Resonance wavenumber', c='black')
# plt.xlabel(f'$k$ [{k_range.unit}]')
# plt.ylabel(f'$\\Delta \\theta$ [{theta_diffs.unit}]')
# plt.grid('on')
# plt.legend()
# plt.title(f'$\\Delta \\theta$ over {n_periods} Larmor periods')
def integration_periods_plot():
with quantity_support():
plt.semilogx(k_range, integration_oom)
plt.axvline(k_resonance)
plt.grid('on')
if __name__ == "__main__":
from make_all_figures import plot_and_save
plot_and_save(final_point_variation)
plot_and_save(diffusion_over_time)
# plot_and_save(integration_periods_plot)
final_point_integral()
| 5,235 |
Bluetooth/bluetoothReceptor.py
|
dreinoso/communicator
| 0 |
2023874
|
# coding=utf-8
import os
import time
import json
import Queue
import pickle
import threading
import bluetooth
import logger
import messageClass
JSON_FILE = 'config.json'
JSON_CONFIG = json.load(open(JSON_FILE))
BUFFER_SIZE = 4096 # Buffer size in bytes (number of characters)
DOWNLOADS = 'Downloads'
class BluetoothReceptor(threading.Thread):
receptionQueue = None
def __init__(self, _threadName, _remoteSocket, _receptionQueue):
threading.Thread.__init__(self, name = _threadName)
self.remoteSocket = _remoteSocket
self.receptionQueue = _receptionQueue
def run(self):
try:
receivedData = self.remoteSocket.recv(BUFFER_SIZE)
            # We need to start a file download
if receivedData == 'START_OF_FILE':
self.receiveFile()
            # We received an object instance
elif receivedData.startswith('INSTANCE'):
                # Strip the 'tag' that marks this as a message instance
serializedMessage = receivedData[len('INSTANCE'):]
                # Unpickle the message instance to get the object itself
messageInstance = pickle.loads(serializedMessage)
self.receptionQueue.put((messageInstance.priority, messageInstance))
logger.write('INFO', '[BLUETOOTH] Ha llegado una nueva instancia de mensaje!')
            # It is plain text, so it is simply stored
else:
self.receptionQueue.put((10, receivedData))
logger.write('INFO', '[BLUETOOTH] Ha llegado un nuevo mensaje!')
except bluetooth.BluetoothError as errorMessage:
logger.write('WARNING', '[BLUETOOTH] Error al intentar recibir un mensaje: \'%s\'.'% errorMessage )
finally:
            # Close the client socket connection
self.remoteSocket.close()
logger.write('DEBUG', '[BLUETOOTH] \'%s\' terminado y cliente desconectado.' % self.getName())
def receiveFile(self):
try:
self.remoteSocket.send('ACK')
            currentDirectory = os.getcwd() # Get the current working directory
            fileName = self.remoteSocket.recv(BUFFER_SIZE) # Get the name of the file to be received
            # Build the relative path of the file to be downloaded
relativeFilePath = os.path.join(currentDirectory, DOWNLOADS, fileName)
            # Check whether the 'DOWNLOADS' directory has not yet been created in the current directory
if DOWNLOADS not in os.listdir(currentDirectory):
os.mkdir(DOWNLOADS)
            # Check whether the file to be downloaded does not already exist in the 'DOWNLOADS' folder
if not os.path.isfile(relativeFilePath):
fileObject = open(relativeFilePath, 'w+')
logger.write('DEBUG', '[BLUETOOTH] Descargando archivo \'%s\'...' % fileName)
self.remoteSocket.send('READY')
while True:
inputData = self.remoteSocket.recv(BUFFER_SIZE)
if inputData != 'EOF':
fileObject.write(inputData)
                        time.sleep(0.15) # IMPORTANT, do not delete.
self.remoteSocket.send('ACK')
else:
fileObject.close()
break
                self.remoteSocket.send('ACK') # IMPORTANT, do not delete.
self.receptionQueue.put((10, fileName))
logger.write('INFO', '[BLUETOOTH] Archivo \'%s\' descargado correctamente!' % fileName)
return True
else:
                self.remoteSocket.send('FILE_EXISTS') # Tell the transmitter that the file already exists
logger.write('WARNING', '[BLUETOOTH] El archivo \'%s\' ya existe! Imposible descargar.' % fileName)
return False
except bluetooth.BluetoothError as errorMessage:
logger.write('WARNING', '[BLUETOOTH] Error al intentar descargar el archivo \'%s\': %s' % (fileName, str(errorMessage)))
return False
| 3,498 |
codewof/utils/errors/InvalidYAMLFileError.py
|
taskmaker1/codewof
| 3 |
2023067
|
"""Custom error for invalid yaml file."""
from .Error import Error
ERROR_MESSAGE = """
Invalid YAML file (.yaml).
Options:
- Does the file match the expected layout?
- Does the file contain at least one key:value pair?
- Is the syntax correct? (are you missing a colon somewhere?)
"""
class InvalidYAMLFileError(Error):
"""custom error for invalid yaml file."""
def __init__(self, yaml_file_path):
"""Create error for invalid yaml file."""
super().__init__()
self.yaml_file_path = yaml_file_path
def __str__(self):
"""Override default error string.
Returns:
Error message for invalid yaml file.
"""
return self.base_message.format(filename=self.yaml_file_path) + ERROR_MESSAGE
| 771 |
test/connect/test_HttpConnectionResolver.py
|
banalna/pip-services3-rpc-python
| 0 |
2023027
|
# -*- coding: utf-8 -*-
"""
tests.connect.test_HttpConnectionResolver
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) Conceptual Vision Consulting LLC 2015-2016, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from pip_services3_commons.config import ConfigParams
from pip_services3_rpc.connect import HttpConnectionResolver
from pip_services3_commons.errors.ConfigException import ConfigException
class TestHttpConnectionResolver:
def test_connection_params(self):
connection_resolver = HttpConnectionResolver()
connection_resolver.configure(ConfigParams.from_tuples("connection.protocol", "http",
"connection.host", "somewhere.com",
"connection.port", 123))
connection = connection_resolver.resolve(None)
assert connection.get_protocol() == "http"
assert connection.get_host() == "somewhere.com"
assert connection.get_port() == 123
assert connection.get_uri() == "http://somewhere.com:123"
def test_connection_uri(self):
connection_resolver = HttpConnectionResolver()
connection_resolver.configure(ConfigParams.from_tuples("connection.uri", "https://somewhere.com:123"))
connection = connection_resolver.resolve(None)
assert connection.get_protocol() == "https"
assert connection.get_host() == "somewhere.com"
assert connection.get_port() == 123
assert connection.get_uri() == "https://somewhere.com:123"
class TestHttpsCredentials:
def test_https_with_credentials_connection_params(self):
connection_resolver = HttpConnectionResolver()
connection_resolver.configure(ConfigParams.from_tuples(
"connection.host", "somewhere.com",
"connection.port", 123,
"connection.protocol", "https",
"credential.ssl_key_file", "ssl_key_file",
"credential.ssl_crt_file", "ssl_crt_file",
))
connection = connection_resolver.resolve(None)
assert 'https' == connection.get_protocol()
assert 'somewhere.com' == connection.get_host()
assert 123 == connection.get_port()
assert 'https://somewhere.com:123' == connection.get_uri()
assert 'ssl_key_file' == connection.get('credential.ssl_key_file')
assert 'ssl_crt_file' == connection.get('credential.ssl_crt_file')
def test_https_with_no_credentials_connection_params(self):
connection_resolver = HttpConnectionResolver()
connection_resolver.configure(ConfigParams.from_tuples(
"connection.host", "somewhere.com",
"connection.port", 123,
"connection.protocol", "https",
"credential.internal_network", "internal_network"
))
connection = connection_resolver.resolve(None)
assert 'https' == connection.get_protocol()
assert 'somewhere.com' == connection.get_host()
assert 123 == connection.get_port()
assert 'https://somewhere.com:123' == connection.get_uri()
assert connection.get('credential.internal_network')
def test_https_with_missing_credentials_connection_params(self):
# Section missing
connection_resolver = HttpConnectionResolver()
connection_resolver.configure(ConfigParams.from_tuples(
"connection.host", "somewhere.com",
"connection.port", 123,
"connection.protocol", "https"
))
print('Test - section missing')
try:
connection_resolver.resolve(None)
except ConfigException as err:
assert err.code == 'NO_SSL_KEY_FILE'
assert err.name == 'NO_SSL_KEY_FILE'
assert err.message == 'SSL key file is not configured in credentials'
assert err.category == 'Misconfiguration'
# ssl_crt_file missing
connection_resolver = HttpConnectionResolver()
connection_resolver.configure(ConfigParams.from_tuples(
"connection.host", "somewhere.com",
"connection.port", 123,
"connection.protocol", "https",
"credential.ssl_key_file", "ssl_key_file"
))
print('Test - ssl_crt_file missing')
try:
connection_resolver.resolve(None)
except ConfigException as err:
assert err.code == 'NO_SSL_CRT_FILE'
assert err.name == 'NO_SSL_CRT_FILE'
assert err.message == 'SSL crt file is not configured in credentials'
assert err.category == 'Misconfiguration'
# ssl_key_file missing
connection_resolver = HttpConnectionResolver()
connection_resolver.configure(ConfigParams.from_tuples(
"connection.host", "somewhere.com",
"connection.port", 123,
"connection.protocol", "https",
"credential.ssl_crt_file", "ssl_crt_file"
))
print('Test - ssl_key_file missing')
try:
connection_resolver.resolve(None)
except ConfigException as err:
assert err.code == 'NO_SSL_KEY_FILE'
assert err.name == 'NO_SSL_KEY_FILE'
assert err.message == 'SSL key file is not configured in credentials'
assert err.category == 'Misconfiguration'
# ssl_key_file, ssl_crt_file present
connection_resolver = HttpConnectionResolver()
connection_resolver.configure(ConfigParams.from_tuples(
"connection.host", "somewhere.com",
"connection.port", 123,
"connection.protocol", "https",
"credential.ssl_key_file", "ssl_key_file",
"credential.ssl_crt_file", "ssl_crt_file"
))
print('Test - ssl_key_file, ssl_crt_file present')
connection = connection_resolver.resolve(None)
assert 'https' == connection.get_protocol()
assert 'somewhere.com' == connection.get_host()
assert 123 == connection.get_port()
assert 'https://somewhere.com:123' == connection.get_uri()
assert 'ssl_key_file' == connection.get('credential.ssl_key_file')
assert 'ssl_crt_file' == connection.get('credential.ssl_crt_file')
| 6,292 |
src/amuse/community/distributed/set_global_options.py
|
joshuawall/amuse
| 1 |
2023106
|
from amuse.lab import *
from amuse.support import options
from amuse.community.seba.interface import SeBa
options.GlobalOptions.instance().override_value_for_option("channel_type", "blaa")
#stellar_evolution = SeBa()
del options.GlobalOptions.instance().overriden_options["channel_type"]
stellar_evolution = SeBa()
| 322 |
contrib/Matting/utils.py
|
632652101/PaddleSeg
| 1 |
2023433
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def get_files(root_path):
res = []
for root, dirs, files in os.walk(root_path, followlinks=True):
for f in files:
if f.endswith(('.jpg', '.png', '.jpeg', 'JPG')):
res.append(os.path.join(root, f))
return res
| 878 |
src/registration/bin/BsplineRegister.py
|
WoutDavid/ST-nextflow-pipeline
| 0 |
2023440
|
import SimpleITK as sitk
import sys
import os
import re
##parse arguments
reference = sys.argv[1]
target=sys.argv[2]
# output_dir=sys.argv[3]
prefix = os.path.splitext(target)[0]
## If it's the global first registration step, we want to add the round label to it
fixed = sitk.ReadImage(reference, sitk.sitkFloat32)
moving = sitk.ReadImage(target, sitk.sitkFloat32)
transformDomainMeshSize = [8] * moving.GetDimension()
outTx = sitk.BSplineTransformInitializer(fixed,
transformDomainMeshSize)
R = sitk.ImageRegistrationMethod()
R.SetMetricAsCorrelation()
R.SetOptimizerAsLBFGSB(gradientConvergenceTolerance=1e-5,
numberOfIterations=100,
maximumNumberOfCorrections=5,
maximumNumberOfFunctionEvaluations=1000,
costFunctionConvergenceFactor=1e+7)
R.SetInitialTransform(outTx, True)
R.SetInterpolator(sitk.sitkLinear)
outTx = R.Execute(fixed, moving)
resampled = sitk.Resample(moving, outTx, sitk.sitkLinear, 0.0, sitk.sitkUInt16)
sitk.WriteImage(resampled, f"{prefix}_registered.tif")
| 1,120 |
feature_flipper/migrations/0002_auto_20160722_1028.py
|
mypebble/django-feature-flipper
| 4 |
2023241
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-22 10:28
from __future__ import unicode_literals
from django.db import migrations, models
from feature_flipper import flipper_settings
class Migration(migrations.Migration):
dependencies = [
('feature_flipper', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='featureflipper',
name='feature',
field=models.CharField(choices=flipper_settings.FEATURE_FLIPPER_FLAGS, max_length=200),
),
]
| 551 |
src/Swarm.py
|
nico-mu/GeneticFishSwarm
| 0 |
2023325
|
import random
from time import sleep, time
from config import FishConstants, WindowConstants
from Fish import Fish
class Swarm:
random.seed(time())
GENERATION = 1
def __init__(self, root, canvas, doTrackLine, terrain, numberOfFish, maxLifeSpan, mutationRate) -> None:
self.terrain = terrain
self.root = root
self.maxLifeSpan = maxLifeSpan
self.doTrackLine = doTrackLine
self.numberOfFish = numberOfFish
self.canvas = canvas
self.deltaTime = 0
self.trackLine = 0
self.fishSwarm = self.__spawnSwarm()
self.mutationRate = mutationRate
def getBestDNA(self):
return self.__calculateFitestFish().getDNA()
def simulateSwarm(self):
for _ in range(int(self.maxLifeSpan.get())):
for fish in self.fishSwarm:
if fish.finished:
continue
fish.draw()
for k, v in self.terrain.items():
overlappingObject = self.canvas.find_overlapping(
v[0], v[1], v[2], v[3])
if fish.getId() in overlappingObject:
if k == 1:
fish.setAlive(True)
fish.setFinished(True)
self.__calculateDeltaTime(fish)
else:
fish.setAlive(False)
fish.setFinished(True)
self.__clearTrackingLine()
if self.doTrackLine.get():
self.__renderTrackingLine(
self.__calculateFitestFish().getCenter())
self.canvas.grid(row=2)
self.root.update_idletasks()
self.root.update()
sleep(1 / WindowConstants.FPS)
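    # Fitness-proportionate mating: each fish's DNA enters the mating pool a number
    # of times proportional to its reward; offspring are built by midpoint crossover
    # of two (preferably distinct) parents followed by mutation.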
def mating(self):
newFishSwarm = [None] * int(self.numberOfFish.get())
MATING_POOL = []
bestReward = max([fish.calculateReward() for fish in self.fishSwarm])
for fish in self.fishSwarm:
fish.clear()
likelihood = (fish.calculateReward() / bestReward) * 100
[MATING_POOL.append(fish.getDNA())
for _ in range(int(likelihood))]
for i in range(int(self.numberOfFish.get())):
retry = 100
parentA = random.choice(MATING_POOL)
parentB = random.choice(MATING_POOL)
while parentA == parentB and retry > 0:
parentB = random.choice(MATING_POOL)
retry -= 1
child = self.__mutate(parentA[:len(parentA)//2] +
parentB[len(parentB)//2:])
newFishSwarm[i] = Fish(
canvas=self.canvas, DNA=child, terrain=self.terrain)
self.fishSwarm = newFishSwarm
def run(self):
self.__renderTitle()
self.deltaTime = 0
self.simulateSwarm()
self.mating()
self.GENERATION += 1
def resetSwarm(self):
for fish in self.fishSwarm:
fish.clear()
self.fishSwarm = self.__spawnSwarm()
def __calculateFitestFish(self):
currentReward = 0
prevReward = 0
res = None
for fish in self.fishSwarm:
currentReward = fish.calculateReward()
if currentReward > prevReward:
res = fish
prevReward = currentReward
return res
def __calculateDeltaTime(self, fish):
if fish.isAlive and fish.finished:
if self.deltaTime != 0:
self.deltaTime = (self.deltaTime + fish.lifecircle) // 2
else:
self.deltaTime = fish.lifecircle
self.__renderTitle()
def __mutate(self, dna):
for i in range(len(dna)):
if random.random() < float(self.mutationRate.get()):
dna[i] = [dna[i][0], self.__getRandomDnaString()[1]]
if random.random() < float(self.mutationRate.get()):
dna[i] = [self.__getRandomDnaString()[0], dna[i][1]]
return dna
def __getRandomDnaString(self):
return [random.uniform(-FishConstants.max_velocity, FishConstants.max_velocity), random.uniform(-FishConstants.max_drift, FishConstants.max_drift)]
def __clearTrackingLine(self):
self.canvas.delete(self.trackLine)
def __renderTrackingLine(self, pos):
self.trackLine = self.canvas.create_line(
pos[0],
pos[1],
WindowConstants.goal_delta_X,
WindowConstants.goal_delta_Y,
width=WindowConstants.tracking_line_width,
fill=WindowConstants.tracking_line_color
)
def __renderTitle(self):
self.root.title(
f"Fish Generation : {self.GENERATION} | Average completion time : {self.deltaTime}")
def __spawnSwarm(self):
MATING_POOL = [
[self.__getRandomDnaString()
for _ in range(int(self.maxLifeSpan.get()))]
for _ in range(random.randint(100, 500))]
return [Fish(canvas=self.canvas, DNA=random.choice(MATING_POOL), terrain=self.terrain)
for _ in range(int(self.numberOfFish.get()))]
| 5,169 |
gamestonk_terminal/stocks/discovery/disc_api.py
|
minhhoang1023/GamestonkTerminal
| 1 |
2023574
|
"""Discovery API."""
import os
from gamestonk_terminal.helper_classes import ModelsNamespace as _models
# flake8: noqa
# pylint: disable=unused-import
# Menu commands
from .finnhub_view import past_ipo as pipo
from .finnhub_view import future_ipo as fipo
from .yahoofinance_view import display_gainers as gainers
from .yahoofinance_view import display_losers as losers
from .yahoofinance_view import display_ugs as ugs
from .yahoofinance_view import display_gtech as gtech
from .yahoofinance_view import display_active as active
from .yahoofinance_view import display_ulc as ulc
from .yahoofinance_view import display_asc as asc
from .fidelity_view import orders_view as ford
from .ark_view import ark_orders_view as arkord
from .seeking_alpha_view import upcoming_earning_release_dates as upcoming
from .seeking_alpha_view import news as trending
from .shortinterest_view import low_float as lowfloat
from .seeking_alpha_view import display_news as cnews
from .shortinterest_view import hot_penny_stocks as hotpenny
from .nasdaq_view import display_top_retail as rtat
# Models
models = _models(os.path.abspath(os.path.dirname(__file__)))
| 1,142 |
chap09/list0945.py
|
ytianjin/GitTest
| 0 |
2023753
|
"""用lambda表达式计算两个值的和"""
a = int(input('整数a:'))
b = int(input('整数b:'))
add2 = lambda x, y: x + y
print('a和b的和是', add2(a, b), '。')
| 139 |
components/sinks/util-listeners/src/main/resources/python/eventlet/PythonIdEchoEventlet.py
|
zinic/atom-nuke
| 1 |
2022984
|
from org.atomnuke.sink.eps.eventlet import AtomEventlet
# Python <3
class PythonIdEchoEventlet(AtomEventlet):
def init(self, taskContext):
# Save a reference to the logger
self.log = taskContext.log()
# Hello world!
self.log.info("Python ID echo eventlet init.")
def destroy(self):
self.log.info("Python ID echo eventlet destroyed.")
def entry(self, entry):
self.log.info("From Python: %s" % (entry.id().toString()))
for category in entry.categories():
print("Entry category: %s" % (category.term()))
| 566 |
tests/convert_test.py
|
ryanorendorff/pyop
| 3 |
2023803
|
#pylint: disable=W0104,W0108
import pytest
import pyop
import random
import numpy as np
from tools import operatorVersusMatrix
num_tests = 100
matrix_max_size = 100
#######################################################################
# Tests #
#######################################################################
######################
# To/From a matrix #
######################
def testToLinearOperator():
for _ in range(num_tests):
A_mat = np.random.rand(random.randint(1, matrix_max_size),
random.randint(1, matrix_max_size))
A_op = pyop.toLinearOperator(A_mat)
operatorVersusMatrix(A_mat, A_op)
def testToLinearOperatorInputCheck():
vec = np.ones(1)
twod = np.ones((1,1))
threed = np.ones((1,1,1))
fourd = np.ones((1,1,1,1))
with pytest.raises(ValueError):
pyop.toLinearOperator(vec)
_ = pyop.toLinearOperator(twod)
with pytest.raises(ValueError):
pyop.toLinearOperator(threed)
with pytest.raises(ValueError):
pyop.toLinearOperator(fourd)
def testToMatrix():
for _ in range(num_tests):
shape = random.randint(1, matrix_max_size)
A_mat = np.eye(shape)
A_op = pyop.LinearOperator((shape, shape), lambda x:x, lambda x:x)
np.testing.assert_allclose(A_mat, pyop.toMatrix(A_op))
################################
# To another functional form #
################################
def testToScipyLinearOperator():
for _ in range(num_tests):
shape = random.randint(1, matrix_max_size)
## Does not pass adjoint test, just for testing
A_op = pyop.LinearOperator((shape, shape), lambda x:x, lambda x:2*x)
A_sci = pyop.toScipyLinearOperator(A_op)
input_mat = np.random.rand(shape, shape)
np.testing.assert_allclose(A_op(input_mat), A_sci(input_mat))
np.testing.assert_allclose(A_op.T(input_mat),
A_sci.rmatvec(input_mat))
| 2,055 |
transfer.py
|
ChenyangWang1/face_parsing
| 0 |
2023167
|
#!/usr/bin/env python3
# -*- coding:utf8 -*-
# @TIME :2018/9/17 9:02
# @Author:dazhan
# @File :copyfiles2dir.py
import os
import shutil
source_path = os.path.abspath(r'/home/data2/DATASET/test_set_a/test_set_a/mask')
target_path = os.path.abspath(r'/home/data2/DATASET/test_set_a/test_set_a/mask1')
if not os.path.exists(target_path):
os.makedirs(target_path)
if os.path.exists(source_path):
    # root is the path of the directory currently being walked
    # dirs is a list of the names of the sub-directories in that directory (not recursive)
    # files is likewise a list of the files in that directory (sub-directories excluded)
for root, dirs, files in os.walk(source_path):
for file in files:
src_file = os.path.join(root, file)
shutil.copy(src_file, target_path)
print(src_file)
print('copy files finished!')
| 756 |
cdktemplate/sagemaker_studio_audit_control/amazon_reviews_dataset_stack.py
|
aws-samples/amazon-sagemaker-studio-audit
| 5 |
2022846
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from aws_cdk import (
aws_lakeformation as lf,
aws_glue as glue,
aws_s3 as s3,
aws_iam as iam,
core
)
import os
AMAZON_REVIEWS_BUCKET_ARN = os.environ["AMAZON_REVIEWS_BUCKET_ARN"]
class AmazonReviewsDatasetStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# CloudFormation Parameters
glue_db_name = core.CfnParameter(self, "GlueDatabaseNameAmazonReviews",
type="String",
description="Name of Glue Database to be created for Amazon Reviews.",
allowed_pattern="[\w-]+",
default = "amazon_reviews_db"
)
glue_table_name = core.CfnParameter(self, "GlueTableNameAmazonReviews",
type="String",
description="Name of Glue Table to be created for Amazon Reviews (Parquet).",
allowed_pattern="[\w-]+",
default = "amazon_reviews_parquet"
)
self.template_options.template_format_version = "2010-09-09"
self.template_options.description = "Amazon Reviews Dataset."
self.template_options.metadata = { "License": "MIT-0" }
# Create Database, Table and Partitions for Amazon Reviews
amazon_reviews_bucket = s3.Bucket.from_bucket_arn(self, "ImportedAmazonReviewsBucket", AMAZON_REVIEWS_BUCKET_ARN)
lakeformation_resource = lf.CfnResource(self, "LakeFormationResource",
resource_arn = amazon_reviews_bucket.bucket_arn,
use_service_linked_role = True)
cfn_glue_db = glue.CfnDatabase(self, "GlueDatabase",
catalog_id = core.Aws.ACCOUNT_ID,
database_input = glue.CfnDatabase.DatabaseInputProperty(
name = glue_db_name.value_as_string,
location_uri=amazon_reviews_bucket.s3_url_for_object(),
)
)
amazon_reviews_table = glue.CfnTable(self, "GlueTableAmazonReviews",
catalog_id = cfn_glue_db.catalog_id,
database_name = glue_db_name.value_as_string,
table_input = glue.CfnTable.TableInputProperty(
description = "Amazon Customer Reviews (a.k.a. Product Reviews)",
name = glue_table_name.value_as_string,
parameters = {
"classification": "parquet",
"typeOfData": "file"
},
partition_keys = [{"name": "product_category","type": "string"}],
storage_descriptor = glue.CfnTable.StorageDescriptorProperty(
columns = [
{"name": "marketplace", "type": "string"},
{"name": "customer_id", "type": "string"},
{"name": "review_id","type": "string"},
{"name": "product_id","type": "string"},
{"name": "product_parent","type": "string"},
{"name": "product_title","type": "string"},
{"name": "star_rating","type": "int"},
{"name": "helpful_votes","type": "int"},
{"name": "total_votes","type": "int"},
{"name": "vine","type": "string"},
{"name": "verified_purchase","type": "string"},
{"name": "review_headline","type": "string"},
{"name": "review_body","type": "string"},
{"name": "review_date","type": "bigint"},
{"name": "year","type": "int"}],
location = amazon_reviews_bucket.s3_url_for_object() + "/parquet/",
input_format = "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
output_format = "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat",
serde_info = glue.CfnTable.SerdeInfoProperty(
serialization_library = "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe",
parameters = {
"classification": "parquet",
"typeOfData": "file"
}
)
),
table_type = "EXTERNAL_TABLE"
)
)
# amazon_reviews_table.node.add_dependency(glue_default_permissions)
amazon_reviews_table.node.add_dependency(cfn_glue_db)
partition_list = ["Apparel", "Automotive", "Baby", "Beauty", "Books", "Camera", "Digital_Ebook_Purchase",
"Digital_Music_Purchase", "Digital_Software", "Digital_Video_Download","Digital_Video_Games", "Electronics",
"Furniture", "Gift_Card", "Grocery", "Health_&_Personal_Care", "Home", "Home_Entertainment",
"Home_Improvement", "Jewelry", "Kitchen", "Lawn_and_Garden", "Luggage", "Major_Appliances", "Mobile_Apps",
"Mobile_Electronics", "Music", "Musical_Instruments", "Office_Products", "Outdoors", "PC", "Personal_Care_Appliances",
"Pet_Products", "Shoes", "Software", "Sports", "Tools", "Toys", "Video", "Video_DVD", "Video_Games",
"Watches", "Wireless"]
partition_uri_prefix = f"{amazon_reviews_bucket.s3_url_for_object()}/parquet/{amazon_reviews_table.table_input.partition_keys[0].name}"
for partition in partition_list:
cfn_partition_location = partition_uri_prefix + "=" + partition
cfn_partition_id = "Partition"+partition
cfn_partition = glue.CfnPartition(self, cfn_partition_id,
catalog_id = amazon_reviews_table.catalog_id,
database_name = glue_db_name.value_as_string,
partition_input = glue.CfnPartition.PartitionInputProperty(
values = [ partition ],
storage_descriptor = glue.CfnPartition.StorageDescriptorProperty(
location = cfn_partition_location,
input_format = "org.apache.hadoop.mapred.TextInputFormat",
serde_info = glue.CfnPartition.SerdeInfoProperty(
serialization_library = "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe",
parameters = {
"serialization.format": "1"
}
)
)
),
table_name = glue_table_name.value_as_string
)
cfn_partition.add_depends_on(amazon_reviews_table)
| 5,457 |
wagtail/images/permissions.py
|
stevedya/wagtail
| 0 |
2023208
|
from django.dispatch import receiver
from django.test.signals import setting_changed
from wagtail.images import get_image_model
from wagtail.images.models import Image
from wagtail.permission_policies.collections import CollectionOwnershipPermissionPolicy
permission_policy = None
class ImagesPermissionPolicyGetter:
"""
A helper to retrieve the current permission policy dynamically.
Following the descriptor protocol, this should be used as a class attribute::
class MyImageView(PermissionCheckedMixin, ...):
permission_policy = ImagesPermissionPolicyGetter()
"""
def __get__(self, obj, objtype=None):
return permission_policy
def set_permission_policy():
"""Sets the permission policy for the current image model."""
global permission_policy
permission_policy = CollectionOwnershipPermissionPolicy(
get_image_model(), auth_model=Image, owner_field_name="uploaded_by_user"
)
@receiver(setting_changed)
def update_permission_policy(signal, sender, setting, **kwargs):
"""
Updates the permission policy when the `WAGTAILIMAGES_IMAGE_MODEL` setting changes.
This is useful in tests where we override the base image model and expect the
permission policy to have changed accordingly.
"""
if setting == "WAGTAILIMAGES_IMAGE_MODEL":
set_permission_policy()
# Set the permission policy for the first time.
set_permission_policy()
| 1,443 |
tests/example/output/src/pythonSnippet.py
|
hansehe/DockerBuildSystem
| 8 |
2022640
|
def GetInfoMsg():
infoMsg = "This python snippet is just an example.\r\n"
return infoMsg
if __name__ == "__main__":
print(GetInfoMsg())
| 148 |
geolife/cluster_points/my_cluster.py
|
Empythy/datamining-geolife-with-python
| 61 |
2023463
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 13 19:57:40 2014
@author: hai
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
# Plot result
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
#from sql_base import dbutils
from base import base_op
def getPointsDistence(p1, p2):
return base_op.calc_distance(p1[0],p1[1],p2[0],p2[1])
def getPointClusterDist(c, p):
distList = []
for cpoint in c:
distList.append(getPointsDistence(p, cpoint))
return max(distList)
def updataClusterCenter(clusterP, p, num):
p = clusterP*num + p
return p/(num+1)
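# Greedy one-pass clustering: each point joins the first existing cluster whose
# members all lie within minDistence km (the cluster centre is updated
# incrementally); otherwise it starts a new cluster. Clusters with at most
# minCout members are relabelled -1 (noise) at the end.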
def myCluster(points):
points = np.array(points);
minDistence = 0.5 #km
minCout = 15;
labels = np.zeros(len(points), dtype=int)
#isDeal = np.zeros(len(points), dtype=bool)
k = 0
clusterList = np.zeros((len(points),2),dtype=float)
numInCluster = np.zeros(len(points))
clusterListSore = []
for pointIdx in range(len(points)):
i = 0
while i < k:
if (getPointClusterDist(clusterListSore[i], points[pointIdx]) < minDistence):
clusterListSore[i].append(points[pointIdx])
clusterList[i] = updataClusterCenter(clusterList[i], points[pointIdx], numInCluster[i])
labels [pointIdx] = i
numInCluster[i] = numInCluster[i] + 1
break
else:
i = i + 1
if i == k:
#clusterListSore[i].append(points[pointIdx])
tmpList = []
tmpList.append(points[pointIdx])
clusterListSore.append(tmpList)
clusterList[i] = points[pointIdx]
numInCluster[i] = numInCluster[i] + 1
k = k + 1
# clusterPoints = []
#numInClusterRet = []
mask = np.zeros(len(points),dtype=bool)
pos = 0;
for i in range(k):
if (numInCluster[i] > minCout):
mask [i] = True;
labels = [pos if j == i else j for j in labels]
pos = pos +1
else:
labels = [-1 if j == i else j for j in labels]
return clusterList[mask],numInCluster[mask],labels
import csv
userid = 0
X = []
csv_name = "staypoints_%s.csv" %userid
with open(csv_name,"rb") as csvfp:
reader = csv.reader(csvfp)
for line in reader:
X.append(line)
X = np.array(X, np.float)
csvfp.close()
res = myCluster(X);
print res[0],res[1]
centerPoints = res[0]
labels = np.array(res[2])
# Black removed and is used for noise instead.
core_samples_mask = np.zeros_like(labels, dtype=bool)
#core_samples_mask = [True for i in core_samples_mask]
core_samples_mask [:] = True
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
fig = plt.figure(5)
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
continue
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
print "%d reference points contain %d points" %(k,len(xy))
#print "%f mean pos %f" %xy.
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=8)
# plt.xlim(30,42)
# plt.ylim(116,122)
'''
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
'''
plt.plot(centerPoints[:, 0], centerPoints[:, 1], 'o', markerfacecolor='k',
markeredgecolor='k', markersize=4)
plt.title('Estimated number of clusters: %d' % len(centerPoints))
| 3,813 |
users/migrations/0017_auto_20210202_2248.py
|
vconstellation/video-games-library
| 0 |
2023559
|
# Generated by Django 3.1.5 on 2021-02-02 21:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gamelist', '0010_auto_20210202_2244'),
('users', '0016_auto_20210202_2244'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='review',
field=models.ManyToManyField(null=True, to='gamelist.GamesReviews'),
),
]
| 464 |
mc/houston/subcommands/tests/test_info_e2e.py
|
aspuru-guzik-group/mission_control
| 3 |
2023434
|
import unittest
from mc.houston.tests import utils as _houston_test_utils
class BaseTestCase(unittest.TestCase):
def setUp(self):
self.houston = _houston_test_utils.generate_test_houston()
def _generate_cfg(self):
return _houston_test_utils.generate_test_cfg()
def _create_flow(self, flow_spec=None):
flow_spec = flow_spec or {}
return self.houston.run_command('create_flow', flow_spec=flow_spec)
class DefaultTestCase(BaseTestCase):
def test_returns_summaries(self):
self.assertEqual(
self.houston.run_command('info'),
{
'flow': {'count': 0},
'job': {'count': 0}
}
)
self._create_flow()
self.assertEqual(
self.houston.run_command('info')['flow']['count'],
1
)
class KeyTestCase(BaseTestCase):
def test_returns_record_for_key(self):
flow = self._create_flow()
self.assertEqual(
self.houston.run_command('info', key=flow['key']),
flow
)
| 1,077 |
Cracking_the_Coding_Interview/reversed_words.py
|
VinceW0/Leetcode_Python_solutions
| 4 |
2023021
|
#!/usr/bin/env python
"""
Reverse the word order for the words in a sentence.
Example:
I like to write Python.
Becomes:
Python. write to like I
"""
def reverse_words(sentence):
words = sentence.split()
return ' '.join(reversed(words))
def main():
sentence = "I like to write Python."
print(reverse_words(sentence))
if __name__ == '__main__':
main()
| 386 |
src/utils/data_util.py
|
yoshitomo-matsubara/hnd-ghnd-object-detectors
| 19 |
2022656
|
import torch
from structure.sampler import GroupedBatchSampler, create_aspect_ratio_groups
from structure.transformer import ToTensor, RandomHorizontalFlip, Compose
from utils import misc_util
from utils.coco_util import get_coco
def get_coco_dataset(split_dict, is_train):
transforms = [ToTensor()]
if is_train:
transforms.append(RandomHorizontalFlip(0.5))
return get_coco(img_dir_path=split_dict['images'], ann_file_path=split_dict['annotations'],
transforms=Compose(transforms), remove_non_annotated_imgs=split_dict['remove_non_annotated_imgs'],
jpeg_quality=split_dict['jpeg_quality'])
def get_coco_data_loaders(dataset_config, batch_size, distributed):
num_workers = dataset_config['num_workers']
aspect_ratio_group_factor = dataset_config['aspect_ratio_group_factor']
dataset_splits = dataset_config['splits']
train_dataset = get_coco_dataset(dataset_splits['train'], True)
val_dataset = get_coco_dataset(dataset_splits['val'], False)
test_dataset = get_coco_dataset(dataset_splits['test'], False)
print('Creating data loaders')
if distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
else:
train_sampler = torch.utils.data.RandomSampler(train_dataset)
val_sampler = torch.utils.data.SequentialSampler(val_dataset)
test_sampler = torch.utils.data.SequentialSampler(test_dataset)
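    # Bucket training images by aspect ratio so that GroupedBatchSampler can place
    # similarly shaped images in the same batch (less padding when collating).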
if aspect_ratio_group_factor >= 0:
group_ids = create_aspect_ratio_groups(train_dataset, k=aspect_ratio_group_factor)
train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, batch_size)
else:
train_batch_sampler = torch.utils.data.BatchSampler(train_sampler, batch_size, drop_last=True)
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_sampler=train_batch_sampler,
num_workers=num_workers, collate_fn=misc_util.collate_fn)
val_data_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, sampler=val_sampler,
num_workers=num_workers, collate_fn=misc_util.collate_fn)
test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, sampler=test_sampler,
num_workers=num_workers, collate_fn=misc_util.collate_fn)
return train_sampler, train_data_loader, val_data_loader, test_data_loader
| 2,666 |
application/app.py
|
demetrius-mp/flask-template
| 0 |
2023524
|
from application import flask_app
def create_app():
flask_application = flask_app.create_app()
return flask_application
| 131 |
Project2/c)ActivationFunctions.py
|
marianylund/fysstkprojects
| 0 |
2023555
|
# Useful info: https://towardsdatascience.com/implementing-different-activation-functions-and-weight-initialization-methods-using-python-c78643b9f20f
from nnreg.model import Model
from nnreg.trainer import Trainer
from nnreg.dataloader import DataLoader
from RegLib.HelperFunctions import create_frankie_data, create_X, plot_values_with_info,plot_values_with_two_y_axis
from nnreg.config import Config
from PROJECT_SETUP import ROJECT_ROOT_DIR
from RegLib.load_save_data import load_best_checkpoint, write_json, get_previous_checkpoints, load_data_as_dict
from nnreg.analysis_fun import show_heatmap, get_min_value, unpack, get_paths_of_results_where, plot_values_with_steps_and_info, param_search, train_save_configs, plot_lr_tran_val
from sklearn.model_selection import ParameterGrid
# For testing:
from sklearn.neural_network import MLPRegressor
# For Analysis:
from math import inf, isnan
import seaborn as sb
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
# Compare to sklearn, is there a better function to compare to?:
def test(cfg, data: DataLoader, best_data_dict):
regr = MLPRegressor(hidden_layer_sizes = cfg.MODEL.HIDDEN_LAYERS,
activation = "identity",
solver = "sgd",
early_stopping=True).fit(data.X_train, data.y_train.ravel())
regr_test_pred = regr.predict(data.X_test)
print("sklearn: R2 : % .4f, MSE : % .4f" % (Trainer.R2(data.y_test.ravel(), regr_test_pred), Trainer.MSE(data.y_test.ravel(), regr_test_pred)))
print("Ours: R2 : % .4f, MSE : % .4f" % (best_data_dict["Test_r2"], best_data_dict["Test_eval"]))
config_override = [
"MODEL.ACTIVATION_FUNCTIONS", ["sigmoid", "identity"], # will need to vary activations in c)
"MODEL.HIDDEN_LAYERS", [5], # this will need to vary
"MODEL.WEIGHT_INIT", "xavier", # this will need to vary in c) # {'random', 'he', 'xavier', 'zeros'}
"MODEL.EVAL_FUNC", "mse",
"MODEL.COST_FUNCTION", "mse",
"OPTIM.REGULARISATION", "none",
"OPTIM.BATCH_SIZE", 60,
"OPTIM.LR", 1e-3, # for now just concentrate on this lr
"DATA.NAME", "franke",
'DATA.FRANKIE.P', 5,
'DATA.FRANKIE.N', 1000,
'DATA.FRANKIE.NOISE', 0.1,
"OUTPUT_DIR", "Testc)ActFun",
]
cfg = Config(config_override = config_override)
output_dir = ROJECT_ROOT_DIR.joinpath(cfg.OUTPUT_DIR)
data_loader = DataLoader(cfg)
train_save_configs(cfg, data_loader, output_dir)
best_data_dict = load_best_checkpoint(output_dir)
test(cfg, data_loader, best_data_dict)
plot_lr_tran_val(best_data_dict)
# ------------------------Parameter search-----------------------------------
param_grid = {
'MODEL.WEIGHT_INIT': ['random', 'he', 'xavier', 'zeros'],
"MODEL.ACTIVATION_FUNCTIONS": [["sigmoid", "identity"], ["tanh", "identity"], ["relu", "identity"]],
}
#param_search(config_override, output_dir, param_grid, train, test)
param_grid = {
'MODEL.WEIGHT_INIT': ['random', 'he', 'xavier', 'zeros'],
"MODEL.ACTIVATION_FUNCTIONS": [["leaky_relu", "identity"]],
"MODEL.LEAKY_SLOPE": [-0.1, 0.1] # Maybe 0.01 is better?
}
#param_search(config_override, output_dir, param_grid, train, test)
# ------------------------Analysis of results-----------------------------------
def get_all_results_for_weight_init(path:Path, leaky = False):
weight_inits = ['random', 'he', 'xavier', 'zeros']
all_dir = [x for x in path.iterdir() if x.is_dir()]
results = []
for i in range(len(all_dir)):
d = all_dir[i]
cfg = Config(config_file = Path(d).joinpath("multilayer_model.yaml"))
if (leaky and cfg.MODEL.ACTIVATION_FUNCTIONS[0] == "leaky_relu") or (not leaky and cfg.MODEL.ACTIVATION_FUNCTIONS[0] != "leaky_relu"):
best = load_best_checkpoint(d)
last_ckp = get_previous_checkpoints(d)[0]
last = load_data_as_dict(Path(d).joinpath(last_ckp))
new_val = list(last["Val_eval"].values())
new_steps = list(map(int, last["Val_eval"].keys()))
results.append({"WEIGHT_INIT": cfg.MODEL.WEIGHT_INIT, "ACTIVATION": cfg.MODEL.ACTIVATION_FUNCTIONS[0], "LEAKY_SLOPE": cfg.MODEL.LEAKY_SLOPE, "Eval": best["Test_eval"],"Time": best["Proccess_time"], "Step": best["Step"], "Val_eval": new_val, "Val_steps": new_steps, "Name": d})
return results
def analyse_results(results, values_to_analyse = ("LR_DECAY", "LR"), round_up_to: float = 1, save_fig = False):
min_val = get_min_value(results, "Eval") # MAX WHEN ACC AND MIN WHEN MSE
print("Best val: ", min_val)
best_checkpoint = load_best_checkpoint(min_val["Name"])
cfg = Config(config_file = Path(min_val["Name"], "multilayer_model.yaml"))
p = str(cfg.MODEL.WEIGHT_INIT)
time_for_best_run = f'{min_val["Time"][0]:.0f} min {min_val["Time"][1]:.0f}'
best_test_eval = f'{min_val["Eval"]:.5f}'
# HEAT_MAP
info_to_add = {}
s_results = unpack(results, values_to_unpack_on = values_to_analyse, replace_val_bigger = inf)
position_index = s_results.index.get_loc(min_val[values_to_analyse[0]])
position_column = s_results.columns.get_loc(min_val[values_to_analyse[1]])
show_heatmap(s_results, info_to_add = info_to_add, patch_placement= (position_column, position_index), title = f"Franke NN", xlabel = values_to_analyse[1], ylabel = values_to_analyse[0], show_bar = True, save_fig = save_fig)
new_info = f'test score={best_test_eval}, time: {time_for_best_run}'
# PLOTS
info_to_add = {
"Results: ": new_info,
"File name: ": str(min_val["Name"]).replace("\\", "_"),
}
print(info_to_add)
#plot_lr_tran_val(best_checkpoint, y1_label = "Error", title = f'Best Run Weight init = {p}', info_to_add = info_to_add, save_fig = save_fig)
#path_to_results = Path("Results", "SimpleNN")
#all_results_with_leaky = get_all_results_for_weight_init(path_to_results, leaky=True)
#analyse_results(all_results_with_leaky, values_to_analyse = ("LEAKY_SLOPE", "WEIGHT_INIT"))
def analyse_without_leaky():
all_results_without_leaky = get_all_results_for_weight_init(path_to_results)
analyse_results(all_results_without_leaky, values_to_analyse = ("ACTIVATION", "WEIGHT_INIT"), save_fig = True)
values_to_plot = {}
steps_to_plot = {}
clean_of_exploding = True
for result in all_results_without_leaky:
new_val = result["Val_eval"]
weight_init = result["WEIGHT_INIT"]
act = result["ACTIVATION"]
if(not clean_of_exploding or (new_val[-1] != inf and not isnan(new_val[-1]))):
new_key = f"{weight_init}_{act}"
values_to_plot[new_key] = new_val
steps_to_plot[new_key] = result["Val_steps"]
info_to_add = {}
ylimit = (0.01, 0.04) #
xlimit = None #(0, 50000) #
save_fig = False
#test(cfg, data_loader, best_checkpoint_10_5)
#plot_values_with_steps_and_info(steps_to_plot, values_to_plot, title = "Weight Init and Activations on Franke", xlimit = xlimit, ylabel = "Error", info_to_add = info_to_add, ylimit = ylimit, save_fig = save_fig)
def analyse_with_leaky():
save_fig = True
all_results_with_leaky = get_all_results_for_weight_init(path_to_results, leaky=True)
analyse_results(all_results_with_leaky, values_to_analyse = ("LEAKY_SLOPE", "WEIGHT_INIT"), save_fig = save_fig)
values_to_plot = {}
steps_to_plot = {}
clean_of_exploding = True
for result in all_results_with_leaky:
new_val = result["Val_eval"]
weight_init = result["WEIGHT_INIT"]
act = result["LEAKY_SLOPE"]
if(not clean_of_exploding or (new_val[-1] != inf and not isnan(new_val[-1]))):
new_key = f"{weight_init}{act}"
values_to_plot[new_key] = new_val
steps_to_plot[new_key] = result["Val_steps"]
info_to_add = {}
ylimit = (0.01, 0.04) #
xlimit = None #(0, 50000) #
#test(cfg, data_loader, best_checkpoint_10_5)
plot_values_with_steps_and_info(steps_to_plot, values_to_plot, title = "Weight Init with leaky ReLU on Franke", xlimit = xlimit, ylabel = "Error", info_to_add = info_to_add, ylimit = ylimit, save_fig = save_fig)
#analyse_without_leaky()
#analyse_with_leaky()
| 8,194 |
scripts/solved/014_LCSM.py
|
akikuno/rosalind
| 0 |
2022658
|
# https://rosalind.info/problems/lcsm/
import sys
file = sys.argv[-1]
def read_fasta(file: str):
"""
Args
file: path of fasta file
"""
with open(file) as f:
fa = f.read().splitlines()
prev = True
header = []
seq = []
for f in fa:
if ">" in f:
header.append(f[1:])
prev = True
elif prev:
seq.append(f)
prev = False
else:
seq[-1] += f
return header, seq
_, seq = read_fasta(file)
def longest_common_substring(seq1, seq2):
n = len(seq1)
m = len(seq2)
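    # dp[i][j] = length of the common substring ending at seq1[i-1] and seq2[j-1]
    # (reset to 0 on a mismatch); `index` records every cell where the running
    # maximum is reached so each maximal substring can be traced back afterwards.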
dp = [[0] * (m + 1) for _ in range(n + 1)]
index = []
maxi = 0
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
dp[i][j] = 0
continue
if seq1[i - 1] == seq2[j - 1]:
dp[i][j] = max(dp[i - 1][j - 1] + 1, dp[i][j])
if dp[i][j] == maxi:
index.append([i, j])
if dp[i][j] > maxi:
maxi = dp[i][j]
index = [[i, j]]
else:
dp[i][j] = 0
results = []
for idx in index:
n, m = idx
tmp_results = ""
while n > 0 and m > 0:
if dp[n][m] == 0:
break
if dp[n][m] == dp[n - 1][m]:
n -= 1
elif dp[n][m] == dp[n][m - 1]:
m -= 1
elif dp[n][m] == dp[n - 1][m - 1] + 1:
tmp_results += seq1[n - 1]
n -= 1
m -= 1
results.append(tmp_results[::-1])
return results
if len(seq) == 1:
print(seq[0])
exit()
seq1 = seq[0]
seq2 = seq[1]
A = longest_common_substring(seq1, seq2)
for i in range(2, len(seq)):
tmp_A = []
for a in A:
tmp_A.append(longest_common_substring(a, seq[i]))
maxi = 0
for a in tmp_A:
if a:
maxi = max(map(len, a))
A = []
for a in tmp_A:
if a:
if max(map(len, a)) == maxi:
A.append(a)
A = sum(A, [])
print(*A)
####
| 2,125 |
scrim-captcha-hack.py
|
sahildua2305/scrim-captcha-hack
| 2 |
2023898
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: sahildua2305
# @Date: 2016-01-25 20:21:45
# @Last Modified by: sahildua2305
# @Last Modified time: 2016-01-25 21:17:57
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import *
from time import sleep
import unittest
SCRIM_URL = "http://scr.im/fake"
"""
sub-class of unittest.TestCase for testing the automated hack
https://docs.python.org/2/library/unittest.html#unittest.TestCase
"""
class HackTest(unittest.TestCase):
"""
Instructions that will be executed before the test case
https://docs.python.org/2/library/unittest.html#unittest.TestCase.setUp
"""
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.get(SCRIM_URL)
"""
Main method that will be executed and tested
"""
def test_hack_captcha(self):
driver = self.driver
# find all the options available for captcha
optionsXPath = "(//ul//li)"
options = WebDriverWait(driver, 20).until(lambda driver: driver.find_elements(By.XPATH, optionsXPath))
print "%d total captcha options found!" % len(options)
while True:
# randomly select any option
# Intentionally chosen the middle one to increase probability of getting email as soon as possible
selectedOption = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(optionsXPath+"[5]"))
selectedOption.click()
try:
# check if email has been revealed
emailElementId = "mailto"
emailElement = WebDriverWait(driver, 2).until(lambda driver: driver.find_element_by_id(emailElementId))
except TimeoutException:
# if email isn't revealed, let it try again
print "Email not found!"
pass
else:
# reveal email
emailId = emailElement.get_attribute("innerHTML")
print emailId
break
# Find try again button and click, if email hasn't been already revealed
tryAgainButtonText = "try again"
tryAgainButton = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_link_text(tryAgainButtonText))
tryAgainButton.click()
"""
Instructions that will be executed after the test case
https://docs.python.org/2/library/unittest.html#unittest.TestCase.tearDown
"""
def tearDown(self):
sleep(2)
self.driver.quit()
# run all the unit test cases written
unittest.main()
| 2,396 |
AO3/makeTagTimeSearchesFlex.py
|
ecjoseph42/toastystats
| 27 |
2023814
|
import re
import sys
import time
import os
#MAXMONTHS = 50
if len(sys.argv) < 6:
sys.exit('Usage: %s fandom freeform_tag time_period number_of_periods outfile' % sys.argv[0])
verbose = False
#if len(sys.argv) > 6:
# arg = sys.argv[6]
# if arg == "-verbose" or arg == "-v":
# verbose = True
fandom = sys.argv[1]
freeform = sys.argv[2]
timep = sys.argv[3]
nump = int(sys.argv[4])
outfile = sys.argv[5]
# **************************************
fout = open(outfile, "w")
fout.write("{ \"searches\": [\n")
# iterate through all time slices except the last one
for t in range(1, nump):
# write to outfile
fout.write("{ \"fandom\": \"")
fout.write(fandom)
fout.write("\",")
fout.write(" \"freeform\": \"")
fout.write(freeform)
fout.write("\",")
fout.write(" \"date\": \"")
fout.write(str(t))
fout.write(" ")
fout.write(str(timep))
fout.write("s ago\"},\n")
# treat the last item differently -- no final comma
fout.write("{ \"fandom\": \"")
fout.write(fandom)
fout.write("\",")
fout.write(" \"freeform\": \"")
fout.write(freeform)
fout.write("\",")
fout.write(" \"date\": \"")
fout.write(str(t+1))
fout.write(" ")
fout.write(str(timep))
fout.write("s ago\"}\n")
fout.write("] }\n")
| 1,281 |
tools/api-key-management/bin/create-api-key.py
|
tate2301/covid19-app-system-public
| 0 |
2023030
|
#!/usr/bin/env python3.8
import argparse
import base64
import os
import random
import string
import subprocess
import sys
import zipfile
from datetime import datetime, timedelta
import bcrypt
# you can test this with: tools/api-key-management/bin/create-api-key.py --api npex --party test --environment dev
# if you import the private key for test-dev, you'll be able to verify the output.
def random_string(length):
return "".join(random.choice(string.ascii_letters) for _ in range(length))
def log(*args):
print(f"{datetime.now()} {os.path.basename(sys.argv[0])}", end=": ", file=sys.stderr)
print(*args, file=sys.stderr)
sys.stderr.flush()
def run(cmd, env=None, **kwargs):
env = env if env is not None else {}
try:
return subprocess.run(cmd, env=dict(os.environ, **env),
check=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
encoding="UTF-8", **kwargs)
except subprocess.CalledProcessError as e:
raise IOError(f"Failed: {cmd}\n Stdout: {e.stdout}\n StdErr: {e.stderr}")
def random_hex(digits):
c = run(["openssl", "rand", "-hex", str(digits)])
return c.stdout.strip()
def import_public_key(path):
run(["gpg", "--import", path])
def gpg_key_fingerprint(keyname):
return run(["gpg", "--fingerprint", keyname]).stdout.strip()
def generate_ephemeral_key(expiry):
date = datetime.today().strftime("%Y-%m-%d")
random_bits = random_string(4)
distribution_keyname = f"tt-api-distribution-{date}-{random_bits}"
run(["gpg", "--quick-generate-key", distribution_keyname, "rsa4096", "sign,auth,encr",
expiry.strftime("%Y-%m-%d")])
return distribution_keyname
def encrypt(plaintext, encryption_keyname, recipient_keyname):
try:
return subprocess.run(
["gpg",
"--encrypt", "--cipher-algo", "AES256",
"--sign", "--digest-algo", "SHA256",
"--armor",
"--local-user", encryption_keyname,
"--recipient", recipient_keyname],
check=True,
encoding="UTF-8",
input=plaintext,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).stdout.strip()
except subprocess.CalledProcessError as e:
raise IOError(f"Failed: gpg encrypt\n Stdout: {e.stdout}\n StdErr: {e.stderr}")
def export_public_key(keyname):
return run(["gpg", "--export", "--armor", keyname]).stdout.strip()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create an API key for a third party, using their public key")
parser.add_argument("--api", required=True,
help="Type name of the api, e.g. `SubmitAPI` or `TestLabAPI` (validation expected externally)")
parser.add_argument("--party", required=True, help="name of third party system, e.g. test, npex-cta, ...")
parser.add_argument("--environment", required=True,
help="Deployment Environment, either dev, integration or prod (validation expected externally")
parser.add_argument("--output", default=".", help="Output directory for the resulting zip file")
parser.add_argument("--output-file", help="filename for the output zip file")
parser.add_argument("--output-secret",
help="filename where to write the key name and bcrypt'd secret (colon delimited)")
args = parser.parse_args()
environment = args.environment
party = args.party
api = args.api
scriptdir = os.path.realpath(os.path.dirname(__file__))
topdir = os.path.dirname(scriptdir)
keydir = f"{topdir}/public-keys/{environment}"
recipient_public_key_file = f"{keydir}/{party}-{environment}-public-key.txt"
if not os.path.exists(recipient_public_key_file):
log(f"Public key {recipient_public_key_file} not found")
exit(1)
log(f"Importing public key from {recipient_public_key_file}")
import_public_key(f"{recipient_public_key_file}")
today = datetime.today()
expiry = today + timedelta(days=180)
formatted_expiry = expiry.strftime("%Y%m%d")
keyname = f"{party}-{environment}-{formatted_expiry}"
keyvalue = random_hex(32)
entire_key = f"{keyname}:{keyvalue}"
entire_key_encoded = base64.b64encode(entire_key.encode("utf-8")).decode("utf-8")
secret_manager_location = f"/{api}/{keyname}"
hashed_value = bcrypt.hashpw(keyvalue.encode("utf-8"), bcrypt.gensalt()).decode("utf-8")
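    # Only the bcrypt hash of the secret is kept server side (in Secrets Manager);
    # the third party receives the base64-encoded "keyname:keyvalue" pair, encrypted
    # and signed inside the distribution zip.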
if args.output_secret is not None:
with open(args.output_secret, "w") as secret_file:
secret_file.write(f"{keyname}:{hashed_value}")
log("Generating an ephemeral distribution key, enter some password when prompted")
distribution_key_name = generate_ephemeral_key(today + timedelta(days=7))
ciphertext = encrypt(entire_key_encoded, encryption_keyname=distribution_key_name,
recipient_keyname=f"{party}-{environment}-key")
zip_dir = os.path.realpath(args.output)
zip_name = args.output_file if args.output_file is not None else f"{keyname}.zip"
zip_path = os.path.join(zip_dir, zip_name)
with zipfile.ZipFile(zip_path, mode="w") as z:
with z.open(f"{keyname}.gpg.asc", mode="w") as ct:
ct.write(ciphertext.encode("utf-8"))
with z.open(f"{distribution_key_name}-public-key.txt", mode="w") as pk:
pk.write(export_public_key(distribution_key_name).encode("utf-8"))
log(f"(1) Action Required: You need to run the following in ** {environment.upper()} ** ")
log(f"aws secretsmanager create-secret --name {secret_manager_location} --secret-string '{hashed_value}'")
log(f"(2) Action Required: You need to email to file {zip_path} to the third party")
log(f"and follow the trust validation procedure in confluence page: Distribution of API Keys to Third Parties")
log(f"(3) Action Required: The key fingerprint you need to verify with them is {distribution_key_name}")
log(f" : must have key fingerprint {gpg_key_fingerprint(distribution_key_name)}")
| 6,142 |
setup.py
|
brandizzi/retwill
| 2 |
2023474
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
print('importing distutils, not setuptools')
from distutils.core import setup
#### retwill info.
setup(name = 'retwill',
version = '1.0.1dev',
download_url = 'https://bitbucket.org/brandizzi/retwill/downloads/retwill-1.0.0.tar.gz',
description = "retwill - fork of C. Titus Brown's twill Web browsing language",
author = '<NAME>',
author_email = '<EMAIL>',
packages = ['twill',
'twill.extensions',
'twill.extensions.match_parse'],
# allow both
entry_points = dict(console_scripts=['twill-sh = twill.shell:main'],),
scripts = ['twill-fork'],
maintainer = '<NAME> ',
maintainer_email = '<EMAIL>',
url = 'http://bitbucket.org/brandizzi/retwill/',
long_description = """\
retwill is a fork of the acclaimed but apparently abandoned twill Web browsing
language.
Twill is a scripting system for automating Web browsing. Useful for testing
Web pages or grabbing data from password-protected sites automatically.
""",
classifiers = ['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Other Scripting Engines',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Testing',
],
obsoletes = 'twill',
test_suite = 'nose.collector',
install_requires = [
'lxml', 'pyparsing', 'BeautifulSoup4'
],
tests_require = ['quixote', 'nose'],
)
| 1,984 |
msdorkdump.py
|
Reva2194/msdorkdump
| 1 |
2023444
|
from ssl import SSL_ERROR_SSL
from urllib.error import HTTPError
from colorama import Fore, Style, init
import urllib.request
import time
import sys
import os
from os.path import exists
from googlesearch import search
import random
global domain
global success, info, fail
success, info, fail = Fore.GREEN + Style.BRIGHT, Fore.YELLOW + \
Style.BRIGHT, Fore.RED + Style.BRIGHT
global file_types
file_types = ['doc', 'docx', 'ppt', 'pptx', 'csv', 'pdf', 'xls', 'xlsx']
global user_agents
user_agents = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/604.4.7 (KHTML, like Gecko) Version/11.0.2 Safari/604.4.7', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36']
def banner():
styles = [Fore.LIGHTBLACK_EX, Fore.LIGHTBLUE_EX, Fore.LIGHTCYAN_EX,
Fore.LIGHTGREEN_EX, Fore.LIGHTMAGENTA_EX, Fore.LIGHTWHITE_EX]
random_index = random.randint(0, len(styles)-1)
print(styles[random_index] + "")
print('██████╗ ██████╗ ██████╗ ██╗ ██╗ ██████╗ ██╗ ██╗███╗ ███╗██████╗ ')
print('██╔══██╗██╔═══██╗██╔══██╗██║ ██╔╝ ██╔══██╗██║ ██║████╗ ████║██╔══██╗')
print('██║ ██║██║ ██║██████╔╝█████╔╝ ██║ ██║██║ ██║██╔████╔██║██████╔╝')
print('██║ ██║██║ ██║██╔══██╗██╔═██╗ ██║ ██║██║ ██║██║╚██╔╝██║██╔═══╝ ')
print('██████╔╝╚██████╔╝██║ ██║██║ ██╗ ██████╔╝╚██████╔╝██║ ╚═╝ ██║██║ ')
print('╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ \n')
print(' Google Dork File Finder ')
print(' Version 1.0.0 ')
print(' A project by The Mayor ')
print(' python3 msdorkdump.py <domain> to start ' + Style.RESET_ALL)
print("-" * 73)
def msdorker():
request = 0
    path = domain
    if not os.path.isdir(path):
        os.mkdir(domain)
os.chdir(domain)
for files in file_types:
try:
file_exists = exists('.google-cookie')
            if file_exists:
os.remove('.google-cookie')
print(info + f'[info] Checking for {files} extensions.')
rand_user_agent = random.choice(user_agents)
for results in search(f'site:{domain} filetype:{files}', tld='com', lang='en', num=100, start=0, stop=None, pause=5, user_agent=rand_user_agent):
print(success + f'[{files} extension found] - {results}')
url_path = results
head, tail = os.path.split(url_path)
urllib.request.urlretrieve(url_path, f'{tail}')
request = request + 1
if request == 100:
break
time.sleep(1)
except urllib.error.HTTPError as e:
if e.code == 404:
print(
fail + f'[Error Code 404] Web server is responding with 404 error. Skipping.')
continue
if e.code == 429:
print(
fail + f'\n[Error Code 429] Google is timing out queries. Wait a while and try again.\n')
quit()
else:
print(
fail + f'\n[warn] Error code {e.code} identified. Please create a new issue on the Github repo so it can be added.\n')
continue
except urllib.error.URLError:
print(fail + f'[Error] File could not be downloaded. Skipping.')
continue
if __name__ == "__main__":
try:
init()
banner()
domain = sys.argv[1]
msdorker()
print(info + f'\n[info] Dork scanning for {domain} completed.\n')
except KeyboardInterrupt:
print("\nYou either fat fingered this, or meant to do it. Either way, goodbye!\n")
quit()
except IndexError:
print(fail + '\nSyntax - python3 msdorkdump.py <domain>\n')
| 5,382 |
src/sokoban/templatetags/sokoban_tags.py
|
BusyJay/sokoban
| 1 |
2022728
|
from django import template
from django.forms import fields, CheckboxInput
__author__ = 'jay'
register = template.Library()
def add_attrs(widget_or_field, attrs, is_replace=True):
"""Add attributes from attrs to widget_or_field
:param widget_or_field: object it can be a widget, field
:param attrs: dict name, value map that contains all the attributes
want to be added
:param is_replace: whether replace the origin value, if not, will just
append a space then attach the value
"""
try:
field = getattr(widget_or_field, 'field') # for BoundField
except AttributeError:
field = widget_or_field
try:
widget = getattr(field, 'widget')
except AttributeError:
widget = field
try:
widget_attrs = getattr(widget, 'attrs')
except AttributeError:
# It's not a valid widget like object, so just do nothing
return widget
if is_replace:
widget_attrs.update(attrs)
return widget
for name in attrs:
if name in widget_attrs:
widget_attrs[name] = widget_attrs[name] + ' ' + attrs[name]
else:
widget_attrs[name] = attrs[name]
return widget_or_field
@register.filter
def add_cls(widget_or_field, cls):
"""Dynamic add class to widget or field.
"""
return add_attrs(widget_or_field, {'class': cls}, is_replace=False)
@register.filter
def add_attr(widget_or_field, name_value_pair):
"""Add an attribute to an object
:param widget_or_field: object it can be a widget, field
:param name_value_pair: str attribute string,
in "name:value[:isreplace]" format
"""
name_value_pair = name_value_pair.split(':')
assert len(name_value_pair) >= 2
attr_dict = {name_value_pair[0]: name_value_pair[1]}
if len(name_value_pair) == 2 or name_value_pair[2] == 'false':
return add_attrs(widget_or_field, attr_dict, False)
else:
return add_attrs(widget_or_field, attr_dict, True)
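# A minimal usage sketch (the form and field names are hypothetical, not part of
# this module): in a Django template with this tag library loaded,
#
#     {% load sokoban_tags %}
#     {{ form.username|add_cls:"form-control" }}
#     {{ form.username|add_attr:"placeholder:Your name" }}
#     {{ form.username|add_attr:"data-toggle:tooltip:true" }}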
@register.filter
def angularfy(origin_field, prefix):
"""Add bind attr to a field
"""
widget = origin_field.field.widget
if prefix:
widget.attrs['ng-model'] = prefix + '.' + origin_field.name
if not widget.attrs:
widget.attrs = {}
if 'maxlength' in widget.attrs:
widget.attrs['ng-maxlength'] = widget.attrs['maxlength']
if widget.is_required:
widget.attrs['ng-required'] = 'true'
if isinstance(origin_field.field, fields.EmailField):
widget.input_type = 'email'
elif isinstance(origin_field.field, fields.URLField):
widget.input_type = 'url'
    elif isinstance(origin_field.field, fields.DateTimeField):
widget.input_type = 'datetime'
elif isinstance(origin_field.field, fields.IntegerField):
widget.input_type = 'number'
if origin_field.field.min_value is not None:
widget.attrs['min'] = origin_field.field.min_value
if origin_field.field.max_value is not None:
widget.attrs['max'] = origin_field.field.max_value
return origin_field
@register.filter(name='is_checkbox')
def is_checkbox(field):
return isinstance(field.field.widget, CheckboxInput)
| 3,200 |
example_usage.py
|
dangkunal/chars2vec
| 152 |
2022624
|
import chars2vec
import sklearn.decomposition
import matplotlib.pyplot as plt
# Load Intuition Engineering pretrained model
# Models names: 'eng_50', 'eng_100', 'eng_150', 'eng_200', 'eng_300'
c2v_model = chars2vec.load_model('eng_50')
words = ['Natural', 'Language', 'Understanding',
'Naturael', 'Longuge', 'Updderctundjing',
'Motural', 'Lamnguoge', 'Understaating',
'Naturrow', 'Laguage', 'Unddertandink',
'Nattural', 'Languagge', 'Umderstoneding']
# Create word embeddings
word_embeddings = c2v_model.vectorize_words(words)
# Project embeddings on plane using the PCA
projection_2d = sklearn.decomposition.PCA(n_components=2).fit_transform(word_embeddings)
# Draw words on plane
f = plt.figure(figsize=(8, 6))
for j in range(len(projection_2d)):
plt.scatter(projection_2d[j, 0], projection_2d[j, 1],
marker=('$' + words[j] + '$'),
s=500 * len(words[j]), label=j,
facecolors='green' if words[j]
in ['Natural', 'Language', 'Understanding'] else 'black')
plt.show()
| 1,090 |
deepclustering/meters2/__init__.py
|
jizongFox/deep-clustering-toolbox
| 34 |
2022717
|
from .meter_interface import MeterInterface
from .historicalContainer import HistoricalContainer
from .individual_meters import *
from .storage_interface import Storage
# todo: improve the stability of each meter
| 214 |
heronpy/streamlet/impl/filterbolt.py
|
pjfanning/incubator-heron
| 3,348 |
2023752
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""module for filter bolt: FilterBolt"""
from heronpy.api.state.stateful_component import StatefulComponent
from heronpy.api.bolt.bolt import Bolt
from heronpy.api.component.component_spec import GlobalStreamId
from heronpy.api.stream import Grouping
from heronpy.streamlet.streamlet import Streamlet
from heronpy.streamlet.impl.streamletboltbase import StreamletBoltBase
# pylint: disable=unused-argument
class FilterBolt(Bolt, StatefulComponent, StreamletBoltBase):
"""FilterBolt"""
FUNCTION = 'function'
def init_state(self, stateful_state):
# Filter does not have any state
pass
def pre_save(self, checkpoint_id):
# Filter does not have any state
pass
def initialize(self, config, context):
self.logger.debug("FilterBolt's Component-specific config: \n%s", str(config))
self.processed = 0
self.emitted = 0
if FilterBolt.FUNCTION in config:
self.filter_function = config[FilterBolt.FUNCTION]
else:
raise RuntimeError("FilterBolt needs to be passed filter function")
def process(self, tup):
if self.filter_function(tup.values[0]):
self.emit([tup.values[0]], stream='output')
self.emitted += 1
self.processed += 1
self.ack(tup)
# pylint: disable=protected-access
class FilterStreamlet(Streamlet):
"""FilterStreamlet"""
def __init__(self, filter_function, parent):
super().__init__()
if not callable(filter_function):
raise RuntimeError("Filter function has to be callable")
if not isinstance(parent, Streamlet):
raise RuntimeError("Parent of Filter Streamlet has to be a Streamlet")
self._parent = parent
self._filter_function = filter_function
self.set_num_partitions(parent.get_num_partitions())
def _calculate_inputs(self):
return {GlobalStreamId(self._parent.get_name(), self._parent._output) :
Grouping.SHUFFLE}
def _build_this(self, builder, stage_names):
if not self.get_name():
self.set_name(self._default_stage_name_calculator("filter", stage_names))
if self.get_name() in stage_names:
raise RuntimeError("Duplicate Names")
stage_names.add(self.get_name())
builder.add_bolt(self.get_name(), FilterBolt, par=self.get_num_partitions(),
inputs=self._calculate_inputs(),
config={FilterBolt.FUNCTION : self._filter_function})
return True
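# Illustrative sketch (hypothetical names; `source` stands for any upstream Streamlet):
# a FilterStreamlet wraps a predicate and a parent streamlet, and its FilterBolt
# forwards only the tuples for which the predicate returns True, e.g.
#
#     evens = FilterStreamlet(lambda x: x % 2 == 0, source)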
| 3,220 |
tests/test_colour_filters.py
|
UBC-MDS/pymagine
| 0 |
2022724
|
import os
import pytest
import matplotlib.pyplot as plt
import numpy as np
from pymagine import colour_filters as cf
fname = os.path.join(
os.path.dirname(__file__),
'../tests/imgs/coronado_beach.jpeg')
bad_ftype = 'imgs/coronado_beach.csv'
url_fname = 'https:/Python-logo-notext.svg'
def test_inputs():
"""
Applies tests to the colour_filters function to ensure proper usage.
"""
with pytest.raises(TypeError):
cf.colour_filters(2) # Not a string for the file path
with pytest.raises(TypeError):
cf.colour_filters(bad_ftype) # Filetype is not image
with pytest.raises(TypeError):
cf.colour_filters(url_fname) # Filepath can't be URL
with pytest.raises(ValueError):
cf.colour_filters(fname, tone="pink") # Invalid tone value
# Invalid output file type
with pytest.raises(TypeError):
cf.colour_filters(fname, file_name=bad_ftype)
def test_outputs():
"""
Applies tests to the colour_filters function output
"""
test_array = plt.imread(fname)
returned_arr_grayscale = cf.colour_filters(fname, tone="grayscale")
returned_arr_negative = cf.colour_filters(fname, tone="negative")
returned_arr_red = cf.colour_filters(fname, tone="red_tone")
returned_arr_green = cf.colour_filters(fname, tone="green_tone")
returned_arr_blue = cf.colour_filters(fname, tone="blue_tone")
returned_arr_sepia = cf.colour_filters(fname, tone="sepia")
assert returned_arr_grayscale.shape == test_array[:, :, 0].shape
assert returned_arr_negative.shape == test_array[:, :, 0].shape
assert returned_arr_red.shape == test_array.shape
assert returned_arr_blue.shape == test_array.shape
assert returned_arr_green.shape == test_array.shape
assert returned_arr_sepia.shape == test_array.shape
assert all(isinstance(x, np.uint8) for x in returned_arr_red.ravel())
assert all(isinstance(x, np.uint8) for x in returned_arr_blue.ravel())
assert all(isinstance(x, np.uint8) for x in returned_arr_green.ravel())
assert all(isinstance(x, np.uint8) for x in returned_arr_sepia.ravel())
| 2,119 |
Twitter.py
|
MurasameOpen/botter
| 0 |
2022693
|
# -*- encoding: utf-8 -*-
import tweepy
__author__ = 'miyatake_y'
base_url = "https://api.twitter.com/1.1/"
update_url="statuses/update.json"
class Twitter:
def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret):
self.consumer_key_ = consumer_key
self.consumer_secret_ = consumer_secret
self.auth_ = tweepy.OAuthHandler(consumer_key,consumer_secret)
self.access_token_ = access_token
self.access_token_secret_ = access_token_secret
self.auth_.set_access_token(access_token,access_token_secret)
self.twitter_ = tweepy.API(self.auth_)
def update(self, msg):
print("Twitter.update:" + msg)
return(self.twitter_.update_status(status=msg))
def home_timeline(self):
return(self.twitter_.home_timeline())
def test(self):
return(self.twitter_.home_timeline()[0].text)
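# Minimal usage sketch (placeholder credentials, not real keys):
#
#     twitter = Twitter('CONSUMER_KEY', 'CONSUMER_SECRET',
#                       'ACCESS_TOKEN', 'ACCESS_TOKEN_SECRET')
#     twitter.update("Hello from the bot")
#     for status in twitter.home_timeline():
#         print(status.text)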
| 904 |
creepy/query/__init__.py
|
itesoro/reepyc
| 1 |
2023267
|
import os
import re
import logging
import requests
import warnings
import importlib
from typing import Tuple
from contextlib import contextmanager
from ..serialization import load_private_key
from ..protocol import make_cipher, HandshakeProtocol
from ..protocol.constants import NONCE_SIZE
from . import pickle
from .proxy import ProxyObject, VersionQuery, DownloadQuery, DelQuery, proxy_flags
logger = logging.getLogger('creepy')
def unproxy(*objs):
"""
    Return the object behind a proxy, or the object itself if it is not an instance of ProxyObject.
"""
res = list(objs)
remotes = {}
for i, obj in enumerate(objs):
if isinstance(obj, ProxyObject):
v = remotes.get(obj._remote)
if v is None:
v = remotes[obj._remote] = [], []
indices, proxied_objs = v
indices.append(i)
proxied_objs.append(obj)
for remote, (indices, proxied_objs) in remotes.items():
unproxied = remote._get(*proxied_objs)
if len(indices) == 1:
res[indices[0]] = unproxied
else:
for src_i, dst_i in enumerate(indices):
res[dst_i] = unproxied[src_i]
return res[0] if len(res) == 1 else res
def _make_request(url, data=None, **kwargs):
response = requests.post(url, data, **kwargs)
if response.status_code == 200:
return response.content
logger.error(f'{url}: {response.content}')
return None
class _Local:
def __init__(self):
self.os = os
def __repr__(self):
return 'self'
def path(self, path: str):
return (self, path)
def import_module(self, name):
return importlib.import_module(name)
@property
def open(self):
return open
class Remote:
def __init__(self, url, session_id, cipher):
self._url = url
self._session_id = session_id
self._cipher = cipher
self._nonce = 0
self._imports = {}
self._del_queue = []
try:
self._version = self._post(VersionQuery())
except Exception:
self._version = 0
def disconnect(self):
if self._url is None:
return
self._del_queue.append(0)
try:
self._post()
finally:
self._url = None
self._session_id = None
self._cipher = None
self._nonce = None
self._imports = None
def path(self, path: str):
return (self, path)
def __repr__(self):
return self._url
    # TODO(<NAME>): Implement [named] scopes instead of the misleading globals() function.
@property
def globals(self):
assert os.__class__ == re.__class__
flags = proxy_flags(os.__class__) # it's ok to use any module instead of `os`
return ProxyObject(self, 0, flags, 'module')
def import_module(self, name):
module = self._imports.get(name, None)
if module is None:
self._imports[name] = module = self.globals.__import__(name)
return module
@property
def open(self):
return self.globals.open
@property
def os(self):
return self.import_module('os')
def _make_del_query(self):
res = None
if len(self._del_queue) > 0:
res = DelQuery(self._del_queue)
self._del_queue = []
return res
def _post(self, *query):
query = list(query)
if len(self._del_queue) > 0:
query.append(DelQuery(self._del_queue))
self._del_queue = []
data = self._nonce.to_bytes(NONCE_SIZE, 'big') + pickle.dumps(*query)
self._nonce += 1
response = _make_request(self._url, self._session_id + self._cipher.encrypt(data))
if response is None:
raise ValueError()
res = pickle.loads(self._cipher.decrypt(response))
if isinstance(res, Exception):
raise res
return res
def _lazy_delete(self, id):
self._del_queue.append(id)
def _get(self, *objs: Tuple[ProxyObject]):
ids = []
for obj in objs:
assert obj._remote == self
ids.append(obj._id)
if self._version == 0:
res = []
for id in ids:
res.append(self._post(DownloadQuery(id=id)))
else:
res = self._post(DownloadQuery(ids=ids))
return res[0] if len(res) == 1 else res
def download(self, obj: ProxyObject):
warnings.warn('Deprecated, use ProxyObject._get() instead', category=DeprecationWarning)
return self._get(obj)
@contextmanager
def connect(url, private_key=None):
if url == 'self':
try:
yield _self_node
finally:
return
if not re.search(r'^(\w+)://', url):
url = 'http://' + url
if private_key is None:
private_key = load_private_key()
if private_key is None:
return None
def public_channel(endpoint, data=None):
return _make_request(f'{url}{endpoint}', data, timeout=10)
session_id, cipher_name, cipher_key = HandshakeProtocol.hi_alice(private_key, public_channel)
cipher = make_cipher(cipher_name, cipher_key)
try:
remote = Remote(url, session_id, cipher)
        # TODO(<NAME>): Make the Remote class a context manager.
yield remote
finally:
remote.disconnect()
_self_node = _Local()
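# Usage sketch (the URL and the remote call are illustrative only; passing 'self'
# yields the local node instead of opening a connection):
#
#     with connect('localhost:8000') as remote:
#         remote_os = remote.os                      # proxied os module on the server
#         listing = unproxy(remote_os.listdir('.'))  # fetch the real value locally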
| 5,445 |
cloud-runner/cloud_runner/cluster/google_cluster.py
|
flegac/deep-experiments
| 0 |
2023139
|
import subprocess
from typing import List, Union
from cloud_runner.cluster.cloud_cluster import CloudCluster
from surili_core.utils import shell
class GoogleCluster(CloudCluster):
CREATE_COMMAND = 'gcloud compute instances create {instances} {config} --zone={zone}'
DELETE_COMMAND = 'gcloud compute instances delete {instances} --zone={zone} -q'
STOP_COMMAND = 'gcloud compute instances stop {instances} --zone={zone} {wait}'
START_COMMAND = 'gcloud compute instances start {instances} --zone={zone} {wait}'
SSH_COMMAND = 'gcloud compute ssh --zone {zone} {instances} --command "{command}"'
PUSH_COMMAND = 'gcloud compute scp --recurse --zone {zone} "{local_path}" "{instance}:{remote_path}"'
PULL_COMMAND = 'gcloud compute scp --recurse --zone {zone} "{instance}:{remote_path}" "{local_path}"'
CONNECT_COMMAND = 'gcloud beta compute ssh --zone "{zone}" "{instance}"'
def __init__(self,
name: str,
cluster_size: int,
zone: str = 'europe-west1-b',
cluster_config: List[str] = None,
remote_workspace: str = '/home/workspace',
):
super().__init__(cluster_size, remote_workspace)
self.cluster_config = cluster_config or []
self.zone = zone
self.name = name
self.instances = ['{}-{}'.format(name, i) for i in range(cluster_size)]
self.instances_string = ' '.join(self.instances)
def create(self) -> subprocess.Popen:
return shell(GoogleCluster.CREATE_COMMAND.format(
instances=self.instances_string,
config=' '.join(self.cluster_config),
zone=self.zone
))
def delete(self) -> subprocess.Popen:
return shell(GoogleCluster.DELETE_COMMAND.format(
instances=self.instances_string,
zone=self.zone
))
def start(self, wait: bool = False) -> subprocess.Popen:
return shell(GoogleCluster.START_COMMAND.format(
instances=self.instances_string,
zone=self.zone,
            wait='' if wait else '--async'
))
def stop(self, wait: bool = True) -> subprocess.Popen:
return shell(GoogleCluster.STOP_COMMAND.format(
instances=self.instances_string,
zone=self.zone,
            wait='' if wait else '--async'
))
def ssh(self, commands: Union[str, List[str]], instance_id: int) -> subprocess.Popen:
cmd = commands if isinstance(commands, str) else ' ; '.join(commands)
return shell(GoogleCluster.SSH_COMMAND.format(
instances=self.instances[instance_id],
zone=self.zone,
command=cmd
))
def push(self, local_path: str, remote_path: str, instance_id: int) -> subprocess.Popen:
cmd = GoogleCluster.PUSH_COMMAND.format(
zone=self.zone,
instance=self.instances[instance_id],
local_path=local_path,
remote_path=remote_path)
return shell(cmd)
def pull(self, local_path: str, remote_path: str, instance_id: int) -> subprocess.Popen:
# os.makedirs(local_path, exist_ok=True)
cmd = GoogleCluster.PULL_COMMAND.format(
zone=self.zone,
instance=self.instances[instance_id],
local_path=local_path,
remote_path=remote_path)
return shell(cmd)
def connection_command(self, instance_id: int) -> str:
return GoogleCluster.CONNECT_COMMAND.format(zone=self.zone, instance=self.instances[instance_id])
def _instance_string(self, instance_id: int):
        return self.instances[instance_id] if instance_id is not None else self.instances_string
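# Usage sketch (instance name, zone and machine flags are illustrative only; each
# method returns a subprocess.Popen, so .wait() blocks until the gcloud call finishes):
#
#     cluster = GoogleCluster('experiment', cluster_size=2,
#                             cluster_config=['--machine-type=n1-standard-4'])
#     cluster.create().wait()
#     cluster.push('data/', '/home/workspace', instance_id=0).wait()
#     cluster.ssh('ls /home/workspace', instance_id=0).wait()
#     cluster.delete().wait()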
| 3,699 |
whatis/app.py
|
vapor-ware/whatis
| 0 |
2022888
|
import json
import logging
import os
import uuid
from hashlib import sha224
from pathlib import Path
from alembic import command
from alembic.migration import MigrationContext
from cachetools import cached, TTLCache
from flask import Flask
from flask_migrate import Migrate
from slack.errors import SlackApiError
from slack.web.client import WebClient
from .constants import WHATIS_FIELDS
from .models import db as sqlalchemy_db, WhatisPreloader, Whatis
logging.basicConfig(level=logging.DEBUG)
class WhatisApp(Flask):
def __init__(self, db_uri=None, config=None, preload_path=None, **kwargs):
Flask.__init__(self, __name__)
# Preload default configuration
self.config.from_object(config)
self.config.from_mapping(kwargs)
# Set the secret key for this instance (creating one if one does not exist already)
self.config["SECRET_KEY"] = self.config["SECRET_KEY"] or str(uuid.uuid4())
# Configure database
if db_uri:
self.config["SQLALCHEMY_DATABASE_URI"] = db_uri
if self.config["SQLALCHEMY_DATABASE_URI"] == "sqlite:///:memory:":
self.logger.warning(
"Using Sqlite in-memory database, all data will be lost when server shuts down!"
)
# DB dialect logic - used for lookup operations
db_dialect = self.config["SQLALCHEMY_DATABASE_URI"].split(":")[0]
self.logger.info(f"Attempting to use db dialect {db_dialect}")
if self.config.get("DEBUG") is not True:
self.logger.warning(
"It is strongly recommended that you do not use Sqlite for production deployments!"
)
        if db_dialect not in ("postgres", "sqlite"):
raise RuntimeError(
f"Dialect {db_dialect} not supported - please use sqlite or postgres"
)
self.config["DB_DIALECT"] = db_dialect
# Register database schema with flask app
sqlalchemy_db.init_app(self)
# Set up database migration information
# Registers Migrate plugin in self.extensions['migrate']
Migrate(self, self.db)
# Try to create the database if it does not already exist
# Existence is determined by whether there is an existing alembic migration revision
db_auto_create = self.config.get("DB_AUTO_CREATE", True)
db_auto_upgrade = self.config.get("DB_AUTO_UPGRADE", True)
if db_auto_create and self.db_revision is None:
self.db_init()
elif db_auto_upgrade:
self.db_upgrade()
self.logger.setLevel(logging.DEBUG)
# Install postgres fuzzystrmatch extension
if db_dialect == "postgres":
self.logger.info("Enabling Postgres fuzzy string matching")
with self.app_context(), self.db.engine.connect() as conn:
conn.execute("CREATE EXTENSION IF NOT EXISTS fuzzystrmatch")
# Handle preloading an existing Terminology set
self.handle_whatis_preload(preload_path)
# Register Slack client on the current application instance
if all(
[
self.config.get(i) is None
for i in ["SLACK_SIGNING_SECRET", "SLACK_TOKEN"]
]
):
raise RuntimeError(
"Whatis must have both a slack signing secret and slack bot token set"
)
self.sc = WebClient(self.config.get("SLACK_TOKEN"), ssl=False)
from whatis.routes.slack_route import slack_blueprint
self.register_blueprint(slack_blueprint, url_prefix="/slack")
if not all(
[
type(self.config[i]) == list
for i in ["ADMIN_USER_IDS", "ADMIN_CHANNEL_IDS"]
]
):
raise RuntimeError(
"ADMIN_USER_IDS and ADMIN_CHANNEL_IDS must be lists of Admin user IDs or channel IDs"
)
try:
au = self.admin_users
self.logger.info(f"Initial Admin users set as {au}")
except SlackApiError as s:
raise RuntimeError(
f"Failed to get Admin users from specified Admin channels - are you sure the whatis bot "
f"is invited and has the necessary scopes {s}"
)
# Register a basic route for healthchecking
@self.route("/ping")
def healthcheck():
return "pong"
@property
def db(self):
return sqlalchemy_db
@property
def _alembic_config(self):
if not hasattr(self, "extensions") or "migrate" not in self.extensions:
raise RuntimeError(
"KnowledgeApp has not yet been configured. Please instantiate it via `get_app_for_repo`."
)
migrations_path = os.path.join(os.path.dirname(__file__), "migrations")
# This is terrible but seems to be needed for packaging
migrations_path = (
migrations_path
if Path(migrations_path).exists() is True
else os.path.join(os.path.dirname(__file__), "whatis/migrations")
)
return self.extensions["migrate"].migrate.get_config(migrations_path)
def db_init(self):
with self.app_context():
# Create all tables
sqlalchemy_db.create_all()
# Stamp table as being current
command.stamp(self._alembic_config, "head")
return self
@property
def db_revision(self):
with self.app_context():
conn = self.db.engine.connect()
context = MigrationContext.configure(conn)
return context.get_current_revision()
@cached(TTLCache(ttl=3600, maxsize=2048))
def _get_admin_users(self):
channel_admin_members = []
for channel in self.config["ADMIN_CHANNEL_IDS"]:
try:
channel_admin_members.extend(
self.sc.conversations_members(channel=channel)["members"]
)
except SlackApiError as s:
self.logger.warning(
f"Could not get members from the specified Admin channel {channel} has the whatis "
f"bot been removed or scopes been changed? {s}"
)
return self.config["ADMIN_USER_IDS"] + channel_admin_members
@property
def admin_users(self):
"""
Get all users approved as admins
"""
return self._get_admin_users()
def db_upgrade(self):
with self.app_context():
command.upgrade(self._alembic_config, "head")
return self
def db_downgrade(self, revision):
with self.app_context():
command.downgrade(self._alembic_config, revision)
return self
def db_migrate(self, message, autogenerate=True):
with self.app_context():
command.revision(
self._alembic_config, message=message, autogenerate=autogenerate
)
return self
def handle_whatis_preload(self, preload_path):
with self.app_context():
if preload_path is not None:
filepath = Path(preload_path)
if filepath.exists():
file_contents = open(filepath).read()
file_hash = sha224(file_contents.encode()).hexdigest()
existing_preload = (
self.db.session.query(WhatisPreloader)
.filter(WhatisPreloader.hash == file_hash)
.first()
)
if existing_preload is not None:
self.logger.info(
f"Existing whatis load found for the file {existing_preload}, skipping"
)
else:
raw = json.loads(file_contents)
self.logger.info(
f"Attempting to preload terminology from the file {filepath}, {len(raw)} records found"
)
for raw_whatis in raw:
if all(
[i in raw_whatis for i in ["terminology", "definition"]]
) is False or not set(raw_whatis).issubset(
set(WHATIS_FIELDS)
):
raise RuntimeError(
f"Attempt to preload Terminology failed, Whatis {raw_whatis} has an unrecognised attribute or does not contain both of [terminology, definiton]"
)
wi = Whatis(
**{"version": 0, "added_by": "WHATIS BOT", **raw_whatis}
)
self.db.session.add(wi)
self.logger.debug(f"Added the Whatis {wi}")
# Now register the fact that we have loaded this file so it is ignored for future deployments
self.db.session.add(
WhatisPreloader(hash=file_hash, filename=str(filepath))
)
self.db.session.commit()
else:
raise FileNotFoundError(
f"Preload filepath specified {preload_path} but no file found"
)
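# Sketch of a preload file accepted by handle_whatis_preload (the terms shown are
# examples; any extra keys must belong to WHATIS_FIELDS):
#
#     [
#         {"terminology": "KPI", "definition": "Key Performance Indicator"},
#         {"terminology": "MRR", "definition": "Monthly Recurring Revenue"}
#     ]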
| 9,432 |
Python/Extra_Algo/subset.py
|
yash0307jain/competitive-programming
| 0 |
2023673
|
def subset(arr, ind, sub, ans):
if ind == len(arr):
return ans
for i in range(ind, len(arr)):
temp = sub.copy()
temp.append(arr[i])
ans.append(temp)
subset(arr, i + 1, temp, ans)
return ans
arr = [1, 2, 3]
ans = []
ans = subset(arr, 0, [], ans)
for i in range(len(ans)):
print(ans[i])
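# Expected output for arr = [1, 2, 3] (every non-empty subset, one per line):
# [1]
# [1, 2]
# [1, 2, 3]
# [1, 3]
# [2]
# [2, 3]
# [3]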
| 318 |
accelerator_abstract/models/base_member_profile.py
|
masschallenge/django-accelerator
| 6 |
2023317
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
from accelerator_abstract.models.base_core_profile import BaseCoreProfile
class BaseMemberProfile(BaseCoreProfile):
user_type = 'member'
default_page = "member_homepage"
class Meta(BaseCoreProfile.Meta):
db_table = 'accelerator_memberprofile'
abstract = True
| 387 |
snl/ml/models/layers.py
|
mackelab/SNL_py3port
| 30 |
2023249
|
import numpy as np
import theano
import theano.tensor as tt
dtype = theano.config.floatX
class BatchNorm:
def __init__(self, x, n_units, eps=1.0e-5):
self.input = x
# parameters
self.log_gamma = theano.shared(np.zeros(n_units, dtype=dtype), name='log_gamma', borrow=True)
self.beta = theano.shared(np.zeros(n_units, dtype=dtype), name='beta', borrow=True)
self.parms = [self.log_gamma, self.beta]
# minibatch statistics
self.m = tt.mean(x, axis=0)
self.v = tt.mean((x - self.m) ** 2, axis=0) + eps
# transformation
x_hat = (x - self.m) / tt.sqrt(self.v)
self.y = tt.exp(self.log_gamma) * x_hat + self.beta
# batch statistics to be used at test time
self.bm = theano.shared(np.zeros(n_units, dtype=dtype), name='bm', borrow=True)
self.bv = theano.shared(np.ones(n_units, dtype=dtype), name='bv', borrow=True)
# theano evaluation functions, will be compiled when needed
self.set_stats_f = None
self.eval_f = None
def reset_theano_functions(self):
"""
Resets theano functions, so that they are compiled again when needed.
"""
self.set_stats_f = None
self.eval_f = None
def set_batch_stats(self, x):
"""
Sets the batch statistics to be equal to the statistics computed on dataset x.
:param x: numpy array, rows are datapoints
"""
if self.set_stats_f is None:
self.set_stats_f = theano.function(
inputs=[self.input],
updates=[(self.bm, self.m), (self.bv, self.v)]
)
self.set_stats_f(x.astype(dtype))
def eval(self, x):
"""
Evaluates the batch norm transformation for input x.
:param x: input as numpy array
:return: output as numpy array
"""
if self.eval_f is None:
self.eval_f = theano.function(
inputs=[self.input],
outputs=[self.y],
givens=[(self.m, self.bm), (self.v, self.bv)]
)
x = np.asarray(x, dtype=dtype)
return self.eval_f(x[np.newaxis, :])[0] if x.ndim == 1 else self.eval_f(x)
def eval_inv(self, y):
"""
Evaluates the inverse batch norm transformation for output y.
NOTE: this calculation is done with numpy and not with theano.
:param y: output as numpy array
:return: input as numpy array
"""
x_hat = (y - self.beta.get_value(borrow=True)) * np.exp(-self.log_gamma.get_value(borrow=True))
x = np.sqrt(self.bv.get_value(borrow=True)) * x_hat + self.bm.get_value(borrow=True)
return x
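# Usage sketch (shapes and data are illustrative):
#
#     x = tt.matrix('x')
#     bn = BatchNorm(x, n_units=10)
#     data = np.random.randn(256, 10).astype(dtype)
#     bn.set_batch_stats(data)   # freeze batch statistics for test time
#     y = bn.eval(data)          # forward transformation
#     x_rec = bn.eval_inv(y)     # approximate inverse, computed with numpy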
| 2,722 |
mcx/distributions/dirichlet.py
|
tblazina/mcx
| 1 |
2023558
|
import jax
from jax import random
from mcx.distributions import constraints
from mcx.distributions.distribution import Distribution
class Dirichlet(Distribution):
parameters = {"a": constraints.strictly_positive}
support = constraints.simplex
def __init__(self, alpha):
# check that it matches the Categorical's shape conventions
self.event_shape = alpha.shape[0]
self.batch_shape = alpha.shape[1]
self.alpha = alpha
def sample(self, rng_key, sample_shape=()):
shape = sample_shape + self.batch_shape + self.event_shape
return random.dirichlet(rng_key, self.alpha, shape)
@constraints.limit_to_support
def logpdf(self, x):
log_x = jax.nn.log_softmax(x, axis=0)
unnormalized = jax.numpy.sum((self.alpha - 1) * log_x)
        # log of the multivariate Beta function: sum(lgamma(alpha)) - lgamma(sum(alpha))
        normalization = jax.numpy.sum(
            jax.scipy.special.gammaln(self.alpha)
        ) - jax.scipy.special.gammaln(jax.numpy.sum(self.alpha))
return unnormalized - normalization
| 1,036 |
indy_node/test/pool_restart/test_fail_pool_restart.py
|
panickervinod/indy-node
| 1 |
2023782
|
import pytest
from plenum.common.exceptions import RequestRejectedException, \
RequestNackedException
from indy_common.constants import POOL_RESTART, ACTION, START, DATETIME
from plenum.common.constants import TXN_TYPE, STEWARD_STRING
from plenum.test.helper import sdk_gen_request, sdk_sign_and_submit_req_obj, \
sdk_get_reply, sdk_get_and_check_replies
def test_fail_pool_restart_with_steward_role(
sdk_pool_handle, sdk_wallet_steward, looper):
op = {
TXN_TYPE: POOL_RESTART,
ACTION: START,
}
req_obj = sdk_gen_request(op, identifier=sdk_wallet_steward[1])
req = sdk_sign_and_submit_req_obj(looper,
sdk_pool_handle,
sdk_wallet_steward,
req_obj)
with pytest.raises(RequestRejectedException) as excinfo:
sdk_get_and_check_replies(looper, [req], 100)
assert excinfo.match('{} can not do this action'.format(STEWARD_STRING))
def test_fail_pool_restart_with_invalid_datetime(
sdk_pool_handle, sdk_wallet_steward, looper):
invalid_datetime = "12.05.2018 4/40"
op = {
TXN_TYPE: POOL_RESTART,
ACTION: START,
DATETIME: invalid_datetime
}
req_obj = sdk_gen_request(op, identifier=sdk_wallet_steward[1])
req = sdk_sign_and_submit_req_obj(looper,
sdk_pool_handle,
sdk_wallet_steward,
req_obj)
with pytest.raises(RequestNackedException) as excinfo:
sdk_get_and_check_replies(looper, [req], 100)
assert excinfo.match("datetime " + invalid_datetime + " is not valid")
| 1,722 |
python/core/auto_additions/qgslocatorfilter.py
|
dyna-mis/Hilabeling
| 0 |
2022741
|
# The following has been generated automatically from src/core/locator/qgslocatorfilter.h
QgsLocatorFilter.Priority.baseClass = QgsLocatorFilter
QgsLocatorFilter.Flags.baseClass = QgsLocatorFilter
Flags = QgsLocatorFilter # dirty hack since SIP seems to introduce the flags in module
| 285 |
robobattleship/__main__.py
|
apopelo/robobattleship
| 1 |
2022651
|
# -*- coding: utf-8 -*-
# pylint: disable=C0103,C0321
"""
This module contains RoboBattleship Server http views and http server runner.
"""
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
from gevent import monkey; monkey.patch_all()
from bottle import route, run, response, hook, static_file, TEMPLATE_PATH
from bottle import Jinja2Template, jinja2_template as template
# XXX: think about this line
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import robobattleship.log
from robobattleship.settings import TEMPLATES_ROOT, STATIC_ROOT, HOST, PORT
from robobattleship.server import Server
from robobattleship.utils import JsonResponse, delay
from robobattleship.errors import RoboBattleshipException, ERRORS
from robobattleship.players.stupid import StupidBot
LOG = robobattleship.log.getLogger("robobattleship.main")
# -----
# Hooks
# -----
@hook('after_request')
def enable_crossdomain():
"Allow cross domain requests from browsers"
response.headers[b'Access-Control-Allow-Origin'] = b'*'
# --------------
# Standard views
# --------------
@route('/')
def index():
"""
Shows index page of the server.
"""
try:
return template('index.html',
players=SERVER.players.values(),
battles=SERVER.battles.values(),
archived_battles=SERVER.archived_battles.values(),
errorcodes=[(code, ERRORS[code]) for code in sorted(ERRORS)])
except RoboBattleshipException as e:
return JsonResponse.error(e)
except:
LOG.exception("Failed to show server index page")
return JsonResponse.error(101)
@route('/about/')
def about():
"""
Shows about page
"""
try:
return template('about.html')
except RoboBattleshipException as e:
return JsonResponse.error(e)
except:
LOG.exception("Failed to show about page")
return JsonResponse.error(101)
@route('/players/')
def players():
"""
Shows a list of all registered players on the server
"""
try:
return template('players.html', players=SERVER.players.values())
except RoboBattleshipException as e:
return JsonResponse.error(e)
except:
LOG.exception("Failed to show a list of all registered players on the "
"server")
return JsonResponse.error(101)
@route('/battle/<bid>')
def battle(bid):
"""
Shows battle between two players on the screen
"""
try:
return template('battle.html', battle=SERVER.get_battle(bid))
except RoboBattleshipException as e:
return JsonResponse.error(e)
except:
LOG.exception("Failed to show battle with bid '%s'", bid)
return JsonResponse.error(101)
@route('/gameboard/<bid>')
def gameboard(bid):
"""
Shows battle board between two players on the screen
"""
try:
battle = SERVER.get_battle(bid)
return JsonResponse.success({"battle": {
"active": battle.is_active(),
"html": template('gameboard.html', battle=battle)
}})
except RoboBattleshipException as e:
return JsonResponse.error(e)
except:
LOG.exception("Failed to show game board with bid '%s'", bid)
return JsonResponse.error(101)
# ------------
# Static files
# ------------
@route('/static/<filepath:path>')
def static(filepath):
"Serves static files"
try:
return static_file(filepath, root=STATIC_ROOT)
except:
LOG.exception("Failed to show static file '%s'", filepath)
return JsonResponse.error(101)
# ----------------
# REST API methods
# ----------------
@route('/register/<name>')
@delay()
def register(name):
"""
Registers a new player on the server.
:param name: player name
"""
try:
name = name.decode("utf-8")
player = SERVER.register_player(name)
BOT_STUPID1.add_opponent(player.uid)
BOT_STUPID2.add_opponent(player.uid)
return JsonResponse.success({'player': player.to_dict()})
except RoboBattleshipException as e:
return JsonResponse.error(e)
except:
LOG.exception("Failed to register player '%s'", name)
return JsonResponse.error(101)
return JsonResponse.success()
@route('/setships/<uid>/<secret>/<ships>')
@delay()
def setships(uid, secret, ships):
"""
Set player's ships arrangement
"""
try:
SERVER.validate_player(uid, secret)
SERVER.setships(uid, ships)
except RoboBattleshipException as e:
return JsonResponse.error(e)
except:
LOG.exception("Failed to set ships '%s' for player '%s'", ships, uid)
return JsonResponse.error(101)
return JsonResponse.success()
@route('/shoot/<uid>/<secret>/<enemy_uid>/<x:int>/<y:int>')
@delay()
def shoot(uid, secret, enemy_uid, x, y):
"""
One player shoots at another player.
"""
try:
SERVER.validate_player(uid, secret)
result = SERVER.shoot(uid, enemy_uid, x, y)
except RoboBattleshipException as e:
# if battle is over - archive it
if e.code == 304:
SERVER.archive_battle(uid, enemy_uid)
return JsonResponse.error(e)
except:
LOG.exception("Failed to shoot at player '%s' at [%s,%s]",
enemy_uid, x, y)
return JsonResponse.error(101)
return JsonResponse.success({'result': result})
# Service methods
@route('/dumpstate/')
@route('/dumpstate/<filename>')
@delay()
def dumpstate(filename=None):
"""
Dumps server state into a file.
"""
try:
SERVER.dumpstate(filename)
except:
LOG.exception("Failed to dump server state")
return JsonResponse.error(101)
return JsonResponse.success()
# Configure path to templates
TEMPLATE_PATH.append(TEMPLATES_ROOT)
# Enable autoescaping in templates
Jinja2Template.settings['autoescape'] = True
# Create an instance of the server
SERVER = Server()
# Restore server state from a dumpfile
#from robobattleship.dumps.latest import server
#SERVER = server
# Create a thread with stupid bot
BOT_STUPID1 = StupidBot(name="Garry (bot)", uid="uid-63aaf540",
secret="usec-d821af30")
BOT_STUPID1.start()
# Create a thread with stupid bot
BOT_STUPID2 = StupidBot(name="Barry (bot)", uid="uid-566f73bf",
secret="usec-3f8718cb")
BOT_STUPID2.start()
# Run the web server
LOG.info("Starting RoboBattleship Web Server on {host}:{port}"
.format(host=HOST, port=PORT))
run(host=HOST, port=PORT, server='gevent')
# Stop fighting after webserver terminates
BOT_STUPID1.stop_fight()
BOT_STUPID2.stop_fight()
| 6,688 |
tests/unit/tensorflow/models/test_discrete_model.py
|
chiragnagpal/probflow
| 134 |
2023479
|
import matplotlib.pyplot as plt
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from probflow.distributions import Normal, Poisson
from probflow.models import DiscreteModel
from probflow.parameters import Parameter
tfd = tfp.distributions
def is_close(a, b, tol=1e-3):
return np.abs(a - b) < tol
def test_DiscreteModel(plot):
"""Tests probflow.models.DiscreteModel"""
class MyModel(DiscreteModel):
def __init__(self):
self.weight = Parameter([5, 1], name="Weight")
self.bias = Parameter([1, 1], name="Bias")
def __call__(self, x):
return Poisson(tf.nn.softplus(x @ self.weight() + self.bias()))
# Instantiate the model
model = MyModel()
# Data
x = np.random.randn(100, 5).astype("float32")
w = np.random.randn(5, 1).astype("float32")
y = np.round(np.exp(x @ w + 1))
# Fit the model
model.fit(x, y, batch_size=50, epochs=100, lr=0.1)
# plot the predictive dist
model.pred_dist_plot(x[:1, :])
if plot:
plt.title("should be one discrete dist")
plt.show()
model.pred_dist_plot(x[:3, :])
if plot:
plt.title("should be three discrete dists")
plt.show()
model.pred_dist_plot(x[:3, :], cols=2)
if plot:
plt.title("should be three discrete dists, two cols")
plt.show()
# r_squared shouldn't work!
with pytest.raises(RuntimeError):
model.r_squared(x)
# r_squared shouldn't work!
with pytest.raises(RuntimeError):
model.r_squared_plot(x)
class MyModel(DiscreteModel):
def __init__(self):
self.weight = Parameter([5, 1], name="Weight")
self.bias = Parameter([1, 1], name="Bias")
def __call__(self, x):
return Normal(x, 1.0)
# Instantiate the model
model = MyModel()
# Shouldn't work with non-discrete/scalar outputs
with pytest.raises(NotImplementedError):
model.pred_dist_plot(x[:1, :])
| 2,026 |
1.Chapter-Python/presentation/ch02/sequence_abc.py
|
zinebabercha/zineb-abercha
| 8 |
2023777
|
# Copyright 2013, <NAME>
#
# Developed for use with the book:
#
# Data Structures and Algorithms in Python
# <NAME>, <NAME>, and <NAME>
# <NAME> & Sons, 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from abc import ABCMeta, abstractmethod # need these definitions
class Sequence(metaclass=ABCMeta):
"""Our own version of collections.Sequence abstract base class."""
@abstractmethod
def __len__(self):
"""Return the length of the sequence."""
@abstractmethod
def __getitem__(self, j):
"""Return the element at index j of the sequence."""
def __contains__(self, val):
"""Return True if val found in the sequence; False otherwise."""
for j in range(len(self)):
if self[j] == val: # found match
return True
return False
def index(self, val):
"""Return leftmost index at which val is found (or raise ValueError)."""
for j in range(len(self)):
if self[j] == val: # leftmost match
return j
raise ValueError('value not in sequence') # never found a match
def count(self, val):
"""Return the number of elements equal to given value."""
k = 0
for j in range(len(self)):
if self[j] == val: # found a match
k += 1
return k
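# A minimal concrete subclass sketch (hypothetical, for illustration): only __len__
# and __getitem__ must be provided; __contains__, index and count are inherited.
#
#     class Squares(Sequence):
#         def __init__(self, n):
#             self._n = n
#         def __len__(self):
#             return self._n
#         def __getitem__(self, j):
#             if not 0 <= j < self._n:
#                 raise IndexError(j)
#             return j * j
#
#     9 in Squares(5)        # True
#     Squares(5).index(16)   # 4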
| 1,917 |
python3/hackerrank_leetcode/network_delay_time/main_objects_threading.py
|
seLain/codesnippets
| 0 |
2022674
|
import copy, time
from threading import Thread
class Node:
def __init__(self, id):
self.id = id
self.dest = {} # {Node: travel_time}
self.visited = False
def add_dest(self, dest_node, travel_time):
self.dest[dest_node] = travel_time
def receive(self, message):
if not self.visited:
self.visited = True
message.visited(self)
for d in self.dest.keys():
copied_msg = copy.copy(message)
copied_msg.info_center.notify_active(copied_msg)
Thread(target=copied_msg.goto, args=(self, d, self.dest[d])).start()
message.info_center.notify_stop(message)
class Message:
def __init__(self, info_center):
self.visited_nodes = []
self.travel_time = 0
self.info_center = info_center
def been_there(self, node):
if node in self.visited_nodes:
return True
else:
return False
def visited(self, node):
if node not in self.visited_nodes:
self.visited_nodes.append(node)
def goto(self, src, dest, travel_time):
time.sleep(travel_time * 0.05)
if src not in self.visited_nodes:
self.visited_nodes.append(src)
if dest not in self.visited_nodes:
self.travel_time += travel_time
self.visited_nodes.append(dest)
dest.receive(self)
else:
self.info_center.notify_stop(self)
class InfoCenter:
def __init__(self, N):
self.active_messages = []
self.stopped_messages = []
self.visited_nodes = []
self.numbers_of_nodes = N
self.done_delivery = False
self.all_nodes_covered = False
self.longest_travel_time = -1
def notify_active(self, msg):
if msg not in self.active_messages:
self.active_messages.append(msg)
def notify_stop(self, msg):
self.stopped_messages.append(msg)
if msg in self.active_messages:
self.active_messages.remove(msg)
self.visited_nodes += [n for n in msg.visited_nodes \
if n not in self.visited_nodes]
# check if there is no active messages,
# if yes, it means available nodes visited,
# but there might be nodes unvisitable
if len(self.active_messages) == 0:
self.done_delivery = True
if len(self.visited_nodes) == self.numbers_of_nodes:
self.all_nodes_covered = True
for m in self.stopped_messages:
if m.travel_time > self.longest_travel_time:
self.longest_travel_time = m.travel_time
class Solution:
def networkDelayTime(self, times, N, K):
"""
:type times: List[List[int]]
:type N: int
:type K: int
:rtype: int
"""
# build the network
info_center = InfoCenter(N)
all_nodes = {}
for i in range(1, N+1):
all_nodes[i] = Node(i)
for path in times:
all_nodes[path[0]].add_dest(all_nodes[path[1]], path[2])
# run the network
all_nodes[K].receive(Message(info_center))
# check the result
while info_center.done_delivery is False:
continue
if info_center.all_nodes_covered:
return info_center.longest_travel_time
else:
return -1
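# Illustrative check (the classic example for this problem): the signal starts at
# node 2 and must reach nodes 1, 3 and 4; the slowest arrival takes 2 time units.
#
#     Solution().networkDelayTime([[2, 1, 1], [2, 3, 1], [3, 4, 1]], 4, 2)  # -> 2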
| 3,448 |
reports/tasks/cancel.py
|
kids-first/kf-task-release-reports
| 0 |
2023091
|
import boto3
import logging
from flask import current_app, jsonify
from reports.tasks.validation import validate_state
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def cancel(task_id, release_id):
"""
Immediately cancel a task.
In the case of reports, this only results in the state of the task
being updated to 'canceled'.
"""
endpoint_url = current_app.config['DYNAMO_ENDPOINT']
db = boto3.resource('dynamodb', endpoint_url=endpoint_url)
table = db.Table(current_app.config['TASK_TABLE'])
validate_state(task_id, 'cancel')
logger.info(f'{task_id} was told to cancel by the coordinator')
# Update the task to staged in db
task = table.update_item(
Key={'task_id': task_id},
UpdateExpression='SET #st = :new',
ExpressionAttributeNames={'#st': 'state'},
ExpressionAttributeValues={':new': 'canceled'},
ReturnValues='ALL_NEW'
)
logger.info(f'{task_id} was canceled')
return jsonify(task['Attributes']), 200
| 1,026 |
pixelizer.py
|
alexandrepoulin/Pixelizer
| 0 |
2022995
|
from PIL import Image
import numpy as np
from sklearn.cluster import KMeans
import colorsys as cs
###################################
## Script parameters
###################################
#path to file
image_filename = "sunset.jpeg"
output_filename = "sunset_pixelized.png"
output_type = "png"
n_colors = 5
#how many colors to add to the pallet for each main color.
extend_brighter = 2 #adds this number of brighter shades
extend_darker = 2 #adds this number of darker shades
#How big of a square in pixels to compress to a single pixel
#if this is not a multiple of the size, then the image will be
#cut off at the bottom and right.
compression_factor = 4
#whether to simply replace the color, or create a smaller image
same_size = True
##multipliers for shade
##For making something brighter: new_c = h, s/c_sat, v * c_val
##For making something darker: new_c = h*c_hue, s*c_sat, v / c_val
c_hue = 1.1 ##used for darkening only
c_sat = 1.2
c_val = 1.2
#pallet mods
#changes to the centroid colors for the pallet
e_sat = 1.2
e_val = 1.0
#change the average of the square to find the pallet
i_sat = 1.2
i_val = 1.2
###################################
## Script brains
###################################
##function to get the main colors of an image
def getMainColors(flat_data,num_clusters):
kmeans = KMeans(init="k-means++", n_clusters=num_clusters, n_init=3)
kmeans.fit(flat_data)
return kmeans.cluster_centers_ ##RGB values
##image file processing
def getImageData(filename):
"""Get the file data"""
with Image.open(filename,'r') as fin:
return np.asarray(fin)
def flatten(x):
data= []
for y in x:
data.extend(y)
return np.array(data)
def extendColor(c, num_brighter, num_darker):
#expect HSV value
h, s, v= cs.rgb_to_hsv(*(x/255 for x in c))
s = min(1,max(0,s*e_sat))
v = min(1,max(0,v*e_val))
colors = []
##For making something brighter: new_c = h, s/c_sat, v * c_val
##For making something darker: new_c = h*c_hue, s*c_sat, v / c_val
for i in range(num_brighter, -num_darker-1, -1):
new_c =[]
if i>=0:
new_c = (h,min(1, max(0,s/pow(c_sat,i))), min(1,max(0,v*pow(c_val,i))) )
else:
new_h = (np.sign(np.cos(((h-1/6)%1)* np.pi))*c_hue + h)%1
new_c = (new_h,min(1, max(0,s*pow(c_sat,i))), min(1,max(0,v/pow(c_val,i))) )
colors.append([x*255 for x in cs.hsv_to_rgb(*new_c)])
return colors
def findPalletColor(c, pallet):
bestScore = 1000000000
best_c = None
for col in pallet:
score= np.linalg.norm(c-col)
if score< bestScore:
bestScore = score
best_c = col
return best_c
##Use the pallet to create a new image.
#c i the compression factor, just saving on typing.
#ss is same_size
def createNewImage(data, pallet, c, ss):
    new_data = data.copy()
    if not ss:
        new_data = np.zeros((len(data)//c, len(data[0])//c, 3), dtype=np.uint8)
csq = c*c
for row in range(0,len(data),c):
for col in range(0, len(data[0]),c):
r = data[row:row+c,col:col+c,0].flatten()
g = data[row:row+c,col:col+c,1].flatten()
b = data[row:row+c,col:col+c,2].flatten()
r = [int(x)*int(x) for x in r]
g = [int(x)*int(x) for x in g]
b = [int(x)*int(x) for x in b]
r = np.sqrt(sum(r)/csq)/255
g = np.sqrt(sum(g)/csq)/255
b = np.sqrt(sum(b)/csq)/255
h,s,v = cs.rgb_to_hsv(r,g,b)
r,g,b = (round(255*x) for x in cs.hsv_to_rgb(h,s*i_sat, v*i_val))
chosen_c = findPalletColor(np.array([r,g,b]), pallet)
if ss:
for i in range(c):
for j in range(c):
new_data[row+i][col+j] = chosen_c
else:
new_data[row//c][col//c] = chosen_c
return new_data
#step 1: Get image data
data = getImageData(image_filename)
height_cutoff = len(data) - (len(data)%compression_factor)
width_cutoff = len(data[0]) - (len(data[0])%compression_factor)
data = np.array([x[:width_cutoff] for x in data[:height_cutoff]])
flat_data = flatten(data)
#step 2: Get the centroids
centroids = getMainColors(flat_data,n_colors)
#step 3: Extend main colors include brighter and darker versions
pallet = []
for c in centroids:
#c is an rgb with vals between 0 and 1
print("centroid: ", c)
pallet.extend(extendColor(c,extend_brighter,extend_darker))
##the pallet is now all in RGB
for c in pallet:
print(c)
#step 4: create a new image using our pallet
new_data = createNewImage(data,pallet, compression_factor, same_size)
im = Image.fromarray(new_data)
im.save(output_filename, output_type)
| 4,744 |
src/clikit/api/config/command_config.py
|
sdispater/clik
| 78 |
2023046
|
from contextlib import contextmanager
from typing import Any
from typing import List
from typing import Optional
from clikit.api.args.args_parser import ArgsParser
from clikit.api.args.format.args_format import ArgsFormat
from clikit.api.args.format.args_format_builder import ArgsFormatBuilder
from clikit.api.args.format.command_name import CommandName
from clikit.api.command.exceptions import NoSuchCommandException
from .config import Config
class CommandConfig(Config):
"""
The configuration of a console command.
"""
def __init__(self, name=None): # type: (Optional[str]) -> None
super(CommandConfig, self).__init__()
self._name = name
self._aliases = []
self._description = ""
self._help = None
self._enabled = True
self._hidden = False
self._process_title = None
self._default = None
self._anonymous = None
self._sub_command_configs = [] # type: List[CommandConfig]
self._parent_config = None # type: Optional[CommandConfig]
@property
def name(self): # type: () -> Optional[str]
return self._name
def set_name(self, name): # type: (Optional[str]) -> CommandConfig
self._name = name
return self
@property
def aliases(self): # type: () -> List[str]
return self._aliases
def add_alias(self, alias): # type: (str) -> CommandConfig
self._aliases.append(alias)
return self
def add_aliases(self, aliases): # type: (List[str]) -> CommandConfig
for alias in aliases:
self.add_alias(alias)
return self
def set_aliases(self, aliases): # type: (List[str]) -> CommandConfig
self._aliases = []
return self.add_aliases(aliases)
@property
def description(self): # type: () -> str
return self._description
def set_description(self, description): # type: (str) -> CommandConfig
self._description = description
return self
@property
def help(self): # type: () -> Optional[str]
return self._help
def set_help(self, help): # type: (Optional[str]) -> CommandConfig
self._help = help
return self
def is_enabled(self): # type: () -> bool
return self._enabled
def enable(self): # type: () -> CommandConfig
self._enabled = True
return self
def disable(self): # type: () -> CommandConfig
self._enabled = False
return self
def is_hidden(self): # type: () -> bool
return self._hidden
def hide(self, hidden=True): # type: (bool) -> CommandConfig
self._hidden = hidden
return self
@property
def process_title(self): # type: () -> Optional[str]
return self._process_title
def set_process_title(
self, process_title
): # type: (Optional[str]) -> CommandConfig
self._process_title = process_title
return self
def default(self, default=True): # type: (bool) -> CommandConfig
"""
Marks the command as the default command.
"""
self._default = default
self._anonymous = False
return self
def anonymous(self): # type: () -> CommandConfig
self._default = True
self._anonymous = True
return self
def is_default(self): # type: () -> bool
return self._default
def is_anonymous(self): # type: () -> bool
return self._anonymous
@property
def parent_config(self): # type: () -> Optional[CommandConfig]
return self._parent_config
def set_parent_config(
self, parent_config
): # type: (Optional[CommandConfig]) -> CommandConfig
self._parent_config = parent_config
return self
def is_sub_command_config(self): # type: () -> bool
return self._parent_config is not None
def build_args_format(
self, base_format=None
): # type: (Optional[ArgsFormat]) -> ArgsFormat
builder = ArgsFormatBuilder(base_format)
if not self._anonymous:
builder.add_command_name(CommandName(self.name, self.aliases))
builder.add_options(*self.options.values())
builder.add_arguments(*self.arguments.values())
return builder.format
@contextmanager
def sub_command(self, name): # type: (str) -> CommandConfig
sub_command_config = CommandConfig(name)
self.add_sub_command_config(sub_command_config)
yield sub_command_config
def create_sub_command(self, name): # type: (str) -> CommandConfig
sub_command_config = CommandConfig(name)
self.add_sub_command_config(sub_command_config)
return sub_command_config
@contextmanager
def edit_sub_command(self, name): # type: (str) -> CommandConfig
sub_command_config = self.get_sub_command_config(name)
yield sub_command_config
def add_sub_command_config(
self, sub_command_config
): # type: (CommandConfig) -> CommandConfig
self._sub_command_configs.append(sub_command_config)
return self
def add_sub_command_configs(
self, sub_command_configs
): # type: (List[CommandConfig]) -> CommandConfig
for sub_command_config in sub_command_configs:
self.add_sub_command_config(sub_command_config)
return self
def get_sub_command_config(self, name): # type: (str) -> CommandConfig
for sub_command_config in self._sub_command_configs:
if sub_command_config.name == name:
return sub_command_config
raise NoSuchCommandException(name)
@property
def sub_command_configs(self): # type: () -> List[CommandConfig]
return self._sub_command_configs
def has_sub_command_config(self, name): # type: (str) -> bool
for sub_command_config in self._sub_command_configs:
if sub_command_config.name == name:
return True
return False
def has_sub_command_configs(self): # type: () -> bool
return len(self._sub_command_configs) > 0
@property
def default_args_parser(self): # type: () -> ArgsParser
if self._parent_config:
return self._parent_config.default_args_parser
return super(CommandConfig, self).default_args_parser
@property
def default_lenient_args_parsing(self): # type: () -> bool
if self._parent_config:
return self._parent_config.default_lenient_args_parsing
return super(CommandConfig, self).default_lenient_args_parsing
@property
def default_handler(self): # type: () -> Any
if self._parent_config:
return self._parent_config.default_handler
return super(CommandConfig, self).default_handler
@property
def default_handler_method(self): # type: () -> str
if self._parent_config:
return self._parent_config.default_handler_method
return super(CommandConfig, self).default_handler_method
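# Usage sketch (command and argument names are made up for illustration):
#
#     config = CommandConfig("remote")
#     config.set_description("Manage remote repositories")
#     with config.sub_command("add") as add:
#         add.set_description("Add a remote")
#
#     config.has_sub_command_config("add")   # True
#     config.get_sub_command_config("add")   # the CommandConfig created above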
| 7,053 |
xoinvader/animation.py
|
pankshok/xoinvader
| 13 |
2023044
|
"""Animation.
Animation is set of keyframes.
Value of selected attribute changes in time.
Keyframe:
(time, value)
Objects have animation manager which manages animation graph and switching."""
from operator import itemgetter
from eaf import Timer
from xoinvader.utils import Point
class AnimationBoundariesExceeded(Exception):
"""Exception to show that interpolated value will be incorrect."""
def __init__(self, first, current_time, second):
        super(AnimationBoundariesExceeded, self).__init__(
            f"Animation frame boundaries exceeded: {first} <= {current_time} <= {second}"
        )
class InterpolationUnknownTypes(Exception):
"""Such type combination is unsupported."""
def __init__(self, first, second):
        super(InterpolationUnknownTypes, self).__init__(
            f"Unknown types of interpolating values: {first} and {second}"
        )
# TODO: Implement animation graph, etc.
class AnimationManager(object):
"""Manage list of object animation."""
def __init__(self):
self._animations = {}
self._animation = None
@property
def animation(self):
"""AnimationManager's current animation name.
        To set the animation, assign its name.
:getter: yes
:setter: yes
:type: str
"""
if self._animation:
return self._animation.name
else:
raise AttributeError("There is no available animation.")
@animation.setter
def animation(self, name):
if name in self._animations:
self._animation = self._animations[name]
else:
raise ValueError(f"No such animation: '{name}'.")
def add(self, name, *args, **kwargs):
"""Add new animation, pass args to Animation class.
See interface of `class::xoinvader.animation.Animation`.
:param str name: animation name
"""
animation = Animation(name, *args, **kwargs)
self._animations[name] = animation
if not self._animation:
self._animation = animation
def update(self, dt):
"""Update manager's state."""
if not self._animation:
return
try:
self._animation.update(dt)
except StopIteration:
return # TODO: think about method to change animation
# pylint: disable=too-many-instance-attributes,too-many-arguments
# pylint: disable=too-few-public-methods
class Animation(object):
"""Animation unit.
Animation object holds sorted list of (time, value) items and changes
selected attribute of bound object according to local animation time.
    Time is measured by a timer. When the current time is greater than or equal
    to the time of the next keyframe, the animation object changes the attribute
    to the appropriate value.
When animation is done and if not looped - raise StopIteration.
In case of interpolated animation value calculation occurs within two
bounding frames and on frame switch.
:param str name: animation name
:param object bind: object to bind animation
:param str attr: attribute to change in frames
:param list keyframes: (float, object) tuples
:param bool interp: interpolate values between frames or not
:param bool loop: loop animation or not
"""
def __init__(self, name, bind, attr, keyframes, interp=False, loop=False):
self._name = name
self._obj = bind
self._attr = attr
if not keyframes:
raise ValueError("Animation keyframes must not be empty.")
self._keyframes = sorted(keyframes, key=itemgetter(0))
self._interp = interp
self._loop = loop
# Timer for tracking local time
self._timer = Timer(self._keyframes[-1][0], lambda: True)
self._timer.start()
# Current keyframe index
self._current = 0
if self._interp:
self.update = self._update_interpolated
else:
self.update = self._update_discrete
@property
def name(self):
"""Animation's name.
:getter: yes
:setter: no
:type: str
"""
return self._name
def _apply_value(self, value):
"""Apply new value to linked object.
:param obj value: value to apply
"""
setattr(self._obj, self._attr, value)
def _update_interpolated(self, dt):
"""Advance animation and interpolate value.
        NOTE: animation frame switching depends on interp mode:
        an animation with interpolation switches frames only when the current
        local time exceeds the NEXT frame's time border.
"""
self._check_animation_state()
self._timer.update(dt)
current_time = self._timer.elapsed
keyframe = self._keyframes[self._current]
next_keyframe = self._keyframes[self._current + 1]
# it's time to switch keyframe
if current_time >= next_keyframe[0]:
self._current += 1
keyframe = self._keyframes[self._current]
if self._current == len(self._keyframes) - 1:
self._apply_value(keyframe[1])
self._current += 1
self._check_animation_state()
return
next_keyframe = self._keyframes[self._current + 1]
value = interpolate(keyframe, next_keyframe, current_time)
self._apply_value(value)
def _update_discrete(self, dt):
"""Advance animation without interpolating value.
        NOTE: animation frame switching depends on interp mode:
        a discrete animation switches frames and updates the value only if
        the current local time is >= the time of the current keyframe.
No need to worry about calculating value between frames - thus
no need to complicate behaviour.
"""
self._check_animation_state()
self._timer.update(dt)
keyframe = self._keyframes[self._current]
# Check if animation need to switch keyframe
if self._timer.elapsed >= keyframe[0]:
self._apply_value(keyframe[1])
self._current += 1
def _check_animation_state(self):
"""Check animation state and restart if needed.
:raise StopIteration: when animation exceeded frames.
"""
if len(self._keyframes) == self._current:
if self._loop:
self._current = 0
self._timer.restart()
else:
self._timer.stop()
raise StopIteration
def linear_equation(val1, val2, time1, time2, current_time):
"""Linear equation to get interpolated value.
:param float val1: first keyframe value
:param float val2: second keyframe value
:param float time1: first keyframe local time
:param float time2: second keyframe local time
:param float current_time: current animation local time
"""
return val1 + (val2 - val1) / (time2 - time1) * (current_time - time1)
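# Illustrative example (not part of the original source):
#   linear_equation(0.0, 10.0, 0.0, 2.0, 1.0) == 5.0  (halfway between frames)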
def same_type(values, types):
"""Check if values are belongs to same type or type tuple.
:param collections.Iterable values: values to check type similarity
:param tuple|type types: type or tuple of types
"""
return all(map(lambda it: isinstance(it, types), values))
def interpolate(first, second, current_time):
"""Interpolate value by two bounding keyframes.
:param collections.Iterable first: first bounding keyframe
:param collections.Iterable second: second bounding keyframe
:param float current_time: current animation local time
:raises AnimationBoundariesExceeded: when time interval is invalid
:raises InterpolationUnknownTypes: when interpolating invalid types
"""
if not first[0] <= current_time <= second[0]:
raise AnimationBoundariesExceeded(first[0], current_time, second[0])
def frames_of(*args):
"""If frames both of specified type."""
return same_type((first[1], second[1]), args)
if frames_of(int, float):
value = linear_equation(
float(first[1]),
float(second[1]),
float(first[0]),
float(second[0]),
float(current_time),
)
elif frames_of(Point):
value = linear_equation(
first[1],
second[1],
float(first[0]),
float(second[0]),
float(current_time),
)
else:
raise InterpolationUnknownTypes(type(first[1]), type(second[1]))
return value
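# Minimal usage sketch (not part of the original source); assumes any object
# with a writable attribute and a game loop that calls update(dt):
#
#   class Sprite(object):
#       pos = Point(0, 0)
#
#   sprite = Sprite()
#   manager = AnimationManager()
#   manager.add(
#       "move", bind=sprite, attr="pos",
#       keyframes=[(0.0, Point(0, 0)), (1.0, Point(10, 5))],
#       interp=True)
#   manager.update(0.5)  # interpolates sprite.pos between the two keyframes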
| 8,490 |
02_strings.py
|
fernandobd42/Introduction_Python
| 1 |
2023103
|
print("Hello World\nYou're Welcome"); # \n é usado para pular uma linha, printa a frase dividida pelo \n em duas linhas
#Resultado:
#Hello World
#You're Welcome
print(r"Hello World\nYou're Welcome"); # r significa row, ou seja, ira printar a frase inteira de forma literal, sem saltar linha;
#Resultado
#Hello World\nYou're Welcome
name = 'Fernando'; #atribuindo um valor a variável name
last_name = 'Gontijo'; #atribuindo um valor a variável last_name
full_name = name +" "+last_name; #concatenando name com last_name
print(name); #printa o name, resultado: 'Fernando'
print(last_name); #printa o last_name, resultado: 'Gontijo'
print(full_name); #printa o full_name, resultado: '<NAME>'
print(name[0]); #printa o caractere a posição 0 do name, resultado: 'F'
print(name[0:3]); #printa os caracteres que estão no intervalo entre a posição 0 e 3, resultado: 'Fer'
#OBS: quando se trata de intervalo, sempre será incluído o primeiro valor e será excluido o último valor.
#No exemplo acima o indíce 0 referencia a letra 'F' e o indíce 3 referencia a letra 'n', então o retorno será 'Fer'
print(full_name[6:11]); #printa os caracteres que estão no intervalo entre a posição 6 e 11
#o indíce 6 referencia a letra 'd' e o indíce 11 referencia a letra 'n', então o retorno será 'do Go'
| 1,284 |
calculator_class.py
|
joaovictordemiranda/calculadora
| 1 |
2023887
|
import re
import tkinter as tk
from typing import List
import math
class Calculator:
"""Test"""
def __init__(
self,
root: tk.Tk,
label: tk.Label,
display: tk.Entry,
buttons: List[List[tk.Button]]
):
self.root = root
self.label = label
self.display = display
self.buttons = buttons
def start(self):
self._config_buttons()
self._config_display()
self.root.mainloop()
def _config_buttons(self):
buttons = self.buttons
for row_value in buttons:
for button in row_value:
button_text = button['text']
if button_text == 'C':
button.bind('<Button-1>', self.clear)
button.config(bg='#4795F6', fg='#fff')
if button_text in '0123456789.+-*/()^':
button.bind('<Button-1>', self.add_text_to_display)
if button_text == '=':
button.bind('<Button-1>', self.calculate)
button.config(bg='#4785F4', fg='#fff')
def _config_display(self):
self.display.bind('<Return>', self.calculate)
self.display.bind('<KP_Enter>', self.calculate)
def _fix_text(self, text):
        # Strip everything that is not 0123456789 . + - * / ^ ( ) or e.
        text = re.sub(r'[^\d\.\/\*\-\+\^(\)e]', r'', text, 0)
        # Collapse repeated operators into a single one
        text = re.sub(r'([\.\+\/\*\-\^])\1+', r'\1', text, 0)
        # Remove empty () or *()
        text = re.sub(r'\*?\(\)', '', text)
return text
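    # Illustrative example (not part of the original source):
    #   _fix_text('2++3*abc(4)') -> '2+3*(4)'  (letters stripped, '++' collapsed)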
def clear(self, event=None):
self.display.delete(0, 'end')
def add_text_to_display(self, event=None):
self.display.insert('end', event.widget['text'])
def calculate(self, event=None):
fixed_text = self._fix_text(self.display.get())
equations = self._get_equations(fixed_text)
try:
if len(equations) == 1:
result = eval(self._fix_text(equations[0]))
else:
result = eval(self._fix_text(equations[0]))
for equation in equations[1:]:
result = math.pow(result, eval(self._fix_text(equation)))
self.display.delete(0, 'end')
self.display.insert('end', result)
self.label.config(text=f'{fixed_text} = {result}')
        except OverflowError:
            self.label.config(text='Could not perform this calculation, sorry!')
        except Exception as e:
            print(e)
            self.label.config(text='Invalid expression')
def _get_equations(self, text):
return re.split(r'\^', text, 0)
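    # Illustrative example (not part of the original source):
    #   _get_equations('2^3') -> ['2', '3']; calculate() then folds the parts
    #   left to right with math.pow, so '2^3' evaluates to 8.0.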
| 2,725 |
ludwig_playground/ludwig_playground.py
|
jacopotagliabue/how-to-grow-a-product-tree
| 3 |
2023591
|
"""
This script is a stand-alone entry point to train SessionPath with Ludwig starting from existing files.
You will need a prod2vec.tsv for embeddings and a data.csv for the training set - sample formats are included
in this folder.
The hello_ludwig script is a simple function that wraps Ludwig training and testing methods, with a couple of options
to let you easily try out model behavior on specific input pairs, or avoid training if you wish to re-use
the model in the folder.
"""
import os
from logging import DEBUG
from ludwig.api import LudwigModel
# script variables
FOLDER = os.path.dirname(os.path.abspath(__file__)) # folder is the current one with the playground script
LUDWIG_MODEL_DEFINITION = {
'input_features': [
{'name': 'skus_in_session', 'type': 'set',
'pretrained_embeddings': 'prod2vec.tsv', 'embedding_size': 48,
'embeddings_trainable': False},
{'name': 'query', 'type': 'text', 'encoder': 'rnn', 'level': 'char'}
],
'combiner': {'type': 'concat', 'num_fc_layers': 2},
'output_features': [
{'name': 'path', 'cell_type': 'lstm', 'type': 'sequence'}
],
'training': {'epochs': 100, 'early_stopping': 5}
}
DATASET = 'data.csv'
# if false, skip training, if true, train and test on dataset from scratch
# after model is trained once, you can set it to False to just generate predictions
IS_TRAINING = True
PREDICTIONS = {
'skus_in_session': ['SKU_123'],
'query': ['nike jordan']
}
# if not empty, it needs to follow ludwig specs: https://uber.github.io/ludwig/api/LudwigModel/#predict
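# Hypothetical illustration (not part of the original source): given the model
# definition above, each data.csv row is expected to supply the three columns
# it names, along the lines of:
#   skus_in_session,query,path
#   SKU_123 SKU_456,nike jordan,root > shoes
# The authoritative format is the sample data.csv shipped in this folder.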
def train_and_test(model_definition, dataset_file, target_folder):
model = LudwigModel(model_definition, logging_level=DEBUG)
train_stats = model.train(data_csv=dataset_file)
model.save(target_folder)
# optionally a separate test file can be supplied OR
# Ludwig built-in "split" column mechanism can be used
predictions, test_stats = model.test(data_csv=dataset_file)
print(test_stats['combined']['accuracy'])
model.close()
def do_predictions(prediction_dictionary, target_folder):
# reload the model
model = LudwigModel.load(target_folder)
# get predictions
predictions = model.predict(data_dict=prediction_dictionary)
for input_q, input_skus, output in zip(prediction_dictionary['query'],
prediction_dictionary['skus_in_session'],
predictions['path_predictions']):
print("\nInput: <{}, {}>, predicted path: {}".format(input_q,
input_skus,
' > '.join([o for o in output if o != '<PAD>'])
))
return
def hello_ludwig(model_definition, ludwig_folder, is_training, dataset_file, prediction_dictionary):
if is_training:
print("\n===> Now training...")
train_and_test(model_definition, dataset_file, ludwig_folder)
# if predictions are supplied, run predictions
if prediction_dictionary:
print("\n===>Now predicting user-supplied rows...")
do_predictions(prediction_dictionary, ludwig_folder)
# all done
print("\n\nAll done! See you, space cowboy...")
return
if __name__ == "__main__":
hello_ludwig(model_definition=LUDWIG_MODEL_DEFINITION,
ludwig_folder=FOLDER,
is_training=IS_TRAINING,
dataset_file=DATASET,
prediction_dictionary=PREDICTIONS)
| 3,566 |
vlgp/__init__.py
|
yuanz271/vlgp
| 1 |
2023831
|
from .api import *
import sys
import logging
import warnings
logger = logging.getLogger(__name__)
# logger.setLevel(logging.INFO)
# # create a logging format
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# # create a file file_handler
# # file_handler = logging.FileHandler('vlgp.log')
# handler = logging.StreamHandler()
# handler.setLevel(logging.INFO)
# handler.setFormatter(formatter)
# logger.addHandler(handler)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
logger.addHandler(stdout_handler)
if sys.version_info < (3, 5):
logger.warning(str(sys.version_info))
warnings.warn("Python 3.5 or later is required.")
| 738 |
python/fixfmt/pandas.py
|
gwgundersen/fixfmt
| 1 |
2023659
|
import pandas as pd
from . import table
from .lib import container
#-------------------------------------------------------------------------------
def from_dataframe(df, cfg, names=container.ALL):
tbl = table.Table(cfg)
def get_values(series):
if series.dtype.name == "category":
# Construct an explicit array of values.
# FIXME: Wasteful. We should instead read through the categories.
return series.cat.categories.values[series.cat.codes]
else:
return series.values
if cfg["index"]["show"]:
idx = df.index
if isinstance(idx, pd.core.index.MultiIndex):
for name, labels, levels in zip(idx.names, idx.labels, idx.levels):
# FIXME: Wasteful. Handle like category columns.
tbl.add_index_column(name, levels.values[labels])
else:
tbl.add_index_column(idx.name, get_values(idx))
names = container.select_ordered(tuple(df.columns), names)
for name in names:
series = df[name]
arr = get_values(series)
tbl.add_column(series.name, arr)
tbl.finish()
return tbl
def print_dataframe(df, cfg=table.DEFAULT_CFG, names=container.ALL):
tbl = from_dataframe(df, cfg, names=names)
tbl.print()
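# Illustrative usage sketch (not part of the original source):
#   df = pd.DataFrame({"x": [1, 2, 3], "y": list("abc")})
#   print_dataframe(df)                # default config
#   print_dataframe(df, names=("x",))  # only selected columns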
#-------------------------------------------------------------------------------
def main():
from argparse import ArgumentParser
try:
import cPickle as pickle
except ImportError:
import pickle
# FIXME: Add format arguments.
parser = ArgumentParser()
parser.add_argument(
"filename", metavar="FILENAME",
help="read from FILENAME")
args = parser.parse_args()
# FIXME
# cfg = table.DEFAULT_CFG
# cfg = table.UNICODE_CFG
cfg = table.UNICODE_BOX_CFG
table._colorize(cfg)
# FIXME: Support "-".
with open(args.filename, "rb") as file:
df = pickle.load(file)
print_dataframe(df, cfg)
if __name__ == "__main__":
main()
| 2,027 |
src/onegov/onboarding/models/__init__.py
|
politbuero-kampagnen/onegov-cloud
| 0 |
2022923
|
from onegov.onboarding.models.assistant import (
Assistant,
DefaultAssistant,
Step
)
from onegov.onboarding.models.town_assistant import TownAssistant
__all__ = [
'Assistant',
'DefaultAssistant',
'Step',
'TownAssistant'
]
| 251 |
model_zoo/official/cv/lenet_quant/src/dataset.py
|
nudt-eddie/mindspore
| 1 |
2023598
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Produce the dataset
"""
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as CV
import mindspore.dataset.transforms.c_transforms as C
from mindspore.dataset.vision import Inter
from mindspore.common import dtype as mstype
def create_dataset(data_path, batch_size=32, repeat_size=1,
num_parallel_workers=1):
"""
create dataset for train or test
"""
# define dataset
mnist_ds = ds.MnistDataset(data_path)
resize_height, resize_width = 32, 32
rescale = 1.0 / 255.0
shift = 0.0
rescale_nml = 1 / 0.3081
shift_nml = -1 * 0.1307 / 0.3081
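    # Added note: combined with the 1/255 rescale below, these constants apply
    # the usual MNIST normalization (x/255 - 0.1307) / 0.3081
    # (mean 0.1307, std 0.3081).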
# define map operations
resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Bilinear mode
rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)
rescale_op = CV.Rescale(rescale, shift)
hwc2chw_op = CV.HWC2CHW()
type_cast_op = C.TypeCast(mstype.int32)
# apply map operations on images
mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)
# apply DatasetOps
buffer_size = 10000
mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script
mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
mnist_ds = mnist_ds.repeat(repeat_size)
return mnist_ds
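# Illustrative usage sketch (not part of the original source; path is hypothetical):
#   ds_train = create_dataset("./MNIST_Data/train", batch_size=32)
#   for batch in ds_train.create_dict_iterator():
#       images, labels = batch["image"], batch["label"]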
| 2,492 |
working/fakedata.py
|
mnicosia99/grover
| 0 |
2023618
|
#!/usr/bin/env python
from faker import Faker
import json, random
from datetime import timedelta
from datetime import datetime
def get_number_authors():
return random.randint(1, 5)
def get_date(start_days_ago=1, end_days_ago=365):
days_ago = random.randint(start_days_ago, end_days_ago)
pub_datetime = datetime.now() - timedelta(days=days_ago)
publish_date = pub_datetime.strftime('%m-%d-%Y')
iso_date = pub_datetime.isoformat()
print(publish_date)
print(iso_date)
return publish_date
def create_authors(nbr_authors=None):
    # evaluate the random default per call instead of once at import time
    if nbr_authors is None:
        nbr_authors = get_number_authors()
    authors = list()
for i in range(nbr_authors):
faker = Faker()
authors.append(faker.name())
# print(f'name: {faker.name()}')
# address = faker.address().replace("\n", " ")
# print(f'address: {address}')
return authors
def get_random_school():
f = open("working/universities.json")
universities = json.load(f)
inx = random.randint(0, len(universities) - 1)
return universities[inx]["institution"]
def get_random_department():
mf = open("working/majors.json")
majors = json.load(mf)
# print(f'school: {data[inx]["institution"]}')
# print(f'department: {majors["majors"][m_inx]["department"]}')
# print(f'text: {faker.text()}')
# print(f'name: {faker.name()}')
# address = faker.address().replace("\n", " ")
# print(f'address: {address}')
inx = random.randint(0, len(majors["majors"]) - 1)
return "School of " + majors["majors"][inx]["department"]
# def create_authors(nbr_authors=get_number_authors()):
# authors = list()
# f = open("working/universities.json")
# mf = open("working/majors.json")
# data = json.load(f)
# majors = json.load(mf)
# for i in range(nbr_authors):
# faker = Faker()
# print(f'name: {faker.name()}')
# address = faker.address().replace("\n", " ")
# print(f'address: {address}')
# inx = random.randint(0, len(data) - 1)
# m_inx = random.randint(0, len(majors["majors"]) - 1)
# print(f'school: {data[inx]["institution"]}')
# print(f'department: {majors["majors"][m_inx]["department"]}')
# # print(f'text: {faker.text()}')
| 2,331 |
tributary/tests/streaming/output/test_file_streaming.py
|
mohanrajofficial87/tributary
| 1 |
2023802
|
import os
import time
import tributary.streaming as ts
class TestFile:
def setup(self):
time.sleep(0.5)
def test_file(self):
file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_file_data.json")
)
if os.path.exists(file):
os.remove(file)
def func():
yield 1
yield 2
yield 3
yield 4
def read_file(file):
with open(file, "r") as fp:
data = fp.read()
return [int(x) for x in data]
# Test that output is equal to what is read (generalized)
out = ts.FileSink(ts.Func(func), filename=file, json=True)
assert ts.run(out) == read_file(file)
| 748 |
app.py
|
sadfsdfdsa/world_model
| 1 |
2023253
|
from world_simulation import WorldEngine, EngineConfig
mp = WorldEngine(5, 5)
EngineConfig.ModelsConfig.HerbivoreConfig.number_min = 1
EngineConfig.ModelsConfig.HerbivoreConfig.number_max = 1
EngineConfig.ModelsConfig.HerbivoreConfig.health_min = 4
EngineConfig.ModelsConfig.HerbivoreConfig.health_max = 6
EngineConfig.ModelsConfig.HerbivoreConfig.age_min = 20
EngineConfig.ModelsConfig.HerbivoreConfig.age_max = 35
EngineConfig.ModelsConfig.FoodConfig.number_min = 0
EngineConfig.ModelsConfig.FoodConfig.number_max = 2
EngineConfig.ModelsConfig.FoodConfig.spawn_number_min = 2
EngineConfig.ModelsConfig.FoodConfig.spawn_number_max = 3
EngineConfig.ModelsConfig.FoodConfig.age_min = 5
EngineConfig.ModelsConfig.FoodConfig.age_max = 5
EngineConfig.ModelsConfig.FoodConfig.health_min = 2
EngineConfig.ModelsConfig.FoodConfig.health_max = 5
EngineConfig.WorldConfig.load_model(EngineConfig.ModelsConfig.HerbivoreConfig)
EngineConfig.WorldConfig.load_model(EngineConfig.ModelsConfig.FoodConfig)
mp.run_loop(while_alive=True)
| 1,031 |
pkgs/sdk-pkg/src/genie/libs/sdk/triggers/contrib/unconfigconfig/cdp/iosxe/unconfigconfig.py
|
miott/genielibs
| 0 |
2022779
|
#import statements
import re
import logging
import time
log = logging.getLogger()
from ats import aetest
from pprint import pprint as pp
from genie.harness.base import Trigger
import pdb
from ats.utils.objects import Not, NotExists
from genie.libs.sdk.triggers.template.unconfigconfig import \
TriggerUnconfigConfig as UnconfigConfigTemplate
# Genie
from genie.harness.exceptions import GenieConfigReplaceWarning
# Genie Libs
from genie.libs.sdk.libs.utils.mapping import Mapping
from genie.libs.sdk.triggers.unconfigconfig.unconfigconfig import TriggerUnconfigConfig
class Triggerunconfigconfigcdp(Trigger):
''' Config and Unconfig of cdp '''
@aetest.setup
def prerequisites(self,uut):
output = uut.execute('show cdp')
cdp_status=re.search(r'(Global\s+CDP)',output)
print(cdp_status.group(1))
if cdp_status.group(1) == 'Global CDP':
self.skipped("CDP is enabled globally")
else:
self.failed("CDP is not enabled globally")
@aetest.test
def save_configuration(self, uut, method, abstract, steps):
'''Save current configuration
Can be either done via TFTP or checkpoint feature (If exists for OS)
Args:
uut (`obj`): Device object.
method (`str`): Save method from trigger datafile.
                            Only accept "local" and "checkpoint"
Returns:
None
Raises:
pyATS Results
'''
self.lib = abstract.sdk.libs.abstracted_libs.restore.Restore()
default_dir = getattr(self.parent, 'default_file_system', {})
try:
self.lib.save_configuration(uut, method, abstract, default_dir)
except Exception as e:
self.failed('Saving the configuration failed', from_exception=e,
goto=['next_tc'])
@aetest.test
def unconfig(self,uut):
uut.configure('no cdp run')
@aetest.test
def Verify_unconfig(self,uut):
# ''' Verify unconfig for cdp worked or not '''
output = uut.execute('show cdp')
if 'Global CDP' not in output:
self.passed("CDP is not enabled globally")
else:
self.failed("CDP is enabled globally")
@aetest.test
def config(self,uut):
uut.configure('cdp run')
@aetest.test
def Verify_config(self,uut):
# ''' Verify config for cdp worked or not '''
output = uut.execute('show cdp')
cdp_status=re.search(r'(Global\s+CDP)',output)
if cdp_status.group(1) == 'Global CDP':
self.passed("CDP is enabled globally")
else:
self.failed("CDP is not enabled globally")
@aetest.test
def restore_configuration(self, uut, method, abstract, steps):
'''Rollback the configuration
Can be either done via TFTP or checkpoint feature (If exists for OS)
Args:
uut (`obj`): Device object.
method (`str`): Save method from trigger datafile.
                            Only accept "local" and "checkpoint"
Returns:
None
Raises:
pyATS Results
'''
try:
self.lib.restore_configuration(uut, method, abstract)
except GenieConfigReplaceWarning as e:
self.passx('Configure replace requires device reload')
except Exception as e:
self.failed('Failed to restore the configuration', from_exception=e)
| 3,560 |
ROMS/roms_glider-sandy-temp.py
|
petercunning/notebook
| 32 |
2023250
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=2>
# ROMS Glider
# <markdowncell>
# Virtual glider extraction: (lon,lat,time) interpolation from ROMS files using the OKEAN python package: https://github.com/martalmeida/okean
# <codecell>
%matplotlib inline
from okean.roms import glider
from okean import netcdf
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# <markdowncell>
# Lets use some ROMS-ESPRESSO output and load info from a glider:
# <codecell>
froms='http://tds.marine.rutgers.edu/thredds/dodsC/roms/espresso/2009_da/his'
fglider='http://tds.marine.rutgers.edu/thredds/dodsC/cool/glider/mab/Gridded/20121025T000000_20121105T000000_maracoos_ru23.nc'
x=netcdf.use(fglider,'longitude')
y=netcdf.use(fglider,'latitude')
t=netcdf.nctime(fglider,'time')
a=glider.RomsGlider(froms,x,y,t)
a.plot()
# <markdowncell>
# Extract and plot the glider data
# <codecell>
z=netcdf.use(fglider,'depth')
v=netcdf.use(fglider,'temperature')
# <codecell>
print z.shape
print v.shape
print t.shape
# <codecell>
vmin=10.0
vmax=17.0
fig = plt.figure(figsize=(12,4))
plt.pcolormesh(t,z,v.T,vmin=vmin,vmax=vmax)
plt.ylim([-60,0])
plt.colorbar()
wk=plt.matplotlib.dates.WeekdayLocator(byweekday=MO)
fmt=plt.matplotlib.dates.DateFormatter('%d-%b-%Y')
ax=plt.gca()
ax.xaxis.set_major_locator(wk)
ax.xaxis.set_major_formatter(fmt)
plt.title('Observed Glider data: 20121025T000000_20121105T000000_maracoos_ru23.nc');
# <markdowncell>
# Extract and plot a ROMS-Espresso variable:
# <codecell>
v2=a.extract('temp',method='fast')
z2=a.depth('temp')
t2=np.tile(a.t[:,np.newaxis],(1,v2.shape[1]))
# <markdowncell>
# Plot with same vertical scale as obs data
# <codecell>
fig = plt.figure(figsize=(12,4))
plt.pcolormesh(t2,z2,v2,vmin=vmin,vmax=vmax)
plt.ylim([-60,0])
plt.colorbar()
wk=plt.matplotlib.dates.WeekdayLocator(byweekday=MO)
fmt=plt.matplotlib.dates.DateFormatter('%d-%b-%Y')
ax=plt.gca()
ax.xaxis.set_major_locator(wk)
ax.xaxis.set_major_formatter(fmt)
plt.title('Virtual Glider data from ROMS Espresso');
# <codecell>
fig = plt.figure(figsize=(12,4))
plt.pcolormesh(t2,z2,v2,vmin=vmin,vmax=vmax)
plt.colorbar()
wk=plt.matplotlib.dates.WeekdayLocator(byweekday=MO)
fmt=plt.matplotlib.dates.DateFormatter('%d-%b-%Y')
ax=plt.gca()
ax.xaxis.set_major_locator(wk)
ax.xaxis.set_major_formatter(fmt)
plt.title('Virtual Glider data from ROMS Espresso');
# <codecell>
# <codecell>
| 2,482 |
setup.py
|
iatacodes/python-sdk
| 5 |
2023856
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
def get_file_content(file_name):
with open(path.join(here, file_name), encoding='utf-8') as f:
return f.read()
version = {}
exec(open(path.join(here, 'iata_codes/version.py')).read(), version)
setup(
name="iata_codes",
version=version['__version__'],
description="REST API Client for IATA Codes database",
long_description=get_file_content('README.rst'),
url="https://github.com/otetz/iata_codes",
author="<NAME>",
author_email="<EMAIL>",
license='MIT',
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
keywords='iata development',
packages=find_packages(),
include_package_data=True,
setup_requires=['pytest-runner'],
install_requires=get_file_content('requirements.txt'),
tests_require=get_file_content('requirements_test.txt'),
test_suite='tests',
)
| 1,569 |
classesandobjects.py
|
expilu/LearnPython
| 0 |
2023351
|
class MyClass:
variable = "blah"
def function(self):
print("This is a message inside the class.")
myobjectx = MyClass()
print(myobjectx.variable)
myobjecty = MyClass()
myobjecty.variable = "yackity"
# Then print out both values
print(myobjectx.variable)
print(myobjecty.variable)
myobjectx.function()
# Exercise
# We have a class defined for vehicles. Create two new vehicles called car1 and car2. Set car1 to be a red convertible worth $60,000.00 with a name of Fer, and car2 to be a blue van named Jump worth $10,000.00.
# define the Vehicle class
class Vehicle:
name = ""
kind = "car"
color = ""
value = 100.00
def description(self):
desc_str = "%s is a %s %s worth $%.2f." % (self.name, self.color, self.kind, self.value)
return desc_str
# your code goes here
car1 = Vehicle()
car1.name = "Fer"
car1.color = "red"
car1.kind = "convertible"
car1.value = 60000.00
car2 = Vehicle()
car2.name = "Jump"
car2.color = "blue"
car2.kind = "van"
car2.value = 10000.00
# test code
print(car1.description())
print(car2.description())
| 1,052 |
setup.py
|
FarmLogs/pysobus
| 49 |
2022930
|
#!/usr/bin/env python
import os
import setuptools
from pysobus import __version__
setuptools.setup(
name='pysobus',
version=__version__,
description='Proprietary ISOBUS message specifications and decoding tools for yield data',
author='<NAME>',
author_email='<EMAIL>',
packages=['pysobus'],
package_dir={
'pysobus':'pysobus'
},
package_data={
'pysobus': ['message_definitions.csv']
},
url='https://github.com/FarmLogs/pysobus',
download_url='https://github.com/FarmLogs/pysobus/tarball/%s' % __version__,
install_requires=['spanner>=0.3.4']
)
| 634 |
Optimizacion/bin_packing_SA.py
|
FCARRILLOM/InteligenciaComputacional
| 0 |
2022678
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
<NAME> A01194204
Bin packing 2D
"""
import numpy as np
import matplotlib.pyplot as plt
import math
from PIL import Image, ImageDraw
### PROBLEM VALUES
containerSize = (10, 10)
objects = [(5, 3), (3, 4), (1, 1), (2, 2), (2, 2), (4, 5), (3, 4), (4, 2), (5, 3), (2, 6),
(4, 3), (2, 4), (2, 1), (2, 3), (6, 2), (3, 3), (4, 4), (6, 6), (6, 7), (3, 5),
(3, 3), (2, 2), (3, 5), (4, 7), (8, 7), (5, 2), (3, 1), (1, 4), (2, 5), (5, 6),
(3, 3), (4, 4), (2, 1), (3, 2), (4, 3), (1, 1), (3, 2), (5, 7), (5, 6), (5, 2),
(3, 3), (4, 3), (2, 3), (1, 2), (6, 3), (2, 2), (3, 2), (1, 2), (5, 3), (2, 2),
(4, 5), (3, 4), (4, 2), (5, 3), (2, 6), (4, 3), (2, 4), (2, 1), (2, 3), (6, 2),
(3, 3), (4, 4), (6, 6), (6, 7), (3, 5), (3, 3), (2, 2), (3, 5), (4, 7), (4, 4),
(5, 2), (3, 1), (1, 4), (2, 5), (5, 6), (3, 3), (4, 4), (2, 1), (3, 2), (4, 3),
(1, 1), (3, 2), (5, 7), (5, 6), (5, 2), (3, 3), (4, 3), (2, 3), (1, 2), (6, 3),
(2, 2), (3, 2), (1, 2), (5, 3), (2, 2), (5, 3), (3, 4), (1, 1), (2, 2), (2, 2)]
#objects = [(5, 3), (3, 4), (1, 1), (2, 2), (2, 2), (10, 8)]
### AUX FUNCTIONS
# Fill containers with objects, starting from the lower left corner
# 0 - width, 1 - height
def FillContainers(container, objects):
containersUsed = 1
widthLeft = containerSize[0]
heightLeft = containerSize[1]
rowHeight = 0
spaceWasted = 0
objsInRow = []
for obj in objects:
# Adding object to current row
if obj[0] <= widthLeft and obj[1] <= heightLeft:
widthLeft -= obj[0]
rowHeight = max(rowHeight, obj[1])
objsInRow.append(obj)
# Adding new row
elif obj[0] > widthLeft and obj[1] <= (heightLeft - rowHeight):
spaceWasted += CalcSpaceWasted(containerSize[0], rowHeight, objsInRow)
objsInRow = [obj]
widthLeft = containerSize[0] - obj[0]
heightLeft -= rowHeight
rowHeight = obj[1]
# Adding new container
else:
spaceWasted += CalcSpaceWasted(containerSize[0], rowHeight, objsInRow)
spaceWasted += (heightLeft-rowHeight) * containerSize[0] # space left in container
objsInRow = [obj]
widthLeft = containerSize[0] - obj[0]
heightLeft = containerSize[1]
rowHeight = obj[1]
containersUsed += 1
spaceWasted += CalcSpaceWasted(containerSize[0], rowHeight, objsInRow) # last obj in [objects]
return (spaceWasted, containersUsed)
# Calculates the space wasted in a row
def CalcSpaceWasted(width, height, objects):
spaceWasted = 0
widthLeft = width
for obj in objects:
spaceWasted += obj[0] * (height - obj[1])
widthLeft -= obj[0]
spaceWasted += widthLeft * height
return spaceWasted
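# Illustrative example (not part of the original source): a 10-wide row of
# height 3 holding objects (5, 3) and (3, 2) wastes
#   CalcSpaceWasted(10, 3, [(5, 3), (3, 2)]) == 5*0 + 3*1 + 2*3 == 9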
# Evaluation for solution that evaluates the order of the objects
def Evaluate(container, objects):
# 0 - spaceWasted, 1 - containersUsed
return FillContainers(container, objects)[0]
# Generates a new list with objects arranged in a different order
def GenerateNeighbour(temp, objects):
newOrder = list(objects)
for i in range(len(objects) - 1):
p = np.random.rand()
if p < 0.5 + temp / 100:
newOrder[i], newOrder[i+1] = newOrder[i+1], newOrder[i]
return newOrder
# Generate Markov chain for Lk iterations
def MarkovChain(temp, Lk, objects):
global containerSize
accepted = 0
for _ in range(Lk):
newOrder = GenerateNeighbour(temp, objects)
currentEval = Evaluate(containerSize, objects)
newEval = Evaluate(containerSize, newOrder)
if newEval < currentEval:
objects = newOrder
accepted += 1
else:
p = np.random.rand()
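            # Metropolis acceptance (added comment): keep a worse ordering with
            # probability exp(-(newEval - currentEval) / temp)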
if p < math.exp(-(newEval - currentEval) / temp):
objects = newOrder
accepted += 1
return accepted / Lk
# Initialize temperature
def InitTemp(temp, objects):
beta = 1.5 # constant b > 1 for temp heating rate
Lk = 100 #
r_min = 0.7 # Minimum acceptance percentage
r_a = 0
while r_a < r_min:
r_a = MarkovChain(temp, Lk, objects)
temp = beta * temp
return temp
# Graph better solution
def ShowLineGraph(x, y):
data = np.array(list(zip(x, y)))
x_val, y_val = data.T
plt.scatter(x_val, y_val)
# Draw lines
for i in range(len(data)-1):
a = data[i]
b = data[i+1]
x_values = [a[0], b[0]]
y_values = [a[1], b[1]]
plt.plot(x_values, y_values)
plt.xlabel("Iteracion")
plt.ylabel("Espacio desperdiciado")
plt.show()
# Shows 500 x 500 grid with objects inside containers
# Max. 25 containers shown
def ShowContainers(containerSize, objects, id):
# Resize for image
containerSize = tuple([10*x for x in containerSize])
objects = tuple([(10*x[0], 10*x[1]) for x in objects])
im = Image.new('RGB', (501, 501), (128, 128, 128))
draw = ImageDraw.Draw(im)
draw.rectangle((containerSize[0], containerSize[1], 0, 0), fill=(0, 0, 0), outline=(255, 255, 255))
widthLeft = containerSize[0]
heightLeft = containerSize[1]
rowHeight = 0
# Current container position
containerX = 0
containerY = 0
# Current object position (0, 0) top left
currX = 0
currY = 0
for obj in objects:
# Adding object to current row
if obj[0] <= widthLeft and obj[1] <= heightLeft:
# Draw object in same row
draw.rectangle((currX, currY, currX+obj[0], currY+obj[1]), fill=(255, 255, 255), outline=(255, 0, 0))
currX += obj[0]
widthLeft -= obj[0]
rowHeight = max(rowHeight, obj[1])
# Adding new row
elif obj[0] > widthLeft and obj[1] <= (heightLeft - rowHeight):
# Draw object in new row
currY += rowHeight
currX = containerX
draw.rectangle((currX, currY, currX+obj[0], currY+obj[1]), fill=(255, 255, 255), outline=(255, 0, 0))
currX += obj[0]
widthLeft = containerSize[0] - obj[0]
heightLeft -= rowHeight
rowHeight = obj[1]
# Adding new container
else:
# Draw new container
containerX += containerSize[0]
if containerX >= 500:
containerX = 0
containerY += containerSize[1]
draw.rectangle((containerX, containerY, containerX+containerSize[0], containerY+containerSize[1]),
fill=(0, 0, 0), outline=(255, 255, 255))
# Draw new object
currY = containerY
currX = containerX
draw.rectangle((currX, currY, currX+obj[0], currY+obj[1]), fill=(255, 255, 255), outline=(255, 0, 0))
currX += obj[0]
widthLeft = containerSize[0] - obj[0]
heightLeft = containerSize[1]
rowHeight = obj[1]
im.save('./containers/container' + str(id) + '.jpg', quality=95)
"""
#objects = sorted(objects, key=lambda x:x[1])
ShowContainers(containerSize, objects, 1)
spaceWasted, numContainers = FillContainers(containerSize, objects)
print("NO. OF CONTAINERS: ", numContainers)
print("SPACE WASTED: ", spaceWasted)
"""
### SIMULATED ANNEALING
Lk = 100
alpha = 0.8 # constant 0 < a < 1 for temp cooling rate
temp = 0.1 # initial temperature
temp = InitTemp(temp, objects)
print("STARTING TEMP.: ", temp)
currSolution = objects
ShowContainers(containerSize, currSolution, 1)
# Stop conditions
minTemp = 0.01
maxSameEval = 20 # max number of iterations with same evaluation
sameEvalCounter = 0
lastEval = 0
# Graph variables
itertations = 0
evaluations = []
# Main loop
while (temp > minTemp) and (sameEvalCounter < maxSameEval):
for _ in range(Lk):
newSolution = GenerateNeighbour(temp, currSolution)
currEval = Evaluate(containerSize, currSolution)
newEval = Evaluate(containerSize, newSolution)
if newEval <= currEval:
currSolution = newSolution
temp = newEval / currEval * temp
else:
p = np.random.rand()
if p < math.exp(-(newEval - currEval) / temp):
currSolution = newSolution
temp = newEval / currEval * temp
temp = alpha * temp
# Compare with previous evaluation
currEval = Evaluate(containerSize, currSolution)
if currEval == lastEval:
sameEvalCounter += 1
else:
sameEvalCounter = 0
lastEval = currEval
# Save data for graph
itertations += 1
evaluations.append(currEval)
# Results
ShowContainers(containerSize, currSolution, 2)
spaceWasted, numContainers = FillContainers(containerSize, currSolution)
print("NO. OF CONTAINERS: ", numContainers)
print("SPACE WASTED: ", spaceWasted)
print("OPTIMAL ORDER: ", currSolution)
x = [i for i in range(len(evaluations))]
ShowLineGraph(x, evaluations)
| 9,000 |