max_stars_repo_path (string, 4–182 chars) | max_stars_repo_name (string, 6–116 chars) | max_stars_count (int64, 0–191k) | id (string, 7 chars) | content (string, 100–10k chars) | size (int64, 100–10k)
---|---|---|---|---|---|
architectures/protoshotxai.py
|
gditzler/ProtoShotXAI-1
| 0 |
2169839
|
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Lambda, TimeDistributed
from tqdm import tqdm
from utils.tensor_operations import *
from utils.distance_functions import *
class ProtoShotXAI:
def __init__(self, model, input_layer=0, feature_layer=-2, class_layer=-1):
if class_layer is not None:
self.class_weights = model.layers[class_layer].get_weights()[0]
self.class_bias = model.layers[class_layer].get_weights()[1]
else:
self.class_weights = None
self.class_bias = None
input_shape = model.input.shape
# input_shape = model.layers[input_layer].input_shape[0]
output_vals = model.layers[feature_layer].output
model = Model(inputs=model.input, outputs=output_vals)
model_5d = TimeDistributed(model)
support = Input(input_shape)
support_features = model_5d(support)
support_features = Lambda(reduce_tensor)(support_features)
query = Input(input_shape)
query_features = model_5d(query)
query_features = Lambda(reshape_query)(query_features)
features = Lambda(cosine_dist_features)([support_features, query_features]) #negative distance
self.model = Model([support, query], features)
def compute_score_from_features(self,features,iclass):
s_feature_t, q_feature_t, s_feature_norm, q_feature_norm = features
s_feature_t = s_feature_t.numpy()
q_feature_t = q_feature_t.numpy()
# if self.class_weights != None:
# s_feature_t = s_feature_t*np.tile(np.expand_dims(self.class_weights[:,iclass],axis=(0,1)),(s_feature_t.shape[0],s_feature_t.shape[1],1))
# q_feature_t = q_feature_t*np.tile(np.expand_dims(self.class_weights[:,iclass],axis=(0,1)),(q_feature_t.shape[0],q_feature_t.shape[1],1))
s_feature_t = s_feature_t*np.tile(np.expand_dims(self.class_weights[:,iclass],axis=(0,1)),(s_feature_t.shape[0],s_feature_t.shape[1],1))
q_feature_t = q_feature_t*np.tile(np.expand_dims(self.class_weights[:,iclass],axis=(0,1)),(q_feature_t.shape[0],q_feature_t.shape[1],1))
s_feature_norm = np.sqrt(np.sum(s_feature_t*s_feature_t,axis=-1))
q_feature_norm = np.sqrt(np.sum(q_feature_t*q_feature_t,axis=-1))
den = s_feature_norm * q_feature_norm
score = np.squeeze(np.sum(s_feature_t*q_feature_t,axis=-1)/den)
return score
def compute_features(self,support_data_expand,query_expand,iclass):
features = self.model([support_data_expand,query_expand])
s_feature_t, q_feature_t, s_feature_norm, q_feature_norm = features
s_feature_t = s_feature_t.numpy()
q_feature_t = q_feature_t.numpy()
# if self.class_weights != None:
# s_feature_t = s_feature_t*np.tile(np.expand_dims(self.class_weights[:,iclass],axis=(0,1)),(s_feature_t.shape[0],s_feature_t.shape[1],1))
# q_feature_t = q_feature_t*np.tile(np.expand_dims(self.class_weights[:,iclass],axis=(0,1)),(q_feature_t.shape[0],q_feature_t.shape[1],1))
s_feature_t = s_feature_t*np.tile(np.expand_dims(self.class_weights[:,iclass],axis=(0,1)),(s_feature_t.shape[0],s_feature_t.shape[1],1))
q_feature_t = q_feature_t*np.tile(np.expand_dims(self.class_weights[:,iclass],axis=(0,1)),(q_feature_t.shape[0],q_feature_t.shape[1],1))
s_feature_norm = np.sqrt(np.sum(s_feature_t*s_feature_t,axis=-1))
q_feature_norm = np.sqrt(np.sum(q_feature_t*q_feature_t,axis=-1))
den = s_feature_norm * q_feature_norm
return s_feature_t, q_feature_t, den
def compute_score(self, support_data_expand, query_expand, class_indx):
# query_expand = np.expand_dims(np.copy(query),axis=0) # Batch size of 1
# support_data_expand = np.expand_dims(np.copy(support_data),axis=0) # Only 1 support set
features = self.model([support_data_expand,query_expand])
scores = self.compute_score_from_features(features,class_indx)
return scores
def image_feature_attribution(self,support_data,query, class_indx, ref_pixel, pad=4 , progress_bar=True):
rows = np.shape(query)[1]
cols = np.shape(query)[2]
chnls = np.shape(query)[3]
query_expand = np.expand_dims(np.copy(query),axis=0) # Batch size of 1
support_data_expand = np.expand_dims(np.copy(support_data),axis=0) # Only 1 support set
features = self.model([support_data_expand,query_expand])
ref_score = self.compute_score_from_features(features,class_indx)
score_matrix = np.zeros((rows,cols))
perturbed_images = np.zeros((cols,rows,cols,chnls))
for ii in tqdm(range(rows),disable=(not progress_bar)):
for jj in range(cols):
perturbed_images[jj,:,:,:] = np.copy(query)
min_ii = np.max([ii-pad,0])
max_ii = np.min([ii+pad,rows])
min_jj = np.max([jj-pad,0])
max_jj = np.min([jj+pad,cols])
for ichnl in range(chnls):
perturbed_images[jj,min_ii:max_ii,min_jj:max_jj,ichnl] = ref_pixel[ichnl]
perturbed_images_expand = np.expand_dims(np.copy(perturbed_images),axis=0)
features = self.model([support_data_expand,perturbed_images_expand])
scores = self.compute_score_from_features(features,class_indx)
score_matrix[ii,:] = ref_score - scores
return score_matrix
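# Usage sketch (illustrative only): wrap a tiny untrained functional model and attribute a
# random query image against random support images. The toy model, shapes and ref_pixel are
# hypothetical, and actually running this still depends on the helpers imported from utils.*.
if __name__ == "__main__":
    from tensorflow.keras import layers, models
    inp = layers.Input((8, 8, 1))
    hidden = layers.Dense(16, activation="relu")(layers.Flatten()(inp))
    toy = models.Model(inp, layers.Dense(3, activation="softmax")(hidden))
    explainer = ProtoShotXAI(toy)
    support = np.random.rand(5, 8, 8, 1).astype("float32")   # 5 support images of the target class
    query = np.random.rand(1, 8, 8, 1).astype("float32")     # one query image to explain
    attribution = explainer.image_feature_attribution(
        support, query, class_indx=0, ref_pixel=[0.0], pad=2, progress_bar=False)
    print(attribution.shape)  # (8, 8) map of per-patch score drops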
| 5,548 |
pyregex/valid_card/__init__.py
|
JASTYN/pythonmaster
| 3 |
2170760
|
def valid_card(card):
# Luhn checksum (assumes an even number of digits, e.g. a 16-digit card): digits at odd
# 0-based indices are kept, digits at even indices are doubled and digit-summed via
# d % 5 * 2 + d // 5; the number is valid when the total is a multiple of 10.
return not sum([d if i & 1 else d % 5 * 2 + d // 5 for i, d in enumerate(map(int, card.replace(" ", "")))]) % 10
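# Quick check with a well-known even-length test number (hypothetical data, not a real account):
if __name__ == "__main__":
    print(valid_card("4111 1111 1111 1111"))  # -> True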
| 138 |
app/models/login_dao.py
|
gss214/Sexto-Andar
| 4 |
2171038
|
class Login:
def __init__(self, codigo, email, senha, permissao):
self.codigo = codigo
self.email = email
self.senha = senha
self.permissao = permissao
# Default Login Data Access Object
class LoginDAO:
def __init__(self):
pass
def create(self, cursor, login):
try:
data = {'email': login.email, 'senha': login.senha, 'permissao': login.permissao}
sql = "INSERT INTO login(email, senha, permissao) VALUES (%(email)s, %(senha)s, %(permissao)s)"
cursor.execute(sql, data)
# check whether the MAX query is really needed
#sql_max = f"SELECT MAX(codigo) from login"
#cursor.execute(sql_max)
#id_login = cursor.fetchone()
#login.codigo = id_login[0]
print("------", cursor.lastrowid)
login.codigo = cursor.lastrowid
except Exception as ex:
print(ex)
def update(self, cursor, login, codigo):
try:
data = {'codigo': codigo, 'email': login.email, 'senha': login.senha, 'permissao' : login.permissao}
sql = "UPDATE login SET email = %(email)s, senha = %(senha)s, permissao = %(permissao)s WHERE codigo = %(codigo)s"
cursor.execute(sql, data)
except Exception as ex:
print(ex)
def delete(self, cursor, codigo):
try:
sql = f"DELETE FROM login WHERE codigo = '{codigo}'"
cursor.execute(sql)
except Exception as ex:
print(ex)
def find_by_id(self, cursor, codigo):
try:
sql = f"SELECT * FROM login WHERE codigo = '{codigo}'"
cursor.execute(sql)
result = cursor.fetchone()
codigo, email, senha, permissao = result
login = Login(codigo, email, senha, permissao)
return login
except Exception as ex:
print(ex)
return None
def find_without_id(self, cursor, email, senha):
try:
sql = f"SELECT * FROM login where email = '{email}' AND senha = '{senha}'"
cursor.execute(sql)
result = cursor.fetchone()
codigo, email, senha, permissao = result
login = Login(codigo, email, senha, permissao)
return login
except:
return None
def find_all(self, cursor):
sql = "SELECT * FROM login"
cursor.execute(sql)
result = cursor.fetchall()
# decide later how to return this
return result
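# Usage sketch (illustrative only): assumes a MySQL-style DB-API driver such as mysql-connector
# or pymysql (both use the %(name)s parameter style seen above), an existing `login` table, and
# hypothetical connection credentials.
if __name__ == "__main__":
    import mysql.connector  # hypothetical driver choice
    conn = mysql.connector.connect(host="localhost", user="app", password="secret", database="sexto_andar")
    cursor = conn.cursor()
    dao = LoginDAO()
    new_login = Login(codigo=None, email="user@example.com", senha="hash", permissao=1)
    dao.create(cursor, new_login)   # sets new_login.codigo from cursor.lastrowid
    conn.commit()
    print(dao.find_by_id(cursor, new_login.codigo).email)
    cursor.close()
    conn.close()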
| 2,530 |
language.py
|
NR-SkaterBoy/LinuxSystemUpdate
| 0 |
2171234
|
#!/usr/bin/python3
# Developer/Author: NR-SkaterBoy
# Github: https://github.com/NR-SkaterBoy
# Website: https://richardneuvald.tk
# E-mail: <EMAIL>
# Linux Systems source package Updater
import json
import os
if (not os.path.isdir("files")):
os.mkdir(os.path.join("files"))
if (not os.path.isfile("files/language.json")):
language = {}
language["Language"] = "English"
json_object = json.dumps(language, indent=3)
with open("files/language.json", "w") as language_file:
language_file.write(json_object)
# Set language
read_lang_file = open("files/language.json", "r")
getLang = json.load(read_lang_file)
if getLang["Language"] == "English":
eng_lang = open("./locales/englis.json", "r")
lang = json.load(eng_lang)
elif getLang["Language"] == "Hungary":
hun_lang = open("./locales/hungary.json", "r")
lang = json.load(hun_lang)
# Main (pref.: m)
m_app_name = lang["main"]["app_name"]
m_app_title = lang["main"]["app_title"]
m_upd_btn = lang["main"]["upd_btn"]
m_web_btn = lang["main"]["web_btn"]
m_git_btn = lang["main"]["git_btn"]
m_h_category = lang["main"]["help"]["category"]
m_h_sup_sys = lang["main"]["help"]["sup_sys"]
m_h_about = lang["main"]["help"]["about"]
m_h_settings = lang["main"]["help"]["setting"]
m_h_quit = lang["main"]["help"]["quit"]
m_n_category = lang["main"]["news"]["category"]
m_n_questionaire = lang["main"]["news"]["questionaire"]
m_i_category = lang["main"]["info"]["category"]
m_i_log_file = lang["main"]["info"]["log_file"]
m_i_last_log = lang["main"]["info"]["last_log"]
m_i_system = lang["main"]["info"]["system"]
# Settings (pref.: s)
s_window_name = lang["settings"]["window_name"]
s_title = lang["settings"]["title"]
s_node = lang["settings"]["node"]
s_pm2 = lang["settings"]["pm2"]
s_lang_title = lang["settings"]["l_title"]
s_lang_en = lang["settings"]["english"]
s_lang_hu = lang["settings"]["hungary"]
s_h_category = lang["settings"]["help"]["category"]
s_h_whatisit = lang["settings"]["help"]["whatisit"]
s_m_category = lang["settings"]["modules"]["category"]
s_m_node = lang["settings"]["modules"]["node"]
s_m_pm2 = lang["settings"]["modules"]["pm2"]
# Title of messagebox (pref.: t)
t_howtouse = lang["tmessagebox"]["howtouseit"]
t_error = lang["tmessagebox"]["error"]
t_sup_sys = lang["tmessagebox"]["sup_sys"]
t_sys_inf = lang["tmessagebox"]["sys_inf"]
t_about = lang["tmessagebox"]["about_project"]
t_lastlog = lang["tmessagebox"]["lastlog"]
t_notify = lang["tmessagebox"]["notify"]
# Description of messagebox (pref.: d)
d_restart = lang["dmessagebox"]["restart"]
d_howtouse = lang["dmessagebox"]["howtouse"]
d_about = lang["dmessagebox"]["about"]
d_not_res = lang["dmessagebox"]["notify_res"]
| 2,724 |
bclib.py
|
eun2ce/bancor
| 1 |
2170937
|
class bancor:
total_supply = 0
reserved_token = 0
price = 0
cw = 0
def __init__(self, token, rate, cw):
bancor.total_supply = token * rate
bancor.reserved_token = (bancor.total_supply * cw)
bancor.price = bancor.reserved_token / (bancor.total_supply * cw)
bancor.cw = cw
@staticmethod
def issue_by_reserve_token(amount):
smart_token_amount = bancor.total_supply * (((1 + (amount / bancor.reserved_token)) ** bancor.cw) -1)
bancor.reserved_token = bancor.reserved_token + amount
bancor.total_supply = bancor.total_supply + smart_token_amount
bancor.price = bancor.reserved_token / (bancor.total_supply * bancor.cw)
return smart_token_amount
@staticmethod
def destroy_by_reserve_token(amount):
smart_token_amount = bancor.reserved_token * (1 - ((1 - (amount / bancor.total_supply)) ** (1/bancor.cw)))
bancor.reserved_token = bancor.reserved_token -smart_token_amount
bancor.total_supply = bancor.total_supply - amount
bancor.price = bancor.reserved_token / (bancor.total_supply * bancor.cw)
return smart_token_amount
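# Usage sketch (illustrative numbers only): initialise a token with a 50% connector weight,
# deposit reserve to mint smart tokens, then destroy them again and check the price.
if __name__ == "__main__":
    bancor(token=1000, rate=1.0, cw=0.5)                 # total_supply=1000, reserved_token=500
    minted = bancor.issue_by_reserve_token(100)          # ~95.4 smart tokens for 100 reserve
    refunded = bancor.destroy_by_reserve_token(minted)   # ~100 reserve returned for those tokens
    print(minted, refunded, bancor.price)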
| 1,166 |
egs/avspeech/looking-to-listen/local/loader/remove_corrupt.py
|
ldelebec/asteroid
| 722 |
2170443
|
import os
import pandas as pd
df_train = pd.read_csv("../../data/train.csv")
df_val = pd.read_csv("../../data/val.csv")
print(df_train.shape)
print(df_val.shape)
corrupt_files = []
with open("../../data/corrupt_frames_list.txt") as f:
corrupt_files = f.readlines()
corrupt_files = set(corrupt_files)
print(len(corrupt_files))
corrupt_files = [c.rstrip("\n") for c in corrupt_files]
print(corrupt_files)
df_train = df_train[~df_train["video_1"].isin(corrupt_files)]
df_val = df_val[~df_val["video_1"].isin(corrupt_files)]
df_train = df_train[~df_train["video_2"].isin(corrupt_files)]
df_val = df_val[~df_val["video_2"].isin(corrupt_files)]
print(df_train.shape)
print(df_val.shape)
df_train.to_csv("../../data/train.csv", index=False)
df_val.to_csv("../../data/val.csv", index=False)
| 789 |
workbench/logbook/migrations/0010_auto_20190709_2140.py
|
yoshson/workbench
| 15 |
2169971
|
# Generated by Django 2.2.2 on 2019-07-09 19:40
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("expenses", "0001_initial"),
("logbook", "0009_auto_20190628_1659"),
]
operations = [
migrations.RemoveField(model_name="loggedcost", name="expenses_reimbursed_at"),
migrations.AddField(
model_name="loggedcost",
name="expense_report",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="expenses",
to="expenses.ExpenseReport",
verbose_name="expense report",
),
),
]
| 805 |
verboselib/cli/main.py
|
oblalex/verboselib
| 3 |
2170250
|
import argparse
import functools
from verboselib.version import VERSION
from .utils import print_out
from .command_compile import CompileCommand
from .command_extract import ExtractCommand
def show_version() -> None:
print_out(f"verboselib {VERSION}")
def make_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description="run a verboselib command",
add_help=True,
)
parser.add_argument(
"-V", "--version",
dest="show_version",
action="store_true",
help="show version of verboselib and exit"
)
subparsers = parser.add_subparsers(
title="subcommands",
dest="command_name",
)
extract_cmd_parser = ExtractCommand.make_parser(
factory=functools.partial(
subparsers.add_parser,
name=ExtractCommand.name,
aliases=ExtractCommand.aliases,
),
)
extract_cmd_parser.set_defaults(executor_factory=ExtractCommand.make_executor)
compile_cmd_parser = CompileCommand.make_parser(
factory=functools.partial(
subparsers.add_parser,
name=CompileCommand.name,
aliases=CompileCommand.aliases,
),
)
compile_cmd_parser.set_defaults(executor_factory=CompileCommand.make_executor)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
if args.show_version:
show_version()
return
if hasattr(args, "executor_factory"):
executor = args.executor_factory(args)
executor()
else:
parser.print_help()
| 1,464 |
sense_demo.py
|
williamhaley/pi-scripts
| 0 |
2167933
|
#!/usr/bin/env python3
from sense_hat import SenseHat, ACTION_PRESSED, ACTION_HELD, ACTION_RELEASED
from signal import pause
import time
import math
import atexit
import subprocess
import os
from subprocess import Popen
from sense import rainbow
"""
Respond to joystick presses and update the Sense HAT on a Raspberry Pi
"""
x = 0
y = 0
sense = SenseHat()
rainbow_pixels = rainbow.default_pixels()
total_pixels = len(sense.get_pixels())
dim_size = int(math.sqrt(total_pixels))
max_value = dim_size - 1
processes = []
last_pressed_time = 0
def on_exit():
sense.clear()
def index(x, y):
return y * dim_size + x
def clamp(value, min_value=0, max_value=max_value):
return min(max_value, max(min_value, value))
def pushed_up(event):
global y
if event.action != ACTION_RELEASED:
y = clamp(y - 1)
def pushed_down(event):
global y
if event.action != ACTION_RELEASED:
y = clamp(y + 1)
def pushed_left(event):
global x
if event.action != ACTION_RELEASED:
x = clamp(x - 1)
def pushed_right(event):
global x
if event.action != ACTION_RELEASED:
x = clamp(x + 1)
def refresh():
sense.clear()
[r, g, b] = rainbow_pixels[index(x, y)]
sense.set_pixel(x, y, r, g, b)
def play_audio(file_name):
global processes
print('playing:', file_name)
processes.append(Popen(
['mpg123', file_name],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
))
def kill_running_processes():
global processes
print('kill running processes')
for process in processes:
process.kill()
# Must zero out or processes will be zombies and never GCed
processes = []
def pressed(event):
global last_pressed_time
global processes
print('action:', event.action)
if event.action == ACTION_RELEASED:
if time.time() - last_pressed_time < 0.5:
i = index(x, y) + 1
file_name = '{}/{}.mp3'.format(os.getcwd(), i)
play_audio(file_name)
else:
kill_running_processes()
elif event.action == ACTION_HELD:
print('held:')
elif event.action == ACTION_PRESSED:
last_pressed_time = time.time()
else:
print('unknown action')
atexit.register(on_exit)
sense.stick.direction_up = pushed_up
sense.stick.direction_down = pushed_down
sense.stick.direction_left = pushed_left
sense.stick.direction_right = pushed_right
sense.stick.direction_middle = pressed
sense.stick.direction_any = refresh
refresh()
pause()
| 2,535 |
binpackp/__init__.py
|
ibigpapa/bin_packing_problem
| 5 |
2171468
|
from .bins import SimpleBin, NumberBin, VolumeError
from .fit import Fit, BinReduction, BinOrdering
| 101 |
Module3_Data_for_ML/4_Gradient_descent/Gradient_descent_notebook.py
|
EllieBrakoniecki/AICOREDATASCIENCE
| 0 |
2169398
|
#%%
## STOCHASTIC GRADIENT DESCENT
import matplotlib.pyplot as plt
def plot_loss(losses):
"""Helper function for plotting loss against epoch"""
plt.figure() # make a figure
plt.ylabel('Cost')
plt.xlabel('Epoch')
plt.plot(losses) # plot costs
plt.show()
# %%
### THE DATA ###
from sklearn import datasets, model_selection
from aicore.ml import data
import pandas as pd
import numpy as np
# Use `data.split` in order to split the data into train, validation, test
(X_train, y_train), (X_validation, y_validation), (X_test, y_test) = data.split(
datasets.load_boston(return_X_y=True)
)
X_train, X_validation, X_test = data.standardize_multiple(X_train, X_validation, X_test)
# %%
### THE MODEL ###
# Here's the same model we implemented before
class LinearRegression:
def __init__(self, optimiser, n_features): # initalize parameters
self.w = np.random.randn(n_features) ## randomly initialise weight
self.b = np.random.randn() ## randomly initialise bias
self.optimiser = optimiser
def predict(self, X): # how do we calculate output from an input in our model?
ypred = X @ self.w + self.b ## make a prediction using a linear hypothesis
return ypred # return prediction
def fit(self, X, y):
all_costs = [] ## initialise empty list of costs to plot later
for epoch in range(self.optimiser.epochs): ## for this many complete runs through the dataset
# MAKE PREDICTIONS AND UPDATE MODEL
predictions = self.predict(X) ## make predictions
print('shape_pred',predictions.shape)
new_w, new_b = self.optimiser.step(self.w, self.b, X, predictions, y) ## calculate updated params
self._update_params(new_w, new_b) ## update model weight and bias
# CALCULATE LOSS FOR VISUALISING
cost = LinearRegression.mse_loss(predictions, y) ## compute loss
all_costs.append(cost) ## add cost for this batch of examples to the list of costs (for plotting)
plot_loss(all_costs)
print('Final cost:', cost)
print('Weight values:', self.w)
print('Bias values:', self.b)
def _update_params(self, new_w, new_b):
self.w = new_w ## set this instance's weights to the new weight value passed to the function
self.b = new_b ## do the same for the bias
@staticmethod
def mse_loss(y_hat, labels): # define our criterion (loss function)
errors = y_hat - labels ## calculate errors
squared_errors = errors ** 2 ## square errors
mean_squared_error = sum(squared_errors) / len(squared_errors) ## calculate mean
return mean_squared_error # return loss
# %%
# THE OPTIMISER _ gradient descent
import numpy as np
class SGDOptimiser:
def __init__(self, lr, epochs):
self.lr = lr
self.epochs = epochs
def _calc_deriv(self, features, predictions, labels):
m = len(labels) ## m = number of examples
diffs = predictions - labels ## calculate errors
dLdw = 2 * np.sum(features.T * diffs, axis=1) / m ## derivative of the loss w.r.t. the weights (sum over examples, one entry per feature)
dLdb = 2 * np.sum(diffs) / m ## calculate derivative of loss with respect to bias
return dLdw, dLdb ## return rate of change of loss wrt w and wrt b
def step(self, w, b, features, predictions, labels):
dLdw, dLdb = self._calc_deriv(features, predictions, labels)
new_w = w - self.lr * dLdw
new_b = b - self.lr * dLdb
return new_w, new_b
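# %%
# A quick finite-difference sanity check of the analytic gradient above (a minimal sketch on
# random data; X_check and y_check are illustrative names, not part of the dataset used later).
def _check_gradient():
    rng = np.random.default_rng(0)
    X_check = rng.normal(size=(20, 3))
    y_check = rng.normal(size=20)
    w = rng.normal(size=3)
    b = 0.5
    opt = SGDOptimiser(lr=0.01, epochs=1)
    analytic_w, analytic_b = opt._calc_deriv(X_check, X_check @ w + b, y_check)
    loss = lambda w_, b_: np.mean((X_check @ w_ + b_ - y_check) ** 2)
    eps = 1e-6
    numeric_w = np.array([(loss(w + eps * np.eye(3)[i], b) - loss(w - eps * np.eye(3)[i], b)) / (2 * eps) for i in range(3)])
    numeric_b = (loss(w, b + eps) - loss(w, b - eps)) / (2 * eps)
    print(np.allclose(analytic_w, numeric_w, atol=1e-4), np.isclose(analytic_b, numeric_b, atol=1e-4))
_check_gradient()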
# %%
# PUTTING ALL TOGETHER
num_epochs = 1000
learning_rate = 0.001
optimiser = SGDOptimiser(lr=learning_rate, epochs=num_epochs)
model = LinearRegression(optimiser=optimiser, n_features=X_train.shape[1])
model.fit(X_train, y_train)
#%%
##### sklearn example #######
# sklearn packs everything we just did above into it's simple LinearRegression API.
from sklearn.linear_model import LinearRegression
linear_regression_model = LinearRegression() ## instantiate the linear regression model
def mse_loss(y_hat, labels): # define our criterion (loss function)
errors = y_hat - labels ## calculate errors
squared_errors = errors ** 2 ## square errors
mean_squared_error = sum(squared_errors) / len(squared_errors) ## calculate mean
return mean_squared_error # return loss
def calculate_loss(model, X, y):
return mse_loss(model.predict(X),y)
model = linear_regression_model.fit(X_train, y_train) ## fit the model
print(f"Training loss before fit: {calculate_loss(model, X_train, y_train)}")
print(
f"Validation loss before fit: {calculate_loss(model, X_validation, y_validation)}"
)
print(f"Test loss before fit: {calculate_loss(model, X_validation, y_validation)}")
# %%
epochs = 10000
model.fit(X_train, y_train)
print(f"Training loss after fit: {calculate_loss(model, X_train, y_train)}")
print(f"Validation loss after fit: {calculate_loss(model, X_validation, y_validation)}")
print(f"Test loss after fit: {calculate_loss(model, X_validation, y_validation)}")
print('final weights:', model.coef_)
print('final bias:', model.intercept_)
#%%
# FUNCTION TO NORMALISE DATA
def standardize_data(dataset, mean=None, std=None):
if mean is None and std is None:
mean, std = np.mean(dataset, axis=0), np.std(
dataset, axis=0
) ## get mean and standard deviation of dataset
standardized_dataset = (dataset - mean) / std
return standardized_dataset, (mean, std)
X_train, (mean, std) = standardize_data(X_train)
# %%
| 5,571 |
sabnzbd.py
|
anast20sm/Addarr
| 135 |
2171750
|
import requests
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import ConversationHandler
from commons import authentication, checkAdmin, checkId, generateApiQuery
from config import config
from translations import i18n
import logging
import logger
# Set up logging
logLevel = logging.DEBUG if config.get("debugLogging", False) else logging.INFO
logger = logger.getLogger("addarr.radarr", logLevel, config.get("logToConsole", False))
config = config["sabnzbd"]
SABNZBD_SPEED_LIMIT_25 = '25'
SABNZBD_SPEED_LIMIT_50 = '50'
SABNZBD_SPEED_LIMIT_100 = '100'
def sabnzbd(update, context):
if not config["enable"]:
context.bot.send_message(
chat_id=update.effective_message.chat_id,
text=i18n.t("addarr.Sabnzbd.NotEnabled"),
)
return ConversationHandler.END
if not checkId(update):
context.bot.send_message(
chat_id=update.effective_message.chat_id, text=i18n.t("addarr.Authorize")
)
return SABNZBD_SPEED_LIMIT_100
if not checkAdmin(update):
context.bot.send_message(
chat_id=update.effective_message.chat_id,
text=i18n.t("addarr.NotAdmin"),
)
return SABNZBD_SPEED_LIMIT_100
keyboard = [[
InlineKeyboardButton(
'\U0001F40C ' + i18n.t("addarr.Sabnzbd.Limit25"),
callback_data=SABNZBD_SPEED_LIMIT_25
),
InlineKeyboardButton(
'\U0001F40E ' + i18n.t("addarr.Sabnzbd.Limit50"),
callback_data=SABNZBD_SPEED_LIMIT_50
),
InlineKeyboardButton(
'\U0001F406 ' + i18n.t("addarr.Sabnzbd.Limit100"),
callback_data=SABNZBD_SPEED_LIMIT_100
),
]]
markup = InlineKeyboardMarkup(keyboard)
update.message.reply_text(
i18n.t("addarr.Sabnzbd.Speed"), reply_markup=markup
)
return SABNZBD_SPEED_LIMIT_100
def changeSpeedSabnzbd(update, context):
if not checkId(update):
if (
authentication(update, context) == "added"
): # To also stop the beginning command
return ConversationHandler.END
choice = update.callback_query.data
url = generateApiQuery("sabnzbd", "",
{'output': 'json', 'mode': 'config', 'name': 'speedlimit', 'value': choice})
req = requests.get(url)
message = None
if req.status_code == 200:
if choice == SABNZBD_SPEED_LIMIT_100:
message = i18n.t("addarr.Sabnzbd.ChangedTo100")
elif choice == SABNZBD_SPEED_LIMIT_50:
message = i18n.t("addarr.Sabnzbd.ChangedTo50")
elif choice == SABNZBD_SPEED_LIMIT_25:
message = i18n.t("addarr.Sabnzbd.ChangedTo25")
else:
message = i18n.t("addarr.Sabnzbd.Error")
context.bot.send_message(
chat_id=update.effective_message.chat_id,
text=message,
)
return ConversationHandler.END
| 2,940 |
dns/dns-client.py
|
elaineo/playground21
| 85 |
2170646
|
#
# Command line usage:
# $ python3 dns-client.py --help
#
import json
import os
import sys
import click
import pprint
# import from the 21 Developer Library
from two1.commands.config import Config
from two1.lib.wallet import Wallet
from two1.lib.bitrequests import BitTransferRequests
pp = pprint.PrettyPrinter(indent=2)
# set up bitrequest client for BitTransfer requests
wallet = Wallet()
username = Config().username
requests = BitTransferRequests(wallet, username)
DNSCLI_VERSION = '0.1'
DEFAULT_ENDPOINT = 'http://localhost:12005/'
@click.group()
@click.option('--endpoint', '-e',
default=DEFAULT_ENDPOINT,
metavar='STRING',
show_default=True,
help='API endpoint URI')
@click.option('--debug', '-d',
is_flag=True,
help='Turns on debugging messages.')
@click.version_option(DNSCLI_VERSION)
@click.pass_context
def main(ctx, endpoint, debug):
""" Command-line Interface for the DDNS API service
"""
if ctx.obj is None:
ctx.obj = {}
ctx.obj['endpoint'] = endpoint
@click.command(name='info')
@click.pass_context
def cmd_info(ctx):
sel_url = ctx.obj['endpoint']
answer = requests.get(url=sel_url.format())
print(answer.text)
@click.command(name='domains')
@click.pass_context
def cmd_domains(ctx):
sel_url = ctx.obj['endpoint'] + 'dns/1/domains'
answer = requests.get(url=sel_url.format())
print(answer.text)
@click.command(name='register')
@click.argument('name')
@click.argument('domain')
@click.argument('days')
@click.argument('recordlist', nargs=-1)
@click.pass_context
def cmd_register(ctx, name, domain, days, recordlist):
pubkey = wallet.get_message_signing_public_key()
addr = pubkey.address()
print("Registering with key %s" % (addr,))
records = []
for arg in recordlist:
words = arg.split(',')
host_obj = {
'ttl': int(words[0]),
'rec_type': words[1],
'address': words[2],
}
records.append(host_obj)
req_obj = {
'name': name,
'domain': domain,
'days': int(days),
'pkh': addr,
'hosts': records,
}
sel_url = ctx.obj['endpoint'] + 'dns/1/host.register'
body = json.dumps(req_obj)
headers = {'Content-Type': 'application/json'}
answer = requests.post(url=sel_url.format(), headers=headers, data=body)
print(answer.text)
@click.command(name='simpleregister')
@click.argument('name')
@click.argument('domain')
@click.argument('days')
@click.argument('ipaddress')
@click.pass_context
def cmd_simpleRegister(ctx, name, domain, days, ipaddress):
sel_url = ctx.obj['endpoint'] + 'dns/1/simpleRegister?name={0}&domain={1}&days={2}&ip={3}'
answer = requests.get(url=sel_url.format(name, domain, days, ipaddress))
print(answer.text)
@click.command(name='update')
@click.argument('name')
@click.argument('domain')
@click.argument('pkh')
@click.argument('records', nargs=-1)
@click.pass_context
def cmd_update(ctx, name, domain, pkh, records):
req_obj = {
'name': name,
'domain': domain,
'hosts': [],
}
for record in records:
words = record.split(',')
host_obj = {
'ttl': int(words[0]),
'rec_type': words[1],
'address': words[2],
}
req_obj['hosts'].append(host_obj)
body = json.dumps(req_obj)
sig_str = wallet.sign_bitcoin_message(body, pkh)
if not wallet.verify_bitcoin_message(body, sig_str, pkh):
print("Cannot self-verify message")
sys.exit(1)
sel_url = ctx.obj['endpoint'] + 'dns/1/records.update'
headers = {
'Content-Type': 'application/json',
'X-Bitcoin-Sig': sig_str,
}
answer = requests.post(url=sel_url.format(), headers=headers, data=body)
print(answer.text)
@click.command(name='delete')
@click.argument('name')
@click.argument('domain')
@click.argument('pkh')
@click.pass_context
def cmd_delete(ctx, name, domain, pkh):
req_obj = {
'name': name,
'domain': domain,
'pkh': pkh
}
body = json.dumps(req_obj)
sig_str = wallet.sign_bitcoin_message(body, pkh)
if not wallet.verify_bitcoin_message(body, sig_str, pkh):
print("Cannot self-verify message")
sys.exit(1)
sel_url = ctx.obj['endpoint'] + 'dns/1/host.delete'
headers = {
'Content-Type': 'application/json',
'X-Bitcoin-Sig': sig_str,
}
answer = requests.post(url=sel_url.format(), headers=headers, data=body)
print(answer.text)
main.add_command(cmd_info)
main.add_command(cmd_domains)
main.add_command(cmd_register)
main.add_command(cmd_simpleRegister)
main.add_command(cmd_update)
main.add_command(cmd_delete)
if __name__ == "__main__":
main()
| 4,802 |
stream_video_audio_delay.py
|
oyzzo/simpleStreamTests
| 0 |
2170688
|
#!/usr/bin/python
#sudo ports install ffmpeg pkg-config
#sudo pip install av
import av
#settings
teststream = "./mega1.ts"
threshold = float(0.2) #Delay should be less than this (0.2 Recommended)
if __name__ == "__main__":
video_time = float()
audio_time = [] #More than 1 audio possible
delay = float()
print "Testing stream..."
print teststream
container = av.open(teststream)
#Compute start time based on each stream time_base
print "Checking audio-video delay...."
for s in container.streams:
if (s.type == 'audio'):
audio_time.append( float(s.start_time * s.time_base))
elif (s.type == 'video'):
video_time = float(s.start_time * s.time_base)
#Print the results
print "%d audio streams" % len(audio_time)
print "Audio-Video delay (max):"
for at in audio_time:
delay = max(delay,abs(video_time - at))
print "%f (s)" % delay
#Doing this we let know other that something went wrong
#For example we could use this script in monit
if delay>= threshold:
exit(1)
| 1,095 |
FirstAllNodeEdge/NodeAttributeNum.py
|
CocoGzh/Bioentity2vec
| 4 |
2169355
|
from numpy import *
import numpy as np
import random
import math
import os
import time
import pandas as pd
import csv
import math
import random
# Function definitions
def ReadMyCsv(SaveList, fileName):
csv_reader = csv.reader(open(fileName))
for row in csv_reader: # note: the header row is included
SaveList.append(row)
return
def ReadMyCsv2(SaveList, fileName):
csv_reader = csv.reader(open(fileName))
for row in csv_reader:
for i in range(len(row)): # convert data types
row[i] = float(row[i])
SaveList.append(row)
return
def StorFile(data, fileName):
with open(fileName, "w", newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(data)
return
# Data
# Nodes
def MyNodeAttributeNum():
AllNode = []
ReadMyCsv(AllNode, "FirstAllNodeEdge\AllNode.csv")
print('len(AllNode)', len(AllNode))
print('AllNode[0]', AllNode[0])
AllNodeNum = []
counter = 0
while counter < len(AllNode):
pair = []
pair.append(counter)
AllNodeNum.append(pair)
counter = counter + 1
print('AllNodeNum[0]', AllNodeNum[0])
StorFile(AllNodeNum, 'FirstAllNodeEdge\AllNodeNum.csv')
AllNodeAttribute = []
ReadMyCsv(AllNodeAttribute, "FirstAllNodeEdge\AllNodeAttribute.csv")
print('len(AllNodeAttribute)', len(AllNodeAttribute))
print('AllNodeAttribute[0]', AllNodeAttribute[0])
AllNodeAttributeNum = []
counter = 0
while counter < len(AllNodeAttribute):
AllNodeAttributeNum.append(AllNodeAttribute[counter][1:])
counter = counter + 1
print('AllNodeAttributeNum[0]', AllNodeAttributeNum[0])
StorFile(AllNodeAttributeNum, 'FirstAllNodeEdge\AllNodeAttributeNum.csv')
return AllNodeNum, AllNodeAttributeNum
| 1,823 |
examples/weaving-circuits/run_hierarchical_design.py
|
PhilippBoeing/synbioweaver
| 0 |
2171303
|
from synbioweaver.core import *
from synbioweaver.aspects.designRulesAspect import *
from instantiateAbstractMoleculesAspect import *
from synbioweaver.aspects.pigeonOutputAspect import *
declareNewMolecule('AHL')
declareNewMolecule('GFP')
declareNewMolecule('mCherry')
declareNewMolecule('CFP')
declareNewMolecule('LacI')
declareNewMolecule('IPTG')
declareNewMolecule('LacI_IPTG')
declareNewMolecule('zero')
declareNewPart('t1',Terminator)
declareNewPart('t2',Terminator)
declareNewPart('t3',Terminator)
declareNewPart('r1',RBS )
declareNewPart('r2',RBS )
declareNewPart('r3',RBS )
declareNewPart('P_lux', PositivePromoter, [AHL] )
declareNewPart('cGFP', CodingRegion, moleculesAfter=[GFP] )
declareNewPart('P_lac', NegativePromoter, [LacI] )
declareNewPart('cLacI', CodingRegion, moleculesAfter=[LacI] )
declareNewPart('Pc', ConstitutivePromoter )
declareNewPart('cmCherry', CodingRegion, moleculesAfter=[mCherry] )
class AHLReceiver(Circuit):
def mainCircuit(self):
self.createMolecule(AHL)
self.addPart(P_lux)
self.addPart(r1)
self.addPart(cGFP)
self.addPart(t1)
class Inverter(Circuit):
def mainCircuit(self):
self.createMolecule(IPTG)
#self.addPart(Pc)
self.addPart(r2)
self.addPart(cLacI)
self.addPart(t2)
self.addPart(P_lac)
#self.addPart(r3)
#self.addPart(cmCherry)
#self.addPart(t3)
#inducer must come first to display in pigeon properly
self.reactionFrom(IPTG, LacI) >> self.reactionTo( LacI_IPTG )
self.reactionFrom(LacI_IPTG) >> self.reactionTo( zero )
class InverterFull(Circuit):
def mainCircuit(self):
self.createMolecule(IPTG)
self.addPart(Pc)
self.addPart(r2)
self.addPart(cLacI)
self.addPart(t2)
self.addPart(P_lac)
self.addPart(r3)
self.addPart(cmCherry)
self.addPart(t3)
#inducer must come first to display in pigeon properly
self.reactionFrom(IPTG, LacI) >> self.reactionTo( LacI_IPTG )
self.reactionFrom(LacI_IPTG) >> self.reactionTo( zero )
class Inversion(Aspect):
def mainAspect(self):
self.Inverter = Inverter()
PosPromoter = PartSignature('AHLReceiver.PositivePromoter+')
afterPosPromoter = PointCut(PosPromoter,PointCut.AFTER)
self.addAdvice(afterPosPromoter,self.insertInverter)
def insertInverter(self,context):
#print context, context.part
self.addPart(Inverter())
class SwapReporter(Aspect):
def mainAspect(self):
Reporter = PartSignature('*.CodingRegion+(GFP)')
atReporter = PointCut(Reporter,PointCut.REPLACE)
self.addAdvice(atReporter,self.replaceWithCFP)
#self.addAdvice(atReporter,self.replaceWithCircuit)
def replaceWithCFP(self,context):
self.createMolecule(CFP)
declareNewPart('cCFP', CodingRegion, moleculesAfter=[CFP])
self.addPart(cCFP)
return True
#def replaceWithCircuit(self,context):
# self.addPart(InverterFull())
# return True
print "AHL receiver"
d1 = Weaver(AHLReceiver, PigeonOutput).output()
print d1.printPigeonOutput()
print "Inverter"
d2 = Weaver(Inverter, PigeonOutput).output()
print d2.printPigeonOutput()
#print "InverterFull"
#d = Weaver(InverterFull, PigeonOutput).output()
#print d.printPigeonOutput()
# Here we use the Inverter circuit as a new part to insert into the AHL receiver
print "Inverted AHL receiver"
d3 = Weaver(AHLReceiver, Inversion, PigeonOutput).output()
print d3.printPigeonOutput()
print "Inverted AHL receiver + GFP -> CFP"
d4 = Weaver(AHLReceiver, Inversion, SwapReporter, PigeonOutput).output()
print d4.printPigeonOutput()
| 3,782 |
curveROC_frontiers2016_causality.py
|
danilobenozzo/supervised_causality_detection
| 2 |
2170598
|
"""
Compute the ROC curve and AUC for CBC and MBC (in this latter case by defining different cost matrices)
"""
import numpy as np
import pickle
import matplotlib.pyplot as plt
from score_function import best_decision, compute_score_matrix
from create_trainset import class_to_configuration
import itertools
def compute_roc_auc(y_test_pred, y_test_level2, predicted_probability, nTrial, nCh, mvgc_flag):
print "Assigning label"
index_x = np.append(np.triu_indices(nCh,1)[0], np.tril_indices(nCh,-1)[0])
index_y = np.append(np.triu_indices(nCh,1)[1], np.tril_indices(nCh,-1)[1])
#################################
print "In case of mvgc"
if mvgc_flag:
pred_prob_tmp = []
pred_prob_tmp += [predicted_probability[i][index_x[None,:],index_y] for i in range(nTrial)]
pred_prob_tmp = np.vstack(pred_prob_tmp)
predicted_probability = np.zeros([nTrial, len(index_x), 2])
predicted_probability[:,:,0] = pred_prob_tmp
predicted_probability[:,:,1] = 1-pred_prob_tmp
del pred_prob_tmp
print "Stacking for using sklearn roc_curve, for MVGC and SL"
y_true = []
y_true += [y_test_level2[i][index_x[None,:],index_y] for i in range(nTrial)]
y_true = np.hstack(np.vstack(y_true))
predicted_probability_class1 = np.hstack(np.squeeze(predicted_probability[:,:,1]))
print "Roc curve computed by sklearn"
from sklearn import metrics
idx_not_nan=np.logical_not(np.isnan(predicted_probability_class1))
fpr, tpr, thresholds = metrics.roc_curve(y_true[idx_not_nan], predicted_probability_class1[idx_not_nan], pos_label=1)
auc = metrics.roc_auc_score(y_true[idx_not_nan], predicted_probability_class1[idx_not_nan])
return fpr,tpr,auc
if __name__ == '__main__':
cbc = True #CBC or MBC
pwd = '<PASSWORD>/'
if cbc:
print "CBC cell based classifier"
filename_open = '%ssimulated_Ldataset_tws10_r2_mse_granger_binary_class_rowNorm_fEng_cv.pickle' % (pwd)
print "Opening %s" % filename_open
data = pickle.load(open(filename_open))
y_test_pred = data['y_test_pred']
y_test_level2 = data['y_test_true']
predicted_probability = data['predicted_probability']
nTrial, nCh = y_test_level2.shape[:2]
mvgc_flag=0
fpr,tpr,auc_score = compute_roc_auc(y_test_pred, y_test_level2, predicted_probability, nTrial, nCh, mvgc_flag)
plot_label = 'CBC'
else:
print "MBC matrix based classifier"
filename_open = '%ssimulated_Ldataset_tws10_r2_mse_granger_notBinary_class_rowNorm_fEng_cv.pickle' % (pwd)
print "Opening %s" % filename_open
data = pickle.load(open(filename_open))
y_test_pred = data['y_test_pred']
y_test_level2 = data['y_test_true']
predicted_probability = data['predicted_probability']
nTrial, nCh = y_test_level2.shape[:2]
n_iter=50
fpr = np.zeros(n_iter)
tpr = np.zeros(n_iter)
for i_iter, iter_i in enumerate(itertools.product(np.arange(-3,0,0.3),np.arange(0,1,0.2))):
print "Building score matrix"
print iter_i
binary_score = [1,0,iter_i[0],iter_i[1]]
score_matrix = compute_score_matrix(n=64, binary_score=binary_score)
print "Compute prediction according to the score matrix"
y_pred = np.array([best_decision(prob_configuration, score_matrix=score_matrix)[0] for prob_configuration in predicted_probability])
y_pred_conf = []
y_pred_conf += [class_to_configuration(y_pred[i_trial], verbose=False) for i_trial in range(nTrial)]
y_pred_conf = np.array(y_pred_conf)
print "Confusion matrices"
conf_mat = np.zeros([2,2])
n_conect = np.array(y_test_level2.sum(-1).sum(-1), dtype=np.float)
n_noconect = np.repeat(nCh*(nCh-1), nTrial) - n_conect
true_pos = np.zeros(nTrial)
false_pos = np.zeros(nTrial)
false_neg = np.zeros(nTrial)
true_neg = np.zeros(nTrial)
for i_trial in range(nTrial):
true_pos[i_trial] = np.logical_and(y_test_level2[i_trial], y_pred_conf[i_trial]).sum()
false_pos[i_trial] = np.logical_and( np.logical_xor(y_test_level2[i_trial], y_pred_conf[i_trial]), y_pred_conf[i_trial]).sum() - nCh #to remove the diagonal
false_neg[i_trial] = np.logical_and( np.logical_xor(y_test_level2[i_trial], y_pred_conf[i_trial]), y_test_level2[i_trial]).sum()
true_neg[i_trial] = np.logical_and(np.logical_not(y_test_level2[i_trial]), np.logical_not(y_pred_conf[i_trial])).sum()
conf_mat[0,0] = np.sum(true_pos)/np.sum(n_conect)#true_pos[i_bin_th].mean()
conf_mat[0,1] = np.sum(false_neg)/np.sum(n_conect)#false_neg[i_bin_th].mean()
conf_mat[1,0] = np.sum(false_pos)/np.sum(n_noconect)#false_pos[i_bin_th].mean()
conf_mat[1,1] = np.sum(true_neg)/np.sum(n_noconect)#true_neg[i_bin_th].mean()#1 - conf_mat[i_bin_th,1,0]
fpr[i_iter] = conf_mat[1,0]
tpr[i_iter] = conf_mat[0,0]
print "Compute auc"
from sklearn import metrics
x_point = np.append(np.insert(fpr,0,0),1)
y_point = np.append(np.insert(tpr,0,0),1)
auc_score = metrics.auc(x_point[np.argsort(x_point)], y_point[np.argsort(x_point)])
plot_label = 'MBC'
print "Roc curve"
plt.plot(fpr, tpr,'.k', label=plot_label)
plt.legend(loc=4, numpoints=1, scatterpoints=1,fontsize='x-large')
fontsize=15
plt.plot([0,0,1], [0,1,1], '-.k')
plt.plot([0,1], [0,1], '-.k')
plt.xlabel('False Positive Rate', fontsize=fontsize)
plt.ylabel('True Positive Rate', fontsize=fontsize)
plt.show()
| 5,965 |
telgen.py
|
vsr2158/sapros
| 0 |
2171164
|
'''
Sapro Config generator tool
'''
from utils import convert_ip_to_mac
from utils import ip_normalizer
with open('/Users/vijshekh/PycharmProjects/sapros/18.20.40.2.tel', "r") as f:
lines = f.readlines()
svariable = raw_input ("Enter s-variable in format X or XX or XXX : ")
evariable = raw_input ("Enter e-variable in format X or XX or XXX : ")
sapro_ip_range = raw_input ("Enter Sapro IP range in format x.x.x : ")
sapro_ip_split = sapro_ip_range.split(".")
sapro_first_octect = sapro_ip_split[0]
sapro_second_octect = sapro_ip_split[1]
sapro_third_octect = sapro_ip_split[2]
print ("sapro_first_octect : %s" %sapro_first_octect)
print ("sapro_second_octect : %s" %sapro_second_octect)
print ("sapro_third_octect : %s" %sapro_third_octect)
if len((sapro_first_octect)) > 4:
exit(0)
sapro_first_octect_normalized = ip_normalizer(sapro_first_octect)
sapro_second_octect_normalized = ip_normalizer(sapro_second_octect)
sapro_third_octect_normalized = ip_normalizer(sapro_third_octect)
print ("sapro_first_octect_normalized : %s" %sapro_first_octect_normalized)
print ("sapro_second_octect_normalized : %s" %sapro_second_octect_normalized)
print ("sapro_third_octect_normalized : %s" %sapro_third_octect_normalized)
for i in range(int(svariable), int(evariable)):
variable = str(i)
digits = [int(x) for x in str(variable)]
print digits
client_addr = sapro_second_octect + "." + sapro_third_octect + "." + variable
client_macf = convert_ip_to_mac(client_addr)
client_vlan = sapro_third_octect + variable
client_vlan = client_vlan[0:4]
sapro_ip = sapro_ip_range + "." + variable
sapro_loppback_ip = client_addr + ".1"
sapro_mac = convert_ip_to_mac(sapro_ip)
out_file = "{}.tel".format(sapro_ip)
print "CLIENT MAC == " + client_macf
print "CLIENT ADDR == " + client_addr
print "CLIENT VLAN == " + client_vlan
print "SAPRO INSTANCE IP == " + sapro_ip
print "SAPRO INSTANCE Lo0 IP == " + sapro_loppback_ip
print "SAPRO INSTANCE MAC == " + sapro_mac
print ("Generating files based on input variable : " + variable)
w = open(out_file, 'w')
for l in lines:
lnew = l.replace('0200.4000.2', client_macf).replace('20.40.2', client_addr).replace('2002', client_vlan)\
.replace('18.20.40.2',sapro_ip).replace('0180.2004.002', sapro_mac).replace('20.40.2.1', sapro_loppback_ip)
w.write(lnew)
print "++++ DONE One file ++++"
| 2,718 |
stl/tree/nodes/signalnodes/signalnode.py
|
pieter-hendriks/STL-monitoring
| 0 |
2171586
|
""" Implementation for a SignalNode. """
from ..node import Node
from ....signals import SignalList, Signal, BooleanSignal
class SignalNode(Node):
""" Class representing a Signal from an STL formula in as an AST node. """
def __init__(self):
super().__init__()
self.signalName = None
def processToken(self, token: str) -> None:
self.signalName = str(token)
def booleanValidate(self, signals: SignalList, plot: bool) -> BooleanSignal:
signal = signals.getByName(self.signalName)
if not isinstance(signal, BooleanSignal):
# Should be only other option
assert isinstance(signal, Signal)
signal = BooleanSignal.fromSignal(signal)
return signal
def quantitativeValidate(self, signals: SignalList, plot: bool) -> Signal:
signal = signals.getByName(self.signalName)
if isinstance(signal, BooleanSignal):
signal = Signal.fromBooleanSignal(signal)
signal.recomputeDerivatives()
return signal
def text(self) -> str:
return 'Signal: ' + self.signalName
| 988 |
nn_dataflow/tests/unit_test/test_scheduling_result.py
|
afinci/nn_dataflow
| 5 |
2171200
|
""" $lic$
Copyright (C) 2016-2017 by The Board of Trustees of Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
If you use this program in your research, we request that you reference the
TETRIS paper ("TETRIS: Scalable and Efficient Neural Network Acceleration with
3D Memory", in ASPLOS'17. April, 2017), and that you send us a citation of your
work.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import unittest
from collections import OrderedDict
from nn_dataflow.core import DataLayout
from nn_dataflow.core import FmapRange, FmapRangeMap
from nn_dataflow.core import NodeRegion
from nn_dataflow.core import PhyDim2
from nn_dataflow.core import SchedulingResult
class TestSchedulingResult(unittest.TestCase):
''' Tests for SchedulingResult. '''
def setUp(self):
self.dict_loop = OrderedDict([('cost', 1.234),
('time', 123.4),
('ops', 1234),
('access', [[2, 3, 4],
[30, 40, 50],
[400, 500, 600],
[5000, 6000, 7000]]),
])
self.dict_part = OrderedDict([('cost', 9.876),
('total_nhops', [123, 456, 789]),
])
frmap = FmapRangeMap()
frmap.add(FmapRange((0, 0, 0, 0), (2, 4, 16, 16)), (PhyDim2(0, 0),))
self.ofmap_layout = DataLayout(origin=PhyDim2(0, 0), frmap=frmap,
type=NodeRegion.DATA)
def test_valid_args(self):
''' Valid arguments. '''
result = SchedulingResult(dict_loop=self.dict_loop,
dict_part=self.dict_part,
ofmap_layout=self.ofmap_layout)
self.assertIn('ops', result.dict_loop)
self.assertIn('total_nhops', result.dict_part)
self.assertEqual(result.ofmap_layout, self.ofmap_layout)
def test_invalid_dict_loop(self):
''' Invalid dict_loop. '''
with self.assertRaisesRegexp(TypeError,
'SchedulingResult: .*dict_loop.*'):
_ = SchedulingResult(dict_loop={},
dict_part=self.dict_part,
ofmap_layout=self.ofmap_layout)
def test_invalid_dict_part(self):
''' Invalid dict_part. '''
with self.assertRaisesRegexp(TypeError,
'SchedulingResult: .*dict_part.*'):
_ = SchedulingResult(dict_loop=self.dict_loop,
dict_part={},
ofmap_layout=self.ofmap_layout)
def test_invalid_ofmap_layout(self):
''' Invalid ofmap_layout. '''
with self.assertRaisesRegexp(TypeError,
'SchedulingResult: .*ofmap_layout.*'):
_ = SchedulingResult(dict_loop=self.dict_loop,
dict_part=self.dict_part,
ofmap_layout=None)
def test_total_cost(self):
''' Accessor total_cost. '''
result = SchedulingResult(dict_loop=self.dict_loop,
dict_part=self.dict_part,
ofmap_layout=self.ofmap_layout)
self.assertAlmostEqual(result.total_cost, 1.234 + 9.876)
def test_total_time(self):
''' Accessor total_time. '''
result = SchedulingResult(dict_loop=self.dict_loop,
dict_part=self.dict_part,
ofmap_layout=self.ofmap_layout)
self.assertAlmostEqual(result.total_time, 123.4)
def test_total_ops(self):
''' Accessor total_ops. '''
result = SchedulingResult(dict_loop=self.dict_loop,
dict_part=self.dict_part,
ofmap_layout=self.ofmap_layout)
self.assertEqual(result.total_ops, 1234)
def test_total_accesses(self):
''' Accessor total_cost. '''
result = SchedulingResult(dict_loop=self.dict_loop,
dict_part=self.dict_part,
ofmap_layout=self.ofmap_layout)
self.assertSequenceEqual(result.total_accesses,
[9, 120, 1500, 18000])
def test_total_noc_hops(self):
''' Accessor total_noc_hops. '''
result = SchedulingResult(dict_loop=self.dict_loop,
dict_part=self.dict_part,
ofmap_layout=self.ofmap_layout)
self.assertEqual(result.total_noc_hops, 1368)
| 5,266 |
config.py
|
dataplayer12/tracking
| 0 |
2170984
|
FPS = 24
F_0 = 0.5
AUDIO_RATE = 44100
FOURCC= [*'mp4v']
FIND_OSCILLATING=True
with open('temp/template_path.txt','r') as f:
last_template=f.read()
BASEDIR='/home/sandhulab/Dropbox/BioCloud' #a new folder should be created here for analysis
biosensing_flag='ok' #after uploading videos to a folder,
#create a folder named 'ok' (case insensitive) to begin analysis
gui_flag=True #set to False if your computer does not have a monitor connected to it
timeout=900 #in seconds
NUM_FRAMES_IN_HISTORY=2 #first 2 frames are used to initialize the objects being tracked
MAX_KALMAN_LEARNING_TIME=30 #how much time to allow kalman filter to learn motion model
cropwindow=(1920,1080)
delay=5 #time interval at which folders will be checked
stopfile='./service_running.txt' #this file will be created automatically.
#If you want to stop the server, delete this file. This will make sure that
#if some analysis job is running, it is finished successfully before stopping the server
| 982 |
hs_geo_raster_resource/patches/manual_custom_migration_raster_ori_cov_meta_update_20161121.py
|
hydroshare/hydroshare
| 178 |
2169173
|
# This script is to update original coverage metadata to add crs string and crs datum info
# to all raster resources (github issue #1520)
# This should be run after model migration of 0006_auto_20161129_0121.py
# how to run:
# docker exec -i hydroshare python manage.py shell \
# < "hs_geo_raster_resource/patches/manual_custom_migration_raster_ori_cov_meta_update_20161121.py"
# Note: use "-i" instead of "-it" in above command as
# the latter may cause error "cannot enable tty mode on non tty input"
import os
import shutil
import tempfile
from hs_core.hydroshare.utils import resource_modified, get_file_from_irods
from hs_file_types import raster_meta_extract
from hs_geo_raster_resource.models import RasterResource
copy_res_fail = []
meta_update_fail = []
meta_update_success = []
# start migration for each raster resource that has raster files
for res in RasterResource.objects.all():
# copy all the resource files to temp dir
temp_dir = ''
res_file_tmp_path = ''
try:
temp_dir = tempfile.mkdtemp()
for res_file in res.files.all():
res_file_tmp_path = get_file_from_irods(res_file)
shutil.copy(res_file_tmp_path,
os.path.join(temp_dir, os.path.basename(res_file_tmp_path)))
shutil.rmtree(os.path.dirname(res_file_tmp_path))
vrt_file_path = [os.path.join(temp_dir, f)
for f in os.listdir(temp_dir) if '.vrt' == f[-4:]].pop()
except Exception as e:
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
if os.path.isfile(res_file_tmp_path):
shutil.rmtree(os.path.dirname(res_file_tmp_path))
copy_res_fail.append('{}:{}'.format(res.short_id, res.metadata.title.value))
continue
# update the metadata for the original coverage information of all the raster resources
try:
if temp_dir and vrt_file_path:
meta_updated = False
# extract meta.
# the reason to change current working directory to temp_dir is to make sure
# the raster files can be found by Gdal for metadata extraction
# when "relativeToVRT" parameter is set as "0"
ori_dir = os.getcwd()
os.chdir(temp_dir)
res_md_dict = {}
res_md_dict = raster_meta_extract.get_raster_meta_dict(vrt_file_path)
os.chdir(ori_dir)
shutil.rmtree(temp_dir)
# update original coverage information for datum and coordinate string in django
if res_md_dict['spatial_coverage_info']['original_coverage_info'].\
get('datum', None):
res.metadata.originalCoverage.delete()
v = {'value': res_md_dict['spatial_coverage_info']['original_coverage_info']}
res.metadata.create_element('OriginalCoverage', **v)
meta_updated = True
# update the bag if meta is updated
if meta_updated:
resource_modified(res, res.creator)
meta_update_success.append('{}:{}'.format(res.short_id,
res.metadata.title.value))
except Exception as e:
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
meta_update_fail.append('{}:{}'.format(res.short_id, res.metadata.title.value))
print((str(e)))
print(res_md_dict)
print(('Copy Fail Number: {} List: {}'.format(len(copy_res_fail), copy_res_fail)))
print(('Success Number: {} List {}'.format(len(meta_update_success), meta_update_success)))
print(('Update Fail Number: {} List {}'.format(len(meta_update_fail), meta_update_fail)))
| 3,696 |
src/models/architectures/efflab.py
|
AlessandroRuzzi/Computational-Intelligence-Lab-2021
| 0 |
2171656
|
from torch import Tensor, nn
from .decoders.deeplabv3plus import DeepLabV3PlusDecoder
from .encoders.efficientnet import EfficientNetEncoder
class EffLab(nn.Module):
def __init__(
self,
in_channels: int = 3,
out_channels: int = 1,
encoder_name: str = "efficientnet-b0",
decoder_out_channels: int = 256,
) -> None:
super().__init__()
self.encoder = EfficientNetEncoder(name=encoder_name, dilated=True)
self.decoder = DeepLabV3PlusDecoder(
in_channels=self.encoder.out_channels,
out_channels=decoder_out_channels,
dilations=[12, 24, 36],
)
self.head = nn.Sequential(
nn.Conv2d(in_channels=decoder_out_channels, out_channels=out_channels, kernel_size=1),
nn.UpsamplingBilinear2d(scale_factor=4),
)
def forward(self, x: Tensor) -> Tensor:
xs = self.encoder(x)
x = self.decoder(xs)
return self.head(x)
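# Usage sketch (illustrative smoke test with random data): assumes the encoder/decoder modules
# above resolve "efficientnet-b0" and that the decoder output stride is 4, which the x4
# upsampling head compensates for.
if __name__ == "__main__":
    import torch
    model = EffLab(in_channels=3, out_channels=1)
    dummy = torch.randn(2, 3, 256, 256)   # batch of two 256x256 RGB images
    out = model(dummy)
    print(out.shape)                      # expected: torch.Size([2, 1, 256, 256])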
| 977 |
paperoni/commands/command_search.py
|
notoraptor/paperoni
| 88 |
2171269
|
from coleo import Option, default, tooled
from ..io import PapersFile, ResearchersFile
from ..papers import Paper
from .interactive import InteractiveCommands, default_commands
from .searchutils import search
search_commands = InteractiveCommands("Enter a command", default="s")
@search_commands.register("b", "[b]ibtex")
def _b(self, paper, **_):
"""Generate bibtex"""
print(paper.bibtex())
return None
@search_commands.register("p", "[p]df")
def _p(self, paper, **_):
"""Download the PDF"""
if not paper.download_pdf():
print("No PDF direct download link is available for this paper.")
print(
"Try to follow the paper's URLs (see the complete list"
" with the l command)"
)
return None
@search_commands.register("s", "[s]kip")
def _s(self, paper, **_):
"""Skip and see the next paper"""
return True
search_commands_with_coll = search_commands.copy()
@search_commands_with_coll.register("r", "[r]emove")
def _r(self, paper, collection):
"""Remove the paper from the collection"""
collection.exclude(paper)
print(f"Removed '{paper.title}' from collection")
return True
search_commands.update(default_commands)
search_commands_with_coll.update(default_commands)
@tooled
def command_search():
"""Query the Microsoft Academic database."""
# File containing the collection
# [alias: -c]
collection: Option & PapersFile = default(None)
# Researchers file (JSON)
# [alias: -r]
researchers: Option & ResearchersFile = default(None)
# Command to run on every paper
command: Option = default(None)
# Display long form for each paper
long: Option & bool = default(False)
papers = search(collection=collection, researchers=researchers)
sch = search_commands if collection is None else search_commands_with_coll
for paper in papers:
instruction = sch.process_paper(
paper,
command=command,
collection=collection,
formatter=Paper.format_term_long if long else Paper.format_term,
)
if instruction is False:
break
if collection is not None:
collection.save()
| 2,216 |
aicup-python/model/custom_data.py
|
arijitgupta42/RAIC-2019
| 0 |
2170924
|
class CustomData:
@staticmethod
def read_from(stream):
discriminant = stream.read_int()
if discriminant == Log.TAG:
return CustomData.Log.read_from(stream)
if discriminant == Rect.TAG:
return CustomData.Rect.read_from(stream)
if discriminant == Line.TAG:
return CustomData.Line.read_from(stream)
if discriminant == Polygon.TAG:
return CustomData.Polygon.read_from(stream)
if discriminant == PlacedText.TAG:
return CustomData.PlacedText.read_from(stream)
raise Exception("Unexpected discriminant value")
class Log(CustomData):
TAG = 0
def __init__(self, text):
self.text = text
@staticmethod
def read_from(stream):
text = stream.read_string()
return Log(text)
def write_to(self, stream):
stream.write_int(self.TAG)
stream.write_string(self.text)
def __repr__(self):
return "Log(" + \
repr(self.text) + \
")"
CustomData.Log = Log
from .vec2_float import Vec2Float
from .vec2_float import Vec2Float
from .color_float import ColorFloat
class Rect(CustomData):
TAG = 1
def __init__(self, pos, size, color):
self.pos = pos
self.size = size
self.color = color
@staticmethod
def read_from(stream):
pos = Vec2Float.read_from(stream)
size = Vec2Float.read_from(stream)
color = ColorFloat.read_from(stream)
return Rect(pos, size, color)
def write_to(self, stream):
stream.write_int(self.TAG)
self.pos.write_to(stream)
self.size.write_to(stream)
self.color.write_to(stream)
def __repr__(self):
return "Rect(" + \
repr(self.pos) + "," + \
repr(self.size) + "," + \
repr(self.color) + \
")"
CustomData.Rect = Rect
from .vec2_float import Vec2Float
from .vec2_float import Vec2Float
from .color_float import ColorFloat
class Line(CustomData):
TAG = 2
def __init__(self, p1, p2, width, color):
self.p1 = p1
self.p2 = p2
self.width = width
self.color = color
@staticmethod
def read_from(stream):
p1 = Vec2Float.read_from(stream)
p2 = Vec2Float.read_from(stream)
width = stream.read_float()
color = ColorFloat.read_from(stream)
return Line(p1, p2, width, color)
def write_to(self, stream):
stream.write_int(self.TAG)
self.p1.write_to(stream)
self.p2.write_to(stream)
stream.write_float(self.width)
self.color.write_to(stream)
def __repr__(self):
return "Line(" + \
repr(self.p1) + "," + \
repr(self.p2) + "," + \
repr(self.width) + "," + \
repr(self.color) + \
")"
CustomData.Line = Line
from .colored_vertex import ColoredVertex
class Polygon(CustomData):
TAG = 3
def __init__(self, vertices):
self.vertices = vertices
@staticmethod
def read_from(stream):
vertices = []
for _ in range(stream.read_int()):
vertices_element = ColoredVertex.read_from(stream)
vertices.append(vertices_element)
return Polygon(vertices)
def write_to(self, stream):
stream.write_int(self.TAG)
stream.write_int(len(self.vertices))
for element in self.vertices:
element.write_to(stream)
def __repr__(self):
return "Polygon(" + \
repr(self.vertices) + \
")"
CustomData.Polygon = Polygon
from .vec2_float import Vec2Float
from .text_alignment import TextAlignment
from .color_float import ColorFloat
class PlacedText(CustomData):
TAG = 4
def __init__(self, text, pos, alignment, size, color):
self.text = text
self.pos = pos
self.alignment = alignment
self.size = size
self.color = color
@staticmethod
def read_from(stream):
text = stream.read_string()
pos = Vec2Float.read_from(stream)
alignment = TextAlignment(stream.read_int())
size = stream.read_float()
color = ColorFloat.read_from(stream)
return PlacedText(text, pos, alignment, size, color)
def write_to(self, stream):
stream.write_int(self.TAG)
stream.write_string(self.text)
self.pos.write_to(stream)
stream.write_int(self.alignment)
stream.write_float(self.size)
self.color.write_to(stream)
def __repr__(self):
return "PlacedText(" + \
repr(self.text) + "," + \
repr(self.pos) + "," + \
repr(self.alignment) + "," + \
repr(self.size) + "," + \
repr(self.color) + \
")"
CustomData.PlacedText = PlacedText
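# Round-trip sketch for the TAG-based dispatch above. It assumes a stream object
# exposing the read_int/read_string/write_int/write_string methods used by this
# module (the game runner supplies one; "BufferStream" here is a hypothetical stand-in):
#
#   out = BufferStream()
#   CustomData.Log("hello").write_to(out)          # writes TAG (0), then the string
#   data = CustomData.read_from(BufferStream(out.getvalue()))
#   assert isinstance(data, CustomData.Log) and data.text == "hello"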
| 4,802 |
RaspberryPi/test/tsl2561.py
|
ajaichemmanam/greenServer
| 3 |
2169944
|
import smbus
import time
# Get I2C bus
bus = smbus.SMBus(1)
# TSL2561 address, 0x39(57)
# Select control register, 0x00(00) with command register, 0x80(128)
# 0x03(03) Power ON mode
bus.write_byte_data(0x39, 0x00 | 0x80, 0x03)
# TSL2561 address, 0x39(57)
# Select timing register, 0x01(01) with command register, 0x80(128)
# 0x02(02) Nominal integration time = 402ms
bus.write_byte_data(0x39, 0x01 | 0x80, 0x02)
time.sleep(0.5)
# Read data back from 0x0C(12) with command register, 0x80(128), 2 bytes
# ch0 LSB, ch0 MSB
data = bus.read_i2c_block_data(0x39, 0x0C | 0x80, 2)
# Read data back from 0x0E(14) with command register, 0x80(128), 2 bytes
# ch1 LSB, ch1 MSB
data1 = bus.read_i2c_block_data(0x39, 0x0E | 0x80, 2)
# Convert the data
ch0 = data[1] * 256 + data[0]
ch1 = data1[1] * 256 + data1[0]
# Output data to screen
print("Full Spectrum(IR + Visible) :%d lux" %ch0)
print("Infrared Value :%d lux" %ch1)
print("Visible Value :%d lux" %(ch0 - ch1))
| 964 |
my_games/snake/lib/model/block.py
|
hsadler/learn-pygame
| 0 |
2170978
|
from lib.model.game_model import GameModel
# block game object
class Block(GameModel):
def __init__(
self,
game,
surface,
parent,
x_pos,
y_pos,
grid_index,
collidable=False,
color=None,
stroke_color=None,
stroke_width=None
):
super().__init__(
game=game,
surface=surface,
parent=parent,
x_pos=x_pos,
y_pos=y_pos,
collidable=collidable
)
self.grid_index = grid_index
self.color = color
self.stroke_color = stroke_color
self.stroke_width = stroke_width
def draw(self):
# block stroke fill
stroke_rect = self.surface.get_rect()
self.surface.fill(color=self.stroke_color, rect=stroke_rect)
# block color fill
block_color_rect = self.surface.get_rect().inflate(
self.stroke_width * -1,
self.stroke_width * -1
)
self.surface.fill(color=self.color, rect=block_color_rect)
def get_grid_index(self):
return self.grid_index
def set_appearance(self, color, stroke_color, stroke_width):
self.set_color(color)
self.set_stroke_color(stroke_color)
self.set_stroke_width(stroke_width)
def set_color(self, color):
self.color = color
def get_color(self):
return self.color
def set_stroke_color(self, stroke_color):
self.stroke_color = stroke_color
def get_stroke_color(self):
return self.stroke_color
def set_stroke_width(self, stroke_width):
self.stroke_width = stroke_width
def get_stroke_width(self):
return self.stroke_width
def get_string_formatted_grid_index(self):
x, y = self.get_grid_index()
return "x={0}__y={1}".format(x, y)
def inspect(self):
print({
'rect': self.get_pos_rect(),
'pos': [self.x, self.y],
'collidable': self.collidable
})
| 1,670 |
backend/app/app/models/__init__.py
|
AndreyKlychnikov/startup-together
| 0 |
2171317
|
from .item import Item
from .project import Project
from .user import User, UserProfile
from .project_membership import ProjectMembership
| 138 |
smtenv/envs/avoidgame.py
|
0xSMT/smt-env
| 0 |
2171525
|
# <NAME> (2020)
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame, math, sys, random
import pygame.constants
from smtenv.basegame import BaseGame
# from gym.spaces import Box, Dict
def unitvec(x, y):
mag = math.sqrt(x * x + y * y)
if mag == 0:
return 0, 0
else:
return (x / mag), (y / mag)
def rescale(x, y, scale):
return scale * x, scale * y
def clamp(x, mini, maxi):
return max(min(x, maxi), mini)
class Entity(pygame.sprite.Sprite):
def __init__(self, max_speed, radius, fric, SCREEN_BOUNDS, x, y):
self.max_speed = max_speed
self.radius = radius
self.fric = fric
self.vx = 0
self.vy = 0
self.ax = 0
self.ay = 0
self.SCREEN_BOUNDS = SCREEN_BOUNDS
pygame.sprite.Sprite.__init__(self)
image = pygame.surface.Surface((2 * radius, 2 * radius))
image.fill((0, 0, 0, 0))
image.set_colorkey((0, 0, 0))
self.x = x
self.y = y
self.image = image
def speed(self):
return math.sqrt(self.vx ** 2 + self.vy ** 2)
def update(self, dt):
uvx, uvy = unitvec(self.vx, self.vy)
if uvx != 0 or uvy != 0:
self.dirx = uvx
self.diry = uvy
uvx, uvy = rescale(uvx, uvy, -self.fric * (self.speed() / self.max_speed))
self.vx = self.vx + (self.ax + uvx) * dt
self.vy = self.vy + (self.ay + uvy) * dt
spd = self.speed()
if spd > self.max_speed:
self.vx = (self.vx / spd) * self.max_speed
self.vy = (self.vy / spd) * self.max_speed
elif spd < 0.5:
self.vx = 0
self.vy = 0
self.x = self.x + self.vx * dt
self.y = self.y + self.vy * dt
self.x = clamp(self.x, 0 + self.radius, self.SCREEN_BOUNDS[0] - self.radius)
self.y = clamp(self.y, 0 + self.radius, self.SCREEN_BOUNDS[1] - self.radius)
def draw(self, screen):
screen.blit(self.image, (self.x - self.radius, self.y - self.radius))
class Player(Entity):
def __init__(self, max_speed, radius, fric, SCREEN_BOUNDS):
super(Player, self).__init__(max_speed, radius, fric, SCREEN_BOUNDS, SCREEN_BOUNDS[0] // 2, SCREEN_BOUNDS[1] // 2)
pygame.draw.circle(
self.image,
(0, 255, 0),
(radius, radius),
radius,
0
)
class Enemy(Entity):
def __init__(self, max_speed, radius, fric, SCREEN_BOUNDS, x, y, acc):
super(Enemy, self).__init__(max_speed, radius, fric, SCREEN_BOUNDS, x, y)
self.acc = acc
pygame.draw.circle(
self.image,
(255, 0, 0),
(radius, radius),
radius,
0
)
def update(self, dt, px, py):
ux, uy = unitvec(-self.x + px, -self.y + py)
ux, uy = rescale(ux, uy, self.acc)
self.ax = ux
self.ay = uy
super(Enemy, self).update(dt)
class AvoidGame(BaseGame):
# REQUIRED method
def __init__(self, width=200, height=200):
# Actions the player agent in the simulation can take
actions = {
"left": pygame.constants.K_LEFT,
"right": pygame.constants.K_RIGHT,
"up": pygame.constants.K_UP,
"down": pygame.constants.K_DOWN
}
        # Simulation specific initialization (include here stuff for initializing the class itself)
config = {
'enemy': {
'radius': 10,
'max_speed': 30,
'acc': 800
},
'player': {
'radius': 20,
'max_speed': 50,
'acc': 900
},
'fric': 400,
'width': width,
'height': height
}
# Run the initialization on the base class
BaseGame.__init__(self, width, height, actions=actions, config=config)
def _handle_player_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
key = event.key
if key == self.actions["left"]:
self.player.ax = -self.config['player']['acc']
elif key == self.actions["right"]:
self.player.ax = self.config['player']['acc']
elif key == self.actions["up"]:
self.player.ay = -self.config['player']['acc']
elif key == self.actions["down"]:
self.player.ay = self.config['player']['acc']
elif event.type == pygame.KEYUP:
key = event.key
if key == self.actions["left"] and self.player.vx < 0:
self.player.ax = 0
elif key == self.actions["right"] and self.player.vx > 0:
self.player.ax = 0
elif key == self.actions["up"] and self.player.vy < 0:
self.player.ay = 0
elif key == self.actions["down"] and self.player.vy > 0:
self.player.ay = 0
# REQUIRED method
def init(self):
self.score = 0
        # Simulation specific initialization (include here stuff for initializing a new game
        # in this class, since a class instance can run its game multiple times)
self.player = Player(
self.config['player']['max_speed'],
self.config['player']['radius'],
self.config['fric'],
(self.config['width'], self.config['height'])
)
x = round((self.config['width'] / 4) * random.random())
y = round((self.config['height'] / 4) * random.random())
self.enemy = Enemy(
self.config['enemy']['max_speed'],
self.config['enemy']['radius'],
self.config['fric'],
(self.config['width'], self.config['height']),
x, # TODO: Make it generate some distance from player
y, # (random dist, random theta => convert to x, y)
self.config['enemy']['acc']
)
self.lives = 1
# REQUIRED method
def get_score(self):
return self.score
# REQUIRED method
def is_game_over(self):
# Simulation specific game over condition
return self.lives == 0
# REQUIRED method -- body of game itself
def step(self, dt):
# Adjust score
# Update game state for checking game_over
        # (NOTE: The game can be locked to a certain FPS (keeping dt constant) by
        # setting self.allowed_fps to the desired value)
dt /= 1000
self.screen.fill((0, 0, 0))
self._handle_player_events()
self.player.update(dt)
# self.player.draw(self.screen)
self.enemy.update(dt, self.player.x, self.player.y)
dist = math.hypot(self.player.x - self.enemy.x, self.player.y - self.enemy.y) - (self.player.radius + self.enemy.radius)
if dist < 0:
self.lives = 0
# elif self.player.x
# self.enemy.draw(self.screen)
self.score += dt * dist
def draw(self):
self.player.draw(self.screen)
self.enemy.draw(self.screen)
# REQUIRED method (if you want the state space to be not the screen itself, highly
# advised for simulation purposes)
def get_game_state(self):
# query for particular state information here
state = {
"player_x": self.player.x,
"player_y": self.player.y,
"player_vx": self.player.vx,
"player_vy": self.player.vy,
"enemy_x": self.enemy.x,
"enemy_y": self.enemy.y,
"enemy_vx": self.enemy.vx,
"enemy_vy": self.enemy.vy
}
return state
if __name__ == "__main__":
import numpy as np
game = AvoidGame(width=256, height=256)
game.setup(display=True)
game.init()
while True:
dt = game.clock.tick_busy_loop(30)
if game.is_game_over():
game.reset()
game.step(dt)
game.draw()
pygame.display.update()
| 8,312 |
setup.py
|
zmitchell/trcdproc
| 0 |
2169973
|
import io
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
readme = '\n' + f.read()
# with open('README.md') as readme_file:
# readme = readme_file.read()
requirements = [
'h5py>=2.7.0',
'numpy>=1.13.1',
'scipy>=0.19.1',
]
test_requirements = [
'pytest',
]
setup(
name='trcdproc',
version='0.0.0',
description="This is a library for handling and processing TRCD data stored in HDF5 files.",
long_description=readme + '\n\n',
author="<NAME>",
author_email='<EMAIL>',
url='https://github.com/zmitchell/trcdproc',
    packages=find_packages(exclude=['tests']),
package_dir={'trcdproc':
'trcdproc'},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='trcdproc',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6'
],
test_suite='tests',
tests_require=test_requirements,
entry_points={
'console_scripts': [
'trcdproc = trcdproc.__main__:main'
]
}
)
| 1,549 |
src/merge_sorted_linked_lists.py
|
redfast00/daily-algorithm-challenge
| 0 |
2170793
|
import heapq
def merge_sorted_linked_lists(*nodes):
'''Merges sorted linked lists into one linked list.
>>> from utils.linked_list import Node
>>> first = Node.from_list([1,2,3,5,8])
>>> second = Node.from_list([2,3,5,6,7,10])
>>> merge_sorted_linked_lists(first, second).to_list()
[1, 2, 2, 3, 3, 5, 5, 6, 7, 8, 10]
'''
# Added idx to have an additional sorting parameter to prevent
# nodes from linked lists getting compared
priority_queue = [(node.value, idx, node) for idx, node in enumerate(nodes)]
heapq.heapify(priority_queue)
first = get_first_and_update(priority_queue)
current = first
while priority_queue:
node = get_first_and_update(priority_queue)
current.next = node
current = node
return first
def get_first_and_update(priority_queue):
(value, idx, node) = heapq.heappop(priority_queue)
new_node = node.next
if new_node is not None:
heapq.heappush(priority_queue, (new_node.value, idx, new_node))
return node
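# Why the idx tiebreaker matters: when two heap entries share the same value,
# heapq compares the next tuple element. Without idx that would be the Node
# objects themselves, which raises TypeError for classes with no ordering defined.
# A quick illustration with a hypothetical bare-bones Node:
#
#   import heapq
#   class Node:
#       def __init__(self, value, next=None):
#           self.value, self.next = value, next
#   heapq.heapify([(1, Node(1)), (1, Node(1))])          # TypeError: '<' not supported
#   heapq.heapify([(1, 0, Node(1)), (1, 1, Node(1))])    # fine: idx breaks the tie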
| 1,039 |
src/generate/export/xtemp/configures.py
|
jwpttcg66/ExcelToTransfer
| 1 |
2170364
|
# -*- coding: utf-8 -*-
configures = {
"example": {"arguments": {"describe": "这是一张范例表", "multiKey": False, "version": 100, }, "types": {"ID": (0, "ID", "编号", "int", ), "describe": (2, "describe", "描述", "String", ), "drops": (4, "drops", "掉落关卡", "String", ), "name": (1, "name", "名称", "String", ), "quality": (3, "quality", "品质", "int", ), }, },
}
| 350 |
BOJ2740.py
|
INYEONGKIM/BOJ
| 2 |
2167660
|
# BOJ 2740: multiply an n x m matrix A by an m x k matrix B and print the product
n, m = map(int, input().split())
a = [[int(j) for j in input().split()] for _ in range(n)]
m, k = map(int, input().split())
b = [[int(j) for j in input().split()] for _ in range(m)]
r = [[0] * k for _ in range(n)]
res = ""
for i in range(n):
    for j in range(k):
        for x in range(m):
            r[i][j] += a[i][x] * b[x][j]
        res += str(r[i][j])
        res += " " if j < k - 1 else "\n"
print(res, end="")
| 478 |
tests/decorate_client_test.py
|
drolando/swagger_zipkin
| 11 |
2171631
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import mock
import pytest
from swagger_zipkin.decorate_client import decorate_client
def test_decorate_client_non_attr():
client = object()
with pytest.raises(AttributeError):
decorate_client(client, mock.Mock(), 'attr')
def test_decorate_client_non_callable():
client = mock.Mock()
client.attr = 1
decorated = decorate_client(client, mock.Mock(), 'attr')
assert client.attr == decorated
def test_decorate_client_callable_being_invoked():
def foo(a, b, c):
pass
client = mock.Mock()
client.attr = foo
decorated_foo = mock.Mock()
decorated_callable = decorate_client(client, decorated_foo, 'attr')
assert decorated_callable.operation == foo
# Ensure that it's `decorated_foo` being called, not `foo`
decorated_callable()
decorated_foo.assert_called_once_with('attr')
def test_decorate_client_callable_attribute_retrieved():
class Foo(object):
def __init__(self):
self.bar = 'bar'
def __call__(self, a, b, c):
return a + b + c
client = mock.Mock()
client.attr = Foo()
decorated_foo = mock.Mock(return_value=100)
decorated_callable = decorate_client(client, decorated_foo, 'attr')
# `decorated_foo` is called, not `Foo().__call__`
assert decorated_callable(2, 3, 7) == 100
# Foo().bar is accessible after it is decorated
assert decorated_callable.bar == 'bar'
| 1,530 |
enEngineUT.py
|
keshava/kampa
| 1 |
2171134
|
# -*- coding: utf-8 -*-
"""
Copyright 2016 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Created on Wed Nov 16 20:45:40 2016
@author: kdeyev
"""
import enEngine
def runTest(filename):
engine = enEngine.Engine(True)
engine.loadFromFile (filename)
engine.calc()
if __name__ == "__main__":
runTest ("jobs/UT.job")
| 851 |
Check.py
|
Bob-Z/RandoMame
| 2 |
2171754
|
import Config
import XmlGetter
from pathlib import Path
def start():
print("Parsing MAME input")
machine_list, soft_list = XmlGetter.get()
print("Parsing MAME input DONE")
print("File type:", "merged")
needed_rom_file_list = get_needed_rom_file("merged", machine_list)
existing_rom_file = get_existing_rom_file()
print("")
print("Missing ROMS file:")
print("==================")
print("")
for needed in needed_rom_file_list:
if needed not in existing_rom_file:
print(needed)
print("")
print("Useless ROMS file:")
print("==================")
print("")
for existing in existing_rom_file:
if existing not in needed_rom_file_list:
print(existing)
# needed_directory_list = get_dir("merged")
# for soft_list_dir in needed_directory_list:
# needed_soft_file_list = get_soft_file("merged", soft_list_dir)
def get_needed_rom_file(rom_type, machine_xml):
needed_rom_file_list = []
total_machine_qty = 0
total_parent_qty = 0
total_clone_qty = 0
for machine in machine_xml:
total_machine_qty = total_machine_qty + 1
if 'cloneof' in machine.attrib:
total_clone_qty = total_clone_qty + 1
else:
total_parent_qty = total_parent_qty+1
machine_qty = 0
parent_qty = 0
clone_qty = 0
for machine in machine_xml:
machine_qty = machine_qty + 1
if "cloneof" not in machine.attrib: # Not a clone
parent_qty = parent_qty + 1
has_rom = is_machine_need_roms(machine, machine_xml)
need_file = True
if "romof" in machine.attrib: # use BIOS
need_file = False
# Get romof machine XML
romof_machine = None
romof_name = machine.attrib['romof']
# for romof_m in machine_xml:
# if romof_m.attrib['name'] == romof_name:
# romof_machine = romof_m
# break
command = ".//machine[@name=\"" + romof_name + "\"]"
romof_machine = machine_xml.find(command)
# Check if one ROM of current machine is not in romof machine
for rom_current in machine.findall('rom'): # for each current machine's ROMs
if 'status' not in rom_current.attrib or rom_current.attrib['status'] != 'nodump': # if it's dumped
rom_found = False
for romof_rom in romof_machine.findall('rom'):
                            if 'sha1' in romof_rom.attrib:  # check the attribute keys, not the element's children
if romof_rom.attrib['sha1'] == rom_current.attrib['sha1']: # if ROM exists in romof machine
rom_found = True
break
                            elif 'crc' in romof_rom.attrib:
if romof_rom.attrib['crc'] == rom_current.attrib['crc']: # if ROM exists in romof machine
rom_found = True
break
else:
if romof_rom.attrib['name'] == rom_current.attrib['name']: # if ROM exists in romof machine
rom_found = True
break
if rom_found is False:
need_file = True
break
if has_rom is True and need_file is True:
if rom_type == "merged":
if "cloneof" not in machine.attrib: # Not a clone
needed_rom_file_list.append(machine.attrib['name'])
else:
needed_rom_file_list.append(machine.attrib['name'])
else:
clone_qty = clone_qty + 1
if machine_qty % 10 == 0:
            print(int(machine_qty / total_machine_qty * 100), "% : parent = ", parent_qty, "/", total_parent_qty,
                  ", clone = ", clone_qty, "/", total_clone_qty, end='\r')
return needed_rom_file_list
def is_machine_need_roms(parent_machine, machine_xml):
parent_name = parent_machine.attrib['name']
parent_and_clone = [parent_machine]
for clone_machine in machine_xml:
if 'cloneof' in clone_machine.attrib and clone_machine.attrib['cloneof'] == parent_name:
parent_and_clone.append(clone_machine)
has_rom = False
for m in parent_and_clone:
if has_rom is False:
for r in m.findall("rom"):
                if r.attrib.get('status') != 'nodump':  # tolerate ROMs without a status attribute
has_rom = True
break
return has_rom
def get_existing_rom_file():
existing_rom_file = []
path = Path(Config.check)
for file in path.iterdir():
if file.is_file():
existing_rom_file.append(file.stem)
return existing_rom_file
| 5,074 |
src/HavokMud/data_loader.py
|
Beirdo/HavokMud-redux
| 0 |
2171034
|
import json
import logging
import os
import sys
logger = logging.getLogger(__name__)
dataDir = os.path.join(os.getcwd(), "data")
def load_data_file(filename):
filename = os.path.join(dataDir, filename)
try:
with open(filename, "r") as f:
data = json.load(f)
except Exception as e:
logger.error("Exception while loading data: %s" % e)
data = {}
return data
| 413 |
tests/functional/test_removed_from_room.py
|
ciandt/google_hangouts_chat_bot
| 8 |
2169222
|
from google_hangouts_chat_bot.commands import Commands
from google_hangouts_chat_bot.event_handler import EventHandler
from tests.functional.helpers import load_payload
def test_removed_from_room():
payload = load_payload("removed_from_room")
EventHandler(payload, Commands()).process()
| 297 |
tests/utils/observer_test.py
|
dnephin/Tron
| 0 |
2170885
|
from testify import run, setup, assert_equal, TestCase, turtle
from tests.assertions import assert_length
from tron.utils.observer import Observable, Observer
class ObservableTestCase(TestCase):
@setup
def setup_observer(self):
self.obs = Observable()
def test_attach(self):
func = lambda: 1
self.obs.attach('a', func)
assert_equal(len(self.obs._observers), 1)
assert_equal(self.obs._observers['a'], [func])
def test_listen_seq(self):
func = lambda: 1
self.obs.attach(['a', 'b'], func)
assert_equal(len(self.obs._observers), 2)
assert_equal(self.obs._observers['a'], [func])
assert_equal(self.obs._observers['b'], [func])
def test_notify(self):
handler = turtle.Turtle()
self.obs.attach(['a', 'b'], handler)
self.obs.notify('a')
assert_equal(len(handler.handler.calls), 1)
self.obs.notify('b')
assert_equal(len(handler.handler.calls), 2)
class ObserverClearTestCase(TestCase):
@setup
def setup_observer(self):
self.obs = Observable()
func = lambda: 1
self.obs.attach('a', func)
self.obs.attach('b', func)
self.obs.attach(True, func)
self.obs.attach(['a', 'b'], func)
def test_clear_listeners_all(self):
self.obs.clear_observers()
assert_equal(len(self.obs._observers), 0)
def test_clear_listeners_some(self):
self.obs.clear_observers('a')
assert_equal(len(self.obs._observers), 2)
assert_equal(set(self.obs._observers.keys()), set([True, 'b']))
def test_remove_observer_none(self):
observer = lambda: 2
self.obs.remove_observer(observer)
assert_equal(set(self.obs._observers.keys()), set([True, 'a', 'b']))
assert_length(self.obs._observers['a'], 2)
assert_length(self.obs._observers['b'], 2)
assert_length(self.obs._observers[True], 1)
def test_remove_observer(self):
observer = lambda: 2
self.obs.attach('a', observer)
self.obs.attach('c', observer)
self.obs.remove_observer(observer)
assert_length(self.obs._observers['a'], 2)
assert_length(self.obs._observers['b'], 2)
assert_length(self.obs._observers[True], 1)
assert_length(self.obs._observers['c'], 0)
class MockObserver(Observer):
def __init__(self, obs, event):
self.obs = obs
self.event = event
self.watch(obs, event)
self.has_watched = 0
def handler(self, obs, event):
assert_equal(obs, self.obs)
assert_equal(event, self.event)
self.has_watched += 1
class ObserverTestCase(TestCase):
@setup
def setup_observer(self):
self.obs = Observable()
def test_watch(self):
event = "FIVE"
handler = MockObserver(self.obs, event)
self.obs.notify(event)
assert_equal(handler.has_watched, 1)
self.obs.notify("other event")
assert_equal(handler.has_watched, 1)
self.obs.notify(event)
assert_equal(handler.has_watched, 2)
if __name__ == "__main__":
run()
| 3,152 |
Mundo 3/Aulas/Aula 20 - Funcoes Parte 1.py
|
marioarl/python
| 1 |
2169470
|
def lin30():
print('-=' * 30)
a = 4
b = 5
s = a + b
print(s)
a = 8
b = 9
s = a + b
print(s)
a = 2
b = 1
s = a + b
print(s)
lin30()
def soma(a, b):
s = a + b
print(s)
# main program
soma(4, 5)
soma(8, 9)
soma(2, 1)
lin30()
soma(a=4, b=5)
lin30()
soma(b=4, a=5)  # the parameters can also be passed by keyword in any order
lin30()
def somb(a, b):
print(f'A = {a} e B = {b}')
s = a+ b
print(f'A soma A + B = {s}')
somb(b=4, a=5)
lin30()
def contador(*num):
print(num)
contador(2, 1, 7)
contador(8, 0)
contador(4, 4, 7, 6, 2)
lin30()
def contador1(*num):
tam = len(num)
print(f'Recebi os valores {num} e são ao todo {tam} numeros')
contador1(2, 1, 7)
contador1(8, 0)
contador1(4, 4, 7, 6, 2)
| 721 |
examples/AIJ Case D/correlation_comparison.py
|
SimScaleGmbH/external-building-aerodynamics
| 0 |
2171732
|
import pathlib
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
name = "TemoporalIndipendence.csv"
path = pathlib.Path.cwd() / name
df = pd.read_csv(path, index_col=0)
X = np.arange(3)
mpl.rcParams['figure.dpi'] = 2400
fig1, ax = plt.subplots(figsize=(1, 1), dpi=1200)
# ax = fig1.add_axes([0,0,1,1])
test = df.iloc[1].to_numpy()
velocity = ax.bar(X + 0.00, df.iloc[0].to_numpy(), color='b', width=0.25)
tke = ax.bar(X + 0.25, df.iloc[1].to_numpy(), color='r', width=0.25)
ax.set_ylabel("R Value (-)", fontsize=5)
ax.set_xlabel("Temporal Resolution", fontsize=5)
ax.legend(labels=['Velocity', 'TKE'], loc='lower center', fontsize=3, bbox_to_anchor=(0.5, -0.6), frameon=False)
ax.set_ylim(0.5, 1)
ax.bar_label(velocity, padding=3, fontsize=3, rotation=90)
ax.bar_label(tke, padding=3, fontsize=3, rotation=90)
plt.title("Correlations of Different" + "\n" +
"Temporal Resolutions", fontsize=7)
plt.xticks([0, 1, 2], ('Moderate', 'High', 'Very High'), fontsize=3)
plt.yticks(fontsize=3)
plt.savefig("temporal_resolutions.png", bbox_inches='tight')
| 1,111 |
scripts/GwasCatalogDigger.py
|
lorenzo-bioinfo/ms_data_analysis
| 0 |
2171563
|
import requests
import pandas as pd
import urllib.error
import os
''' This module provides a class to work with the downloaded GWAS
Catalog database. I had to write this because I couldn't access
(for whatever reason) the GWAS Catalog REST API documentation.
The class is initialized by downloading the GWAS Catalog
and then extracting the various fields to create a GwasCatalog
object. Getter methods are defined to obtain the required information. '''
url = 'https://www.ebi.ac.uk/gwas/api/search/downloads/alternative'
#catalog class
class GwasCatalog:
def __init__(self, catalog):
self.catalog = catalog
#prints the catalog
def showCatalog(self):
print(self.catalog)
#shows catalog columns indexes
def showAttributes(self):
for column in list(self.catalog.columns):
print(column)
#extracts single column from catalog as pandas Series object
def getColumn(self, index):
series = self.catalog[index]
return series
    #searches a column and returns all rows with matching values
def batchSearch(self, column_name, ids):
results = self.catalog[self.catalog[column_name].isin(ids)]
return results
#same as batchSearch() but returns only selected columns (features)
def batchRetrieve(self, column_name, ids, features):
df = self.catalog[self.catalog[column_name].isin(ids)]
series = [df[column_name]]
for feature in features:
series.append(df[feature])
print(len(series))
dataf = pd.DataFrame(series).T
return dataf
#get catalog from local file
def getCatalog(path):
df = pd.read_csv(path, sep = '\t', header = 0, low_memory = False)
catalog = GwasCatalog(df)
return catalog
#download latest catalog from EBI GwasCatalog
def updatedCatalog(filename, url = url, remove = True):
try:
print('Downloading GWAS catalog')
        print('Depending on your connection speed this may take a few minutes...')
f = requests.get(url).text
print('Download Completed\n')
with open(filename, 'w') as file:
file.write(f)
df = pd.read_csv(filename, sep = '\t', low_memory = False)
if remove:
os.remove(filename)
    except (urllib.error.HTTPError, requests.exceptions.RequestException):  # requests raises RequestException subclasses
print('The URL is no longer valid or the server is unreachable.\n')
url = input('Please insert a new URL: ')
try:
print('Downloading GWAS catalog')
            print('Depending on your connection speed this may take a few minutes...')
f = requests.get(url).text
print('Download Completed\n')
with open(filename, 'w') as file:
file.write(f)
df = pd.read_csv(filename, sep = '\t', low_memory = False)
            if remove:
                os.remove(filename)
        except (urllib.error.HTTPError, requests.exceptions.RequestException):
print('Operation couldn\'t be performed. Exiting...')
exit()
catalog = GwasCatalog(df)
return catalog
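# Minimal usage sketch. The file path and column labels below are illustrative;
# the real labels come from the downloaded associations TSV, so inspect them
# with showAttributes() first:
#
#   catalog = getCatalog('gwas_catalog_associations.tsv')   # or updatedCatalog(...)
#   catalog.showAttributes()
#   hits = catalog.batchRetrieve('SNPS', ['rs7329174'], ['DISEASE/TRAIT', 'P-VALUE'])
#   print(hits)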
| 2,682 |
bot.py
|
FatherUsarox/generadorqr
| 0 |
2163095
|
import os
import time
import logging
from telegram import ChatAction
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, Update
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
ConversationHandler,
)
import qrcode
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
logger = logging.getLogger(__name__)
INPU = range(4)
def start(update, context):
update.message.reply_text("Qué deseas hacer?\n\n Usa \Qr para generar un qr")
def qr_command_handler(update, context):
update.message.reply_text("Envía el texto para generar el qr:\n")
return INPU
def generate_qr(text):
filename = text + ".jpg"
image = qrcode.make(text)
image.save(filename)
return filename
def send_qr(img, chat):
chat.send_action(
action=ChatAction.UPLOAD_PHOTO,
timeout = None
)
chat.send_photo(
photo = open(img, 'rb')
)
    os.unlink(img)
def inputtext(update,context):
#bot.send_message(chat_id=update.message.chat_id, text=update.message.text)
text = update.message.text #here because we want to retrieve the text from the original message and send the same thing back
chat = update.message.chat
filename = generate_qr(text)
send_qr(filename, chat)
return ConversationHandler.END
def main() -> None:
updater = Updater("5202799890:AAESmZj4AR7SsTo6UlJUQYnR5Mbnq-bBWEM")
dp = updater.dispatcher
dp.add_handler(CommandHandler("start",start))
conv_handler = ConversationHandler(
entry_points=[CommandHandler('qr', qr_command_handler)],
states={
INPU: [MessageHandler(Filters.text & ~Filters.command, inputtext)],
},
fallbacks=[],
)
dp.add_handler(conv_handler)
# Start the Bot
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
| 1,963 |
methods/CAE/run_exp.py
|
IvanNik17/Seasonal-Changes-in-Thermal-Surveillance-Imaging
| 2 |
2171515
|
from argparse import ArgumentParser
import os, sys
import cv2
import numpy as np
import pandas as pd
import torch
import pytorch_lightning as pl
from pytorch_lightning import Trainer, loggers
from torchsummary import summary
import torch.nn.functional as F
sys.path.append('../../loaders/pytorch_lightning/')
from datamodule import DataModule
from models.autoencoder import Autoencoder
from argparse import Namespace
def train(model, hparams, logger):
train = pd.read_csv("../../splits/{}_{}_5000.csv".format(hparams.season, hparams.dataset))
train["DateTime"] = pd.to_datetime(train['DateTime'])
hparams.train_selection = train
test = pd.read_csv("../../splits/apr_month.csv")
test["DateTime"] = pd.to_datetime(test['DateTime'])
hparams.test_selection = test
dm = DataModule(hparams)
dm.setup()
print("Training set contains {} samples".format(len(dm.data_train)))
print("with shape {}".format(dm.data_train[0].shape))
# print detailed summary with estimated network size
summary(model, (1, 384, 288), device="cpu")
trainer = Trainer(gpus=hparams.gpus, max_epochs=hparams.max_epochs, logger=logger)
trainer.fit(model, dm)
output_dir = 'trained_models/'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
exp_dir = os.path.join(output_dir,"{}_{}".format(hparams.season, hparams.dataset))
if not os.path.exists(exp_dir):
os.mkdir(exp_dir)
torch.save(model.encoder, os.path.join(exp_dir,"encoder.pt"))
torch.save(model.decoder, os.path.join(exp_dir,"decoder.pt"))
trainer.test(model)
def test(model, hparams, test_sets, show=False):
# runs on CPU
with torch.no_grad():
model.encoder = torch.load("trained_models/{}_{}/encoder.pt".format(hparams.season, hparams.dataset))
model.decoder = torch.load("trained_models/{}_{}/decoder.pt".format(hparams.season, hparams.dataset))
model.encoder.eval()
model.decoder.eval()
hparams.get_metadata = True
for test_set in test_sets:
test = pd.read_csv("../../splits/{}.csv".format(test_set))
test["DateTime"] = pd.to_datetime(test['DateTime'])
hparams.test_selection = test
dm = DataModule(hparams)
dm.setup(stage='test')
results_list = []
for batch_id, batch in enumerate(dm.test_dataloader()):
imgs, paths, metas = batch
encs = model.encoder(imgs)
recs = model.decoder(encs)
for img, path, meta, enc, rec in zip(imgs, paths, metas, encs, recs):
#folder_name, clip_name, image_number, DateTime = meta.split(',')[:4]
results = meta.split(',')
loss = F.mse_loss(rec, img).numpy()
results_list.append(results+[str(np.round(loss, 4))])
if show:
diff = img - rec
diff = torch.abs(diff)
diff = diff[0].mul(255).byte().numpy()
img = img[0].mul(255).byte().numpy()
rec = rec[0].mul(255).byte().numpy()
cv2.imshow("in_vs_rec_vs_diff",cv2.vconcat([img, rec, diff]))
key = cv2.waitKey()
if key == 27:
break
if show:
if key == 27:
break
# save all the new info
results_df = pd.DataFrame.from_records(results_list, columns=['Folder name',
'Clip Name',
'Image Number',
'DateTime',
'Temperature',
'Humidity',
'Precipitation',
'Dew Point',
'Wind Direction',
'Wind Speed',
'Sun Radiation Intensity',
'Min of sunshine latest 10 min',
'MSE'])
results_df.to_csv("trained_models/{}_{}/results_{}.csv".format(hparams.season, hparams.dataset, test_set), index=False)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--exp", type=str, default="month", help="specify which experiment to run: day, week, month")
parser.add_argument("--path", type=str, default="/home/aau/github/data/thermal/sensor_paper", help="specify where images are located")
parser.add_argument("--model", type=str, default="CAE", help="specify which experiment model to use")
parser.add_argument("--train", default=False, action="store_true", help="Set if necessary to train a new model")
parser.add_argument("--show", default=False, action="store_true", help="Set to show input and reconstructions")
args = parser.parse_args()
    # default configuration
hparams = Namespace(**{'model': 'CAE',
'dataset': 'day',
'season': 'feb',
'img_dir': '/home/aau/github/data/thermal/sensor_paper',
'train_selection': None,
'test_selection': None,
'get_metadata': False,
# model
'nc': 1,
'nz': 8,
'nfe': 32,
'nfd': 32,
# training
'log_dir': 'lightning_logs',
'gpus': 1,
'max_epochs': 100,
'learning_rate': 1e-4,
'batch_size': 128,
'num_workers':12})
hparams.model = args.model
hparams.dataset = args.exp
logger = loggers.TensorBoardLogger(hparams.log_dir, name=f"{hparams.season}_{hparams.dataset}", default_hp_metric=False)
model = Autoencoder(hparams)
if args.train:
train(model, hparams, logger)
test_sets = ['jan', 'apr', 'aug']
test(model, hparams, test_sets, show=args.show)
| 6,784 |
tests/unit/test_intercom.py
|
sbarysiuk/python-intercom
| 0 |
2171630
|
#
# Copyright 2012 keyes.ie
#
# License: http://jkeyes.mit-license.org/
#
import os
from . import create_response
from mock import patch
from nose.tools import raises
from unittest import TestCase
from intercom import ServerError
from intercom import AuthenticationError
from intercom import ResourceNotFound
from intercom import Intercom
from intercom.user import CustomData
from intercom.user import SocialProfile
from intercom.user import User
class IntercomUsersTest(TestCase):
@raises(AuthenticationError)
@patch('requests.request', create_response(401))
def test_create_user_identifiers(self):
Intercom.create_user()
@patch('requests.request', create_response(200, 'create_user_valid.json'))
def test_create_valid(self):
resp = Intercom.create_user(email='<EMAIL>')
self.assertEqual(None, resp['user_id'])
self.assertEqual('<EMAIL>', resp['email'])
@raises(AuthenticationError)
@patch('requests.request', create_response(401))
def test_get_user_identifiers(self):
Intercom.get_user()
@patch('requests.request', create_response(200, 'get_user_valid.json'))
def test_get_user_valid(self):
resp = Intercom.get_user(email='<EMAIL>')
self.assertEqual(None, resp['user_id'])
self.assertEqual('<EMAIL>', resp['email'])
@raises(AuthenticationError)
@patch('requests.request', create_response(401))
    def test_update_user_identifiers(self):
Intercom.update_user()
@patch('requests.request', create_response(200, 'update_user_valid.json'))
def test_update_user_valid(self):
resp = Intercom.update_user(
email='<EMAIL>', custom_data={'age': '42'} )
self.assertEqual(None, resp['user_id'])
self.assertEqual('<EMAIL>', resp['email'])
self.assertEqual('42', resp['custom_data']['age'])
@raises(AuthenticationError)
@patch('requests.request', create_response(401))
def test_get_users_identifiers(self):
Intercom.create_user()
@patch('requests.request', create_response(200, 'get_users_valid.json'))
def test_get_users_valid(self):
resp = Intercom.get_users()
self.assertEqual(3, len(resp['users']))
self.assertEqual(3, resp['total_count'])
self.assertEqual(1, resp['total_pages'])
@raises(ResourceNotFound)
@patch('requests.request', create_response(404, '404.json'))
def test_not_found(self):
resp = Intercom.get_users()
@raises(ServerError)
@patch('requests.request', create_response(500, '500.json'))
def test_api_error(self):
resp = Intercom.get_users()
| 2,641 |
python_actr/version.py
|
osaaso1/python_actr
| 0 |
2171329
|
name = "python_actr"
version_info = (1, 9, 2)
dev = True
version = '.'.join([str(x) for x in version_info])
if dev:
version = version + 'dev'
| 146 |
smuthi/__init__.py
|
KMCzajkowski/smuthi
| 3 |
2171722
|
import pkg_resources
import sys
try:
from mpi4py import MPI
mpi_comm = MPI.COMM_WORLD
mpi_rank = mpi_comm.Get_rank()
except:
mpi_rank = 0
def print_smuthi_header():
version = pkg_resources.get_distribution("smuthi").version
welcome_msg = ("\n" + "*" * 32 + "\n SMUTHI version " + version + "\n" + "*" * 32 + "\n")
sys.stdout.write(welcome_msg)
sys.stdout.flush()
if mpi_rank == 0:
print_smuthi_header()
| 446 |
process_results.py
|
mujm/CCS21_GNNattack
| 0 |
2171499
|
import argparse
def get_args():
parser = argparse.ArgumentParser(description='Pytorch graph isomorphism network for graph classification')
#these are parameters for attack model
parser.add_argument('--effective', type=int, default=1)
parser.add_argument('--id', type=int, default=1)
parser.add_argument('--search', type=int, default=1)
#these are parameters for GIN model
parser.add_argument('--dataset', type=str, default="IMDB-BINARY")
args = parser.parse_args()
return args
if __name__ == '__main__':
args = get_args()
dataset_name = args.dataset
effective = int(args.effective)
init_path = './out1/init_{}_{}_{}_{}_'.format(dataset_name,args.id, args.effective, args.search)
with open(init_path+'search_type.txt', 'r') as f:
search_type = eval(f.read())
with open(init_path+'P.txt', 'r') as f:
init_perturbation = eval(f.read())
with open(init_path+'PR.txt', 'r') as f:
init_perturbation_ratio = eval(f.read())
with open(init_path+'D.txt', 'r') as f:
init_distortion = eval(f.read())
with open(init_path+'Q.txt', 'r') as f:
init_query = eval(f.read())
with open(init_path+'T.txt', 'r') as f:
init_time = eval(f.read())
our_path = './out1/our_{}_{}_{}_{}_'.format(dataset_name, args.id, args.effective, args.search)
with open(our_path+'Q.txt', 'r') as f:
our_query = eval(f.read())
with open(our_path+'P.txt', 'r') as f:
our_perturbation = eval(f.read())
with open(our_path+'PR.txt', 'r') as f:
our_perturbation_ratio = eval(f.read())
with open(our_path+'D.txt', 'r') as f:
our_distortion = eval(f.read())
with open(our_path+'T.txt', 'r') as f:
our_time = eval(f.read())
random_path = './out1/ran_{}_{}_{}_{}_'.format(dataset_name, args.id, args.effective, args.search)
with open(random_path+'P.txt', 'r') as f:
ran_perturbation = eval(f.read())
with open(random_path+'PR.txt', 'r') as f:
ran_perturbation_ratio = eval(f.read())
with open(random_path+'T.txt', 'r') as f:
ran_time = eval(f.read())
    # In every results .txt file, a failed attack is stored as -1 and an instance that needs no attack as 0
    # drop the instances that do not need an attack
    # select by query count: entries are either 0 or > 0, because queries are counted even when the attack fails
L = len(our_query)
index = []
for i in range(L):
if our_query[i] > 0:
index.append(i)
print(index)
L = len(index) #number of target graphs
    print('the number of candidate test instances: {}'.format(L))
search_type = [search_type[x] for x in index]
init_distortion = [init_distortion[x] for x in index]
init_perturbation = [init_perturbation[x] for x in index]
init_perturbation_ratio = [init_perturbation_ratio[x] for x in index]
init_query = [init_query[x] for x in index]
init_time = [init_time[x] for x in index]
our_distortion = [our_distortion[x] for x in index]
our_perturbation = [our_perturbation[x] for x in index]
our_perturbation_ratio = [our_perturbation_ratio[x] for x in index]
our_query = [our_query[x] for x in index]
our_time = [our_time[x] for x in index]
ran_perturbation = [ran_perturbation[x] for x in index]
ran_perturbation_ratio = [ran_perturbation_ratio[x] for x in index]
ran_time = [ran_time[x] for x in index]
#process query and time
init_avg_query = sum(init_query) / L
our_avg_query = sum(our_query) / L
ran_avg_query = our_avg_query
init_avg_time = sum(init_time) / L
our_avg_time = sum(our_time) / L
ran_avg_time = sum(ran_time) / L
print('Init: avg query: {:.2f}'.format(init_avg_query))
print('Init: avg attack time: {:.2f}'.format(init_avg_time))
print('Our: avg query: {:.2f}'.format(our_avg_query))
print('Our: avg attack time: {:.2f}'.format(our_avg_time))
print('Ran: avg query: {:.2f}'.format(ran_avg_query))
print('Ran: avg attack time: {:.2f}'.format(ran_avg_time))
#process search type
count_type = [0,0,0]
for t in search_type:
if t>=0:
count_type[t] += 1
print('the percentage of type1: {:.2f}'.format(count_type[0]/sum(count_type)*100))
print('the percentage of type2: {:.2f}'.format(count_type[1]/sum(count_type)*100))
print('the percentage of type3: {:.2f}'.format(count_type[2]/sum(count_type)*100))
'''
#process pertub of init
init_distortion = [x for x in init_distortion if x>0]
init_perturbation = [x for x in init_perturbation if x>0]
init_perturbation_ratio = [x for x in init_perturbation_ratio if x>0]
init_avg_distortion = sum(init_distortion) / L
init_avg_perturbation = sum(init_perturbation) / L
init_avg_pertub_ratio = sum(init_perturbation_ratio) / L * 100
print('Init: avg distortion: {:.4f}'.format(init_avg_distortion))
print('Init: avg perturbation: {:.4f}'.format(init_avg_perturbation))
print('Init: avg perturb ratio: {:.4f}'.format(init_avg_pertub_ratio))
'''
    #compute perturbation and distortion under different budgets
    # first keep only the records from successful attacks
our_distortion = [x for x in our_distortion if x>0]
our_perturbation = [x for x in our_perturbation if x>0]
our_perturbation_ratio = [x for x in our_perturbation_ratio if x>0]
ran_perturbation = [x for x in ran_perturbation if x>0]
ran_perturbation_ratio = [x for x in ran_perturbation_ratio if x>0]
init_distortion = [x for x in init_distortion if x>0]
init_perturbation = [x for x in init_perturbation if x>0]
init_perturbation_ratio = [x for x in init_perturbation_ratio if x>0]
our_avg_perturbation = []
our_avg_perturb_ratio = []
our_avg_distortion = []
init_avg_distortion = []
init_avg_perturbation = []
init_avg_perturb_ratio = []
ran_avg_perturbation = []
ran_avg_pertub_ratio = []
our_success_ratio = []
ran_success_ratio = []
init_success_ratio = []
for b in range(1, 21):
budget = b / 100
our_success_index = [x for x in list(range(len(our_distortion))) if our_perturbation_ratio[x] <= budget]
our_success_distortion = [our_distortion[i] for i in our_success_index]
our_success_perturbation = [our_perturbation[i] for i in our_success_index]
our_success_perturb_ratio = [our_perturbation_ratio[i] for i in our_success_index]
init_success_index = [x for x in list(range(len(init_distortion))) if init_perturbation_ratio[x]<= budget]
init_success_distortion = [init_distortion[i] for i in init_success_index]
init_success_perturbation = [init_perturbation[i] for i in init_success_index]
init_success_perturb_ratio = [init_perturbation_ratio[i] for i in init_success_index]
ran_success_index = [x for x in list(range(len(ran_perturbation))) if ran_perturbation_ratio[x] <= budget]
ran_success_perturbation = [ran_perturbation[i] for i in ran_success_index]
ran_success_perturb_ratio = [ran_perturbation_ratio[i] for i in ran_success_index]
our_success_count = len(our_success_index)
our_success_ratio.append(our_success_count / L)
if our_success_count > 0:
our_avg_perturbation.append(sum(our_success_perturbation) / our_success_count)
our_avg_perturb_ratio.append(sum(our_success_perturb_ratio) / our_success_count)
our_avg_distortion.append(sum(our_success_distortion) / our_success_count)
else:
our_avg_perturbation.append(0)
our_avg_perturb_ratio.append(0)
our_avg_distortion.append(0)
init_success_count = len(init_success_index)
init_success_ratio.append(init_success_count / L)
if init_success_count > 0:
init_avg_perturbation.append(sum(init_success_perturbation) / init_success_count)
init_avg_perturb_ratio.append(sum(init_success_perturb_ratio) / init_success_count)
init_avg_distortion.append(sum(init_success_distortion) / init_success_count)
else:
init_avg_perturbation.append(0)
init_avg_perturb_ratio.append(0)
init_avg_distortion.append(0)
ran_success_count = len(ran_success_index)
ran_success_ratio.append(ran_success_count / L)
if ran_success_count > 0:
ran_avg_perturbation.append(sum(ran_success_perturbation) / ran_success_count)
ran_avg_pertub_ratio.append(sum(ran_success_perturb_ratio) / ran_success_count)
else:
ran_avg_perturbation.append(0)
ran_avg_pertub_ratio.append(0)
print('init success ratio'+'-'*20)
for i in init_success_ratio:
print('{:.4f}'.format(i))
print('init avg perturbation'+'-'*20)
for i in init_avg_perturbation:
print('{:.4f}'.format(i))
print('init avg perturb ratio'+'-'*20)
for i in init_avg_perturb_ratio:
print('{:.4f}'.format(i))
print('init avg distortion' + '-'*20)
for i in init_avg_distortion:
print('{:.4f}'.format(i))
print('our success ratio'+'-'*20)
for i in our_success_ratio:
print('{:.4f}'.format(i))
print('our avg perturbation'+'-'*20)
for i in our_avg_perturbation:
print('{:.4f}'.format(i))
print('our avg perturb ratio'+'-'*20)
for i in our_avg_perturb_ratio:
print('{:.4f}'.format(i))
print('our avg distortion'+'-'*20)
for i in our_avg_distortion:
print('{:.4f}'.format(i))
print('random success ratio'+'-'*20)
for i in ran_success_ratio:
print('{:.4f}'.format(i))
print('random perturbation'+'-'*20)
for i in ran_avg_perturbation:
print('{:.4f}'.format(i))
print('random perturb ratio'+'-'*20)
for i in ran_avg_pertub_ratio:
print('{:.4f}'.format(i))
| 9,740 |
tests/test_config.py
|
iflb/ifconf
| 0 |
2170916
|
#!/usr/bin/env python
# the inclusion of the tests module is not meant to offer best practices for
# testing in general, but rather to support the `find_packages` example in
# setup.py that excludes installing the "tests" package
import unittest
from pathlib import Path
from collections import namedtuple
#from recordclass import recordclass
import sys
sys.path.append('..')
from ifconf import configure_main, configure_module
from ifconf.main import clear_main_cofig
import model
class TestConfigureModule(unittest.TestCase):
def setUp(self):
clear_main_cofig()
def test_default_value_load(self):
configure_main()
server = model.Server()
self.assertEqual(server.addr, '0.0.0.0')
self.assertEqual(server.port, 8080)
self.assertEqual(server.udp, False)
self.assertEqual(server.val_f, 0.8)
self.assertEqual(server.val_d, {'a':1,'b':2,'c':3})
self.assertEqual(server.val_l, [1,2,3])
self.assertEqual(server.home, Path('..'))
with self.assertRaises(AttributeError):
self.conf.addr = '172.16.17.32' # not editable
def test_file_path_value_load(self):
configure_main(config_path=['test.ini'], config_arg=None)
server = model.Server()
self.assertEqual(server.addr, '127.0.0.1')
self.assertEqual(server.port, 80)
self.assertEqual(server.udp, True)
self.assertEqual(server.val_f, 0.5)
self.assertEqual(server.val_d, {'a':10,'b':20,'c':30})
self.assertEqual(server.val_l, [1,2,3,4,5,6])
self.assertEqual(server.home, Path('../../..'))
with self.assertRaises(AttributeError):
self.conf.addr = '172.16.17.32' # not editable
def test_file_path_value_load_with_two_files(self):
configure_main(config_path=['test.ini', 'test2.ini'], config_arg=None)
server = model.Server()
self.assertEqual(server.addr, '127.0.0.1')
self.assertEqual(server.port, 80)
self.assertEqual(server.udp, True)
self.assertEqual(server.val_f, 0.5)
self.assertEqual(server.val_d, {'a':10,'b':20,'c':30})
self.assertEqual(server.val_l, [1,2,3,4,5,6])
self.assertEqual(server.home, Path('../../..'))
with self.assertRaises(AttributeError):
self.conf.addr = '172.16.17.32' # not editable
server = model.Database(immutable=True)
self.assertEqual(server.addr, '192.168.0.1')
self.assertEqual(server.port, 3333)
def test_file_path_value_load_test2(self):
configure_main(config_path='test2.ini', config_arg=None)
server = model.Database(immutable=True)
self.assertEqual(server.addr, '192.168.0.1')
self.assertEqual(server.port, 3333)
def test_file_path_value_load_test3_override(self):
configure_main(config_path=['test3.ini', 'test2.ini'], config_arg=None)
server = model.Database(immutable=True)
self.assertEqual(server.addr, '192.168.0.100')
self.assertEqual(server.port, 4444)
def test_file_arg_value_load(self):
configure_main(config_arg='test.ini')
server = model.Server()
self.assertEqual(server.addr, '127.0.0.1')
self.assertEqual(server.port, 80)
self.assertEqual(server.udp, True)
self.assertEqual(server.val_f, 0.5)
self.assertEqual(server.val_d, {'a':10,'b':20,'c':30})
self.assertEqual(server.val_l, [1,2,3,4,5,6])
self.assertEqual(server.home, Path('../../..'))
with self.assertRaises(AttributeError):
self.conf.addr = '172.16.17.32' # not editable
def test_default_value_load_mutable(self):
configure_main(with_config_logging = False)
server = model.Database(immutable=False)
self.assertEqual(server.addr, '127.0.0.1')
self.assertEqual(server.port, 3306)
server.addr = '192.168.0.1'
server.port = 8888
self.assertEqual(server.addr, '192.168.0.1')
self.assertEqual(server.port, 8888)
def test_no_main_config(self):
with self.assertRaises(RuntimeError):
configure_module(model.server)
def test_override_fail(self):
configure_main()
with self.assertRaises(ValueError):
server = model.Server(override={'server_addr', '192.168.0.1'})
def test_override_value(self):
configure_main()
server = model.Server(override={'server_addr': '192.168.0.1'})
self.assertEqual(server.addr, '192.168.0.1')
def test_ifconf_config_path(self):
configure_main(config_path='test_all.ini', config_arg=None)
server = model.Server()
self.assertEqual(server.addr, '127.0.0.1')
self.assertEqual(server.port, 80)
self.assertEqual(server.udp, True)
self.assertEqual(server.val_f, 0.5)
self.assertEqual(server.val_d, {'a':10,'b':20,'c':30})
self.assertEqual(server.val_l, [9,9,9])
self.assertEqual(server.home, Path('../../..'))
with self.assertRaises(AttributeError):
self.conf.addr = '172.16.17.32' # not editable
server = model.Database(immutable=True)
self.assertEqual(server.addr, '192.168.0.1')
self.assertEqual(server.port, 3333)
if __name__ == '__main__':
unittest.main()
| 5,352 |
adam_api_repo_curve_anomaly_detection/setup.py
|
mChataign/smileCompletion
| 4 |
2171628
|
from setuptools import setup, find_packages
setup(name='Delta One Anomaly Detection',
version='0.0.1',
description='project',
license='HSBC',
packages=['package'],
      author='<NAME>, <NAME>',
      author_email='<EMAIL>, <EMAIL>',
keywords=['example'],
install_requires=['numpy','pandas'],
url='')
| 329 |
attacks/examples/testInference.py
|
m3eeza/code
| 0 |
2171210
|
import sys
import pprint
sys.path.append('../../common')
from gdaScore import gdaAttack, gdaScores
from myUtilities import checkMatch
# Anon: None
# Attack: List DB contents
# Criteria: Inference
# Database: Banking.Accounts
pp = pprint.PrettyPrinter(indent=4)
verbose = 0
v = verbose
# -------------------------- subroutines ---------------------------
# -------------------------- body ---------------------------
# Note in following that since there is no anonymization, the anonymized
# DB is the same as the raw DB
params = dict(name=__file__,
rawDb='localBankingRaw',
anonDb='localBankingRaw',
criteria='inference',
table='accounts',
flushCache=False,
verbose=False)
# TEST ALL CORRECT
x = gdaAttack(params)
# ------------------- Exploration Phase ------------------------
# This attack doesn't require any exploratory queries
# ------------------- Prior Knowledge Phase --------------------
# This attack doesn't require any prior knowledge
# ------------------- Attack Phase -----------------------------
# In the List attack for Inference, we look for columns where there
# is only a single distinct instance of another column. In this case,
# acct_date has relatively many distinct values, and frequency has
# relatively few, so it isn't hard to find instances where users with
# different acct_date all have the same frequency. Note that rather
# than acct_date, we could have used several columns each with fewer
# distinct values.
query = {}
sql = """select acct_date, max(frequency)
from accounts
group by 1
having count(distinct frequency) = 1
limit 10"""
query['sql'] = sql
x.askAttack(query)
replyCorrect = x.getAttack()
# ------------------- Claims Phase ----------------------------
# the first askClaim will pass on committing to the claim
claim = 0
for row in replyCorrect['answer']:
query = {}
sql = "select client_id, frequency "
sql += "from accounts where "
sql += str(f"acct_date = {row[0]} ")
query['sql'] = sql
spec = {'uid':'client_id',
'known':[{'col':'acct_date','val':row[0]}],
'guess':[{'col':'frequency','val':row[1]}]
}
x.askClaim(spec,claim=claim)
claim = 1
while True:
reply = x.getClaim()
if v: print(f"Query: {reply['query']['sql']}")
if v: print(f"Result: {reply['claimResult']}")
if reply['stillToCome'] == 0:
break
# ------------------- Scores Phase ----------------------------
print("Test all correct:")
attackResult = x.getResults()
sc = gdaScores(attackResult)
score = sc.getScores()
pp.pprint(score['col']['frequency'])
expect = {'attackCells': 20,
'attackGets': 1,
'claimCorrect': 9,
'claimError': 0,
'claimMade': 9,
'claimPassCorrect': 1,
'claimTrials': 10,
'knowledgeCells': 0,
'knowledgeGets': 0
}
checkMatch(score,expect,'frequency')
x.cleanUp(doExit=False)
# TEST ALL WRONG
x = gdaAttack(params)
# ------------------- Exploration Phase ------------------------
# This attack doesn't require any exploratory queries
# ------------------- Prior Knowledge Phase --------------------
# This attack doesn't require any prior knowledge
# ------------------- Attack Phase -----------------------------
query = {}
sql = """select acct_date, max(frequency)
from accounts
group by 1
having count(distinct frequency) > 1
limit 10"""
query['sql'] = sql
x.askAttack(query)
replyWrong = x.getAttack()
# ------------------- Claims Phase ----------------------------
claim = 0
for row in replyWrong['answer']:
query = {}
sql = "select client_id, frequency "
sql += "from accounts where "
sql += str(f"acct_date = {row[0]} ")
query['sql'] = sql
spec = {'uid':'client_id',
'known':[{'col':'acct_date','val':row[0]}],
'guess':[{'col':'frequency','val':row[1]}]
}
x.askClaim(spec,claim=claim)
claim = 1
while True:
reply = x.getClaim()
if v: print(f"Query: {reply['query']['sql']}")
if v: print(f"Result: {reply['claimResult']}")
if reply['stillToCome'] == 0:
break
# ------------------- Scores Phase ----------------------------
print("Test all wrong:")
attackResult = x.getResults()
sc = gdaScores(attackResult)
score = sc.getScores()
pp.pprint(score['col']['frequency'])
expect = {'attackCells': 20,
'attackGets': 1,
'claimCorrect': 0,
'claimError': 0,
'claimMade': 9,
'claimPassCorrect': 0,
'claimTrials': 10,
'knowledgeCells': 0,
'knowledgeGets': 0
}
checkMatch(score,expect,'frequency')
x.cleanUp(doExit=False)
# TEST MIX CORRECT and WRONG
x = gdaAttack(params)
# ------------------- Exploration Phase ------------------------
# This attack doesn't require any exploratory queries
# ------------------- Prior Knowledge Phase --------------------
# This attack doesn't require any prior knowledge
# ------------------- Attack Phase -----------------------------
# Use prior attack phases
# ------------------- Claims Phase ----------------------------
claim = 0
for row in replyWrong['answer']:
query = {}
sql = "select client_id, frequency "
sql += "from accounts where "
sql += str(f"acct_date = {row[0]} ")
query['sql'] = sql
spec = {'uid':'client_id',
'known':[{'col':'acct_date','val':row[0]}],
'guess':[{'col':'frequency','val':row[1]}]
}
x.askClaim(spec,claim=claim)
claim = 1
claim = 0
for row in replyCorrect['answer']:
query = {}
sql = "select client_id, frequency "
sql += "from accounts where "
sql += str(f"acct_date = {row[0]} ")
query['sql'] = sql
spec = {'uid':'client_id',
'known':[{'col':'acct_date','val':row[0]}],
'guess':[{'col':'frequency','val':row[1]}]
}
x.askClaim(spec,claim=claim)
claim = 1
while True:
reply = x.getClaim()
if v: print(f"Query: {reply['query']['sql']}")
if v: print(f"Result: {reply['claimResult']}")
if reply['stillToCome'] == 0:
break
# ------------------- Scores Phase ----------------------------
print("Test mix of correct and wrong:")
attackResult = x.getResults()
sc = gdaScores(attackResult)
score = sc.getScores()
pp.pprint(score['col']['frequency'])
expect = {'attackCells': 0,
'attackGets': 0,
'claimCorrect': 9,
'claimError': 0,
'claimMade': 18,
'claimPassCorrect': 1,
'claimTrials': 20,
'knowledgeCells': 0,
'knowledgeGets': 0
}
checkMatch(score,expect,'frequency')
print("\nOperational Parameters:")
op = x.getOpParameters()
pp.pprint(op)
x.cleanUp()
| 6,881 |
django_tgbot/types/stickerset.py
|
purwowd/django-tgbot
| 52 |
2171076
|
from . import BasicType
from . import sticker, photosize
class StickerSet(BasicType):
fields = {
'name': str,
'title': str,
'is_animated': BasicType.bool_interpreter,
'contains_masks': BasicType.bool_interpreter,
'stickers': {
'class': sticker.Sticker,
'array': True
},
'thumb': photosize.PhotoSize
}
def __init__(self, obj=None):
super(StickerSet, self).__init__(obj)
| 472 |
aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/CreateApplicationTemplateRequest.py
|
yndu13/aliyun-openapi-python-sdk
| 1,001 |
2170043
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkedas.endpoint import endpoint_data
class CreateApplicationTemplateRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Edas', '2017-08-01', 'CreateApplicationTemplate','Edas')
self.set_uri_pattern('/pop/v5/cnedas/app_template')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_NasId(self):
return self.get_body_params().get('NasId')
def set_NasId(self,NasId):
self.add_body_params('NasId', NasId)
def get_EnableAhas(self):
return self.get_body_params().get('EnableAhas')
def set_EnableAhas(self,EnableAhas):
self.add_body_params('EnableAhas', EnableAhas)
def get_SlsConfigs(self):
return self.get_body_params().get('SlsConfigs')
def set_SlsConfigs(self,SlsConfigs):
self.add_body_params('SlsConfigs', SlsConfigs)
def get_CommandArgs(self):
return self.get_body_params().get('CommandArgs')
def set_CommandArgs(self,CommandArgs):
self.add_body_params('CommandArgs', CommandArgs)
def get_Readiness(self):
return self.get_body_params().get('Readiness')
def set_Readiness(self,Readiness):
self.add_body_params('Readiness', Readiness)
def get_Liveness(self):
return self.get_body_params().get('Liveness')
def set_Liveness(self,Liveness):
self.add_body_params('Liveness', Liveness)
def get_Description(self):
return self.get_body_params().get('Description')
def set_Description(self,Description):
self.add_body_params('Description', Description)
def get_Envs(self):
return self.get_body_params().get('Envs')
def set_Envs(self,Envs):
self.add_body_params('Envs', Envs)
def get_EnvFroms(self):
return self.get_body_params().get('EnvFroms')
def set_EnvFroms(self,EnvFroms):
self.add_body_params('EnvFroms', EnvFroms)
def get_RequestCpu(self):
return self.get_body_params().get('RequestCpu')
def set_RequestCpu(self,RequestCpu):
self.add_body_params('RequestCpu', RequestCpu)
def get_RequestMem(self):
return self.get_body_params().get('RequestMem')
def set_RequestMem(self,RequestMem):
self.add_body_params('RequestMem', RequestMem)
def get_ShowName(self):
return self.get_body_params().get('ShowName')
def set_ShowName(self,ShowName):
self.add_body_params('ShowName', ShowName)
def get_LimitMem(self):
return self.get_body_params().get('LimitMem')
def set_LimitMem(self,LimitMem):
self.add_body_params('LimitMem', LimitMem)
def get_ConfigMountDescs(self):
return self.get_body_params().get('ConfigMountDescs')
def set_ConfigMountDescs(self,ConfigMountDescs):
self.add_body_params('ConfigMountDescs', ConfigMountDescs)
def get_DeployAcrossZones(self):
return self.get_body_params().get('DeployAcrossZones')
def set_DeployAcrossZones(self,DeployAcrossZones):
self.add_body_params('DeployAcrossZones', DeployAcrossZones)
def get_DeployAcrossNodes(self):
return self.get_body_params().get('DeployAcrossNodes')
def set_DeployAcrossNodes(self,DeployAcrossNodes):
self.add_body_params('DeployAcrossNodes', DeployAcrossNodes)
def get_PreStop(self):
return self.get_body_params().get('PreStop')
def set_PreStop(self,PreStop):
self.add_body_params('PreStop', PreStop)
def get_Replicas(self):
return self.get_body_params().get('Replicas')
def set_Replicas(self,Replicas):
self.add_body_params('Replicas', Replicas)
def get_LimitCpu(self):
return self.get_body_params().get('LimitCpu')
def set_LimitCpu(self,LimitCpu):
self.add_body_params('LimitCpu', LimitCpu)
def get_WebContainerConfig(self):
return self.get_body_params().get('WebContainerConfig')
def set_WebContainerConfig(self,WebContainerConfig):
self.add_body_params('WebContainerConfig', WebContainerConfig)
def get_PackageConfig(self):
return self.get_body_params().get('PackageConfig')
def set_PackageConfig(self,PackageConfig):
self.add_body_params('PackageConfig', PackageConfig)
def get_IsMultilingualApp(self):
return self.get_body_params().get('IsMultilingualApp')
def set_IsMultilingualApp(self,IsMultilingualApp):
self.add_body_params('IsMultilingualApp', IsMultilingualApp)
def get_NasMountDescs(self):
return self.get_body_params().get('NasMountDescs')
def set_NasMountDescs(self,NasMountDescs):
self.add_body_params('NasMountDescs', NasMountDescs)
def get_LocalVolumes(self):
return self.get_body_params().get('LocalVolumes')
def set_LocalVolumes(self,LocalVolumes):
self.add_body_params('LocalVolumes', LocalVolumes)
def get_Command(self):
return self.get_body_params().get('Command')
def set_Command(self,Command):
self.add_body_params('Command', Command)
def get_NasStorageType(self):
return self.get_body_params().get('NasStorageType')
def set_NasStorageType(self,NasStorageType):
self.add_body_params('NasStorageType', NasStorageType)
def get_ImageConfig(self):
return self.get_body_params().get('ImageConfig')
def set_ImageConfig(self,ImageConfig):
self.add_body_params('ImageConfig', ImageConfig)
def get_SourceConfig(self):
return self.get_body_params().get('SourceConfig')
def set_SourceConfig(self,SourceConfig):
self.add_body_params('SourceConfig', SourceConfig)
def get_EmptyDirs(self):
return self.get_body_params().get('EmptyDirs')
def set_EmptyDirs(self,EmptyDirs):
self.add_body_params('EmptyDirs', EmptyDirs)
def get_PvcMountDescs(self):
return self.get_body_params().get('PvcMountDescs')
def set_PvcMountDescs(self,PvcMountDescs):
self.add_body_params('PvcMountDescs', PvcMountDescs)
def get_Name(self):
return self.get_body_params().get('Name')
def set_Name(self,Name):
self.add_body_params('Name', Name)
def get_Attributes(self):
return self.get_body_params().get('Attributes')
def set_Attributes(self,Attributes):
self.add_body_params('Attributes', Attributes)
def get_RuntimeClassName(self):
return self.get_body_params().get('RuntimeClassName')
def set_RuntimeClassName(self,RuntimeClassName):
self.add_body_params('RuntimeClassName', RuntimeClassName)
def get_JavaStartUpConfig(self):
return self.get_body_params().get('JavaStartUpConfig')
def set_JavaStartUpConfig(self,JavaStartUpConfig):
self.add_body_params('JavaStartUpConfig', JavaStartUpConfig)
def get_PostStart(self):
return self.get_body_params().get('PostStart')
def set_PostStart(self,PostStart):
self.add_body_params('PostStart', PostStart)
| 7,546 |
datam/dect.py
|
fffasttime/cs_misc
| 5 |
2168927
|
# a slow and poor decision tree
import numpy as np
import math
def infoEnpt(x):
return -sum(map(lambda a:a*math.log2(a) if a else 0,x))
def infoEnpt2(x):
if x==1 or x==0:
return 0
return -x*math.log2(x)-(1-x)*math.log2(1-x)
def gini(x):
return 1-sum(map(lambda a:a*a,x))
def gini2(x):
return 1-x**2-(1-x)**2
def minpb2(x):
return min(x,1-x)
def pureness(x, f):
n=sum(x)
return f(map(lambda a:a/n,x))
# average pureness of children
def sum_pureness(x, f=infoEnpt):
n=0
s=0.0
for x1 in x:
n+=sum(x1)
s+=sum(x1)*pureness(x1, f)
return s/n
# average pureness of two children
# s1: total positives, c1: size of the first part, pos1: positives in the first part
def sum_pureness2(n, s1, c1, pos1, f=infoEnpt2):
return (f(pos1/c1)*c1+f((s1-pos1)/(n-c1))*(n-c1))/n
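# e.g. sum_pureness2(10, 4, 4, 3) scores a split of 10 samples (4 positive in total)
# into a first child of 4 samples holding 3 positives, using entropy by default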
# try split points for a continuous attribute
# try_value([3.0,1.0,2.0],[1,0,1])
def try_value(val, label, f=infoEnpt2):
v1=sorted(zip(val, label))
val,label=zip(*v1)
n=len(val)
s1=sum(label)
pos=0
for i in range(n-1):
pos+=label[i]
if i and val[i]==val[i-1]:
continue
s=sum_pureness2(n,s1,i+1,pos)
print(("<=%f : %f")%(val[i], s))
class MetaData:
    # attr: None for a continuous value
def __init__(self, xname, attr):
self.xname=xname
self.attr=attr
self.nx=len(xname)
class Node:
def __init__(self):
self.ch=[]
def print(self, deep=0):
for i in range(deep):
print(' ',end='')
print("%s : %d/%d"%(self.name,self.pos,self.n))
for c in self.ch:
c.print(deep+1)
def fit(self, tree,datax,datay,freq,nodename,deep=0):
self.name=nodename
self.n=sum(freq)
self.pos=sum(freq[datay==1])
if deep==2: return
if self.pos==0 or self.pos==self.n:
return
minsp=None ; mini=None
curpure=sum_pureness([[self.pos,self.n-self.pos]], tree.mode)
for i,x in enumerate(datax):
if not tree.idlex[i]:
continue
if type(tree.metadata.attr[i]) is int:
cnt=np.zeros((tree.metadata.attr[i],2))
# print(datax[i])
for j in range(len(freq)):
cnt[datax[i][j],datay[j]]+=freq[j]
print(cnt)
s=sum_pureness(cnt, tree.mode)
print(nodename,':',tree.metadata.xname[i],s,curpure-s)
if minsp==None or minsp>s:
minsp=s
mini=i
else:
pass
                # TODO: handle continuous values
if deep==0: mini=0
if mini is not None:
tree.idlex[mini]=False
self.pid=mini
for j in range(tree.metadata.attr[mini]):
self.ch.append(Node())
sel=datax[mini]==j
newx=[x[sel] for x in datax]
self.ch[-1].fit(tree, newx, datay[sel], freq[sel], "%s_%d"%(tree.metadata.xname[mini],j), deep+1)
self.ch[-1].id=j
tree.idlex[mini]=True
class Tree:
def __init__(self, metadata, mode=infoEnpt):
self.metadata=metadata
self.mode=mode
def fit(self, datax, datay, freq=None):
self.root=Node()
self.nitem=len(datax)
self.idlex=np.ones(self.metadata.nx).astype(bool)
if freq is None:
freq=np.ones(self.nitem)
self.root.fit(self,datax,datay, freq,'root')
def print(self):
self.root.print()
def predict(self, datax):
node=self.root
while len(node.ch):
node=node.ch[datax[node.pid]]
return node.pos>node.n-node.pos
if __name__=="__main__":
datax=np.array([
[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1],
[0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1],
[0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1],
])
datay=np.array([1,0]*8)
freq=np.array([5,40,0,15,10,5,45,0,10,5,25,0,5,20,0,15])
freq=np.array([5,0,0,20,20,0,0,5,0,0,25,0,0,0,0,25])
tree=Tree(MetaData("X Y Z".split(),[2,2,2]),min)
tree.fit(datax, datay, freq)
tree.print()
acc=0
for i in range(len(datay)):
ret=tree.predict([x[i] for x in datax])
if ret==datay[i]:
acc+=freq[i]
print(acc/sum(freq))
| 4,305 |
python/primes_in_numbers.py
|
lukasjoc/random
| 1 |
2171248
|
def primeFactors(n):
    facts, by_two = {}, 0
    while n % 2 == 0:
        n //= 2
        by_two += 1
    if by_two:
        facts[2] = by_two
    for i in range(3, int(n**0.5)+1, 2):
        while n % i == 0:
            n //= i
            if i in facts:
                facts[i] += 1
            else:
                facts[i] = 1
    if n > 1:
        # whatever is left over is itself a prime factor
        facts[n] = facts.get(n, 0) + 1
    return facts
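# Illustrative checks (hypothetical inputs) for the factorization above:
# the leftover-prime case (6 = 2 * 3) and a composite with repeated factors.
assert primeFactors(6) == {2: 1, 3: 1}
assert primeFactors(360) == {2: 3, 3: 2, 5: 1}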
def prime_fac(num):
for i in range(2,num + 1):
if(num % i == 0):
prime = True
for j in range(2,(i//2 + 1)):
if(i % j == 0):
prime = False
break
if prime:
return True
for i in range(2, 100):
print(i, prime_fac(i))
| 725 |
setup.py
|
p0o0uya/binapi
| 0 |
2170292
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name='binapi',
version='0.4',
author='<NAME>',
author_email='<EMAIL>',
description='A wrapper for binance futures API',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/p0o0uya/binapi',
project_urls = {
"Bug Tracker": "https://github.com/p0o0uya/binapi/issues"
},
license='MIT',
packages=['binapi'],
install_requires=['requests', 'pandas'],
)
| 605 |
src/LatentDirichletAllocation.py
|
cjuracek/STA_663_Final
| 0 |
2170726
|
from collections import Counter
from random import choices
from src.utility import get_unique_words
import numpy as np
from tqdm import trange
from scipy.stats import mode
class LatentDirichletAllocation:
def __init__(self, iden_to_tokens, K, alpha, beta=0.01):
self.iden_to_tokens = iden_to_tokens
self.K = K
self.alpha = alpha
self.beta = beta
self.vocabulary = get_unique_words(iden_to_tokens.values())
self.W = len(self.vocabulary)
self.theta_matrix = np.zeros((K, len(iden_to_tokens)))
self.phi_matrix = np.zeros((K, self.W))
def fit(self, niter):
""" Perform collapsed Gibbs sampling to discover latent topics in corpus
:param niter: Number of iterations to run the Gibbs sampler for
"""
document_word_topics_MC, document_topic_counts, word_topic_counts, total_topic_counts = self._initialize_topics()
for j in trange(niter): # One iteration of Gibbs sampler
print(f'Running iteration {j + 1} out of {niter}')
for doc, words in self.iden_to_tokens.items():
for i, word in enumerate(words):
densities = np.zeros(self.K)
curr_topic = document_word_topics_MC[doc][i][-1] # Get most recent topic of MC chain
# Calculate probability that a given latent topic z_ij belongs to topic k for each k
for k in range(self.K):
# Relevant counts needed for computation - see paragraph before Eq. 1
N_kj = document_topic_counts[doc][k]
N_wk = word_topic_counts[word][k]
N_k = total_topic_counts[k]
# New draw is conditioned on everything BUT this observation
if curr_topic == k:
N_kj -= 1
N_wk -= 1
N_k -= 1
# Eq. 1
a_kj = N_kj + self.alpha
b_wk = (N_wk + self.beta) / (N_k + self.W * self.beta)
densities[k] = a_kj * b_wk
# Draw a new topic and append to MC - normalization not needed
new_topic = choices(range(self.K), weights=densities)[0]
document_word_topics_MC[doc][i].append(new_topic)
# No need to update counts if topic is the same
if new_topic == curr_topic:
continue
# Update counts
document_topic_counts[doc][curr_topic] -= 1
document_topic_counts[doc][new_topic] += 1
word_topic_counts[word][curr_topic] -= 1
word_topic_counts[word][new_topic] += 1
total_topic_counts[curr_topic] -= 1
total_topic_counts[new_topic] += 1
# Determine topic for word from the chain
self._compute_MC_topic_approx(document_word_topics_MC)
# Estimate other model parameters we are interested in
self._compute_phi_estimates(word_topic_counts, total_topic_counts)
self._compute_theta_estimates(document_topic_counts)
def _compute_phi_estimates(self, word_topic_counts, total_topic_counts):
"""
Compute estimate of the phi matrix, containing word distributions per topic
:param word_topic_counts: Dictionary that maps words to their respective counts per topic
:param total_topic_counts: Dictionary that maps each topic to the number of times it appears in corpus
"""
for w, word in enumerate(self.vocabulary):
for k in range(self.K):
N_wk = word_topic_counts[word][k]
N_k = total_topic_counts[k]
self.phi_matrix[k, w] = (N_wk + self.beta) / (N_k + self.W * self.beta)
def _compute_theta_estimates(self, document_topic_counts):
"""
Compute a matrix containing the mixture components of each document
:param document_topic_counts: A dictionary mapping titles to topic counts in that document
"""
for j, (doc, topics) in enumerate(document_topic_counts.items()):
for topic in topics:
N_kj = document_topic_counts[doc][topic]
N_j = sum(document_topic_counts[doc].values())
self.theta_matrix[topic, j] = (N_kj + self.alpha) / (N_j + self.K * self.alpha)
def _initialize_topics(self):
"""
Randomly initialize topic / word count information needed for sampling
:return: 4 dictionaries of counts (see comments below)
"""
# Contains the ordered list of topics for each document (Dict of lists)
document_word_topics_MC = {}
# Counts of each topic per document (Dict of dicts)
document_topic_counts = {title: Counter() for title in self.iden_to_tokens.keys()}
# Counts number of times a given word is assigned to each topic (dict of dicts)
word_topic_counts = {word: Counter() for word in self.vocabulary}
# Counts of each topic across all documents
total_topic_counts = Counter()
for doc, words in self.iden_to_tokens.items():
# Start with randomly assigned topics - update appropriate counts
topics = np.random.randint(low=0, high=self.K, size=len(words))
document_word_topics_MC[doc] = [[topic] for topic in topics]
document_topic_counts[doc].update(topics)
total_topic_counts.update(topics)
# Update the topic counts per word
for unique_word in set(words):
unique_word_topics = [topic for idx, topic in enumerate(topics) if words[idx] == unique_word]
word_topic_counts[unique_word].update(unique_word_topics)
return document_word_topics_MC, document_topic_counts, word_topic_counts, total_topic_counts
def _compute_MC_topic_approx(self, document_word_topics_MC):
"""
Given a Markov chain of word topics, compute a Monte Carlo approximation by picking mode of topics
:param document_word_topics_MC: Dictionary that maps identifiers (titles) to a Markov chain of their topics
:return: Dictionary that maps identifiers (titles) to the Monte Carlo approx of their topics (mode)
"""
document_word_topics = {title: [] for title in document_word_topics_MC.keys()}
for doc, words in document_word_topics_MC.items():
for i, word in enumerate(words):
most_frequent_topic = mode(document_word_topics_MC[doc][i], axis=None)[0][0]
document_word_topics[doc].append(most_frequent_topic)
self.document_word_topics = document_word_topics
def get_top_n_words(self, n, return_probs=False):
"""
Calculate the top n words with highest posterior probability for every topic
:param n: Top number of words to find
:param return_probs: Should we return probabilities with these words?
:return: A dictionary mapping topics to the respective top words
"""
topic_top_words = {}
for k in range(self.phi_matrix.shape[0]):
# Find the top probability indices, then take the first n of them
top_n_idx = np.argsort(self.phi_matrix[k, :])[::-1][:n]
top_n_words = [self.vocabulary[i] for i in top_n_idx]
if return_probs:
top_n_probs = self.phi_matrix[k, top_n_idx]
top_n_probs = np.around(top_n_probs, 4)
topic_top_words[k] = [(word, prob) for word, prob in zip(top_n_words, top_n_probs)]
else:
topic_top_words[k] = top_n_words
return topic_top_words
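# Minimal usage sketch (hypothetical toy corpus; assumes this module's own
# imports, e.g. src.utility.get_unique_words, resolve when run from the repo root).
if __name__ == "__main__":
    toy_corpus = {
        "doc1": ["apple", "banana", "apple", "fruit", "banana"],
        "doc2": ["python", "code", "python", "loop", "code"],
    }
    lda = LatentDirichletAllocation(toy_corpus, K=2, alpha=0.1, beta=0.01)
    lda.fit(niter=20)
    print(lda.get_top_n_words(3, return_probs=True))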
| 7,850 |
get_gesture_exp.py
|
swatsw/isg_official
| 2 |
2171668
|
import os
import sys
from sklearn.preprocessing import StandardScaler
import numpy as np
import joblib as jl
from pymo.writers import BVHWriter
from pymo.parsers import BVHParser
from pymo.preprocessing import *
from sklearn.pipeline import Pipeline
import math
from numpy.lib.stride_tricks import sliding_window_view
import pdb
from scipy.stats import norm
from scipy.signal import savgol_filter
BVH_DIR = "data/bvh"
FLISTS = {
'train': 'data/filelists/genea_train.txt',
'dev': 'data/filelists/genea_dev.txt',
'test': 'data/filelists/genea_dev.txt'
}
GESTURE_JOINTS = ['Spine', 'Spine1', 'Spine2', 'Spine3', 'Neck', 'Neck1', 'Head', 'RightShoulder', 'RightArm',
'RightForeArm', 'RightHand', 'LeftShoulder', 'LeftArm', 'LeftForeArm', 'LeftHand']
def extract_joint_angles(bvh_dir, files, destpath, fps=60, set_name=""):
p = BVHParser()
data_all = list()
print("Importing data...")
for f in files:
ff = os.path.join(bvh_dir, f + '.bvh')
print(ff)
data_all.append(p.parse(ff))
data_pipe = Pipeline([
('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)),
('root', RootTransformer('hip_centric')),
('jtsel', JointSelector(
GESTURE_JOINTS,
include_root=True)),
('exp', MocapParameterizer('expmap')),
('cnst', ConstantsRemover()),
('np', Numpyfier())
])
print("Processing...")
out_data = data_pipe.fit_transform(data_all)
jl.dump(data_pipe, os.path.join(destpath, f'data_pipe-{set_name}.sav'))
# optional saving
# fi = 0
# for f in files:
# ff = os.path.join(destpath, f)
# print(ff)
# np.savez(ff + ".npz", clips=out_data[fi])
# fi = fi + 1
return out_data
def fit_and_standardize(data):
# shape = data.shape
flat = np.concatenate(data, axis=0)
# flat = data.copy().reshape((shape[0] * shape[1], shape[2]))
scaler = StandardScaler().fit(flat)
scaled = [scaler.transform(x) for x in data]
return scaled, scaler
def standardize(data, scaler):
scaled = [scaler.transform(x) for x in data]
return scaled
def load_scaler(fpath='std_exp_scaler.sav'):
assert os.path.isfile(fpath), "specified scaler file does not exist"
return jl.load(fpath)
def load_data_pipeline(fpath):
assert os.path.isfile(fpath), "specified data_pipe file does not exist"
# fpath = f'{dir}/{fname}'
return jl.load(fpath)
# def reverse_standardize(data, scaler):
# unscaled = [scaler.inverse_transform(x) for x in data]
# return unscaled
def mel_resample(bvh_arr, hop_length=256, sampling_rate=22050, bvh_fps=60):
audio_frame_hop_time = hop_length / sampling_rate
bvh_frame_time = 1.0 / bvh_fps
total_bvh_time = bvh_arr.shape[0] * audio_frame_hop_time
num_out_frame = math.floor(total_bvh_time / bvh_frame_time)
align_indices = np.arange(num_out_frame, dtype=np.float32)
align_indices *= bvh_frame_time
align_indices /= audio_frame_hop_time
    align_indices = np.rint(align_indices).astype(int)  # np.int is removed in recent NumPy
out_bvh = bvh_arr[align_indices, :]
return out_bvh
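# For illustration (hypothetical sizes): at 22050 Hz with hop 256 (~11.6 ms per
# mel frame) and 60 fps BVH (~16.7 ms per frame), 100 mel-rate frames (~1.16 s)
# are resampled to floor(1.16 / 0.0167) = 69 BVH frames.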
SMOOTHING_METHODS = ["box", "normal", "savgol"]
NORMAL_STEP_SIZE = 1
def smoothing_arr(arr, half_window_size: int, method):
assert method in SMOOTHING_METHODS
arr = np.pad(arr, ((half_window_size,), (0,)), 'edge')
window_size = half_window_size*2+1
if method == "box":
arr = sliding_window_view(arr, window_shape=window_size, axis=0)
box_filter = np.ones((window_size, 1))
arr = np.matmul(arr, box_filter) / window_size
arr = arr.squeeze(-1)
if method == "normal":
arr = sliding_window_view(arr, window_shape=window_size, axis=0)
normal_filter_steps = np.arange(window_size) - half_window_size
normal_filter_steps = normal_filter_steps * NORMAL_STEP_SIZE
normal_filter = norm.pdf(normal_filter_steps)
normal_filter = np.expand_dims(normal_filter, -1)
arr = np.matmul(arr, normal_filter) / normal_filter.sum()
arr = arr.squeeze(-1)
if method == "savgol":
arr = savgol_filter(arr, window_length=window_size, polyorder=2)
return arr
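# Hedged usage sketch (illustrative only, never called by the pipeline below):
# smooth a synthetic (frames, features) trajectory with a 7-frame box filter.
def _smoothing_example():
    noisy = np.cumsum(np.random.randn(100, 3), axis=0)
    smoothed = smoothing_arr(noisy, half_window_size=3, method="box")
    print(noisy.shape, "->", smoothed.shape)  # both (100, 3)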
def std_exp_to_bvh(exp_arr, out_bvh_fpath, out_fps=60, smoothing='normal', smoothing_half_ws=3,
scaler_fpath=f'{BVH_DIR}/std_exp_scaler.sav',
data_pipe_fpath=f'{BVH_DIR}/data_pipe-train.sav'):
# flist = os.listdir(dir)
# flist = [x for x in flist if model_name in x and x.endswith(npf_suffix)]#x.endswith("gesture.npy")]
assert scaler_fpath, "must specify fpath for saved scaler"
scaler = load_scaler(scaler_fpath)
assert data_pipe_fpath, "must specify fpath for saved data pipe"
data_pipeline = load_data_pipeline(data_pipe_fpath)
if len(exp_arr.shape) == 3:
exp_arr = exp_arr[0].T
exp_arr = mel_resample(exp_arr, bvh_fps=out_fps)
if smoothing is not None:
exp_arr = smoothing_arr(exp_arr, half_window_size=smoothing_half_ws, method=smoothing)
exp_arr = scaler.inverse_transform(exp_arr)
gesture_bvh = data_pipeline.inverse_transform([exp_arr])[0]
# pdb.set_trace()
bvh_writer = BVHWriter()
gesture_bvh.framerate = 1 / out_fps
bvh_writer.write(gesture_bvh, open(out_bvh_fpath, "w"))
def get_gesture_exp(bvh_list, set_name="", fps=60):
if len(bvh_list) == 0:
return []
return extract_joint_angles(BVH_DIR, bvh_list, BVH_DIR, fps=fps, set_name=set_name)
def load_filepaths_and_text(filename, split="|"):
with open(filename, encoding='utf-8') as f:
filepaths_and_text = [line.strip().split(split) for line in f]
return filepaths_and_text
# a segment is only valid if has both audio-transcript(in filelist.txt) and bvh
def get_valid_segments():
bvh_dir_flist = os.listdir(BVH_DIR)
bvh_dir_flist = [x[:x.index(".bvh")] for x in bvh_dir_flist if x.endswith(".bvh")]
segments_dict = {}
for flist_set in FLISTS:
flist = load_filepaths_and_text(FLISTS[flist_set])
segment_list = [x[0] for x in flist]
segment_list = [os.path.basename(x) for x in segment_list]
segment_list = [os.path.splitext(x)[0] for x in segment_list]
valid_segment_list = [x for x in segment_list if x in bvh_dir_flist]
invalid_segment_list = [x for x in segment_list if x not in bvh_dir_flist]
print("Invalid segments in {}: ".format(flist_set), invalid_segment_list)
segments_dict[flist_set] = valid_segment_list
return segments_dict
if __name__ == "__main__":
segments_dict = get_valid_segments()
gesture_exp_dict = {x: get_gesture_exp(segments_dict[x], set_name=x)
for x in segments_dict}
out, scaler = fit_and_standardize(gesture_exp_dict["train"])
gesture_exp_dict["train"] = out
gesture_exp_dict["dev"] = standardize(gesture_exp_dict["dev"], scaler)
gesture_exp_dict["test"] = standardize(gesture_exp_dict["test"], scaler)
for k in gesture_exp_dict:
for name, x in zip(segments_dict[k], gesture_exp_dict[k]):
np.save(f"{BVH_DIR}/{name}_std_exp.npy", x)
jl.dump(scaler, f'{BVH_DIR}/std_exp_scaler.sav')
| 7,177 |
examples/ex02_taudem_simple_usage.py
|
crazyzlj/PyGeoC
| 6 |
2170073
|
# -*- coding: utf-8 -*-
# Exercise 2: Run TauDEM functions with PyGeoC
from pygeoc.TauDEM import TauDEM
def pitremove_simple_usage():
"""Simple usage of pitremove.
Workspace will be set as the base directory of input file.
"""
dem = '../tests/data/Jamaica_dem.tif'
fel = '../tests/data/tmp_results/dem_pitremoved.tif'
num_proc = 2
TauDEM.pitremove(num_proc, dem, fel)
if __name__ == "__main__":
pitremove_simple_usage()
| 477 |
institutional/migrations/0005_auto_20150928_1217.py
|
roberzguerra/rover
| 2 |
2171464
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('institutional', '0004_auto_20150928_1156'),
]
operations = [
migrations.RemoveField(
model_name='slide',
name='caption',
),
migrations.RemoveField(
model_name='slide',
name='site',
),
migrations.AddField(
model_name='slide',
name='url',
field=models.CharField(help_text='Cole aqui a URL de destino do link.', max_length=500, verbose_name='URL', blank=True),
preserve_default=True,
),
]
| 722 |
etl_e2e/census_etl/dfxml/python/demos/demo_mac_timeline_objects.py
|
thinkmoore/das
| 35 |
2171358
|
#!/usr/bin/env python
# This software was developed at the National Institute of Standards
# and Technology in whole or in part by employees of the Federal
# Government in the course of their official duties. Pursuant to
# title 17 Section 105 of the United States Code portions of this
# software authored by NIST employees are not subject to copyright
# protection and are in the public domain. For portions not authored
# by NIST employees, NIST has been granted unlimited rights. NIST
# assumes no responsibility whatsoever for its use by other parties,
# and makes no guarantees, expressed or implied, about its quality,
# reliability, or any other characteristic.
#
# We would appreciate acknowledgement if the software is used.
# produce a MAC-times timeline using the DFXML Objects interface.
# works under either Python2 or Python3
import os
import sys
sys.path.append( os.path.join(os.path.dirname(__file__), ".."))
import dfxml
import dfxml.objects as Objects
def main():
if len(sys.argv) < 2:
print("Usage: {} <filename.xml>".format(sys.argv[0]))
exit(1)
timeline = []
for (event, obj) in Objects.iterparse( sys.argv[1] ):
#Only work on FileObjects
if not isinstance(obj, Objects.FileObject):
continue
if not obj.mtime is None: timeline.append([obj.mtime, obj.filename," modified"])
if not obj.crtime is None: timeline.append([obj.crtime,obj.filename," created"])
if not obj.ctime is None: timeline.append([obj.ctime, obj.filename," changed"])
if not obj.atime is None: timeline.append([obj.atime, obj.filename," accessed"])
timeline.sort()
for record in timeline:
print("\t".join( map(str, record)) )
if __name__ == "__main__":
main()
| 1,770 |
sudachipy/dartsclone/__init__.py
|
lintaoren/SudachiPy
| 0 |
2170077
|
from . import doublearray
from . import keyset
from . import doublearraybuilder
from . import doublearraybuilderunit
from . import dawgbuilder
from . import bitvector
| 167 |
saas/backend/biz/model_event.py
|
Canway-shiisa/bk-iam-saas
| 0 |
2171541
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from typing import List
from django.db import transaction
from pydantic import parse_obj_as
from backend.api.authorization.models import AuthAPIAllowListConfig
from backend.apps.approval.models import ActionProcessRelation
from backend.apps.policy.models import Policy as PolicyModel
from backend.apps.role.models import RoleScope
from backend.apps.template.models import PermTemplate, PermTemplatePolicyAuthorized
from backend.service.action import ActionService
from backend.service.constants import ModelChangeEventStatusEnum, ModelChangeEventTypeEnum
from backend.service.model_event import ModelEventService
from backend.service.models import ModelEvent
from backend.service.policy.operation import PolicyOperationService
class ModelEventBean(ModelEvent):
"""继承ModelEvent的数据属性"""
pass
class BaseEventExecutor:
def __init__(self, event: ModelEventBean):
self.event = event
def run(self):
self.execute()
self.finish()
def execute(self):
"""每个子类事件需要实现各自的执行逻辑"""
raise NotImplementedError("subclasses of BaseEventExecutor must provide an execute() method")
def finish(self):
"""事件执行结束后执行-更新状态"""
ModelEventService().update_status(self.event.id, ModelChangeEventStatusEnum.Finished.value)
class DeleteActionPolicyEventExecutor(BaseEventExecutor):
"""删除操作的策略"""
def execute(self):
"""删除某个操作的所有策略"""
system_id, action_id = self.event.system_id, self.event.model_id
        # Policy deletion covers both custom permissions and template permissions
with transaction.atomic():
            # 1. Delete custom permissions of users or user groups
PolicyModel.delete_by_action(system_id=system_id, action_id=action_id)
            # 2. Permission templates: update the action_ids in each template and its authorized data
template_ids = PermTemplate.delete_action(system_id, action_id)
PermTemplatePolicyAuthorized.delete_action(system_id, action_id, template_ids)
            # 3. Call the backend API that deletes policies by action_id; in practice, deleting 100k+ out of 1.5 million policies takes roughly 3 seconds
PolicyOperationService().delete_backend_policy_by_action(system_id, action_id)
            # 4. Authorization scope of graded administrators (roles)
RoleScope.delete_action_from_scope(system_id, action_id)
            # 5. Approval process configuration of the action
ActionProcessRelation.delete_by_action(system_id=system_id, action_id=action_id)
            # 6. Revoke API allow-list authorizations
AuthAPIAllowListConfig.delete_by_action(system_id, action_id)
class DeleteActionEventExecutor(BaseEventExecutor):
"""删除操作"""
def execute(self):
"""删除Action权限模型"""
ActionService().delete(self.event.system_id, self.event.model_id)
class ModelEventBiz:
svc = ModelEventService()
def list(self, status: str, limit: int = 1000) -> List[ModelEventBean]:
"""有限制条数的查询"""
events = self.svc.list(status, limit)
return parse_obj_as(List[ModelEventBean], events)
def get_executor(self, event: ModelEventBean) -> BaseEventExecutor:
"""
获取事件执行者,用于执行事件
"""
        # Note: currently only deletion of an action's policies and deletion of the action itself are handled
if event.type == ModelChangeEventTypeEnum.ActionPolicyDeleted.value:
return DeleteActionPolicyEventExecutor(event=event)
if event.type == ModelChangeEventTypeEnum.ActionDeleted.value:
return DeleteActionEventExecutor(event=event)
raise NotImplementedError(f"{event.type} executor not implement")
def delete_finished_event(self, before_updated_at: int, limit: int = 1000):
"""有限制条数和时间的删除事件"""
self.svc.delete_finished_event(before_updated_at, limit)
| 4,187 |
test/python/test_or_and_max_abs.py
|
tomjaguarpaw/knossos-ksc
| 31 |
2169137
|
from ksc.utils import translate_and_import
def test_abs():
ks_str = """
(edef abs Integer (Integer))
(def test Integer ((x : Integer))
(abs x)
)
"""
py_out = translate_and_import(__file__, ks_str, "common")
assert py_out.test(0) == 0
assert py_out.test(1) == 1
assert py_out.test(-1) == 1
def test_max():
ks_str = """
(edef max Integer (Tuple Integer Integer))
(def test Integer ((x : Integer) (y : Integer) (z : Integer))
(max (max x y) z)
)
"""
py_out = translate_and_import(__file__, ks_str, "common")
x, y, z = 1, 2, -1
assert py_out.test(x, y, z) == 2
def test_or():
ks_str = """
(edef or Bool (Tuple Bool Bool))
(edef lt Bool (Tuple Integer Integer))
(edef gt Bool (Tuple Integer Integer))
(def test Bool ((x : Integer))
(or (lt x 0) (gt x 0))
)
"""
py_out = translate_and_import(__file__, ks_str, "common")
assert py_out.test(1) == True
assert py_out.test(0) == False
assert py_out.test(-1) == True
def test_and():
ks_str = """
(edef and Bool (Tuple Bool Bool))
(edef lt Bool (Tuple Integer Integer))
(edef gt Bool (Tuple Integer Integer))
(def test Bool ((x : Integer))
(and (gt x 0) (lt x 2))
)
"""
py_out = translate_and_import(__file__, ks_str, "common")
assert py_out.test(0) == False
assert py_out.test(1) == True
assert py_out.test(2) == False
| 1,358 |
microcosm_postgres/tests/encryption/fixtures/sub_encryptable.py
|
globality-corp/microcosm-postgres
| 2 |
2171726
|
from typing import Sequence, Tuple
from microcosm.api import binding
from sqlalchemy import (
CheckConstraint,
Column,
ForeignKey,
String,
)
from sqlalchemy.orm import relationship
from sqlalchemy_utils import UUIDType
from microcosm_postgres.encryption.models import EncryptableMixin, EncryptedMixin
from microcosm_postgres.encryption.store import EncryptableStore
from microcosm_postgres.models import EntityMixin, Model
from microcosm_postgres.store import Store
class Parent(EntityMixin, Model):
__tablename__ = "parent"
name = Column(String)
__mapper_args__ = {
"polymorphic_identity": "parent",
"polymorphic_on": name,
}
class SubEncrypted(EntityMixin, EncryptedMixin, Model):
__tablename__ = "sub_encrypted"
class SubEncryptable(Parent, EncryptableMixin):
"""
A model for conditionally-encrypted plaintext.
"""
__tablename__ = "sub_encryptable"
id = Column(UUIDType, ForeignKey("parent.id"), primary_key=True)
# key used for encryption context
key = Column(String, nullable=False)
# value is not encrypted
value = Column(String, nullable=True)
# foreign key to encrypted data
sub_encrypted_id = Column(UUIDType, ForeignKey("sub_encrypted.id"), nullable=True)
# load and update encrypted relationship automatically
sub_encrypted = relationship(
SubEncrypted,
lazy="joined",
)
__mapper_args__ = {
"polymorphic_identity": "sub",
}
__table_args__ = (
CheckConstraint(
name="value_or_encrypted_is_not_null",
sqltext="value IS NOT NULL OR sub_encrypted_id IS NOT NULL",
),
CheckConstraint(
name="value_or_encrypted_is_null",
sqltext="value IS NULL OR sub_encrypted_id IS NULL",
),
)
__encrypted_identifier__ = "sub_encrypted_id"
@property
def ciphertext(self) -> Tuple[bytes, Sequence[str]]:
return (self.sub_encrypted.ciphertext, self.sub_encrypted.key_ids)
@ciphertext.setter
def ciphertext(self, value: Tuple[bytes, Sequence[str]]) -> None:
ciphertext, key_ids = value
self.sub_encrypted = SubEncrypted(
ciphertext=ciphertext,
key_ids=key_ids,
)
@binding("sub_encrypted_store")
class SubEncryptedStore(Store):
def __init__(self, graph):
super().__init__(graph, SubEncrypted)
@binding("sub_encryptable_store")
class SubEncryptableModelStore(EncryptableStore):
def __init__(self, graph):
super().__init__(graph, SubEncryptable, graph.sub_encrypted_store)
@binding("parent_store")
class ParentStore(Store):
def __init__(self, graph):
super().__init__(graph, Parent)
| 2,732 |
pizzapy/__init__.py
|
dzeban/python-imports
| 5 |
2171447
|
# Uncomment to drag all the package symbols to the top-level
#
# from pizzapy.pizza import *
# from pizzapy.menu import *
#
# or use this short form
#
# from .pizza import *
# from .menu import *
| 196 |
image_segmentation/image_Segmentation.py
|
Sujay-Tandel/Optical-Character-Recognition
| 6 |
2170128
|
# coding: utf-8
# In[2]:
# importing the dependencies
import os
import os.path
import cv2
import glob
import imutils
# In[53]:
# a function that takes the path to an image, segments the image into its constituent letters, and returns the extracted letter image
def image_segmentation(image_name):
counter = 0
# reading the image
image = cv2.imread(image_name)
# converting the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# threshold to convert the image to pure black and white
thresh = cv2.threshold(gray, 0,255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
# find the contours (continous blob of pixels ) in the image
contours = cv2.findContours(thresh,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Hack for compatibility with different OpenCV versions
    contours = imutils.grab_contours(contours)
letter_image_regions = []
# now loop through each of the letter in the image
for contour in contours:
# get the rectangle that contains the contour
x,y,w,h = cv2.boundingRect(contour)
# compare the width and height of the contour to detect if it
# has one letter or not
if w/h >1.25:
# this is too wide for a single letter
continue
elif w<3 or h<3:
# this is a very small image probably a noise
continue
else:
# this is a normal letter by itself
letter_image_regions.append((x,y,w,h))
# Extract the letter from the original image with a 2-pixel margin around the edge
letter_image = gray[y - 2:y + h + 2, x - 2:x + w + 2]
#constructing the name of the images
name = str(counter) + '.png'
# incrementing the counter to store the next image
counter = counter +1
return letter_image
# In[58]:
letter_images = image_segmentation("pic.jpg")
| 1,920 |
00_In Class Activities/09_LoopsAndListsBasicPractice/09_LoopsAndListsBasicPractice.py
|
Kraussie/CS550--Project-Repo
| 0 |
2171229
|
#Loops and Lists Basic Practice
import random
import math
# 1. Create a list that holds the numbers 1-99 in reverse order.
'''
COMPLETE
###a = list(range(99,0,-1))
###print(a)
'''
# 2. Create a loop that loops through a list of random numbers (ranging from 0-100) and throws away or removes any number greater than 10. This should work for lists of any length.
'''
COMPLETE
purgeList = [random.randint(0,100) for i in range(30)]
print(purgeList)
purgedList = [i for i in purgeList if i < 10]
print(purgedList)
'''
# 3. Write a function that will return a list of the first n numbers in the fibonacci sequence. No recursion allowed!
phi = ( 1 + math.sqrt(5) ) / 2
numberFib_UR = int(input("\n\n\nHow many values of the fibonacci sequence would you like to see?\n>>"))
fibPrint = []
for i in range(numberFib_UR):
fibPrint.append(round((((phi)**i)-((1-phi)**i)) / (math.sqrt(5))))
print(fibPrint)
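# For illustration: entering 7 above prints [0, 1, 1, 2, 3, 5, 8]
# (Binet's closed-form formula, rounded to the nearest integer).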
# 4. Write some code that, given a list, will tell you if a given value is in the list.
'''
COMPLETE
valList = [1,4,5,6,9,10]
if int(input("Enter Number\n>>")) in valList:
print("\nYes it is in the list!")
'''
# 5. Write some code that, given a list of 30 random numbers between 1 and 30, will print to the screen "Yahtzee!" if the first six multiples under 30 of any number 1-6 are all in the list. For example, 1, 2, 3, 4, 5, and 6 = yahtzee, 3, 6, 9, 12, 15 = yahtzee, etc.
'''
INCOMPLETE
ranList = [random.randint(1,30) for i in range(30)]
if X:
print("Yahtzee!")
'''
# 6. Write a function that accepts an array of numbers, and returns the sum of the numbers in the array, except sections of numbers starting with a 6 and extending to the next 7 will be ignored in the sum (every 6 will be followed by at least one 7). Return 0 for no numbers. For example: 1, 2, 3, 6, 4, 5, 7, 7 would return 13.
| 1,816 |
tests/test_micropolygon.py
|
bracket/handsome
| 0 |
2170187
|
from handsome.Micropolygon import Micropolygon
from handsome.util import point
import numpy as np
def test_micropolygon():
m = Micropolygon(
point(0, 0),
point(1, 0),
point(1, 1),
point(2, 1),
)
tests = [
[ (.5 , .5) , np.array([ 1, .5 , 1, 1 ]), ],
[ (.25, .75), np.array([ 1, .75, 1, 1 ]), ],
[ (0. , 0.) , np.array([ 0, 0 , 1, 1 ]), ],
[ (1. , 0.) , np.array([ 1, 0 , 1, 1 ]), ],
[ (0. , 1.) , np.array([ 1, 1 , 1, 1 ]), ],
[ (1. , 1.) , np.array([ 2, 1 , 1, 1 ]), ],
]
for input, expected in tests:
actual = m(*input)
np.testing.assert_array_equal(actual, expected)
| 693 |
sokoban/map_entities/__init__.py
|
JacobChen258/AI-Markov-Probability
| 0 |
2171140
|
from .map_entity import MapEntity
from .entity_types import EntityType
from .player import Player
from .box import Box
from .armory_point import ArmoryPoint
from .monster import Monster
from .random_monster import RandomMonster
from .chase_monster import ChaseMonster
from .entity_grid import EntityGrid
from .mouse import Mouse
| 329 |
way/python/exercises/codewars/codewars005.py
|
only-romano/junkyard
| 0 |
2171806
|
#! Shortest Word
def find_short(s):
return min([len(w) for w in s.split()])
assert(find_short("bitcoin take over the world maybe who knows perhaps"), 3)
assert(find_short("turns out random test cases are easier than writing out basic ones"), 3)
assert(find_short("lets talk about javascript the best language"), 3)
assert(find_short("i want to travel the world writing code one day"), 1)
assert(find_short("Lets all go on holiday somewhere very cold"), 2)
| 493 |
rx_registration/views.py
|
shezi/django-rx-registration
| 0 |
2169450
|
# -*- encoding: utf-8 -*-
from django.contrib import messages
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth import get_user_model
from django.core.mail import send_mail
from django.db import transaction
from django.shortcuts import render, redirect
from django.template import Context
from django.template.loader import get_template
from django.utils.translation import gettext as _  # needed for the message in v_logout
from .forms import RegistrationForm, LoginForm
from .utils import rxsettings
@transaction.atomic
def register(request):
"""Register a new user."""
data = {}
initial = {}
data['form'] = RegistrationForm(initial=initial)
if request.method == 'POST':
data['form'] = form = RegistrationForm(request.POST)
if form.is_valid():
user = get_user_model()(
username=form.cleaned_data['username'],
email=form.cleaned_data['email'],
)
user.set_password(form.cleaned_data['<PASSWORD>'])
user.save()
user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['<PASSWORD>'])
login(request, user)
if rxsettings.confirm_registration:
subject_template = get_template('rx-registration/register_subject.djtxt')
text_template = get_template('rx-registration/register_text.djtxt')
ctx = Context({
'user': user,
})
subject = subject_template.render(ctx)
text = text_template.render(ctx)
email_from = rxsettings.confirm_registration_from
try:
send_mail(
subject, text,
email_from,
[user.email], fail_silently=False)
except IOError:
# TODO: signal this to the user, probably?
pass
return redirect(rxsettings.redirect_after_register)
return render(request, 'rx-registration/register.djhtml', data)
def v_login(request):
data = {}
data['registration_form'] = RegistrationForm()
data['form'] = LoginForm()
if request.method == 'POST':
data['form'] = form = LoginForm(request.POST)
if form.is_valid():
login(request, form.user)
return redirect(rxsettings.redirect_after_login)
return render(request, 'rx-registration/login.djhtml', data)
def v_logout(request):
messages.info(request, _('You have been logged out.'))
logout(request)
return redirect(rxsettings.redirect_after_logout)
| 2,625 |
yurt/lxc/util.py
|
ckmetto/yurt
| 2 |
2169797
|
import logging
import os
from typing import List, Dict
import pylxd
from yurt import config
from yurt import vm
from yurt.exceptions import LXCException, VMException
NETWORK_NAME = "yurt-int"
PROFILE_NAME = "yurt"
REMOTES = {
"images": {
"Name": "images",
"URL": "https://images.linuxcontainers.org",
},
"ubuntu": {
"Name": "ubuntu",
"URL": "https://cloud-images.ubuntu.com/releases",
},
}
def _setup_yurt_socat():
name = "yurt-lxd-socat"
vm.run_cmd("mkdir -p /tmp/yurt")
tmp_unit_file = f"/tmp/yurt/{name}.service"
installed_unit_file = f"/etc/systemd/system/{name}.service"
vm.run_cmd("sudo apt install socat -y")
vm.put_file(os.path.join(config.provision_dir,
f"{name}.service"), tmp_unit_file)
vm.run_cmd(f"sudo cp {tmp_unit_file} {installed_unit_file}")
vm.run_cmd("sudo systemctl daemon-reload")
vm.run_cmd(f"sudo systemctl enable {name}")
vm.run_cmd(f"sudo systemctl start {name}")
def get_pylxd_client():
lxd_port = config.get_config(config.Key.lxd_port)
try:
return pylxd.Client(endpoint=f"http://127.0.0.1:{lxd_port}")
except pylxd.exceptions.ClientConnectionFailed as e:
logging.debug(e)
raise LXCException(
"Error connecting to LXD. Try restarting the VM: 'yurt vm restart'")
def get_instance(name: str):
client = get_pylxd_client()
try:
return client.instances.get(name) # pylint: disable=no-member
except pylxd.exceptions.NotFound:
raise LXCException(f"Instance {name} not found.")
except pylxd.exceptions.LXDAPIException:
raise LXCException(
f"Could not fetch instance {name}. API Error.")
def is_initialized():
return config.get_config(config.Key.is_lxd_initialized)
def get_ip_config():
from ipaddress import ip_interface
host_ip_address = config.get_config(
config.Key.interface_ip_address)
network_mask = config.get_config(
config.Key.interface_netmask)
if not (host_ip_address and network_mask):
raise LXCException("Bad IP Configuration. ip: {0}, mask: {1}".format(
host_ip_address, network_mask))
full_host_address = ip_interface(
"{0}/{1}".format(host_ip_address, network_mask))
bridge_address = ip_interface(
"{0}/{1}".format((full_host_address + 1).ip, network_mask)).exploded
return {
"bridgeAddress": bridge_address,
"dhcpRangeLow": (full_host_address + 10).ip.exploded,
"dhcpRangeHigh": (full_host_address + 249).ip.exploded
}
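# For illustration (hypothetical config): a host interface of 192.168.132.1 with
# netmask 255.255.255.0 yields a bridge address of 192.168.132.2/24 and a DHCP
# range of 192.168.132.11 - 192.168.132.250.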
def initialize_lxd():
if is_initialized():
return
try:
with open(os.path.join(config.provision_dir, "lxd-init.yaml"), "r") as f:
init = f.read()
except OSError as e:
raise LXCException(f"Error reading lxd-init.yaml {e}")
try:
logging.info("Updating package information...")
vm.run_cmd("sudo apt update", show_spinner=True)
vm.run_cmd("sudo usermod yurt -a -G lxd")
logging.info("Initializing LXD...")
vm.run_cmd(
"sudo lxd init --preseed",
stdin=init,
show_spinner=True
)
_setup_yurt_socat()
logging.info("Done.")
config.set_config(config.Key.is_lxd_initialized, True)
except VMException as e:
logging.error(e)
logging.error("Restart the VM to try again: 'yurt vm restart'")
raise LXCException("Failed to initialize LXD.")
def check_network_config():
client = get_pylxd_client()
if client.networks.exists(NETWORK_NAME): # pylint: disable=no-member
return
logging.info("Configuring network...")
ip_config = get_ip_config()
bridge_address = ip_config["bridgeAddress"]
dhcp_range_low = ip_config["dhcpRangeLow"]
dhcp_range_high = ip_config["dhcpRangeHigh"]
client.networks.create( # pylint: disable=no-member
NETWORK_NAME, description="Yurt Network", type="bridge",
config={
"bridge.external_interfaces": "enp0s8",
"ipv6.address": "none",
"ipv4.nat": "true",
"ipv4.dhcp": "true",
"ipv4.dhcp.expiry": "24h",
"ipv4.address": bridge_address,
"ipv4.dhcp.ranges": f"{dhcp_range_low}-{dhcp_range_high}",
"dns.domain": config.app_name
})
def check_profile_config():
client = get_pylxd_client()
if client.profiles.exists(PROFILE_NAME): # pylint: disable=no-member
return
logging.info("Configuring profile...")
client.profiles.create( # pylint: disable=no-member
PROFILE_NAME,
devices={
"eth0": {
"name": "eth0",
"nictype": "bridged",
"parent": NETWORK_NAME,
"type": "nic"
},
"root": {
"type": "disk",
"pool": "yurtpool",
"path": "/"
}
}
)
def shortest_alias(aliases: List[Dict[str, str]], remote: str):
import re
aliases = list(map(lambda a: str(a["name"]), aliases))
if remote == "ubuntu":
aliases = list(filter(lambda a: re.match(
r"^\d\d\.\d\d", a), aliases))
try:
alias = aliases[0]
for a in aliases:
if len(a) < len(alias):
alias = a
return alias
except (IndexError, KeyError) as e:
logging.debug(e)
logging.error(f"Unexpected alias schema: {aliases}")
def filter_remote_images(images: List[Dict]):
aliased = filter(lambda i: i["aliases"], images)
container = filter(
lambda i: i["type"] == "container", aliased)
x64 = filter(
lambda i: i["architecture"] == "x86_64", container)
return x64
def get_remote_image_info(remote: str, image: Dict):
try:
return {
"Alias": shortest_alias(image["aliases"], remote),
"Description": image["properties"]["description"]
}
except KeyError as e:
logging.debug(e)
logging.debug(f"Unexpected image schema: {image}")
def exec_interactive(instance_name: str, cmd: List[str], environment=None):
from . import term
instance = get_instance(instance_name)
response = instance.raw_interactive_execute(cmd, environment=environment)
lxd_port = config.get_config(config.Key.lxd_port)
try:
ws_url = f"ws://127.0.0.1:{lxd_port}{response['ws']}"
term.run(ws_url)
except KeyError as e:
raise LXCException(f"Missing ws URL {e}")
def unpack_download_operation_metadata(metadata):
if metadata:
if "download_progress" in metadata:
return f"Download progress: {metadata['download_progress']}"
if "create_instance_from_image_unpack_progress" in metadata:
return f"Unpack progress: {metadata['create_instance_from_image_unpack_progress']}"
else:
return ""
def follow_operation(operation_uri: str, unpack_metadata=None):
"""
Params:
operation_uri: URI of the operation to follow.
unpack_metadata: Function to unpack the operation's metadata. Return a line of text to summarize
the current progress of the operation.
If not given, progress will not be shown.
"""
import time
from yurt.util import retry
operations = get_pylxd_client().operations
# Allow time for operation to be created.
try:
retry(
lambda: operations.get(operation_uri), # pylint: disable=no-member
retries=10,
wait_time=0.5
)
operation = operations.get(operation_uri) # pylint: disable=no-member
except pylxd.exceptions.NotFound:
raise LXCException(
f"Timed out while waiting for operation to be created.")
logging.info(operation.description)
while True:
try:
operation = operations.get( # pylint: disable=no-member
operation_uri
)
if unpack_metadata:
print(f"\r{unpack_metadata(operation.metadata)}", end="")
time.sleep(0.5)
except pylxd.exceptions.NotFound:
print("\nDone")
break
except KeyboardInterrupt:
break
| 8,341 |
examples/mycroft_master.py
|
flo-mic/HiveMind-core
| 43 |
2171786
|
from jarbas_hive_mind import get_listener
from jarbas_hive_mind.configuration import CONFIGURATION
def start_mind(config=None, bus=None):
config = config or CONFIGURATION
# listen
listener = get_listener(bus=bus)
# use http
# config["ssl"]["use_ssl"] = False
# read port and ssl settings
listener.load_config(config)
listener.listen()
if __name__ == '__main__':
# TODO argparse
start_mind()
# that's it, now external applications can connect to the HiveMind
# use configuration to set things like
# - blacklisted/whitelisted ips
# - blacklisted/whitelisted message_types
# - blacklisted/whitelisted intents - Coming soon
# - blacklisted/whitelisted skills - Coming soon
# you can send messages to the mycroft bus to send/broadcast to clients
# 'Message(hive.client.broadcast',
# {"payload":
# {"msg_type": "speak",
# "data": {"utterance": "Connected to the HiveMind"}
# })
# or you can listen to hive mind events
# "hive.client.connection.error"
# "hive.client.connect"
# "hive.client.disconnect"
# "hive.client.send.error"
| 1,198 |
Tutorials/10 Days of Statistics/Day 7/spearman_rank_correlation.py
|
xuedong/hacker-rank
| 1 |
2171511
|
#!/bin/python3
def get_rank(arr):
n = len(arr)
rank = [0 for i in range(n)]
for i in range(n):
rank_i = 0
for j in range(n):
if arr[j] > arr[i]:
rank_i += 1
rank[i] = rank_i + 1
return rank
def spearman(rank1, rank2, n):
total = 0
for i in range(n):
total += (rank1[i] - rank2[i]) ** 2
return 1 - 6*total/(n*(n**2-1))
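# Illustrative sanity check (hypothetical data): perfectly reversed orderings
# give a Spearman rank correlation of -1.
assert spearman(get_rank([1, 2, 3]), get_rank([3, 2, 1]), 3) == -1.0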
n = int(input())
arr1 = [float(arr_i) for arr_i in input().strip().split(' ')]
arr2 = [float(arr_i) for arr_i in input().strip().split(' ')]
print(round(spearman(get_rank(arr1), get_rank(arr2), n), 3))
| 620 |
code/groups.py
|
ron-rivest/audit-lab
| 3 |
2171615
|
# groups.py
# <NAME>
# July 25, 2017 (rev Sept. 21, 2017)
# python3
"""
This module implements "contest groups" for the post-election audit program
"OpenAuditTool.py".
"""
import warnings
def expand_contest_group_defs(e):
"""
Expand contest group definitions so that we have a definition
for each contest group purely in terms of its contests.
The input definitions are in e.cgids_g, which gives definition
of each contests group as an ordered list of cids and gids,
for each gid.
The output goes into e.cids_g, which gives just an ordered list
e.cids_g[gid] for the cids in each group.
This is a simple reachability computation in a directed graph,
using repeated depth-first search starting from each gid node.
A warning is printed if the gid graph has any cycles.
    When the graph is acyclic, this is just like doing a derivation of
a string in a context-free grammar, where the cids are the "terminals"
and the gids are the "nonterminals". Each gid generates exactly one
string.
The reason for using ordered lists here (as opposed to sets)
is that this may reflect the order in which the contests appear
on a ballot.
"""
e.cids_g = {}
for gid in e.gids:
gids = set()
cids = []
stack = []
reachable_from(e, gid, gids, cids, stack)
e.cids_g[gid] = cids
def reachable_from(e, gid, gids, cids, stack):
"""
Find all gids and cids reachable from initial
gid in 0 or more steps. Main output of interest
is the input list "cids", which has all reachable
contests appended to it.
This works even if the graph contains cycles.
Algorithm is depth-first search (DFS).
"""
if gid in gids:
if gid in stack:
warnings.warn("Group id {} is in a cycle!".format(gid))
return
gids.add(gid)
for cgid in e.cgids_g[gid]:
# Note: 'cgid' means 'contest or group id'
if cgid in e.cids:
cids.append(cgid)
else:
stack.append(gid)
reachable_from(e, cgid, gids, cids, stack)
stack.pop()
def expand_gids_in_list(e, L):
"""
Return list L with all gids replaced by their cid-list equivalent.
Here L is a list of mixed cid and gid identifiers.
Duplicates removed in output, of course.
The operation preserves the order of the portions
(like a contest-free grammar, if there are no cycles).
"""
ans = []
for cgid in L:
if cgid in e.cids:
ans.append(cgid)
else:
for cid in e.cids_g[cgid]:
ans.append(cid)
return ans
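def _example_group_expansion():
    """
    Minimal illustrative sketch (hypothetical election object, not part of any
    real audit): one group nested inside another, expanded to pure cid lists.
    """
    from types import SimpleNamespace

    e = SimpleNamespace(
        cids=["c1", "c2", "c3"],
        gids=["g1", "g2"],
        cgids_g={"g1": ["c1", "g2"], "g2": ["c2", "c3"]},
    )
    expand_contest_group_defs(e)
    assert e.cids_g == {"g1": ["c1", "c2", "c3"], "g2": ["c2", "c3"]}
    assert expand_gids_in_list(e, ["g2", "c1"]) == ["c2", "c3", "c1"]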
| 2,679 |
src/dal/audio/audiodatacache.py
|
pgecsenyi/piepy
| 1 |
2171725
|
import threading
from dal.cache import Cache
class AudioDataCache(Cache):
####################################################################################################################
# Constructor.
####################################################################################################################
def __init__(self):
### Private attributes.
# A cache that contains albums as album (string) => id (int) pairs.
self._album_cache = {}
# This lock is used for album cache synchronization.
self._album_cache_lock = threading.Lock()
# A cache that contains artist IDs as artist (string) => id (int) pairs.
self._artist_cache = {}
# This lock is used for artist cache synchronization.
self._artist_cache_lock = threading.Lock()
####################################################################################################################
# Public methods.
####################################################################################################################
def clear(self):
with self._artist_cache_lock:
self._artist_cache = {}
with self._album_cache_lock:
self._album_cache = {}
def get_album_id(self, album):
return self._get_value_from_simple_cache(self._album_cache, self._album_cache_lock, album)
def get_artist_id(self, artist):
return self._get_value_from_simple_cache(self._artist_cache, self._artist_cache_lock, artist)
def set_album_id(self, album, album_id):
self._store_item_in_simple_cache(self._album_cache, self._album_cache_lock, album, album_id)
def set_artist_id(self, artist, artist_id):
self._store_item_in_simple_cache(self._artist_cache, self._artist_cache_lock, artist, artist_id)
| 1,854 |
payit/gateways/dummy.py
|
meyt/pyment
| 0 |
2170780
|
import time
from payit import Gateway, Transaction, Redirection, TransactionError
class DummyGateway(Gateway):
"""
Dummy Gateway
"""
__gateway_name__ = "dummy"
__gateway_unit__ = "IRR"
__config_params__ = ["pin", "callback_url", "maximum_amount"]
__base_url__ = "https://dummy-gateway.localhost"
def _generate_id(self):
return int(time.time())
@property
def maximum_amount(self) -> int:
return int(self.config["maximum_amount"] or 1000000)
def get_redirection(self, transaction) -> Redirection:
result = Redirection(
url="/".join((self.__class__.__base_url__, str(transaction.id))),
method="get",
)
print("New redirection created: \n%s" % result.__repr__())
return result
def request_transaction(self, transaction: Transaction) -> Transaction:
if int(transaction.amount) > self.maximum_amount:
raise TransactionError(
"Amount is larger than %s" % self.maximum_amount
)
transaction.id = self._generate_id()
print("New transaction requested: \n%s" % transaction.__repr__())
return transaction
def validate_transaction(self, data: dict) -> Transaction:
transaction = Transaction()
transaction.id = data["id"]
transaction.meta = data
transaction.validate_status = data.get("validateStatus", True)
print("Transaction validated: \n%s" % transaction.__repr__())
return transaction
def verify_transaction(self, transaction: Transaction, data):
if data["id"] == "false":
raise TransactionError("Invalid transaction ID")
transaction.pan = data.get("cardNumber")
print("Transaction verified: \n%s" % transaction.__repr__())
return transaction
| 1,837 |
day19/19.py
|
stefsmeets/advent_of_code
| 0 |
2171536
|
from scipy.spatial.distance import cdist, pdist
from itertools import product
import numpy as np
filename = 'data.txt'
with open(filename) as f:
lines = f.readlines()
scanners = []
for line in lines:
line = line.strip()
if line.startswith('---'):
scanner = []
elif not line:
scanner = np.array(scanner)
scanners.append(scanner)
scanner = None
else:
scanner.append([int(val) for val in line.split(',')])
# add the last scanner (the input may not end with a blank line)
if scanner is not None and len(scanner):
    scanners.append(np.array(scanner))
vectors = [
np.array((1, 0, 0)),
np.array((-1, 0, 0)),
np.array((0, 1, 0)),
np.array((0, -1, 0)),
np.array((0, 0, 1)),
np.array((0, 0, -1)),
]
rotmats = []
for vi, vj in product(vectors, vectors):
if vi @ vj == 0:
vk = np.cross(vi, vj)
rotmat = np.array([vi, vj, vk])
rotmats.append(rotmat)
assert len(rotmats) == 24
OFFSETS = [np.array((0,0,0))]
def solve(beacons, scanners):
try_again = []
for i, other in enumerate(scanners):
best_rotmat = None
best_n_matches = 0
for rotmat in rotmats:
rotated = other @ rotmat
dist = cdist(beacons, rotated)
uniq, counts = np.unique(dist, return_counts=True)
i_max = counts.argmax()
n_matches = counts[i_max]
common_dist = uniq[i_max]
if n_matches > best_n_matches:
best_n_matches = n_matches
best_rotmat = dist, common_dist, rotated
if best_n_matches < 6:
try_again.append(other)
continue
dist, common_dist, rotated = best_rotmat
common_beacons = np.argwhere(dist == common_dist)
from_beacons, from_other = common_beacons.T
vector = (beacons[from_beacons] - rotated[from_other])[0]
beacons = np.vstack((beacons, rotated + vector))
beacons = np.unique(beacons, axis=0)
OFFSETS.append(vector)
if len(try_again) > 0:
beacons = solve(beacons, try_again)
return beacons
beacons = solve(scanners[0], scanners[1:])
print(f'part 1: {len(beacons)=}')
print(f"part 2: {pdist(OFFSETS, metric='cityblock').max()=}")
| 2,193 |
ssplanner/coresetup/models/owe_model.py
|
devrajvlpt/splitgenie
| 0 |
2171422
|
# -*- coding: utf-8 -*-
# python manage.py makemigrations your_app_label
# python manage.py migrate --fake-initial your_app_label
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from datetime import date, datetime
from django.conf import settings
class OweModel(models.Model):
"""List of splitted amount to each user under same topic
Attributes:
created_at (TYPE): Description
created_by (TYPE): Description
splitted_amount (TYPE): Description
splitted_user (TYPE): Description
updated_at (TYPE): Description
updated_by (TYPE): Description
"""
owed_amount = models.IntegerField()
owed_on = models.CharField(max_length=80, blank=True)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name= 'sa_ledger_created',
on_delete=models.CASCADE
)
updated_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name= 'sa_ledger_updated',
on_delete=models.CASCADE
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
| 1,356 |
src/test/test_sexp_writer.py
|
spielhuus/nukleus
| 0 |
2170562
|
import sys
import unittest

sys.path.append('src')
sys.path.append('../../src')

from nukleus.PCB import PCB
from nukleus.SexpWriter import SexpWriter
from nukleus.Schema import Schema
from nukleus.ModelBase import *
from nukleus.ModelSchema import *
from nukleus.ParserVisitor import ParserVisitor
from nukleus.SexpParser import *
class TestSexpWriter(unittest.TestCase):
def test_parse_summe(self):
with open('samples/files/summe_v6/main.kicad_sch', 'r') as infile:
schema_tree = load_tree(infile.read())
schema = Schema()
visitor = ParserVisitor(schema)
visitor.visit(schema_tree)
writer = SexpWriter()
schema.produce(writer)
self.maxDiff = None
with open('samples/files/summe_v6/main.kicad_sch', 'r') as file:
orig = file.read()
text = "".join([s for s in orig.splitlines(True) if s.strip("\r\n")])
result = "".join([s for s in str(writer).splitlines(True) if s.strip("\r\n")])
result += "\n"
self.assertEqual(text, result)
def test_parse_produkt_schema(self):
with open('samples/files/produkt/main.kicad_sch', 'r') as infile:
schema_tree = load_tree(infile.read())
schema = Schema()
visitor = ParserVisitor(schema)
visitor.visit(schema_tree)
writer = SexpWriter()
schema.produce(writer)
with open('samples/files/produkt/main.kicad_sch', 'r') as file:
orig = file.read()
text = "".join([s for s in orig.splitlines(True) if s.strip("\r\n")])
result = "".join([s for s in str(writer).splitlines(True) if s.strip("\r\n")])
result += "\n"
self.assertEqual(text, result)
def test_parse_all_elements(self):
with open('samples/files/all_elements/all_elements.kicad_sch', 'r') as infile:
schema_tree = load_tree(infile.read())
writer = SexpWriter()
visitor = ParserVisitor(writer)
visitor.visit(schema_tree)
with open('samples/files/all_elements/all_elements.kicad_sch', 'r') as file:
orig = file.read()
text = "".join([s for s in orig.splitlines(True) if s.strip("\r\n")])
result = "".join([s for s in str(writer).splitlines(True) if s.strip("\r\n")])
result += "\n"
self.maxDiff = None
self.assertEqual(text, result)
def test_parse_all_elements_schema(self):
with open('samples/files/all_elements/all_elements.kicad_sch', 'r') as infile:
schema_tree = load_tree(infile.read())
schema = Schema()
visitor = ParserVisitor(schema)
visitor.visit(schema_tree)
writer = SexpWriter()
schema.produce(writer)
with open('samples/files/all_elements/all_elements.kicad_sch', 'r') as file:
orig = file.read()
text = "".join([s for s in orig.splitlines(True) if s.strip("\r\n")])
result = "".join([s for s in str(writer).splitlines(True) if s.strip("\r\n")])
result += "\n"
self.maxDiff = None
self.assertEqual(text, result)
def test_parse_produkt_pcb(self):
with open('samples/files/produkt/main.kicad_pcb', 'r') as infile:
schema_tree = load_tree(infile.read())
schema = PCB()
visitor = ParserVisitor(schema)
visitor.visit(schema_tree)
writer = SexpWriter()
schema.produce(writer)
self.maxDiff = None
with open('samples/files/produkt/main.kicad_pcb', 'r') as file:
orig = file.read()
text = "".join([s for s in orig.splitlines(True) if s.strip("\r\n")])
result = "".join([s for s in str(writer).splitlines(True) if s.strip("\r\n")])
result += "\n"
self.assertEqual(text, result)
| 4,194 |
core/config.py
|
bontchev/CitrixHoneypot
| 1 |
2171663
|
import configparser
from os import environ
def to_environ_key(key):
return key.upper()
class EnvironmentConfigParser(configparser.ConfigParser):
def has_option(self, section, option):
if to_environ_key('_'.join((section, option))) in environ:
return True
return super(EnvironmentConfigParser, self).has_option(section, option)
def get(self, section, option, **kwargs):
key = to_environ_key('_'.join((section, option)))
if key in environ:
return environ[key]
return super(EnvironmentConfigParser, self).get(section, option, **kwargs)
def readConfigFile(cfgfile):
"""
Read config files and return ConfigParser object
@param cfgfile: filename or array of filenames
@return: ConfigParser object
"""
parser = EnvironmentConfigParser(interpolation=configparser.ExtendedInterpolation())
parser.read(cfgfile)
return parser
CONFIG = readConfigFile(('etc/honeypot.cfg.base', 'etc/honeypot.cfg', 'honeypot.cfg'))
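# Sketch of the environment-override behaviour implemented above: an option is
# looked up as SECTION_OPTION, upper-cased, before falling back to the config
# files. The section/option names below are examples, not keys that necessarily
# exist in the shipped honeypot.cfg files.
if __name__ == '__main__':
    import os
    os.environ['HONEYPOT_LOG_PATH'] = '/tmp/honeypot.log'
    # The environment wins even if the files define a different [honeypot] log_path.
    print(CONFIG.get('honeypot', 'log_path'))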
| 1,023 |
Tests/Challenges/test_omega_2013.py
|
dev-11/codility-solutions
| 0 |
2170843
|
import unittest
from Solutions import Challenges
class Omega2013Tests(unittest.TestCase):
def test_omega_2013_example_test_01(self):
a = [5, 6, 4, 3, 6, 2, 3]
b = [2, 3, 5, 2, 4]
res = Challenges.omega_2013(a, b)
self.assertEqual(4, res)
| 276 |
pun/echo.py
|
Unviray/pun
| 2 |
2170477
|
"""
pun.echo
========
Print something to the console.
"""
from click import style
def fail(msg, task_name=None, indent=25):
title = style('[ fail ]', fg='red')
if task_name is not None:
name = style(task_name, fg='bright_red', bold=True)
else:
name = ''
n = name.ljust(indent)
print(f'{title} {n} {msg}')
def success(task_name):
title = style('[ success ]', fg='green')
name = style(task_name, fg='bright_green', bold=True)
print(f'{title} {name}')
def echo(task_name, msg, indent=25):
name = style(task_name, bold=True)
n = name.ljust(indent)
print(f'> {n} {msg}')
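# Small usage sketch; the task names and messages below are arbitrary examples.
if __name__ == '__main__':
    echo('build', 'compiling sources...')
    success('build')
    fail('could not reach registry', task_name='publish')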
| 637 |
mail_script/client.py
|
sasakalaba/grbic-mail-script
| 0 |
2170902
|
import os
import io
from apiclient.discovery import build
from apiclient.http import MediaIoBaseDownload
from httplib2 import Http
from oauth2client import file, client, tools
from django.conf import settings
class Client(object):
"""
Main wrapper for handling requests and responses to the Google Drive API.
"""
SCOPES = 'https://www.googleapis.com/auth/drive.file'
def __init__(self, *args, **kwargs):
"""
Initialize the API.
"""
store = file.Storage('credentials.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('client_secret.json', self.SCOPES)
creds = tools.run_flow(flow, store)
self.service = build('drive', 'v3', http=creds.authorize(Http()))
self.temp_path = os.path.join(settings.MEDIA_ROOT, 'temp')
def get_data(self):
"""
Fetches data from Google Drive and registers it in the database.
"""
        # Drive v3 query syntax uses "'<folder-id>' in parents" to filter by parent folder.
        query = (
            "'%s' in parents and mimeType='application/vnd.google-apps.folder'"
            % settings.GD_ROOT_DIR_ID
        )
results = self.service.files().list(q=query).execute()
return results.get('files', [])
def download_csv(self, file_id, dir_path, filename):
"""
Download file by id.
"""
request = self.service.files().get_media(fileId=file_id)
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
filepath = os.path.join(dir_path, filename)
with open(filepath, 'wb') as output:
output.write(fh.getvalue())
return filepath
def get_csv(self, directory):
"""
Downloads files by id from Google Drive.
"""
paths = {}
# Create the download directory.
download_dir = os.path.join(self.temp_path, directory.title)
if not os.path.exists(download_dir):
os.makedirs(download_dir)
# Download files.
paths['urls_path'] = self.download_csv(
directory.urls_id, download_dir, 'urls.csv')
paths['emails_path'] = self.download_csv(
directory.emails_id, download_dir, 'emails.csv')
return paths
| 2,349 |
numcodecs/tests/test_registry.py
|
Czaki/numcodecs
| 60 |
2169641
|
import pytest
from numcodecs.registry import get_codec
def test_registry_errors():
with pytest.raises(ValueError):
get_codec({'id': 'foo'})
def test_get_codec_argument():
# Check that get_codec doesn't modify its argument.
arg = {"id": "json2"}
before = dict(arg)
get_codec(arg)
assert before == arg
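# A hedged usage example (not from the original suite): 'zlib' is assumed to be
# one of the codec ids registered by default in numcodecs.
def test_get_codec_zlib_example():
    codec = get_codec({'id': 'zlib', 'level': 1})
    assert codec.codec_id == 'zlib'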
| 338 |
bin/run.py
|
OliWenman/geant4tomosim
| 4 |
2170079
|
"""
Short script for running G4TomoSim with or without visualization
and saving the data automatically with the simulatetomography function.
"""
# -*- coding: utf-8 -*-
import os
this_directory = os.path.dirname(os.path.realpath(__file__))
import sys
sys.path.insert(0, this_directory + '/../settings')
import tomosim_input as tsi
#Can optionally add a file path and name for the data to be saved when running script
if __name__ == '__main__':
defaultpath = this_directory + "/../output/tomosimData.nxs"
    #Get the filepath from the first argument
try:
filepath = sys.argv[1]
#If no filepath given, save the data in the default filepath
except IndexError:
filepath = defaultpath
print ("\nSaving data in the default place", filepath)
#===================================================================
import random
#RUN THE SIMULATION
from g4tomosim import G4TomoSim
tomosim = G4TomoSim(tsi.verbose,
tsi.interactive)
tomosim.execute_macrolist(tsi.macrofiles)
    tomosim.set_seed(random.randint(1, int(1e9)))
"""
    #For visualization, make sure there aren't many pixels otherwise it will crash
#HepRApp viewer
tomosim.execute_command("/detector/absorption/xpixels 5")
tomosim.execute_command("/detector/absorption/ypixels 3")
#Creates a directory to save all the vis data. Each projection simulated creates a
#new vis file.
tomosim.setup_visualization(path = this_directory + "/../output/",
filename = "simdata_vis")
tomosim.simulatetomography(filepath = filepath,
n_particles = 100,
rotation_angles = tsi.rotation_angles,
n_darkflatfields = tsi.ndarkflatfields,
zpositions = tsi.zpos)
"""
tomosim.simulatetomography(filepath = filepath,
n_particles = tsi.particles,
rotation_angles = tsi.rotation_angles,
n_darkflatfields = tsi.ndarkflatfields,
zpositions = tsi.zpos)
print ("Finished")
| 2,175 |
cart/admin.py
|
spaceofmiah/ecommerce
| 2 |
2171131
|
from django.contrib import admin
from cart.models import Cart, CartItem
# Register your models here.
class CartItemAdmin(admin.ModelAdmin):
list_display = ['__str__', 'date_added']
class CartAdmin(admin.ModelAdmin):
list_display = ['__str__', 'get_total_amount']
admin.site.register(CartItem, CartItemAdmin)
admin.site.register (Cart, CartAdmin)
| 359 |
optimization_tvm/test.py
|
ryujaehun/chameleon
| 1 |
2171641
|
#!/usr/bin/env python
import subprocess,time,datetime,os
from tqdm import tqdm
networks=['vgg-16','resnet-34']
target='llvm '
batchs=['4']
opts=['0','1','2','3','4']
tuners=['xgb','ga','random','gridsearch']
n_trials=['400','2000']
basetext='''#!/bin/bash
#SBATCH -J _name
#SBATCH -o _time/%j._name.out
#SBATCH -t 1-20:00:00
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --tasks-per-node=1
#SBATCH --cpus-per-task=4
#SBATCH -p cpu-xg6230
#SBATCH --nodelist=n14
set echo on
cd $SLURM_SUBMIT_DIR
echo "SLURM_SUBMIT_DIR=$SLURM_SUBMIT_DIR"
srun -l /bin/hostname
srun -l /bin/pwd
srun -l /bin/date
module purge
module load postech
date
source env/bin/activate
python3 tune_relay_x86.py --opt_level=_opt --n_trial=_n_trial --network=_network --batch=_batch --tuner=_tuner --time=_time
/home/jaehunryu/linux/tools/perf/perf stat -d -d -d python3 /home/jaehunryu/workspace/tvm/optimization_tvm/flops.py --path=_time --batch=_batch
squeue --job $SLURM_JOBID
'''
script = 'sbatch slurm.sh'
_list=[]
for network in networks:
for batch in batchs:
for opt in opts:
for tuner in tuners:
for trial in n_trials:
_list.append([network,batch,opt,tuner,trial])
for idx,pack in enumerate(_list):
network,batch,opt,tuner,trial=pack
    _name='network_'+network+'_batch_'+batch+'_optlevel_'+opt+'_tuner_'+tuner+'_trials_'+trial
_time="_".join(str(datetime.datetime.now()).split())
_time=os.path.join('/home/jaehunryu/workspace/tvm/optimization_tvm/results/llvm',_time)
os.makedirs(_time,exist_ok=True)
text=basetext
text=text.replace('_opt',opt)
text=text.replace('_n_trial',trial)
text=text.replace('_network',network)
text=text.replace('_batch',batch)
text=text.replace('_tuner',tuner)
text=text.replace('_time',_time)
text=text.replace('_name',_name)
num=subprocess.Popen("squeue|grep jaehun|wc -l", shell=True, stdout=subprocess.PIPE).stdout.read()
num=int(num.decode("utf-8")[:-1])
while num>20:
num=subprocess.Popen("squeue|grep jaehun|wc -l", shell=True, stdout=subprocess.PIPE).stdout.read()
num=int(num.decode("utf-8")[:-1])
time.sleep(10)
with open('/home/jaehunryu/workspace/tvm/optimization_tvm/slurm.sh', 'w') as f:
f.write(text)
time.sleep(31)
    proc = subprocess.Popen(script, shell=True, executable='/bin/bash')
proc.communicate()
| 2,474 |
plotAggregatedMem.py
|
hsabiu/thesis-scripts
| 0 |
2171448
|
# Author: <NAME>
# Date: March 31, 2017
# Purpose: Script to plot aggregated memory load logs generated by Ganglia
# Copyright: Any person may adapt this script to their specific needs
from matplotlib.font_manager import FontProperties
from datetime import datetime, timedelta
import csv
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
with open('aggMemory.csv', 'rb') as f:
reader = csv.reader(f)
data_list = list(reader)
time_stamp_list = []
total_free_memory_list = []
total_memory_list = []
total_used_memory_list = []
for index, line in enumerate(data_list):
if index > 0:
time_stamp = line[0]
        total_free_memory = sum(float(line[i]) for i in range(1, 23, 2))
        total_memory = sum(float(line[i]) for i in range(2, 24, 2))
total_used_memory = total_memory - total_free_memory
time_stamp_list.append(datetime.strptime(time_stamp[0:19], "%Y-%m-%dT%H:%M:%S"))
total_free_memory_list.append(int(total_free_memory / 1000000))
total_memory_list.append(int(total_memory / 1000000))
total_used_memory_list.append(int(total_used_memory / 1000000))
fig, ax = plt.subplots()
plt.title("Image format conversion - YARN", fontsize=20)
plt.ylabel("Memory (GB)", fontsize=15)
plt.xlabel("Timestamp", fontsize=15)
#plt.xlim([datetime(2017, 02, 01, 14, 22, 00), datetime(2017, 02, 01, 14, 35, 00)])
plt.ylim([-5, 250])
#ax.xaxis.set_major_locator(mdates.SecondLocator(interval=60))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
ax.plot_date(time_stamp_list, total_memory_list, fmt="r-", label="total memory", )
#ax.plot_date(time_stamp_list, total_free_memory_list, fmt="g-.", label="free memory", )
ax.plot_date(time_stamp_list, total_used_memory_list, fmt="b-*", label="used memory")
# font of the legend
fontP = FontProperties()
fontP.set_size('medium')
ax.legend(loc='upper right', shadow=False, ncol=3, prop=fontP)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
ax.grid(True)
fig.autofmt_xdate()
plt.show()
| 2,324 |
loldb/champion.py
|
Met48/League-of-Legends-DB
| 2 |
2171572
|
import collections
import warnings
from inibin import Inibin
from .ability import Ability
from .skin import get_skins_for_champion
from .util import alias
class Lore(object):
body = ''
quote = ''
quote_author = ''
def __init__(self, body='', quote='', quote_author=''):
self.body = body.replace("''", '"')
self.quote = quote
self.quote_author = quote_author
@staticmethod
def _correct_description(desc):
"""
Correct errors in description text.
Descriptions often contain two single
quotes ('') instead of one double quote (").
"""
return desc.replace("''", '"')
def update_from_sql_row(self, row):
self.body = self._correct_description(row.description)
self.quote = row.quote
self.quote_author = row.quoteAuthor
class Ratings(object):
attack = 0
defense = 0
magic = 0
difficulty = 0
def update_from_sql_row(self, row):
self.attack = row.ratingAttack
self.defense = row.ratingDefense
self.magic = row.ratingMagic
self.difficulty = row.ratingDifficulty
ChampionStat = collections.namedtuple('ChampionStat', 'base per_level')
class ChampionStats(object):
hp = ChampionStat(0, 0)
hp5 = ChampionStat(0, 0)
mana = ChampionStat(0, 0)
mp5 = ChampionStat(0, 0)
damage = ChampionStat(0, 0)
attack_speed = ChampionStat(0, 0)
armor = ChampionStat(0, 0)
magic_resist = ChampionStat(0, 0)
range = 0
speed = 0
# TODO: These methods are specific to champion inibins, should they be here?
@staticmethod
def _create_stat(inibin_map, key):
"""Read base and per_level values from inibin map."""
stat = inibin_map['stats'][key]
return ChampionStat(stat['base'], stat['per_level'])
def update_from_inibin(self, inibin_map):
# TODO: Better error checking
self.hp = self._create_stat(inibin_map, 'hp')
self.hp5 = self._create_stat(inibin_map, 'hp5')
self.mana = self._create_stat(inibin_map, 'mana')
self.mp5 = self._create_stat(inibin_map, 'mp5')
self.range = inibin_map['stats']['range']
self.damage = self._create_stat(inibin_map, 'dmg')
self.attack_speed = self._create_stat(inibin_map, 'aspd')
self.armor = self._create_stat(inibin_map, 'armor')
self.magic_resist = self._create_stat(inibin_map, 'mr')
self.speed = inibin_map['stats']['speed']
_STATS = (
'hp',
'hp5',
'mana',
'mp5',
'damage',
'attack_speed',
'armor',
'magic_resist',
'range',
'speed',
)
def __repr__(self):
return '<ChampionStats %s>' % ' '.join(
'%s=%s' % (key, getattr(self, key))
for key in self._STATS
)
class Champion(object):
"""
Represents a champion.
Instance variables:
id: unique integer id for the champion
internal_name: unique string id for the champion
name: string name of the champion
alias: string name usable as a variable name
title: string title of the champion
icon_path: string name of the champion icon file
select_sound_path
stats: ChampionStats instance
lore: Lore instance
ratings: Ratings instance
tips_as: List of string tips for players of champion
tips_against: List of string tips for players against champion
tags: Set of string tags categorizing champion
abilities: List of Ability instances
skins: List of Skin instances
"""
id = -1
internal_name = ''
name = ''
alias = ''
title = ''
icon_path = ''
select_sound_path = ''
_inibin = None
def __init__(self, internal_name):
self.internal_name = internal_name
self.stats = ChampionStats()
self.lore = Lore()
self.ratings = Ratings()
self.tips_as = []
self.tips_against = []
self.tags = set()
self.abilities = []
self.skins = []
def __lt__(self, other):
return self.id < other.id
def __repr__(self):
return '<Champion {} \'{}\'>'.format(self.id, self.name)
def _find_inibin(provider, pattern):
raf_master = provider.get_raf_master()
raf_results = list(raf_master.find_re(pattern))
if not raf_results:
warnings.warn('No inibin for %s' % pattern)
if len(raf_results) > 1:
# TODO: Is this ever triggered?
warnings.warn('Ambiguous inibin for %s' % pattern)
try:
inibin = Inibin(data=raf_results[0].read())
except Exception:
warnings.warn('Malformed inibin for %s' % pattern)
inibin = None
return inibin
def _get_tips_from_string(tips_str):
"""Get list of tips from tips string."""
return [tip.strip() for tip in tips_str.split('*') if tip]
def _get_raw_champion_from_sql_row(row):
"""
Create a Champion using a row from the champions table in the database.
Champion will be incomplete as stats and abilities are only available
    using inibins.
"""
# TODO: videos? selection sound name?
# row.name is the internal name
champion = Champion(row.name)
# row.displayName is the public name
champion.name = row.displayName
# Misc
champion.alias = alias(row.displayName)
champion.title = row.title
champion.id = row.id
champion.icon_path = row.iconPath
champion.tags = set(row.tags.split(','))
champion.lore.update_from_sql_row(row)
champion.ratings.update_from_sql_row(row)
# Tips
champion.tips_as = _get_tips_from_string(row.tips)
champion.tips_against = _get_tips_from_string(row.opponentTips)
return champion
def _update_raw_champion_with_provider(champion, provider):
"""Update champion stats and abilities."""
# Find champion inibin
champ_name = champion.internal_name
champ_pattern = '^data/characters/{0}/{0}.inibin$'.format(champ_name)
champ_inibin = _find_inibin(provider, champ_pattern)
if champ_inibin is None:
warnings.warn('Missing inibin for champion %s' % champ_name)
return
# Format as champion inibin
font_config = provider.get_font_config()
champ_inibin = champ_inibin.as_champion(font_config)
champion._inibin = champ_inibin
# Read stats
champion.stats.update_from_inibin(champ_inibin)
# Find abilities
_update_champion_passive(champion, champ_inibin)
_update_champion_abilities(provider, champion, champ_inibin['abilities'])
# Find skins
_update_champion_skins(provider, champion)
def _update_champion_passive(champion, inibin):
# Passive
passive = Ability()
passive.name = passive.internal_name = inibin['passive']
passive.description = passive.tooltip = inibin['passive_desc']
passive.icon_path = inibin['passive_icon']
champion.abilities.append(passive)
_ABILITY_KEYS = list('skill%d' % i for i in range(1, 5))
def _update_champion_abilities(provider, champion, abilities):
# Skills
abilities = [abilities[key] for key in _ABILITY_KEYS]
for i, ability_name in enumerate(abilities):
# Find inibin for ability
ability_inibin = _find_ability_inibin(provider, champion, ability_name)
if ability_inibin is None:
continue
# Format as ability inibin
font_config = provider.get_font_config()
ability_inibin = ability_inibin.as_ability(font_config)
ability = Ability.from_inibin(ability_inibin, i)
if ability is not None:
champion.abilities.append(ability)
_ABILITY_REGEXP_TEMPLATE = r"^data/(?:characters/{0}/)?spells/{1}.inibin$"
def _find_ability_inibin(provider, champion, ability_name):
# Find ability inibin
champ_name = champion.internal_name
ability_pattern = _ABILITY_REGEXP_TEMPLATE.format(champ_name, ability_name)
ability_inibin = _find_inibin(provider, ability_pattern)
if ability_inibin is None:
warnings.warn('Missing inibin for ability %s' % ability_name)
return ability_inibin
def _update_champion_skins(provider, champion):
skins = get_skins_for_champion(provider, champion.id)
skins = sorted(list(skins))
champion.skins = skins
def get_champions(provider):
for row in provider.get_db_rows('champions'):
champion = _get_raw_champion_from_sql_row(row)
_update_raw_champion_with_provider(champion, provider)
yield champion
| 8,474 |
tutorial/cytoscape/__init__.py
|
blozano824/dash-docs
| 1 |
2171592
|
from . import applications_chapter
from . import callbacks_chapter
from . import elements_chapter
from . import events_chapter
from . import layout_chapter
from . import reference_chapter
from . import styling_chapter
| 218 |
nb_test.py
|
jtsen/promotion_lab
| 0 |
2170108
|
import nb_build
from nb_preprocessing import *
from nb_evaluation import *
'''Importing dataset and remove headers'''
data = open_csv_file(r'C:\Users\stalk\PycharmProjects\NBClassifier\store_data.csv')
data = prob_round(str_to_float(delete_col(data, 1)))
'''Calculating probabilities'''
label_counts = nb_build.label_count(data)
total_count_labels = sum(label_counts.values())
prob_yes = label_counts.get('yes') / total_count_labels
prob_no = label_counts.get('no') / total_count_labels
prob_list = [prob_yes, prob_no]
'''Creating train/test sets'''
data_rows = len(data)
train_split = int((data_rows / 10) * 9)
test_split = len(data) - train_split
train_set = bootstrap_sampling(data, rows=train_split)
test_set = bootstrap_sampling(data, rows=test_split)
test_set_labels = get_col(test_set,47)
'''Creating lists of cols for storing conditional probabilities, mu,
and variance (numerical attribute)'''
categorical_cols = [1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 29, 33, 34, 35, 37, 39, 40, 41, 42, 45]
numerical_cols = []
for i in range(len(train_set[0])+1):
if i == 0:
continue
if i not in categorical_cols:
numerical_cols.append(i)
numerical_cols.pop()
'''Creating nested dict: keys = column number (starting from 1)
values = dictionary'''
cols_vars = dict()
for col in range(len(train_set[0])-1):
cols_vars[col+1] = {}
'''Calculate conditional probabilities, mu, and variance'''
cols_vars = nb_build.calc_categorical_conditional_prob(train_set, categorical_cols, cols_vars, label_counts)
cols_vars = nb_build.calc_mu_variance(train_set, numerical_cols, cols_vars)
'''Classify and Calculate Accuracy'''
predictions = nb_build.classify_records(test_set, cols_vars, categorical_cols, numerical_cols, prob_list)
accuracy = calc_accuracy(predictions, test_set_labels)
print(accuracy)
| 1,936 |
function/python/brightics/function/statistics/test/anova_test.py
|
sharon1321/studio
| 0 |
2170538
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import unittest
from brightics.function.statistics.anova import twoway_anova
class TwowayAnovaTest(unittest.TestCase):
def setUp(self):
self.example_df = pd.DataFrame({'Genotype': ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C', 'D', 'D', 'D', 'E',
'E', 'E', 'F', 'F', 'F', 'A', 'A', 'A', 'B', 'B', 'B', 'C', 'C',
'C', 'D', 'D', 'D', 'E', 'E', 'E', 'F', 'F', 'F', 'A', 'A', 'A',
'B', 'B', 'B', 'C', 'C', 'C', 'D', 'D', 'D', 'E', 'E', 'E', 'F',
'F', 'F'],
'years': ['year_1', 'year_1', 'year_1', 'year_1', 'year_1', 'year_1', 'year_1',
'year_1', 'year_1', 'year_1', 'year_1', 'year_1', 'year_1', 'year_1',
'year_1', 'year_1', 'year_1', 'year_1', 'year_2', 'year_2', 'year_2',
'year_2', 'year_2', 'year_2', 'year_2', 'year_2', 'year_2', 'year_2',
'year_2', 'year_2', 'year_2', 'year_2', 'year_2', 'year_2', 'year_2',
'year_2', 'year_3', 'year_3', 'year_3', 'year_3', 'year_3', 'year_3',
'year_3', 'year_3', 'year_3', 'year_3', 'year_3', 'year_3', 'year_3',
'year_3', 'year_3', 'year_3', 'year_3', 'year_3'],
'value': [1.53, 1.83, 1.38, 3.6, 2.94, 4.02, 3.99, 3.3, 4.41, 3.75, 3.63, 3.57,
1.71, 2.01, 2.04, 3.96, 4.77, 4.65, 4.08, 3.84, 3.96, 5.7, 5.07, 7.2,
6.09, 5.88, 6.51, 5.19, 5.37, 5.55, 3.6, 5.1, 6.99, 5.25, 5.28, 5.07,
6.69, 5.97, 6.33, 8.55, 7.95, 8.94, 10.02, 9.63, 10.38, 11.4, 9.66,
10.53, 6.87, 6.93, 6.84, 9.84, 9.87, 10.08]})
def test1(self):
t1 = pd.DataFrame(self.example_df)
print(t1)
response_cols = ['value']
factor_cols = ['Genotype', 'years']
out1 = twoway_anova(table=t1, response_cols=response_cols, factor_cols=factor_cols, group_by=None)
print(out1['result'])
| 3,114 |
501-600/521-530/526-beautifulArrangement/beautifulArrangement-dp.py
|
xuychen/Leetcode
| 0 |
2168828
|
# dp solution from discussion.
from collections import defaultdict
cache = {}
class Solution(object):
def countArrangement(self, N):
def helper(i, X):
if i == 1:
return 1
key = (i, X)
if key in cache:
return cache[key]
total = 0
            for j in range(len(X)):
if X[j] % i == 0 or i % X[j] == 0:
total += helper(i - 1, X[:j] + X[j + 1:])
cache[key] = total
return total
return helper(N, tuple(range(1, N + 1)))
class Solution2(object):
def countArrangement(self, n):
"""
:type n: int
:rtype: int
"""
candidates = defaultdict(list)
for i in range(1, n+1):
for j in range(i, n+1):
if i % j == 0 or j % i == 0:
candidates[i].append(j)
if i != j:
candidates[j].append(i)
masked = (1 << n) - 1
return self.dfs(candidates, 1, n, masked)
def dfs(self, candidates, value, n, mask):
if value > n:
return mask == 0
count = 0
for candidate in candidates[value]:
if mask & (1 << (candidate-1)):
count += self.dfs(candidates, value+1, n, mask ^ 1 << (candidate-1))
return count
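# Quick sanity check (hand-verified small cases): for N=2 both orderings
# [1, 2] and [2, 1] are beautiful; for N=3 exactly [1, 2, 3], [2, 1, 3] and
# [3, 2, 1] qualify.
if __name__ == '__main__':
    assert Solution2().countArrangement(2) == 2
    assert Solution2().countArrangement(3) == 3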
| 1,373 |
ch07_autoencoder/main.py
|
duchce/TensorFlow
| 4,893 |
2170923
|
from autoencoder import Autoencoder
from sklearn import datasets
hidden_dim = 1
data = datasets.load_iris().data
input_dim = len(data[0])
ae = Autoencoder(input_dim, hidden_dim)
ae.train(data)
ae.test([[8, 4, 6, 2]])
| 218 |
my_TFDetector.py
|
Moldazien/BA
| 0 |
2171328
|
import argparse
import glob
import os
import statistics
import sys
import time
import warnings
import humanfriendly
import numpy as np
from tqdm import tqdm
from CameraTraps.ct_utils import truncate_float #is in CameraTraps folder
#import visualization.visualization_utils as viz_utils #removed cause not needed
# ignoring all "PIL cannot read EXIF metainfo for the images" warnings
warnings.filterwarnings('ignore', '(Possibly )?corrupt EXIF data', UserWarning)
# Metadata Warning, tag 256 had too many entries: 42, expected 1
warnings.filterwarnings('ignore', 'Metadata warning', UserWarning)
# Numpy FutureWarnings from tensorflow import
warnings.filterwarnings('ignore', category=FutureWarning)
# Useful hack to force CPU inference
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('Is GPU available? tf.test.is_gpu_available:', tf.test.is_gpu_available())
#%% Classes
class ImagePathUtils:
"""A collection of utility functions supporting this stand-alone script"""
# Stick this into filenames before the extension for the rendered result
DETECTION_FILENAME_INSERT = '_detections'
image_extensions = ['.jpg', '.jpeg', '.gif', '.png']
@staticmethod
def is_image_file(s):
"""
Check a file's extension against a hard-coded set of image file extensions
"""
ext = os.path.splitext(s)[1]
return ext.lower() in ImagePathUtils.image_extensions
@staticmethod
def find_image_files(strings):
"""
Given a list of strings that are potentially image file names, look for strings
that actually look like image file names (based on extension).
"""
return [s for s in strings if ImagePathUtils.is_image_file(s)]
@staticmethod
def find_images(dir_name, recursive=False):
"""
Find all files in a directory that look like image file names
"""
if recursive:
strings = glob.glob(os.path.join(dir_name, '**', '*.*'), recursive=True)
else:
strings = glob.glob(os.path.join(dir_name, '*.*'))
image_strings = ImagePathUtils.find_image_files(strings)
return image_strings
class TFDetector:
"""
A detector model loaded at the time of initialization. It is intended to be used with
the MegaDetector (TF). The inference batch size is set to 1; code needs to be modified
to support larger batch sizes, including resizing appropriately.
"""
# Number of decimal places to round to for confidence and bbox coordinates
CONF_DIGITS = 3
COORD_DIGITS = 4
# MegaDetector was trained with batch size of 1, and the resizing function is a part
# of the inference graph
BATCH_SIZE = 1
# An enumeration of failure reasons
FAILURE_TF_INFER = 'Failure TF inference'
FAILURE_IMAGE_OPEN = 'Failure image access'
DEFAULT_RENDERING_CONFIDENCE_THRESHOLD = 0.85 # to render bounding boxes
DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD = 0.1 # to include in the output json file
DEFAULT_DETECTOR_LABEL_MAP = {
'1': 'animal',
'2': 'person',
'3': 'vehicle' # available in megadetector v4+
}
NUM_DETECTOR_CATEGORIES = 4 # animal, person, group, vehicle - for color assignment
def __init__(self, model_path):
"""Loads model from model_path and starts a tf.Session with this graph. Obtains
input and output tensor handles."""
detection_graph = TFDetector.__load_model(model_path)
self.tf_session = tf.Session(graph=detection_graph)
self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
self.box_tensor = detection_graph.get_tensor_by_name('detection_boxes:0')
self.score_tensor = detection_graph.get_tensor_by_name('detection_scores:0')
self.class_tensor = detection_graph.get_tensor_by_name('detection_classes:0')
@staticmethod
def round_and_make_float(d, precision=4):
return truncate_float(float(d), precision=precision)
@staticmethod
def __convert_coords(tf_coords):
"""Converts coordinates from the model's output format [y1, x1, y2, x2] to the
format used by our API and MegaDB: [x1, y1, width, height]. All coordinates
(including model outputs) are normalized in the range [0, 1].
Args:
tf_coords: np.array of predicted bounding box coordinates from the TF detector,
has format [y1, x1, y2, x2]
Returns: list of Python float, predicted bounding box coordinates [x1, y1, width, height]
"""
# change from [y1, x1, y2, x2] to [x1, y1, width, height]
width = tf_coords[3] - tf_coords[1]
height = tf_coords[2] - tf_coords[0]
new = [tf_coords[1], tf_coords[0], width, height] # must be a list instead of np.array
# convert numpy floats to Python floats
for i, d in enumerate(new):
new[i] = TFDetector.round_and_make_float(d, precision=TFDetector.COORD_DIGITS)
return new
@staticmethod
def convert_to_tf_coords(array):
"""From [x1, y1, width, height] to [y1, x1, y2, x2], where x1 is x_min, x2 is x_max
        This is an extraneous step, as the model outputs [y1, x1, y2, x2] but the coordinates were converted to the API
        output format - done only to keep the interface of the sync API.
"""
x1 = array[0]
y1 = array[1]
width = array[2]
height = array[3]
x2 = x1 + width
y2 = y1 + height
return [y1, x1, y2, x2]
@staticmethod
def __load_model(model_path):
"""Loads a detection model (i.e., create a graph) from a .pb file.
Args:
model_path: .pb file of the model.
Returns: the loaded graph.
"""
print('TFDetector: Loading graph...')
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
print('TFDetector: Detection graph loaded.')
return detection_graph
def _generate_detections_one_image(self, image):
np_im = np.asarray(image, np.uint8)
im_w_batch_dim = np.expand_dims(np_im, axis=0)
# need to change the above line to the following if supporting a batch size > 1 and resizing to the same size
# np_images = [np.asarray(image, np.uint8) for image in images]
# images_stacked = np.stack(np_images, axis=0) if len(images) > 1 else np.expand_dims(np_images[0], axis=0)
# performs inference
(box_tensor_out, score_tensor_out, class_tensor_out) = self.tf_session.run(
[self.box_tensor, self.score_tensor, self.class_tensor],
feed_dict={self.image_tensor: im_w_batch_dim})
return box_tensor_out, score_tensor_out, class_tensor_out
def generate_detections_one_image(self, image, image_id,
detection_threshold=DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD):
"""Apply the detector to an image.
Args:
image: the PIL Image object
image_id: a path to identify the image; will be in the "file" field of the output object
detection_threshold: confidence above which to include the detection proposal
Returns:
A dict with the following fields, see the 'images' key in https://github.com/microsoft/CameraTraps/tree/master/api/batch_processing#batch-processing-api-output-format
- 'file' (always present)
- 'max_detection_conf'
- 'detections', which is a list of detection objects containing keys 'category', 'conf' and 'bbox'
- 'failure'
"""
result = {
'file': image_id
}
try:
b_box, b_score, b_class = self._generate_detections_one_image(image)
# our batch size is 1; need to loop the batch dim if supporting batch size > 1
boxes, scores, classes = b_box[0], b_score[0], b_class[0]
detections_cur_image = [] # will be empty for an image with no confident detections
max_detection_conf = 0.0
for b, s, c in zip(boxes, scores, classes):
if s > detection_threshold:
detection_entry = {
'category': str(int(c)), # use string type for the numerical class label, not int
'conf': truncate_float(float(s), # cast to float for json serialization
precision=TFDetector.CONF_DIGITS),
'bbox': TFDetector.__convert_coords(b)
}
detections_cur_image.append(detection_entry)
if s > max_detection_conf:
max_detection_conf = s
result['max_detection_conf'] = truncate_float(float(max_detection_conf),
precision=TFDetector.CONF_DIGITS)
result['detections'] = detections_cur_image
except Exception as e:
result['failure'] = TFDetector.FAILURE_TF_INFER
print('TFDetector: image {} failed during inference: {}'.format(image_id, str(e)))
return result
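# Hedged usage sketch (not part of the original script). The model and image
# paths are placeholders; a real run needs a MegaDetector .pb file and a
# TF1-compatible environment.
if __name__ == '__main__':
    from PIL import Image
    detector = TFDetector('md_v4.1.0.pb')            # hypothetical model path
    image = Image.open('example_camera_trap.jpg')    # hypothetical image path
    result = detector.generate_detections_one_image(image, 'example_camera_trap.jpg')
    print(result.get('max_detection_conf'), len(result.get('detections', [])))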
| 9,482 |
PyParagraph/pyParagraph.py
|
rglukins/python-challenge
| 0 |
2171594
|
import os
import re
import string
#create a file path to access the text file
file_path = os.path.join(".", "paragraph_2.txt")
# create useful functions for loading files & doing calculations
def file_loader(filepath):
"""Extract the text data from the paragraph file"""
with open(filepath, "r") as paragraph_file_handler:
return paragraph_file_handler.read().lower().split()
def word_count(file_text):
return int(len(file_text))
#get the text from the file
paragraph_text = file_loader(file_path)
#For Approximate Letter Count
cleaned_text = [] # strip punctuation from the ends of the strings in the text list
for word in paragraph_text:
word = str(word)
word = word.strip(string.punctuation)
cleaned_text.append(word)
char_count = 0 # create a variable for counting characters (for average word length)
for i in cleaned_text: #count the number of characters in the cleaned word list
chars_in_word = len(str(i))
char_count = char_count + chars_in_word
av_letter_count = char_count / len(paragraph_text) # For Average Letter Count: create variable for average letter count
#Identify Individual sentences
with open(file_path, "r") as text_file:
paragraph = text_file.read()
sentences = re.split(r' *[\.\?!][\'"\)\]]* *', paragraph)
#create variable for sentence count
sentence_count = len(sentences)
#create variable for words per sentence
av_words_per_sentence = word_count(paragraph_text) / sentence_count
#print outputs
print("Approximate word count: " + str(word_count(paragraph_text)))
print (f'Approximate Sentence Count {sentence_count}')
print(f'Average letter Count: {av_letter_count}')
print(f'Average Words Per Sentence: {av_words_per_sentence}')
| 1,732 |
MyImg.py
|
Exisi/PicFilter
| 2 |
2170064
|
import imghdr
from PIL import Image
def is_img(f):
    '''
    Check whether a file is an image.
    :param f: path to the file
    :return: bool
    '''
    imgType_list = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif', 'gif', 'webp'}  # other special formats can be added as needed
if imghdr.what(f) in imgType_list:
return True
else:
return False
def wh_type(f):
    # Determine the image orientation type
    im = Image.open(f)
    if im.width > im.height:  # landscape
        return 1
    elif im.width < im.height:  # portrait
        return 2
    else:  # square
return 0
def hist(f):
'''
    Pixel histogram of the image.
    :param f: path to the image
:return: List
'''
im = Image.open(f)
return im.histogram()
def wh_limitByPor(f, type: str, proprotion: float):
'''
    Filter by aspect-ratio limit.
    :param f: path to the image
    :param type: image orientation type
    :param proprotion: aspect-ratio threshold
:return: bool
'''
im = Image.open(f)
w = im.width
h = im.height
print('w:', w, 'h:', h, w / h, 'type', type)
if type == '1' and (h / w) >= proprotion:
return True
elif type == '2' and (w / h) >= proprotion:
return True
else:
return False
def wh_limitByWH(f, limit: dict, type: str):
'''
    Filter by absolute width/height limits.
    :param f: path to the image
    :param limit: dict of minimum/maximum width and height limits
    :param type: image orientation type
:return: bool
'''
im = Image.open(f)
w = im.width
h = im.height
if type == '1':
if h <= limit['maxH']: return True
elif type == '2':
if w <= limit['maxW']: return True
else:
if w <= limit['maxW'] and w >= limit['minW'] and h <= limit['maxH'] and h >= limit['minH']: return True
return False
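# Hedged usage sketch; 'photo.jpg' is a placeholder path, not a file shipped
# with this module.
if __name__ == '__main__':
    path = 'photo.jpg'
    if is_img(path):
        orientation = wh_type(path)  # 1 = landscape, 2 = portrait, 0 = square
        print(orientation, wh_limitByPor(path, str(orientation), 1.5))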
| 1,585 |
lib/bes/macos/pkgutil/pkgutil.py
|
reconstruir/bes
| 0 |
2171712
|
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from bes.common.check import check
from bes.system.os_env import os_env
from bes.system.which import which
from bes.system.command_line import command_line
from bes.system.execute import execute
from .pkgutil_error import pkgutil_error
class pkgutil(object):
'Class to deal with the pkgutil executable.'
@classmethod
def call_pkgutil(clazz, args, cwd = None, msg = None, use_sudo = False):
exe = which.which('pkgutil')
if not exe:
raise pkgutil_error('pkgutil not found')
cmd = []
if use_sudo:
cmd.append('sudo')
cmd.append(exe)
cmd.extend(command_line.parse_args(args))
env = os_env.clone_current_env(d = {})
rv = execute.execute(cmd,
env = env,
cwd = cwd,
stderr_to_stdout = True,
raise_error = False)
if rv.exit_code != 0:
if not msg:
cmd_flag = ' '.join(cmd)
msg = 'pkgutil command failed: {}\n{}'.format(cmd_flag, rv.stdout)
raise pkgutil_error(msg)
return rv
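# Hedged usage sketch (macOS only): 'pkgutil --pkgs' lists installed package
# ids; here it is simply routed through call_pkgutil and printed. Assumes
# command_line.parse_args accepts a plain list of arguments.
if __name__ == '__main__':
    rv = pkgutil.call_pkgutil(['--pkgs'], msg='listing installed packages failed')
    print(rv.stdout)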
| 1,148 |