max_stars_repo_path (string, 4-182 chars) | max_stars_repo_name (string, 6-116 chars) | max_stars_count (int64, 0-191k) | id (string, 7 chars) | content (string, 100-10k chars) | size (int64, 100-10k) |
---|---|---|---|---|---|
mopidy/internal/versioning.py
|
grdorin/mopidy
| 6,700 |
2173372
|
import os
import subprocess
import mopidy
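# Prefer the version reported by "git describe" when running from a Git checkout; fall back to the packaged mopidy.__version__ otherwise.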
def get_version():
try:
return get_git_version()
except OSError:
return mopidy.__version__
def get_git_version():
project_dir = os.path.abspath(
os.path.join(os.path.dirname(mopidy.__file__), "..")
)
process = subprocess.Popen(
["git", "describe"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=project_dir,
)
if process.wait() != 0:
raise OSError('Execution of "git describe" failed')
version = process.stdout.read().strip().decode()
if version.startswith("v"):
version = version[1:]
return version
| 665 |
manage_it/services/forms.py
|
ShangShungInstitute/django-manage-it
| 1 |
2172259
|
from django.forms import ModelForm
from django.forms.models import inlineformset_factory
from models import SLA, Document
class SLAForm(ModelForm):
class Meta:
model = SLA
#fields = ['pub_date', 'headline', 'content', 'reporter']
exclude = ("created_at", "created_by", "service", "terminated")
DocumentFormSet = inlineformset_factory(
SLA, Document,
max_num=2,
exclude=("created_at", "created_by"))
| 443 |
experiment.py
|
crossopt/polar-exp
| 0 |
2172543
|
""" Runs the main experiment, ie various BP strategies, on generated inputs. """
from graph import Graph
from initialize import get_all_possible_configs, get_p_list
from propagate import Counter, default_stopping_condition, successive_cancellation_propagate,\
naive_propagate, flooding_propagate, scheduling_conventional_propagate, scheduling_round_trip_propagate
import matplotlib.pyplot as plt
from datetime import datetime
class ExperimentResult:
""" Class to store the results of an experiment for all propagation methods. """
def __init__(self,
naive_result=None,
flooding_result=None,
conventional_scheduling_result=None,
round_trip_scheduling_result=None,
successive_cancellation_result=None):
self.naive_result = naive_result
self.flooding_result = flooding_result
self.conventional_scheduling_result = conventional_scheduling_result
self.round_trip_scheduling_result = round_trip_scheduling_result
self.successive_cancellation_result = successive_cancellation_result
def print(self):
""" Outputs the result in a human-readable format. """
print('''naive: {:.2f}/{:.2f} flooding: {:.2f}/{:.2f} conventional scheduling: {:.2f}/{:.2f}'''
''' round trip scheduling: {:.2f}/{:.2f} successive cancellation {:.2f}/{:.2f}'''.format(
self.naive_result.steps, self.naive_result.parallel_steps,
self.flooding_result.steps, self.flooding_result.parallel_steps,
self.conventional_scheduling_result.steps, self.conventional_scheduling_result.parallel_steps,
self.round_trip_scheduling_result.steps, self.round_trip_scheduling_result.parallel_steps,
self.successive_cancellation_result.steps, self.successive_cancellation_result.parallel_steps,
))
def get_average_steps(method, graph_list, stopping_conditions):
""" Returns the average (over all graphs in a given list) step amount a BP method performs until termination. """
counter = Counter()
results = [method(graph.get_copy(), condition) for graph, condition in zip(graph_list, stopping_conditions)]
counter.steps = sum([result.steps for result in results]) / max(len(graph_list), 1)
counter.parallel_steps = sum([result.parallel_steps for result in results]) / max(len(graph_list), 1)
return counter
def perform_average_computation_random(k, p, repeats):
""" Returns an ExperimentResult containing the average step amount for running various BP methods.
The propagation methods are run on `repeats` randomly generated graphs with k start nodes and probability of error p.
The same graphs are used for all propagation methods. The propagation methods currently being run are naive_propagate,
flooding_propagate, scheduling_conventional_propagate, scheduling_round_trip_propagate and successive_cancellation_propagate.
"""
graph_list = []
condition_list = []
for _ in range(repeats):
graph = Graph(k, p)
graph_list.append(graph)
condition_list.append(default_stopping_condition(graph))
return ExperimentResult(
naive_result=get_average_steps(naive_propagate, graph_list, condition_list),
flooding_result=get_average_steps(flooding_propagate, graph_list, condition_list),
conventional_scheduling_result=get_average_steps(scheduling_conventional_propagate, graph_list, condition_list),
round_trip_scheduling_result=get_average_steps(scheduling_round_trip_propagate, graph_list, condition_list),
successive_cancellation_result=get_average_steps(successive_cancellation_propagate, graph_list, condition_list),
)
def perform_average_computation_all(k, p):
""" Returns an ExperimentResult containing the average step amount for running various BP methods.
The propagation methods are run on all possible generated graphs with k start nodes and probability of error p.
The propagation methods currently being run are naive_propagate, flooding_propagate,
scheduling_conventional_propagate, scheduling_round_trip_propagate and successive_cancellation_propagate.
"""
graph_list = []
condition_list = []
for end_node_config in get_all_possible_configs(k, p):
graph = Graph(k, p)
graph.update_end_nodes(end_node_config)
graph_list.append(graph)
condition_list.append(default_stopping_condition(graph))
return ExperimentResult(
naive_result=get_average_steps(naive_propagate, graph_list, condition_list),
flooding_result=get_average_steps(flooding_propagate, graph_list, condition_list),
conventional_scheduling_result=get_average_steps(scheduling_conventional_propagate, graph_list, condition_list),
round_trip_scheduling_result=get_average_steps(scheduling_round_trip_propagate, graph_list, condition_list),
successive_cancellation_result=get_average_steps(successive_cancellation_propagate, graph_list, condition_list),
)
def plot_graph(name, k, p_skip, results):
passed_amounts = [i for i in range(2 ** k + 1)]
passed_amounts = passed_amounts[::p_skip]
fig, (step, parallel_step) = plt.subplots(2, 1)
step.plot(passed_amounts, [result.naive_result.steps for result in results], 'r', label='Naive propagation')
step.plot(passed_amounts, [result.flooding_result.steps for result in results], 'k', label='Flooding propagation')
step.plot(passed_amounts, [result.conventional_scheduling_result.steps for result in results], 'b',
label='Conventional scheduling')
step.plot(passed_amounts, [result.round_trip_scheduling_result.steps for result in results], 'g',
label='Round-trip scheduling')
step.plot(passed_amounts, [result.successive_cancellation_result.steps for result in results], 'y',
label='Successive cancellation')
fig.suptitle(name)
step.set(ylabel='Average number of\noperations')
parallel_step.plot(passed_amounts, [result.naive_result.parallel_steps for result in results], 'r',
label='Naive propagation')
parallel_step.plot(passed_amounts, [result.flooding_result.parallel_steps for result in results], 'k',
label='Flooding propagation')
parallel_step.plot(passed_amounts, [result.conventional_scheduling_result.parallel_steps for result in results], 'b',
label='Conventional scheduling')
parallel_step.plot(passed_amounts, [result.round_trip_scheduling_result.parallel_steps for result in results], 'g',
label='Round-trip scheduling')
parallel_step.plot(passed_amounts, [result.successive_cancellation_result.parallel_steps for result in results], 'y',
label='Successive cancellation')
parallel_step.set(xlabel='Number of non-frozen (informative) bits', ylabel='Average number of\nparallel steps')
lines, labels = fig.axes[-1].get_legend_handles_labels()
legend = fig.legend(lines, labels, bbox_to_anchor=(1.0, 1.0), loc='upper left')
plt.savefig('results/graph_{}.png'.format(datetime.now()),bbox_extra_artists=(legend,), bbox_inches='tight')
plt.clf()
print(datetime.now()) # A rough idea of the execution time.
def print_average_result_all(k, dbg=False):
""" Outputs the average step amount dependent on probabilities that generate different amounts of frozen bits. """
results = []
probs = get_p_list(k)
for prob in probs:
results.append(perform_average_computation_all(k, prob))
if dbg:
results[-1].print()
plot_graph('Polar decoding with block size {}'.format(2 ** k), k, 1, results)
def print_average_result_random(k, repeats, p_skip=1, dbg=False):
""" Outputs the average step amount dependent on probabilities that generate different amounts of frozen bits. """
results = []
probs = get_p_list(k)[::p_skip]
print(probs, '\n', len(probs))
for prob in probs:
print(prob)
results.append(perform_average_computation_random(k, prob, repeats))
if dbg:
results[-1].print()
plot_graph('Polar decoding with block size {}, {} runs'.format(2 ** k, repeats), k, p_skip, results)
if __name__ == '__main__':
print(datetime.now())
print_average_result_all(3)
print_average_result_random(5, repeats=200)
print_average_result_random(8, repeats=200, p_skip=1)
| 8,356 |
image-tools/detection/detection.py
|
flegac/deep-experiments
| 0 |
2173128
|
import glob
import os
from pathlib import Path
import cv2
import numpy as np
from skimage import img_as_float
from skimage import transform
from skimage.measure import compare_ssim as ssim
# Histogram of Oriented Gradients
# https://www.learnopencv.com/histogram-of-oriented-gradients/
from image_clustering.image_utils import contrast_stretching, adaptive_equalization
# structural similarity
# histogram matching
# http://paulbourke.net/miscellaneous/equalisation/
def detection(x1, x2, win_size=1, power=2, threshold=.5):
x1 = img_as_float(x1)
x2 = img_as_float(x2)
w, h, _ = x1.shape
s, out = ssim(x1, x2, win_size=2 * win_size + 1, multichannel=True, full=True)
mask = (np.average(1 - out, axis=2) ** power)
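# (1 - out) is the per-pixel SSIM dissimilarity map; averaging over channels and raising it to the given power emphasises the strongly changed regions.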
return mask
# return (mask > threshold).astype(mask.dtype)
def im_resize(img, scale):
width = int(img.shape[1] * scale)
height = int(img.shape[0] * scale)
dim = (width, height)
# resize image
return cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
def do_all(name, label, x1, x2, equalizer=None):
if equalizer:
x1 = equalizer(x1)
x2 = equalizer(x2)
cv2.imwrite('{}_{}_x1.png'.format(name, label), im_resize(x1, 2.))
cv2.imwrite('{}_{}_x2.png'.format(name, label), im_resize(x2, 2.))
y_pred = detection(x1, x2)
cv2.imwrite('{}_y_pred_{}.png'.format(name, label), im_resize(y_pred, 2.) * 255)
def main():
path = os.path.abspath(os.path.join(os.curdir, 'images'))
dataset = glob.glob(os.path.join(path, '*_mask.tif'))
for _ in dataset:
name = Path(_).name.replace('_mask.tif', '')
x1 = cv2.imread(_.replace('_mask.tif', '_0.tif'))
x2 = cv2.imread(_.replace('_mask.tif', '_1.tif'))
y = cv2.imread(os.path.join(_), cv2.IMREAD_GRAYSCALE)
cv2.imwrite('{}_x1.png'.format(name), im_resize(x1, 2.))
cv2.imwrite('{}_x2.png'.format(name), im_resize(x2, 2.))
cv2.imwrite('{}_y.png'.format(name), im_resize(y, 2.) * 255)
x1 = transform.match_histograms(x1, x2, multichannel=True)
do_all(name, 'basic', x1, x2)
do_all(name, 'stretch', x1, x2, contrast_stretching)
do_all(name, 'adapt_eq', x1, x2, adaptive_equalization)
if __name__ == "__main__":
main()
| 2,260 |
days/day04/part2.py
|
jaredbancroft/aoc2021
| 0 |
2171184
|
from submarine.systems.bingo import BingoSubsystem
def solution(day):
b = BingoSubsystem(f"inputs/{day}.txt")
score = b.play("lose")
return score
| 160 |
backend/bitcoin_arbitrage/apps.py
|
landdafku11/cryptocurrencybot
| 1 |
2173443
|
from django.apps import AppConfig
class BitcoinArbitrageConfig(AppConfig):
name = 'bitcoin_arbitrage'
| 108 |
Python_PI/Clase23.py
|
Alex8Navas/PythonPI
| 0 |
2172930
|
# Lesson 23. Píldoras Informáticas course.
# Flow control. Exceptions 3.
# Custom exceptions: raise.
def evalEdad(edad):
if edad < 0:
raise TypeError("No se permiten edades negativas") # Custom message.
if edad < 20:
return "Joven."
elif edad < 40:
return "Joven Adulto."
elif edad < 65:
return "Adulto."
elif edad < 100:
return "Signore."
print(evalEdad(99))
# print(evalEdad(-66))
import math
print("\nPrograma de Cálculo de Raíces")
def Calcularoot(number1):
if number1 < 0:
raise ValueError("No se puede calcular la raíz de un número negativo.")
else:
return math.sqrt(number1)
numberint = int(input("Introduce un número: "))
try:
print(Calcularoot(numberint))
except ValueError as RootNegativa:
print(RootNegativa)
print("Programa Finalizado.")
| 856 |
samples/human_interaction/SendSMSChallenge/__init__.py
|
sebastianburckhardt/azure-functions-durable-python
| 78 |
2173433
|
import json
import random
random.seed(10)
def main(phoneNumber, message):
code = random.randint(0, 10000)
payload = {
"body": f"Your verification code is {code}",
"to": phoneNumber
}
message.set(json.dumps(payload))
code_str = str(code)
return code_str
| 276 |
Integrations/HelloWorldSimple/HelloWorldSimple_test.py
|
TBE-Comp/content
| 0 |
2173318
|
from HelloWorldSimple import say_hello_command
def test_say_hello():
args = {
'name': 'Dbot'
}
result = say_hello_command(args)
assert result == '## Hello Dbot'
| 188 |
tests/test_replacer.py
|
joshua-s/punch
| 0 |
2169704
|
import six
import pytest
import io
from punch import replacer
def file_like(file_content):
if six.PY2:
return io.StringIO(unicode(file_content)) # NOQA
else:
return io.StringIO(file_content)
def test_replace_content_without_config():
with pytest.raises(TypeError):
replacer.Replacer()
def test_replace_content():
current_version = {
'major': 1,
'minor': 0,
'patch': 0
}
new_version = {
'major': 1,
'minor': 0,
'patch': 1
}
file_content = """# Just a comment
__version__ = "1.0.0"
"""
updated_file_content = """# Just a comment
__version__ = "1.0.1"
"""
serializer = "__version__ = \"{{major}}.{{minor}}.{{patch}}\""
rep = replacer.Replacer(serializer)
new_file_content = rep.replace(file_content, current_version, new_version)
assert new_file_content == updated_file_content
def test_get_versions():
current_version = {
'major': 1,
'minor': 0,
'patch': 0
}
new_version = {
'major': 1,
'minor': 0,
'patch': 1
}
serializer = "__version__ = \"{{major}}.{{minor}}.{{patch}}\""
rep = replacer.Replacer(serializer)
list_of_versions = rep.run_all_serializers(current_version, new_version)
assert list_of_versions == [
("__version__ = \"1.0.0\"", "__version__ = \"1.0.1\"")
]
def test_get_versions_with_multiple_serializers():
current_version = {
'major': 1,
'minor': 0,
'patch': 0
}
new_version = {
'major': 1,
'minor': 0,
'patch': 1
}
serializers = [
"__version__ = \"{{major}}.{{minor}}.{{patch}}\"",
"__api_abi__ = \"{{major}}.{{minor}}\""
]
rep = replacer.Replacer(serializers)
list_of_versions = rep.run_all_serializers(current_version, new_version)
assert list_of_versions == [
("__version__ = \"1.0.0\"", "__version__ = \"1.0.1\""),
("__api_abi__ = \"1.0\"", "__api_abi__ = \"1.0\"")
]
def test_get_main_version_change_with_multiple_serializers():
current_version = {
'major': 1,
'minor': 0,
'patch': 0
}
new_version = {
'major': 1,
'minor': 0,
'patch': 1
}
serializers = [
"__version__ = \"{{major}}.{{minor}}.{{patch}}\"",
"__api_abi__ = \"{{major}}.{{minor}}\""
]
rep = replacer.Replacer(serializers)
current, new = rep.run_main_serializer(current_version, new_version)
assert (current, new) == (
"__version__ = \"1.0.0\"", "__version__ = \"1.0.1\""
)
def test_replace_content_with_multiple_serializers():
current_version = {
'major': 1,
'minor': 0,
'patch': 0
}
new_version = {
'major': 1,
'minor': 0,
'patch': 1
}
file_content = """# Just a comment
__version__ = "1.0.0"
__api_abi__ = "1.0"
"""
updated_file_content = """# Just a comment
__version__ = "1.0.1"
__api_abi__ = "1.0"
"""
serializers = [
"__version__ = \"{{major}}.{{minor}}.{{patch}}\"",
"__api_abi__ = \"{{major}}.{{minor}}\""
]
rep = replacer.Replacer(serializers)
new_file_content = rep.replace(file_content, current_version, new_version)
assert new_file_content == updated_file_content
def test_replace_content_without_using_all_parts():
current_version = {
'major': 1,
'minor': 0,
'patch': 0
}
new_version = {
'major': 1,
'minor': 1,
'patch': 0
}
file_content = """# Just a comment
__version__ = "1.0"
"""
updated_file_content = """# Just a comment
__version__ = "1.1"
"""
serializer = "__version__ = \"{{major}}.{{minor}}\""
rep = replacer.Replacer(serializer)
new_file_content = rep.replace(file_content, current_version, new_version)
assert new_file_content == updated_file_content
| 3,999 |
Diena_16_misc/my_tk_gui.py
|
edzya/Python_RTU_08_20
| 8 |
2172693
|
import tkinter as tk # this is in standard library
class Application(tk.Frame):
count = 5
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.pack()
self.create_widgets()
def create_widgets(self):
self.hi_there = tk.Button(self)
self.hi_there["text"] = "Hello World\n(click me)"
self.hi_there["command"] = self.say_hi #binding say_hi method to click
self.hi_there.pack(side="top")
self.quit = tk.Button(self, text="QUIT", fg="red",
command=self.master.destroy)
self.quit.pack(side="bottom")
self.result = tk.Label(self)
self.result["text"] = f"Result {self.count}"
self.result.pack(side="bottom")
def say_hi(self):
print("hi there, everyone!")
self.count += 1
self.result["text"] = f"Result {self.count}" # hand made binding of hi button and result label
root = tk.Tk()
app = Application(master=root)
app.mainloop()
| 1,030 |
plugins/Lima/limaFAI.py
|
yugangzhang/pyFAI
| 45 |
2172115
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import with_statement, print_function
"""
LImA ProcessLib example of pyFAI azimuthal integrator Link and Sink
"""
__author__ = "<NAME>"
__contact__ = "<EMAIL>"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "03/08/2016"
__status__ = "beta"
__docformat__ = 'restructuredtext'
import threading
import logging
import numpy
import sys
import os
import distutils.util
from os.path import dirname
logger = logging.getLogger("lima.pyfai")
# set loglevel at least at INFO
if logger.getEffectiveLevel() > logging.INFO:
logger.setLevel(logging.INFO)
from Lima import Core
try:
import pyFAI
except ImportError:
cwd = dirname(dirname(dirname(os.path.abspath(__file__))))
sys.path.append(os.path.join(cwd, "build", "lib.%s-%i.%i" % (distutils.util.get_platform(), sys.version_info[0], sys.version_info[1])))
import pyFAI
class StartAcqCallback(Core.SoftCallback):
"""
Class managing the connection from a
Lima.Core.CtControl.prepareAcq() to the configuration of the various tasks
Example of usage:
cam = Basler.Camera(ip)
iface = Basler.Interface(cam)
ctrl = Core.CtControl(iface)
processLink = LinkPyFAI(worker, writer)
extMgr = ctrl.externalOperation()
myOp = self.extMgr.addOp(Core.USER_LINK_TASK, "pyFAILink", 0)
myOp.setLinkTask(processLink)
callback = StartAcqCallback(ctrl, processLink)
myOp.registerCallback(callback)
acq.setAcqNbFrames(0)
acq.setAcqExpoTime(1.0)
ctrl.prepareAcq() #Configuration called here !!!!
ctrl.startAcq()
"""
def __init__(self, control, task=None):
"""
:param control: Lima.Core.CtControl instance
:param task: The task one wants to parametrize at startup. Can be a Core.Processlib.LinkTask or a Core.Processlib.SinkTask
"""
Core.SoftCallback.__init__(self)
self._control = control
self._task = task
def prepare(self):
"""
Called with prepareAcq()
"""
im = self._control.image()
imdim = im.getImageDim().getSize()
x = imdim.getWidth()
y = imdim.getHeight()
bin = im.getBin()
binX = bin.getX()
binY = bin.getY()
lima_cfg = {"dimX":x,
"dimY":y,
"binX":binX,
"binY":binY}
saving = self._control.saving()
sav_parms = saving.getParameters()
lima_cfg["directory"]=sav_parms.directory
lima_cfg["prefix"] = sav_parms.prefix
lima_cfg["start_index"] = sav_parms.nextNumber
lima_cfg["indexFormat"] = sav_parms.indexFormat
# number of images ...
acq = self._control.acquisition()
lima_cfg["number_of_frames"] = acq.getAcqNbFrames() #to check.
lima_cfg["exposure_time"] = acq.getAcqExpoTime()
#ROI see: https://github.com/esrf-bliss/Lima/blob/master/control/include/CtAcquisition.h
print("self._task._worker: %s" % self._task._worker)
if (self._task._worker) is None :
centerX = x // 2
centerY = y // 2
ai = pyFAI.AzimuthalIntegrator()
ai.setFit2D(1000, centerX=centerX, centerY=centerY, pixelX=1, pixelY=1)
worker = pyFAI.worker.Worker(ai)
worker.unit = "r_mm"
worker.method = "lut_ocl_gpu"
worker.nbpt_azim = 360
worker.nbpt_rad = 500
worker.output = "numpy"
print("Worker updated")
self._task._worker = worker
else:
worker = self._task._worker
worker.reconfig(shape=(y, x), sync=True)
if self._task._writer:
config = self._task._worker.get_config()
self._task._writer.init(fai_cfg=config, lima_cfg=lima_cfg)
self._task._writer.flush(worker.radial, worker.azimuthal)
class LinkPyFAI(Core.Processlib.LinkTask):
"""
This is a ProcessLib task which is a link:
it modifies the image for further processing.
It processes every acquired frame with the pyFAI-worker and can optionally
save data as HDF5 or EDF
"""
def __init__(self, worker=None, writer=None):
"""
:param worker: pyFAI.worker.Worker instance
:param writer: pyFAI.io.Writer instance
"""
Core.Processlib.LinkTask.__init__(self)
self._worker = worker
self._writer = writer
def process(self, data) :
"""
Callback function
Called for every frame in a different C++ thread.
"""
rData = Core.Processlib.Data()
rData.frameNumber = data.frameNumber
rData.buffer = self._worker.process(data.buffer)
if self._writer: #optional HDF5 writer
self._writer.write(rData.buffer, rData.frameNumber)
return rData
def __repr__(self):
"""
pretty print of myself
"""
lstout = [ "LinkPyFAI Processlib instance","Worker:",self._worker.__repr__(),"Writer:",self._writer.__repr__()]
return os.linesep.join(lstout)
class SinkPyFAI(Core.Processlib.SinkTaskBase):
"""
This is a ProcessLib task which is a sink:
it processes the image and saves it.
It processes every acquired frame with the pyFAI-worker.
If no writer is provided, processed data are lost.
"""
def __init__(self, worker=None, writer=None):
Core.Processlib.SinkTaskBase.__init__(self)
self._worker = worker
self._writer = writer
if writer is None:
logger.error("Without a writer, SinkPyFAI will just dump all data")
def __repr__(self):
"""
pretty print of myself
"""
lstout = [ "SinkPyFAI Processlib instance","Worker:",self._worker.__repr__(),"Writer:",self._writer.__repr__()]
return os.linesep.join(lstout)
def reset(self):
"""
this is just to force the integrator to initialize
"""
self.ai.reset()
def reconfig(self, shape=(2048, 2048)):
"""
this is just to force the integrator to initialize with a given input image shape
"""
self.shapeIn = shape
self.ai.reset()
if self.do_2D():
threading.Thread(target=self.ai.integrate2d,
name="init2d",
args=(numpy.zeros(self.shapeIn, dtype=numpy.float32),
self.nbpt_rad, self.nbpt_azim),
kwargs=dict(method="lut", unit=self.unit)
).start()
else:
threading.Thread(target=self.ai.integrate1d,
name="init1d",
args=(numpy.zeros(self.shapeIn, dtype=numpy.float32),
self.nbpt_rad),
kwargs=dict(method="lut", unit=self.unit)
).start()
def process(self, data) :
"""
Callback function
Called for every frame in a different C++ thread.
"""
rData = Core.Processlib.Data()
rData.frameNumber = data.frameNumber
rData.buffer = self._worker.process(data.buffer)
if self._writer: #optional HDF5 writer
self._writer.write(rData.buffer, rData.frameNumber)
#
# ctControl = _control_ref()
# saving = ctControl.saving()
# sav_parms = saving.getParameters()
# if not self.subdir:
# directory = sav_parms.directory
# elif self.subdir.startswith("/"):
# directory = self.subdir
# else:
# directory = os.path.join(sav_parms.directory, self.subdir)
# if not os.path.exists(directory):
# logger.error("Ouput directory does not exist !!! %s", directory)
# try:
# os.makedirs(directory)
# except: # No luck with threads
# pass
#
## directory = sav_parms.directory
# prefix = sav_parms.prefix
# nextNumber = sav_parms.nextNumber
# indexFormat = sav_parms.indexFormat
# kwarg["filename"] = os.path.join(directory, prefix + indexFormat % (nextNumber + data.frameNumber))
# if self.do_2D():
# kwarg["npt_rad"] = self.nbpt_rad
# kwarg["npt_azim"] = self.nbpt_azim
# if self.extension:
# kwarg["filename"] += self.extension
# else:
# kwarg["filename"] += ".azim"
# else:
# kwarg["npt"] = self.nbpt_rad
# if self.extension:
# kwarg["filename"] += self.extension
# else:
# kwarg["filename"] += ".xy"
# if self.do_poisson:
# kwarg["error_model"] = "poisson"
# else:
# kwarg["error_model"] = "None"
#
# try:
# if self.do_2D():
# self.ai.integrate2d(**kwarg)
# else:
# self.ai.integrate1d(**kwarg)
# except:
# print data.buffer.shape, data.buffer.size
# print self.ai
# print self.ai._lut_integrator
# print self.ai._lut_integrator.size
# raise
# # return rData
def setSubdir(self, path):
"""
Set the relative or absolute path for processed data
"""
self.writer.subdir = path
def setExtension(self, ext):
"""
enforce the extension of the processed data file written
"""
if ext:
self._writer.extension = ext
else:
self._writer.extension = None
def setJsonConfig(self, jsonconfig):
self._worker.setJsonConfig(jsonconfig)
| 9,787 |
lexer.py
|
gsedometov/KR580-compiler
| 0 |
2172219
|
import ply.lex as lex
from symbols import cmds, rg_names, tokens
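# ply-based lexer for KR580 assembly: label declarations are stored in lexer.symtable, and lexer.counter tracks the current instruction line.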
def t_DECLARATION(t):
r'[A-Za-z]+[0-9]*\:'
line = t.lexer.counter
if line - t.lexer.last_declaration == 1:
line -= 1
t.lexer.symtable[t.value[:-1]] = line
t.lexer.last_declaration = t.lexer.counter
def t_CMD(t):
r'([A-Z]|[a-z])+\b'
if t.value in cmds:
t.type = 'CMD'
elif t.value in rg_names:
t.type = 'RG_NAME'
else:
t.type = 'ID'
t.lexer.counter += 1
return t
t_ID = r'[A-Za-z]*[0-9]+'
def t_WORD(t):
r'([\da-f0-9]{2}){1}'
t.value = int(t.value, 16)
t.lexer.counter += 1
return t
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += len(t.value)
t_ignore = '\t, '
# Error handling rule
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
lexer = lex.lex()
lexer.symtable = {}
lexer.last_declaration = 0
lexer.counter = 0
| 939 |
sandy-disaster-recovery/messaging.py
|
toddjcrane/crisiscleanup-legacy
| 1 |
2170536
|
#!/usr/bin/env python
#
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
from google.appengine.api import app_identity, mail
import jinja2
from config_key_db import get_config_key
from admin_handler.admin_identity import get_global_admins, get_event_admins
import aws
# jinja
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.join(
os.path.dirname(__file__),
'templates',
'email'
)
)
)
# functions
def get_application_id():
return app_identity.get_application_id()
def get_default_version_hostname():
return app_identity.get_default_version_hostname()
def get_base_url():
configured_base_url = get_config_key('system_base_url')
if configured_base_url:
return configured_base_url
else:
# return http as the scheme and assume requests redirected
return "http://" + get_default_version_hostname()
def get_appengine_default_system_email_address():
return "%s <<EMAIL>>" % (
app_identity.get_service_account_name(),
app_identity.get_application_id()
)
def get_aws_ses_default_system_email_address():
" From configuration "
return get_config_key('system_email_address')
def send_email_via_appengine(sender, to, subject, body, cc=None, bcc=None, html_body=None):
send_mail_args = {
'sender': sender,
'to': to,
'subject': subject,
'body': body,
}
if cc:
send_mail_args['cc'] = cc
if bcc:
send_mail_args['bcc'] = bcc
if html_body:
send_mail_args['html'] = html_body
return mail.send_mail(**send_mail_args)
def send_email_via_aws_ses(
sender, to, subject, body, cc=None, bcc=None, html_body=None,
aws_ses_region=None,
aws_ses_access_key_id=None,
aws_ses_secret_access_key=None,
):
return aws.ses_send_email(
source=sender,
to_addresses=to,
subject=subject,
body=body,
cc=cc,
bcc=bcc,
html_body=html_body,
aws_region=aws_ses_region,
aws_access_key_id=aws_ses_access_key_id,
aws_secret_access_key=aws_ses_secret_access_key,
)
def can_send_by_aws_ses(
aws_ses_region,
aws_ses_access_key_id,
aws_ses_secret_access_key,
sender_address
):
keys_available = bool(
aws_ses_region and
aws_ses_access_key_id and
aws_ses_secret_access_key
)
if keys_available:
verified_addresses = aws.ses_get_verified_email_addresses(
aws_ses_region,
aws_ses_access_key_id,
aws_ses_secret_access_key
)
sender_ok = (sender_address in verified_addresses)
if sender_ok:
return True
else:
logging.warning(
"Tried to send by AWS SES but %s is not verified." % (
sender_address))
return False
def send_email_by_service(to, subject, body, cc=None, bcc=None, html_body=None):
" Send by AWS SES if available, otherwise GAE. "
assert not isinstance(to, basestring), "'to' must be a list or iterable"
# check for AWS API keys
aws_ses_region = get_config_key('aws_ses_region')
aws_ses_access_key_id = get_config_key('aws_ses_access_key_id')
aws_ses_secret_access_key = get_config_key('aws_ses_secret_access_key')
# lookup addresses(s) to send from
gae_sender_address = get_appengine_default_system_email_address()
aws_sender_address = get_aws_ses_default_system_email_address()
# send by AWS or fall back to GAE
# catch & log all exceptions to prevent blowing up requests due to email
try:
if can_send_by_aws_ses(aws_ses_region, aws_ses_access_key_id, aws_ses_secret_access_key, aws_sender_address):
logging.error(aws_ses_secret_access_key)
return send_email_via_aws_ses(
aws_sender_address,
to, subject, body, cc=cc, bcc=bcc, html_body=html_body,
aws_ses_region=aws_ses_region,
aws_ses_access_key_id=aws_ses_access_key_id,
aws_ses_secret_access_key=aws_ses_secret_access_key
)
else:
return send_email_via_appengine(
gae_sender_address,
to, subject, body, cc=cc, bcc=bcc, html_body=html_body
)
except:
logging.exception("Exception caused generating email.")
def friendly_email_address(contact):
return u"%s <%s>" % (contact.full_name, contact.email)
def email_contacts(event, contacts, subject, body, html=None, bcc_contacts=None):
prefixed_subject = "[%s] %s" % (get_application_id(), subject)
to_addresses = map(
friendly_email_address,
(contact for contact in contacts if contact.email)
)
bcc_addresses = map(
friendly_email_address,
(contact for contact in bcc_contacts if contact.email)
) if bcc_contacts else []
send_email_by_service(
to_addresses,
prefixed_subject,
body,
bcc=bcc_addresses,
html_body=html,
)
def email_contacts_using_templates(
event, contacts, subject_template_name, body_template_name, **kwargs):
"""
Email contacts using Jinja2 templates.
"""
subject_template = jinja_environment.get_template(subject_template_name)
body_template = jinja_environment.get_template(body_template_name)
kwargs.update({'event': event})
rendered_subject = subject_template.render(kwargs)
rendered_body = body_template.render(kwargs)
email_contacts(event, contacts, rendered_subject, rendered_body)
def email_administrators(event, subject, body, html=None, include_local=True):
admin_orgs = get_event_admins(event) if include_local else get_global_admins()
admin_contacts = reduce(
lambda x, y: list(x) + list(y),
(org.contacts for org in admin_orgs)
)
email_contacts(event, admin_contacts, subject, body, html=html)
def email_administrators_using_templates(
event, subject_template_name, body_template_name, include_local=True, **kwargs):
"""
Email all relevant administrators for event, using Jinja2 templates.
"""
admin_orgs = get_event_admins(event) if include_local else get_global_admins()
admin_contacts = reduce(
lambda x, y: list(x) + list(y),
(org.contacts for org in admin_orgs)
)
email_contacts_using_templates(
event,
admin_contacts,
subject_template_name,
body_template_name,
**kwargs
)
#
# Specific email convenience functions
#
def send_activation_emails(org_for_activation):
activation_url = "%s/activate?code=%s" % (
get_base_url(), org_for_activation.activation_code)
email_contacts_using_templates(
None,
org_for_activation.primary_contacts,
'activation.subject.txt',
'activation.body.txt',
org=org_for_activation,
activation_url=activation_url,
bcc_contacts=get_global_admins()[0].contacts
)
def send_activated_emails(org_activated):
email_contacts_using_templates(
org_activated.incidents[0],
org_activated.primary_contacts,
'activated.subject.txt',
'activated.body.txt',
org=org_activated,
bcc_contacts=get_global_admins()[0].contacts
)
#
# Test Handler
#
import base
import key
GLOBAL_ADMIN_NAME = "Admin"
class EmailTestHandler(base.RequestHandler):
def get(self):
org, event = key.CheckAuthorization(self.request)
if not (org and org.name == GLOBAL_ADMIN_NAME):
self.response.out.write("Must be global admin.")
return
to_addr = self.request.get("to")
from_addr = self.request.get("from")
if to_addr and from_addr:
send_email_by_service(
sender=from_addr,
to=[to_addr],
subject=u"Test email",
body=u"This is a test email."
)
self.response.out.write("Test email sent.")
else:
self.response.out.write("Need to and from addresses")
| 8,723 |
BrewMe.py
|
AdamPrimak/BrewMe
| 4 |
2172945
|
#Setup
import glob
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(18,GPIO.OUT) #Green LED
GPIO.setup(12,GPIO.OUT) #RED LED
GPIO.setup(13,GPIO.OUT) #Yellow LED
GPIO.setup(23,GPIO.OUT) #Buzzer
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
temp_f = temp_c * 9.0 / 5.0 + 32.0
return temp_c, temp_f
def LED_color(): #hard coded limits on temperature - Green = within limits, Yellow = near limits, Red = outside limits
temp = read_temp()
if temp[0] < 5 or temp[0] > 30:
GPIO.output(18,GPIO.LOW)
GPIO.output(13,GPIO.LOW)
GPIO.output(12,GPIO.HIGH)
GPIO.output(23,GPIO.HIGH)
time.sleep(0.75)
return "Temprature is outside of limits"
if temp[0] < 15 and temp[0] > 5 or temp[0] < 30 and temp[0] > 20:
GPIO.output(12,GPIO.LOW)
GPIO.output(18,GPIO.LOW)
GPIO.output(13,GPIO.HIGH)
GPIO.output(23,GPIO.HIGH)
time.sleep(0.25)
GPIO.output(23,GPIO.LOW)
time.sleep(0.25)
GPIO.output(23,GPIO.HIGH)
time.sleep(0.25)
GPIO.output(23,GPIO.LOW)
return "Temperature is near the limits"
if temp[0] < 20 and temp[0] > 15:
GPIO.output(12,GPIO.LOW)
GPIO.output(13,GPIO.LOW)
GPIO.output(23,GPIO.LOW)
GPIO.output(18,GPIO.HIGH)
time.sleep(0.75)
return "Temperature is within the range"
while True:
temp = 0
print(read_temp())
print(LED_color())
time.sleep(1)
| 2,002 |
sddip/sddip/parameters.py
|
leoschleier/sddip
| 0 |
2171334
|
import os
import numpy as np
import pandas as pd
from sddip import utils, config
class Parameters:
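# Loads raw and scenario CSV data for a test case and derives the deterministic and stochastic model parameters (PTDF, costs, limits, probabilities, trial points).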
def __init__(
self,
test_case_name: str,
n_stages: int,
n_realizations: int,
raw_directory: str = "raw",
bus_file="bus_data.txt",
branch_file="branch_data.txt",
gen_file="gen_data.txt",
gen_cost_file="gen_cost_data.txt",
gen_sup_file="gen_sup_data.txt",
renewables_file="ren_data.txt",
storage_file="storage_data.txt",
scenario_file="scenario_data.txt",
):
raw_data_dir = os.path.join(
config.test_cases_dir, test_case_name, raw_directory
)
scenario_str = f"t{str(n_stages).zfill(2)}_n{str(n_realizations).zfill(2)}"
scenario_data_dir = os.path.join(
config.test_cases_dir, test_case_name, scenario_str
)
raw_data_importer = DataImporter(raw_data_dir)
scenario_data_importer = DataImporter(scenario_data_dir)
# DataFrames
self.bus_df = raw_data_importer.dataframe_from_csv(bus_file)
self.branch_df = raw_data_importer.dataframe_from_csv(branch_file)
self.gen_df = raw_data_importer.dataframe_from_csv(gen_file)
self.gen_cost_df = raw_data_importer.dataframe_from_csv(gen_cost_file)
self.ren_df = raw_data_importer.dataframe_from_csv(renewables_file)
self.storage_df = raw_data_importer.dataframe_from_csv(storage_file)
self.gen_sup_df = scenario_data_importer.dataframe_from_csv(gen_sup_file)
self.scenario_df = scenario_data_importer.dataframe_from_csv(scenario_file)
# Structural data
self.ptdf = None
self.incidence_matrix = None
self.n_lines = None
self.n_buses = None
self.n_gens = None
self.gens_at_bus = None
self.n_storages = None
self.storages_at_bus = None
# Cost data
self.gc = None
self.suc = None
self.sdc = None
self.cost_coeffs = None
# Power generation limits
self.pg_min = None
self.pg_max = None
# Generator ramp rates
self.r_up = None
self.r_down = None
self.r_su = None
self.r_sd = None
# Min up- and down-times
self.min_up_time = None
self.min_down_time = None
self.backsight_periods = None
# Storage charge/discharge rate limits
self.rc_max = None
self.rdc_max = None
# Maximum state of charge
self.soc_max = None
# Charge/discharge efficiencies
self.eff_c = None
self.eff_dc = None
# Line capacity
self.pl_max = None
# Stochastic problem parameters
self.n_stages = None
self.n_realizations_per_stage = None
self.n_scenarios = None
# Nodal probability
self.prob = None
# Power demand
self.p_d = None
# Renewable generation
self.re = None
# Cut constraints lower bound
self.cut_lb = None
# First stage trial points
self.init_x_trial_point = None
self.init_y_trial_point = None
self.init_x_bs_trial_point = None
self.init_soc_trial_point = None
self.initialize()
def initialize(self):
"""Triggers the initialization of all parameters based on the corresponding data frames
"""
self._calc_ptdf()
self._init_deterministic_parameters()
self._init_stochastic_parameters()
self._init_initial_trial_points()
def _calc_ptdf(self):
"""Calculates the Power Transmission Distribution Factor and infers the number of buses and lines
"""
nodes = self.bus_df.bus_i.values.tolist()
edges = self.branch_df[["fbus", "tbus"]].values.tolist()
ref_bus = self.bus_df.loc[self.bus_df.type == 3].bus_i.values[0]
graph = utils.Graph(nodes, edges)
self.incidence_matrix = graph.incidence_matrix()
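# Line series susceptances b = -x / (r**2 + x**2); the reference-bus column is dropped before inverting the bus susceptance matrix and re-inserted as zeros in the PTDF.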
b_l = (
-self.branch_df.x / (self.branch_df.r ** 2 + self.branch_df.x ** 2)
).tolist()
b_diag = np.diag(b_l)
m1 = b_diag.dot(self.incidence_matrix)
m2 = self.incidence_matrix.T.dot(b_diag).dot(self.incidence_matrix)
m1 = np.delete(m1, ref_bus - 1, 1)
m2 = np.delete(m2, ref_bus - 1, 0)
m2 = np.delete(m2, ref_bus - 1, 1)
ptdf = m1.dot(np.linalg.inv(m2))
self.ptdf = np.insert(ptdf, ref_bus - 1, 0, axis=1)
self.ptdf[abs(self.ptdf) < 10 ** -10] = 0
self.n_lines, self.n_buses = self.ptdf.shape
def _init_deterministic_parameters(self):
"""Initializes all deterministic parameters
"""
self.gen_cost_df
self.gen_df
self.branch_df
gc_positive = np.where(self.gen_cost_df.c1 > 0, self.gen_cost_df.c1, 1)
suc_positive = np.where(
self.gen_cost_df.startup > 0, self.gen_cost_df.startup, 1
)
sdc_positive = np.where(
self.gen_cost_df.shutdown > 0, self.gen_cost_df.shutdown, 1
)
costs_log10 = np.concatenate(
[
np.log10(gc_positive),
np.log10(suc_positive),
np.log10(sdc_positive),
np.zeros(1),
]
)
scaling_digits = int(np.max(costs_log10)) + 1
self.cost_div = 10 ** (max(scaling_digits - 2, 0))
self.gc = np.array(self.gen_cost_df.c1) / self.cost_div
self.suc = np.array(self.gen_cost_df.startup) / self.cost_div
self.sdc = np.array(self.gen_cost_df.shutdown) / self.cost_div
# Storages
storage_buses = self.storage_df.bus.values.tolist()
self.n_storages = len(storage_buses)
self.storages_at_bus = [[] for _ in range(self.n_buses)]
s = 0
for b in storage_buses:
self.storages_at_bus[b - 1].append(s)
s += 1
self.rc_max = self.storage_df["Rc"].values.tolist()
self.rdc_max = self.storage_df["Rdc"].values.tolist()
self.soc_max = self.storage_df["SOC"].values.tolist()
self.eff_c = self.storage_df["Effc"].values.tolist()
self.eff_dc = self.storage_df["Effdc"].values.tolist()
# TODO Adjust penalty for slack variables
self.penalty = 10 ** 2
self.cost_coeffs = (
self.gc.tolist()
+ self.suc.tolist()
+ self.sdc.tolist()
+ [self.penalty] * 2
)
print(f"Cost coefficients: {self.cost_coeffs}")
self.pg_min = self.gen_df.Pmin.values.tolist()
self.pg_max = self.gen_df.Pmax.values.tolist()
self.pl_max = self.branch_df.rateA.values.tolist()
self.n_gens = len(self.gc)
# TODO Add ramp rate limits
self.r_up = self.gen_sup_df["R_up"].values.tolist()
self.r_down = self.gen_sup_df["R_down"].values.tolist()
self.r_su = [max(r, p) for r, p in zip(self.r_up, self.pg_min)]
self.r_sd = [max(r, p) for r, p in zip(self.r_down, self.pg_min)]
# TODO add min up and down times to problem data
self.min_up_time = self.gen_sup_df["UT"].values.tolist()
self.min_down_time = self.gen_sup_df["DT"].values.tolist()
self.backsight_periods = [
max(ut, dt) for ut, dt in zip(self.min_up_time, self.min_down_time)
]
# Lists of generators at each bus
#
# Example: [[0,1], [], [2]]
# Generator 1 & 2 are located at bus 1
# No Generator is located at bus 2
# Generator 3 is located at bus 3
gen_buses = self.gen_df.bus.values.tolist()
gens_at_bus = [[] for _ in range(self.n_buses)]
g = 0
for b in gen_buses:
gens_at_bus[b - 1].append(g)
g += 1
self.gens_at_bus = gens_at_bus
def _init_stochastic_parameters(self):
"""Initializes all stochastic parameters
"""
scenario_df = self.scenario_df
self.n_realizations_per_stage = scenario_df.groupby("t")["n"].nunique().tolist()
self.n_stages = len(self.n_realizations_per_stage)
self.n_scenarios = np.prod(self.n_realizations_per_stage)
prob = []
p_d = []
re = []
for t in range(self.n_stages):
stage_df = scenario_df[scenario_df["t"] == t + 1]
p_d.append(
stage_df[
scenario_df.columns[
scenario_df.columns.to_series().str.contains("Pd")
]
].values.tolist()
)
re.append(
stage_df[
scenario_df.columns[
scenario_df.columns.to_series().str.contains("Re")
]
].values.tolist()
)
prob.append(stage_df["p"].values.tolist())
self.prob = prob
self.p_d = p_d
self.re = re
self.cut_lb = [0] * self.n_stages
def _init_initial_trial_points(self):
"""Initializes the first stage trial points
"""
self.init_x_trial_point = [0] * self.n_gens
self.init_y_trial_point = [0] * self.n_gens
self.init_x_bs_trial_point = [
[0] * n_periods for n_periods in self.backsight_periods
]
self.init_soc_trial_point = [0.5 * soc for soc in self.soc_max]
class DataImporter:
def __init__(self, data_directory: str = None):
self.data_directory = data_directory if data_directory else ""
def dataframe_from_csv(
self, file_path: str, delimiter: str = "\s+"
) -> pd.DataFrame:
path = os.path.join(self.data_directory, file_path)
df = pd.read_csv(path, sep=delimiter)
return df
| 9,803 |
2_semester/Quiz_animation.py
|
Winterpuma/bmstu_python
| 5 |
2173460
|
# Python quiz.
# <NAME>, IU7-25
from pygame import *
import sys
clock = time.Clock()
size = width, height = 800, 600
screen = display.set_mode(size)
display.set_caption('KR')
WHITE = 255, 255, 255
BLACK = 0, 0, 0
YELLOW = 255, 255, 100
BLUE = 0, 0, 255
UFO_x, UFO_y = 600, 500
UFO_w, UFO_h = 150, 80
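# Animation loop (~5 FPS): the UFO shrinks a little and drifts up and to the right each frame while the house stays static.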
while 1:
clock.tick(5)
for e in event.get():
if e.type == QUIT:
quit()
sys.exit()
screen.fill(WHITE)
draw.line(screen, BLACK, (0, 400), (400, 400), 2)
draw.line(screen, BLACK, (400, 540), (800, 540), 2)
draw.line(screen, BLACK, (400, 400),(400, 540), 2)
# UFO
draw.ellipse(screen, BLACK, (UFO_x-(UFO_w/2), UFO_y-(UFO_h/2), UFO_w, UFO_h), 2)
draw.circle(screen, BLACK, (UFO_x, UFO_y-int((5/6)*UFO_h)), int(UFO_h/3), 2)
UFO_w *= 0.98
UFO_h *= 0.975
UFO_x += 3
UFO_y -= 5
# House
draw.rect(screen, BLUE, (270, 80, 20, 100))
draw.polygon(screen, YELLOW, ((70, 200), (320, 200), (195, 100)))
draw.rect(screen, BLACK, (70, 200, 250, 200), 2)
draw.rect(screen, BLACK, (100, 240, 70, 70), 2)
draw.rect(screen, BLACK, (220, 300, 70, 70), 2)
draw.line(screen, BLACK, (100, 275),(170, 275), 2)
draw.line(screen, BLACK, (220, 335), (290, 335), 2)
draw.line(screen, BLACK, (135, 275),(135, 310), 2)
draw.line(screen, BLACK, (255, 335), (255, 370), 2)
display.flip()
| 1,483 |
stopcovid/drill_progress/aws_lambdas/schedule_next_drills_to_trigger.py
|
celestinosalim/dialog-engine
| 0 |
2173473
|
from stopcovid.drill_progress.drill_scheduler import DrillScheduler
from stopcovid.drill_progress.drill_progress import DrillProgressRepository
from stopcovid.utils.logging import configure_logging
from stopcovid.utils.verify_deploy_stage import verify_deploy_stage
configure_logging()
INACTIVITY_THRESHOLD_MINUTES = 720
SCHEDULING_WINDOW_MINUTES = 180
def handler(event, context):
verify_deploy_stage()
DrillScheduler().schedule_drills_to_trigger(
DrillProgressRepository().get_progress_for_users_who_need_drills(
INACTIVITY_THRESHOLD_MINUTES
),
SCHEDULING_WINDOW_MINUTES,
)
return {"statusCode": 200}
| 660 |
Source/JackFramework/Evaluation/listhandler.py
|
Archaic-Atom/JackFramework
| 13 |
2171694
|
# -*- coding: utf-8 -*-
class ListHandler(object):
"""docstring for ListHandler"""
def __init__(self):
super().__init__()
@staticmethod
def list_add(list_A: list, list_B: list) -> list:
assert len(list_A) == len(list_B)
return [item + list_B[i] for i, item in enumerate(list_A)]
@staticmethod
def list_div(list_A: list, num: float) -> list:
return [item / num for _, item in enumerate(list_A)]
@staticmethod
def list_mean(list_A: list) -> list:
return [item for _, item in enumerate(list_A)]
@staticmethod
def double_list_add(list_A: list, list_B: list = None) -> list:
assert type(list_A) == list
assert type(list_A[0]) == list
if list_B is None:
return list_A
for i, item in enumerate(list_A):
list_A[i] = ListHandler.list_add(item, list_B[i])
return list_A
@staticmethod
def double_list_div(list_A: list, num: float) -> list:
res = []
for item in list_A:
tem_res = ListHandler.list_div(item, num)
res.append(tem_res)
return res
| 1,140 |
Modules/TrianglesCircles/first_vectors.py
|
hillegass/sequence
| 10 |
2173266
|
import numpy as np
# Create two vectors
v = np.array([2,3,4])
u = np.array([-1,-2,3])
print(f"u = {u}, v = {v}")
# Add them
w = v + u
print(f"u + v = {w}")
# Multiply by a scalar
w = v * 3
print(f"v * 3 = {w}")
# Get the magnitude
mv = np.linalg.norm(v)
mu = np.linalg.norm(u)
print(f"|v| = {mv:.2f}, |u| = {mu:.2f}")
# Take the dot product
d = v @ u
print("v @ u =", d)
# Get the angle between the vectors
a = np.arccos(d / (mv * mu))
print(f"The angle between u and v is {a * 180 / np.pi:.2f} degrees")
| 512 |
src/pyplot_template.py
|
clovadev/opencv-python
| 0 |
2171793
|
from matplotlib import pyplot as plt
# Function that builds the structure of an image display subplot
def plt_arch(plot_number, title, src):
plt.subplot(plot_number), plt.imshow(src, cmap='gray')
plt.title(title), plt.xticks([]), plt.yticks([])
| 215 |
Part 1/Chapter 9/exercise_9.15.py
|
kg55555/pypractice
| 0 |
2173040
|
from random import randint, choice
import string
class Lottery():
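# Draws a "winning" pool of ten two-digit numbers plus five lowercase letters; chance() counts random picks until a 4-character draw matches the given entry.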
def __init__(self):
self.winner = self.number_draw()
def number_draw(self):
winner = ""
lottery = [randint(10, 99) for number in range(1, 11)]
for number in range(1, 6):
lottery.append(choice(string.ascii_lowercase))
return lottery
def chance(self, entry):
count = 0
while True:
winner = ""
for number in range(1, 5):
winner += str(choice(self.winner))
count += 1
if winner == entry:
return count
lotto = Lottery()
while True:
wrong_char = False
all_num = ''
for v in lotto.winner:
all_num += str(v)
guess = input(f"Today's lottery numbers are: {lotto.winner}\nPlace your bets!\n")
for index in range(len(guess)):
if guess[index] not in all_num:
wrong_char = True
if wrong_char:
continue
print(lotto.chance(guess))
| 1,017 |
tests/__init__.py
|
snickell/wiz_light
| 338 |
2173420
|
"""Tests for the WiZ Light integration."""
import json
from homeassistant.components.wiz_light.const import DOMAIN
from homeassistant.const import CONF_IP_ADDRESS, CONF_NAME
from homeassistant.helpers.typing import HomeAssistantType
from tests.common import MockConfigEntry
FAKE_BULB_CONFIG = json.loads(
'{"method":"getSystemConfig","env":"pro","result":\
{"mac":"ABCABCABCABC",\
"homeId":653906,\
"roomId":989983,\
"moduleName":"ESP_0711_STR",\
"fwVersion":"1.21.0",\
"groupId":0,"drvConf":[20,2],\
"ewf":[255,0,255,255,0,0,0],\
"ewfHex":"ff00ffff000000",\
"ping":0}}'
)
REAL_BULB_CONFIG = json.loads(
'{"method":"getSystemConfig","env":"pro","result":\
{"mac":"ABCABCABCABC",\
"homeId":653906,\
"roomId":989983,\
"moduleName":"ESP01_SHRGB_03",\
"fwVersion":"1.21.0",\
"groupId":0,"drvConf":[20,2],\
"ewf":[255,0,255,255,0,0,0],\
"ewfHex":"ff00ffff000000",\
"ping":0}}'
)
TEST_SYSTEM_INFO = {"id": "ABCABCABCABC", "name": "Test Bulb"}
TEST_CONNECTION = {CONF_IP_ADDRESS: "1.1.1.1", CONF_NAME: "Test Bulb"}
async def setup_integration(
hass: HomeAssistantType,
) -> MockConfigEntry:
"""Mock ConfigEntry in Home Assistant."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_SYSTEM_INFO["id"],
data={
CONF_IP_ADDRESS: "127.0.0.1",
CONF_NAME: TEST_SYSTEM_INFO["name"],
},
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
| 1,586 |
surrogate/estimator/tests/test_neural_network.py
|
liujiamingustc/phd
| 3 |
2173338
|
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
# License: Apache License, Version 2.0
# Create: 2016-12-02
# import itertools
# import unittest
#
# from numpy import array, linspace, sin, cos, pi
from sklearn.neural_network import MLPRegressor
from surrogate.estimator import ANNSurrogate
if __name__ == "__main__":
X = [[0., 0.], [1., 1.], [10., 10.]]
y = [0.0, 1.0, 10.0]
x_pred = [[5., 5.], [-10., -2.]]
surrogate = ANNSurrogate(algorithm='l-bfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
surrogate.fit(X, y)
y_pred = surrogate.predict(X)
# print surrogate.regressor
# print y_pred
regressor = MLPRegressor(algorithm='l-bfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
regressor.fit(X, y)
y_pred = regressor.predict(X)
print(regressor)
print(y_pred)
| 1,391 |
nambaone/bot.py
|
erjanmx/python-nambaone-bot
| 0 |
2168576
|
import requests
from . chat import Chat
from os.path import exists
from . message import Message
from . event_handler import EventHandler
from . exceptions import ClientException, FileDownloadException, FileUploadException
class Bot:
def __init__(self,
token,
base_url=None,
base_file_url=None,
error_handler=None,
user_follow_handler=None,
user_unfollow_handler=None,
message_new_handler=None,
message_update_handler=None,
chat_new_handler=None):
self._base_url = base_url or 'https://api.namba1.co'
self._base_file_url = base_file_url or 'https://files.namba1.co'
self._token = token
self.handler = EventHandler(
self,
error_handler,
user_follow_handler,
user_unfollow_handler,
message_new_handler,
message_update_handler,
chat_new_handler
)
self._response = {
'code': 200,
'success': True,
}
@property
def response(self):
self._response['success'] = self._response['code'] == 200
return self._response
@property
def header(self):
return {
'X-Namba-Auth-Token': self._token
}
def run(self, request):
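# Dispatch an incoming update: resolve the event handler, build the update object, then call the matching user callback; any exception triggers the error handler and a 520 response code.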
event = str(request['event']).replace('/', '_')
try:
update = getattr(self.handler, 'event_{}'.format(event))(request['data'])
self.__call_handler(event, update)
except Exception as e:
self.__call_handler('error', {'event': event, 'error': repr(e)})
self._response['code'] = 520
def __call_handler(self, event, update):
if hasattr(self.handler, event) and callable(getattr(self.handler, event)):
getattr(self.handler, event)(self, update)
def _parse_response(self, response_raw):
response = response_raw.json()
if 'success' not in response:
raise ClientException('No valid response returned')
if not response['success']:
error_msg = 'Unknown error' if 'message' not in response else response['message']
error_msg += ' in "{}" request'.format(response_raw.url)
raise ClientException(error_msg)
return response
def _get(self, url, params=()):
return self._parse_response(
requests.get(url, params=params, headers=self.header)
)
def _post(self, url, params=()):
return self._parse_response(
requests.post(url, data=params, headers=self.header)
)
def send_message(self, chat_id, content, content_type):
params = {
'type': content_type,
'content': content,
}
url = '{}/chats/{}/write'.format(self._base_url, chat_id)
response = self._post(url, params)
return Message.from_dict(response['data'])
def create_chat(self, user_id, name='', image=''):
params = {
'name': name,
'image': image,
'members[]': user_id,
}
url = '{}/chats/create'.format(self._base_url)
response = self._post(url, params)
return Chat.from_dict(response['data'])
def typing_start(self, chat_id):
url = '{}/chats/{}/typing'.format(self._base_url, chat_id)
return self._get(url)
def typing_stop(self, chat_id):
url = '{}/chats/{}/stoptyping'.format(self._base_url, chat_id)
return self._get(url)
def get_file(self, token):
"""Use this method to download file by token"""
params = {'token': token}
response = requests.get(self._base_file_url, params=params)
try:
message = response.json()
# if no errors occurred and valid json returned
# then file was not found
raise FileDownloadException(message['message'])
except ValueError:
return response.content
def send_file(self, file_path):
"""Use this method to upload file"""
if not exists(file_path):
raise FileUploadException('File does not exist')
response = None
with open(file_path, 'rb') as content:
files = {'file': content}
response = requests.post(self._base_file_url, files=files)
content = response.json()
if not content['success'] or 'file' not in content:
error_msg = content['message'] if 'message' in content else 'Unknown error'
raise FileUploadException(error_msg)
return content['file']
| 4,692 |
build/lib/jwringcentral/exportdata.py
|
MIKEJW08/Python-Package-and-Flask-Dashboard
| 0 |
2173500
|
import pandas as pd
import sqlite3
def ExportDB(df):
con = sqlite3.connect("/Users/jiayiwang08/Desktop/sqlite/test.db")
df.to_sql('new_table', con, if_exists='replace', index=False)
| 194 |
datasets/bn/recreate-bn-transcript3.py
|
belambert/asr_tools
| 2 |
2173133
|
#!/usr/bin/python
import sys
import re
import string
import itertools
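# Aligns Sphinx decoder output against the LDC reference transcript token by token, skipping noise markers and tolerating punctuation differences.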
sphinx_file=sys.argv[1]
ldc_file=sys.argv[2]
ldc_lines = []
sphinx_lines = []
ldc_tokens = []
sphinx_tokens = []
def noise_token_p(str):
if len(str) > 1 and str[0] == '+' and str[1] == '+' and str[-1] == '+' and str[-2] == '+':
return True
if str[0] == '{' and str[-1] == '}':
return True
if str[0] == '[' and str[-1] == ']':
return True
if str[0] == '#' and str[-1] == '#':
return True
if str == "EH" or str == "UH" or str == "UM" or str == "((" or str == "))":
return True
def remove_alt_pron(str):
if str[-1] == ')' and str[-3] == '(':
str = str[:-3]
return str
# Read the LDC file
for line in open(ldc_file).readlines():
if not line[0] == "<":
line = line[:-1]
line = line.upper()
line = line.replace("_", " ")
line = line.replace("-", " ")
line = line.replace("{", " ")
line = line.replace("}", " ")
line = line.replace("[", " ")
line = line.replace("]", " ")
tokens = line.split()
tokens = filter(lambda x: not noise_token_p(x), tokens)
ldc_lines.append(line)
ldc_tokens.extend(tokens)
# Read the Sphinx file -- try #2
for line in open(sphinx_file).readlines():
m = re.match("(.*)\((.+)\)", line)
trans = m.group(1)
id = m.group(2)
id_tokens = id.split("_")
id_tokens[1] = int(id_tokens[1])
id_tokens[2] = int(id_tokens[2])
trans = trans.replace("<s> ", "")
trans = trans.replace("<sil> ", "")
trans = trans.replace(" </s>", "")
trans = trans.upper()
trans = trans.replace("-", " ")
trans_tokens = trans.split()
trans_tokens = filter(lambda x: not noise_token_p(x), trans_tokens)
trans_tokens = map(remove_alt_pron, trans_tokens)
sphinx_lines.append([id_tokens[0], id_tokens[1], id_tokens[2], id, trans_tokens])
sphinx_lines.sort(key=lambda x: x[1])
sphinx_lines.sort(key=lambda x: x[0])
prev_end = 0
prev_filename = None
print "BEFORE: %d"%len(sphinx_lines)
#for line in sphinx_lines:
for i in range(len(sphinx_lines)):
line = sphinx_lines[i]
filename = line[0]
begin = line[1]
end = line[2]
if begin + 4 < prev_end and filename == prev_filename:
#print prev_end
#print prev_filename
#print line
#sphinx_lines.remove(line)
sphinx_lines[i] = None
else:
prev_end = end
prev_filename = filename
sphinx_lines = filter(lambda x: x != None, sphinx_lines)
print "AFTER: %d"%len(sphinx_lines)
sphinx_tokens = map(lambda x: x[4], sphinx_lines)
print sphinx_tokens[0:10]
sphinx_tokens = list(itertools.chain(*sphinx_tokens))
print "LDC tokens: %d"%len(ldc_tokens)
print "Sphinx tokens: %d"%len(sphinx_tokens)
print "LDC tokens: %s"%ldc_tokens[0:10]
print "Sphinx tokens: %s"%sphinx_tokens[0:10]
remove_char_set = ["@", "*", "\"", "!", "^", ",", ".", "%", "?", "+", "'"]
def fuzzy_string_match(str1, str2):
for char in remove_char_set:
str1 = str1.replace(char, "")
str2 = str2.replace(char, "")
return str1 == str2
ldc_matches=[None]*len(sphinx_tokens)
error_token = None
ldc_index = 0
sphinx_index = 0
count_limit = 1597896
non_match_limit = 5000
sphinx_total = len(sphinx_tokens)
while ldc_index < count_limit:
sphinx_token = sphinx_tokens[sphinx_index]
ldc_token = ldc_tokens[ldc_index]
if fuzzy_string_match(ldc_token, sphinx_token):
ldc_index += 1
sphinx_index += 1
ldc_matches[sphinx_index] = ldc_token
print " MATCH: %25s == %25s (%d of %d)"%(sphinx_token, ldc_token, sphinx_index, sphinx_total)
else:
non_match_counter = 0
while not fuzzy_string_match(ldc_token, sphinx_token) and ldc_index < count_limit:
print "NO MATCH: %25s != %25s (%d of %d)"%(sphinx_token, ldc_token, sphinx_index, sphinx_total)
ldc_index += 1
ldc_token = ldc_tokens[ldc_index]
non_match_counter += 1
if non_match_counter > non_match_limit:
print "Stopping due to more than %d consecutive non-matches!"%non_match_limit
quit()
print " MATCH: %25s == %25s (%d of %d)"%(sphinx_token, ldc_token, sphinx_index, sphinx_total)
sphinx_index += 1
ldc_index += 1
ldc_matches[sphinx_index] = ldc_token
print "LDC index: %d"%ldc_index
print "Sphinx index: %d"%sphinx_index
| 4,488 |
backend/histocat/api/check/controller.py
|
BodenmillerGroup/histocat-web
| 4 |
2172817
|
import dramatiq
from fastapi import APIRouter, Depends
from pydantic import EmailStr
from histocat.api.security import get_admin
from histocat.core.utils import send_test_email
from .dto import MsgDto
router = APIRouter()
@router.post("/test-worker/", response_model=MsgDto, status_code=201)
def test_worker(msg: MsgDto, user=Depends(get_admin)):
"""
Test worker
"""
broker = dramatiq.get_broker()
message = dramatiq.Message(
actor_name="test_worker",
queue_name="default",
args=(),
kwargs={"word": msg.msg},
options={},
)
broker.enqueue(message)
return {"msg": "Word received"}
@router.post("/test-email/", response_model=MsgDto, status_code=201)
def test_email(
email_to: EmailStr,
user=Depends(get_admin),
):
"""
Test emails
"""
send_test_email(email_to=email_to)
return {"msg": "Test email submitted"}
| 913 |
send_email.py
|
brian-cummings/jira_grabber
| 0 |
2172910
|
from sparkpost import SparkPost
import configparser
import jira_model
import argparse
import logging
import json
logger = logging.getLogger("jiraLogger")
#parser = argparse.ArgumentParser(__file__)
#parser.add_argument('-u', dest='user', help='User id', type=str)
#parser.add_argument('-n', dest='user_name', help='User name', type=str)
#parser.add_argument('-e', dest='email', help='User email', type=str)
#parser.add_argument('-d', dest='days', help='Days to query', type=int)
#args = parser.parse_args()
sp_config = configparser.ConfigParser()
sp_config.read('config.ini')
sp_apikey = sp_config['sparkpost']['key']
sp_email = sp_config['sparkpost']['email']
sp_noreply = sp_config['sparkpost']['noreply']
sp_base_uri = sp_config['sparkpost']['base_uri']
sp = SparkPost(sp_apikey, base_uri=sp_base_uri)
def send_recap_email(user,username,email,days=None):
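# Builds an HTML table of the user's daily Jira worklog hours and sends it via
# SparkPost; days=None means this is the user's first summary email.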
table = ""
results = jira_model.return_worklogs(user, days)
email_body = '<p>Hello ' + username + ',</p>'
if days is None:
results_intro_text = '<p>This is your first email! Here are your results since you started tracking:</p>'
subject_text = 'Your First Worklog Summary'
else:
results_intro_text = '<p>Here are your results for the past ' + str(days) + ' days:</p>'
subject_text = 'Your ' + str(days) + ' day Jira Worklog'
results = list(results)  # materialize so the result set can be counted and then iterated
result_count = len(results)
if result_count > 0:
for date, amount in results:
amount_str = ('%f' % amount).rstrip('0').rstrip('.')
final_string = '<tr><td align="center" valign="top">' + date.strftime("%B %d, %Y") \
+ '</td><td align="center" valign="top">' + amount_str +'</td></tr>'
table = table + final_string
email_body = email_body + results_intro_text \
+ '<table border="1" cellpadding="0" cellspacing="0" width="50%" >' \
'<tr> <th bgcolor="#D3D3D3">Date</th><th bgcolor="#D3D3D3">Hours</th>' \
+ table + '</table>'
else:
email_body = email_body + '<p>You have no worklogs for the past ' + str(days) + ' days.</p>'
response = sp.transmissions.send(
recipients=[
{
'address': {
'email': email,
'name': username
},
"metadata" : {
"binding" : "dedicated1"
}
},
],
html=email_body,
from_email=sp_email,
subject=subject_text,
track_opens=True,
track_clicks=True,
reply_to=sp_noreply
)
accepted_recipients = response['total_accepted_recipients']
if accepted_recipients >= 1:
jira_model.update_user_emailedate(user)
#if __name__ == "__main__":
# send_recap_email(args.user, args.user_name, args.email, args.days)
| 2,845 |
drawer/src/dnn.py
|
franneck94/Digits-Recognition-Tensorflow
| 16 |
2172103
|
import os
import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
FILE_PATH = os.path.abspath(__file__)
PROJECT_PATH = os.path.dirname(os.path.dirname(FILE_PATH))
MODEL_PATH = os.path.join(PROJECT_PATH, "ressources", "weights", "dnn_mnist.h5")
def create_model() -> Sequential:
# Model params
num_features = 784
num_classes = 10
learning_rate = 0.001
optimizer = Adam(learning_rate=learning_rate)
model = Sequential()
model.add(Dense(units=500, input_shape=(num_features,)))
model.add(Activation("relu"))
model.add(Dense(units=300))
model.add(Activation("relu"))
model.add(Dense(units=100))
model.add(Activation("relu"))
model.add(Dense(units=num_classes))
model.add(Activation("softmax"))
model.compile(
loss="categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"]
)
return model
def nn_predict(model: Sequential, image: np.ndarray = None) -> int:
if image is not None and model is not None:
pred = model.predict(image.reshape(1, 784))[0]
pred = np.argmax(pred, axis=0)
return pred
else:
return -1
def nn_train() -> None:
# Dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Cast to np.float32
x_train = x_train.astype(np.float32)
y_train = y_train.astype(np.float32)
x_test = x_test.astype(np.float32)
y_test = y_test.astype(np.float32)
# Dataset variables
train_size = x_train.shape[0]
test_size = x_test.shape[0]
num_features = 784
num_classes = 10
# Compute the categorical classes
y_train = to_categorical(y_train, num_classes=num_classes)
y_test = to_categorical(y_test, num_classes=num_classes)
# Reshape the input data
x_train = x_train.reshape(train_size, num_features)
x_test = x_test.reshape(test_size, num_features)
epochs = 10
batch_size = 256
model = create_model()
model.fit(
x=x_train,
y=y_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, y_test),
)
model.save_weights(MODEL_PATH)
if __name__ == "__main__":
nn_train()
| 2,418 |
tests/unittesting/tasks/memory/test_subscribers.py
|
shepilov-vladislav/aiotasks
| 462 |
2172518
|
import time
import uuid
import asyncio
import msgpack
from aiotasks import build_manager
def test_memory_subscribers_oks(event_loop):
manager = build_manager(dsn="memory://", loop=event_loop)
globals()["test_memory_subscribers_oks_finished"] = False
@manager.subscribe("hello")
async def task_test_memory_subscribers_oks(topic, data):
if topic == "hello" and data == "world":
globals()["test_memory_subscribers_oks_finished"] = True
async def run():
manager.run()
await manager.publish("hello", "world")
await manager.wait(timeout=0.2, wait_timeout=0.1)
event_loop.run_until_complete(run())
manager.stop()
assert globals()["test_memory_subscribers_oks_finished"] is True
del globals()["test_memory_subscribers_oks_finished"]
def test_memory_subscribers_no_topics(event_loop):
manager = build_manager(dsn="memory://", loop=event_loop)
async def run():
manager.run()
await manager.publish("hello", "world")
await manager.wait(timeout=0.2, exit_on_finish=True, wait_timeout=0.2)
event_loop.run_until_complete(run())
manager.stop()
assert len(manager.topics_subscribers) == 0
def test_memory_subscribers_empty_topics(event_loop):
import logging
logger = logging.getLogger("aiotasks")
class CustomLogger(logging.StreamHandler):
def __init__(self):
super(CustomLogger, self).__init__()
self.content = []
def emit(self, record):
self.content.append(record.msg)
custom = CustomLogger()
logger.addHandler(custom)
manager = build_manager(dsn="memory://", loop=event_loop)
@manager.subscribe()
async def task_test_memory_subscribers_oks(topic, data):
if topic == "hello" and data == "world":
globals()["test_memory_subscribers_oks_finished"] = True
async def run():
manager.run()
await manager.publish("hello", "world")
await manager.wait(timeout=0.2, exit_on_finish=True, wait_timeout=0.2)
event_loop.run_until_complete(run())
manager.stop()
assert len(manager.topics_subscribers) == 0
assert "Empty topic fount in function 'task_test_memory_subscribers_oks'. Skipping it." in custom.content
def test_memory_subscribers_duplicated_topics(event_loop):
manager = build_manager(dsn="memory://", loop=event_loop)
@manager.subscribe("hello")
async def task_test_memory_subscribers_oks(topic, data):
pass
@manager.subscribe("hello")
async def task_test_memory_subscribers_oks_2(topic, data):
pass
async def run():
manager.run()
await manager.publish("hello", "world")
await manager.wait(timeout=0.2, exit_on_finish=True, wait_timeout=0.1)
event_loop.run_until_complete(run())
manager.stop()
assert len(manager.topics_subscribers) == 1
def test_memory_subscribers_timeout_raises(event_loop):
manager = build_manager(dsn="memory://", loop=event_loop)
globals()["test_memory_subscribers_timeout_raises_finished_tasks"] = False
@manager.subscribe("hello")
async def task_test_memory_subscribers_oks(topic, data):
if topic == "hello":
await asyncio.sleep(data, loop=event_loop)
globals()["test_memory_subscribers_timeout_raises_finished_tasks"] = True
async def run():
manager.run()
await manager.publish("hello", 5)
await manager.wait(timeout=0.5, exit_on_finish=True, wait_timeout=0.1)
event_loop.run_until_complete(run())
manager.stop()
assert globals()["test_memory_subscribers_timeout_raises_finished_tasks"] is False
del globals()["test_memory_subscribers_timeout_raises_finished_tasks"]
| 3,771 |
models/develset/src/utils/test_path.py
|
phdyang007/pytorch-CycleGAN-and-pix2pix
| 1 |
2171138
|
'''
Author: <NAME> @ CUHK-CSE
Homepage: https://dekura.github.io/
Date: 2021-01-24 16:24:55
LastEditTime: 2021-01-24 19:34:03
Contact: <EMAIL>
Description: test the pathlib
'''
import shutil
from pathlib import Path
# bimage_path = Path('/home/guojin/projects/develset_opc/levelset_net/binary_images')
# d_path = Path('/home/guojin/data/datasets/iccad_2013/targets')
# d_path = Path('/home/guojin/data/datasets/iccad_2013/ls_params')
# for test_dir in sorted(bimage_path.iterdir()):
# print(test_dir)
# for png in test_dir.glob('*.pt'):
# # print(png)
# # png_path = test_dir / png
# out_path = d_path / png.name
# shutil.copy(str(png), str(out_path))
t_path = Path('/home/guojin/data/datasets/iccad_2013/targets')
for t in sorted(t_path.glob('*.png')):
print(Path(t.stem).stem)
| 831 |
simconnect/cli.py
|
patricksurry/pysimconnect
| 3 |
2173168
|
import typer
from . import _typerpatch
import json
from typing import List, Optional
from textwrap import fill
from enum import Enum
from lunr.index import Index
from simconnect import SimConnect
import os
import re
app = typer.Typer()
thisdir = os.path.dirname(__file__)
with open(os.path.join(thisdir, 'scvars.json')) as f:
scvars = json.load(f)
for k in ['EVENTS', 'VARIABLES', 'UNITS']:
for d in scvars[k].values():
d['name_'] = d['name' if k != 'UNITS' else 'name_std'].replace(' ', '_')
class MetadataKind(str, Enum):
variable = 'variable'
event = 'event'
unit = 'unit'
def floatfmt(v, width=12, precision=3):
s = f"{v:{width}.{precision}f}".rstrip('0').rstrip('.')
s += ' ' * (width - len(s))
return s
def labelfmt(s, width=2*12):
label = s if len(s) < width else s[:width-4] + '...'
return f"{label:{width}s}"
def matchcase(s: str, prefix: str) -> Optional[str]:
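# Returns s completed from the typed prefix, keeping the prefix's casing;
# returns None when s does not start with the prefix (case-insensitively).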
if not prefix:
return s
elif not s.upper().startswith(prefix.upper()):
return None
elif len(prefix) == len(s):
return prefix
else:
tail = s[len(prefix):]
return prefix + (tail.upper() if prefix[-1].isupper() else tail.lower())
def scoped_autocomplete(kind: str, max_results=10):
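# Builds a shell-completion callback over the scvars metadata of the given kind
# (VARIABLES/EVENTS/UNITS), returning the shortest matching names first.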
def _complete(incomplete: str):
return sorted(
list(filter(None, (matchcase(d['name_'], incomplete) for d in scvars[kind].values()))),
key=lambda s: (len(s), s)
)[:max_results]
return _complete
simvardef = typer.Argument(..., autocompletion=scoped_autocomplete('VARIABLES'))
eventdef = typer.Argument(..., autocompletion=scoped_autocomplete('EVENTS'))
unitsdef = typer.Option(None, autocompletion=scoped_autocomplete('UNITS'))
def canonicalvars(simvars: List[str]) -> List[str]:
# simvar names appear to be case-insensitive, but ' ' vs '_' is important
return [s.upper().replace('_', ' ') for s in simvars]
@app.command()
def get(simvars: List[str] = simvardef, units: Optional[str] = unitsdef):
simvars = canonicalvars(simvars)
unitdesc = f" ({units})" if units else ''
for s in simvars:
with SimConnect(name='cli') as sc:
v = sc.get_simdatum(s.replace('_', ' '), units)
typer.echo(f"{s}{unitdesc} = {v}")
@app.command()
def watch(simvars: List[str] = simvardef, units: Optional[str] = unitsdef, interval: Optional[int] = 1):
simvars = canonicalvars(simvars)
typer.echo(f"Watching {', '.join(simvars)} every {interval} seconds")
with SimConnect(name='cli') as sc:
dd = sc.subscribe_simdata(
[dict(name=sv, units=units) for sv in simvars],
interval=interval
)
latest = 0
headings = None
while True:
# consume incoming messages, waiting up to a second
sc.receive(timeout_seconds=1)
if not headings:
# show staggered header across two lines
headings = list(dd.simdata.keys())
# even cols
typer.echo(''.join(labelfmt(headings[i]) for i in range(0, len(headings), 2)))
# odd cols
typer.echo(' '*12 + ''.join(labelfmt(headings[i]) for i in range(1, len(headings), 2)))
changed = list(dd.simdata.changedsince(latest).keys())
latest = dd.simdata.latest()
values = [
typer.style(
floatfmt(dd.simdata[k]),
fg=typer.colors.BLUE if k in changed else typer.colors.WHITE
)
for k in headings
]
typer.echo(''.join(values))
@app.command()
def set(simvar: str = simvardef, value: float = typer.Argument(...), units: Optional[str] = unitsdef):
simvar = canonicalvars([simvar])[0]
typer.echo(f"Setting {simvar} = {value}" + (f" ({units})" if units else ''))
with SimConnect(name='cli') as sc:
sc.set_simdatum(simvar, value, units)
@app.command()
def send(event: str = eventdef, value: Optional[float] = None):
event = event.upper()
typer.echo(f"Sending {event}({value})")
with SimConnect(name='cli') as sc:
sc.send_event(event, value or 0)
@app.command()
def search(name: List[str], kind: Optional[MetadataKind] = None, max_results: int = 10, brief: bool = False):
q = ' '.join(name)
if kind:
q += f" +kind:{kind.name.upper()}S"
with open(os.path.join(thisdir, 'scvars_idx.json')) as f:
scvarsidx = Index.load(json.load(f))
styles = dict(
# in mac terminal some double-width emoji need a trailing space to display correctly
# but this seems fine on windows terminal
VARIABLES=dict(color=typer.colors.BLUE, symbol="🧭"),
EVENTS=dict(color=typer.colors.GREEN, symbol="⚙️"), # or maybe? 🔔
UNITS=dict(color=typer.colors.RED, symbol="📐"),
DIMENSIONS=dict(color=typer.colors.MAGENTA, symbol="📏"),
)
indent = ' ' * 4
refs = scvarsidx.search(q)
if not refs and not q.endswith('*'):
# Try an implicit wildcard if there were no results
refs = scvarsidx.search(q.rstrip() + '*')
docs = []
for r in refs[:max_results]:
k, name = r['ref'].split('_', 1)
docs.append(dict(scvars[k][name], kind=k))
typer.echo(
'Showing '
+ typer.style(f"{len(docs)}/{len(refs)}", fg=typer.colors.BLUE)
+ ' results'
)
if not docs:
typer.echo(fill("""
Perhaps try wildcard 'alti*', fuzzy match 'alti~2'
or advanced options at https://lunr.readthedocs.io/en/latest/usage.html
""".strip()))
else:
if len(docs) < len(refs):
typer.echo('Increase --max-results for more')
typer.echo('')
for d in docs:
name = d['name_std'] if ',' in d['name'] else d['name']
style = styles[d['kind']]
label = typer.style(f"{name}", fg=style['color'], bold=True)
typer.echo(style['symbol'] + ' ' + label + f"{' ✏️' if d.get('settable') else ''}")
if brief:
continue
desc = d.get('description')
if desc:
s = re.sub(r'\s*\n\s*', ' ', desc).strip()
typer.echo(fill(s, initial_indent=indent, subsequent_indent=indent))
loc = '; '.join(filter(None, [d['page'], d['section'] or None])).lower()
typer.echo(f"{indent}Category: {loc}")
if d.get('units'):
lines = [s for s in d['units'].split('\n') if s.rstrip()]
lines[0] = f"{indent}Default units: {lines[0]} [{d['dimensions']}]"
typer.echo('\n'.join(lines))
if __name__ == "__main__":
app()
| 6,659 |
src/sf_network_discovery_initialize.py
|
SPSCommerce/carve
| 1 |
2173054
|
# import pylab as plt
import lambdavars
import json  # json.dumps is used in the __main__ block below; may already be provided by the wildcard import
from aws import *
def lambda_handler(event, context):
'''
discovers AWS accounts/regions in use in an Org and returns the results as a dict
'''
# need to purge S3 discovery folder before starting new discovery
aws_purge_s3_path('discovery/')
# get list of accounts/regions in use in the Org
accounts = aws_discover_org_accounts()
regions = aws_all_regions()
discovery_targets = []
for account_id, account_name in accounts.items():
discovery_targets.append({
"account_id": account_id,
"account_name": account_name
})
print(f"discovered {len(accounts)} accounts")
# return discovery_targets
result = {'accounts': discovery_targets, 'regions': regions}
return result
if __name__ == "__main__":
event = {}
result = lambda_handler(event, None)
print(json.dumps(result))
| 926 |
lib/dga_models/controller.py
|
sibeiyang/sgmn
| 130 |
2172780
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import math
import torch.nn.functional as F
class Controller(nn.Module):
def __init__(self, dim_word_output, T_ctrl):
super(Controller, self).__init__()
ctrl_dim = dim_word_output
# define c_0 and reset_parameters
self.c_init = Parameter(torch.FloatTensor(1, ctrl_dim))
self.reset_parameters()
# define fc operators
self.encode_que_list = nn.ModuleList([nn.Sequential(nn.Linear(ctrl_dim, ctrl_dim),
nn.Tanh(),
nn.Linear(ctrl_dim, ctrl_dim))])
for i in range(T_ctrl - 1):
self.encode_que_list.append(nn.Sequential(nn.Linear(ctrl_dim, ctrl_dim),
nn.Tanh(),
nn.Linear(ctrl_dim, ctrl_dim)))
self.fc1 = nn.Linear(2*ctrl_dim, ctrl_dim)
self.fc2 = nn.Linear(ctrl_dim, 1)
self.fc3 = nn.Linear(2*ctrl_dim, ctrl_dim)
self.T_ctrl = T_ctrl
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.c_init.size(1))
self.c_init.data.uniform_(-stdv, stdv)
def forward(self, lstm_seq, q_encoding, attn_mask):
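# At each control step t: project the question encoding, combine it with the
# previous control vector, attend over the word sequence with a masked softmax,
# and take the attention-weighted sum of word features as the new control vector.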
c_prev = self.c_init.expand(q_encoding.size(0), self.c_init.size(1))
words_weight_list = []
control_vector_list = []
for t in range(self.T_ctrl):
q_i = self.encode_que_list[t](q_encoding)
q_i_c = torch.cat([q_i, c_prev], dim=1)
cq_i = self.fc1(q_i_c)
cq_i_reshape = cq_i.unsqueeze(1).expand(-1, lstm_seq.size(1), -1)
interactions = cq_i_reshape * lstm_seq
interactions = torch.cat([interactions, lstm_seq], dim=2)
interactions = F.tanh(self.fc3(interactions))
logits = self.fc2(interactions).squeeze(2)
mask = (1.0 - attn_mask.float()) * (-1e30)
logits = logits + mask
logits = F.softmax(logits, dim=1)
norm_cv_i = logits * attn_mask.float()
norm_cv_i_sum = torch.sum(norm_cv_i, dim=1).unsqueeze(1).expand(logits.size(0), logits.size(1))
norm_cv_i[norm_cv_i_sum != 0] = norm_cv_i[norm_cv_i_sum != 0] / norm_cv_i_sum[norm_cv_i_sum != 0]
words_weight_list.append(norm_cv_i)
c_i = torch.sum(
norm_cv_i.unsqueeze(2).expand(norm_cv_i.size(0), norm_cv_i.size(1), lstm_seq.size(2)) * lstm_seq, dim=1)
c_prev = c_i
control_vector_list.append(c_prev)
return words_weight_list, control_vector_list
| 2,707 |
main4.py
|
AjsonZ/E04a-Sprites
| 0 |
2172807
|
#!/usr/bin/env python3
import utils, os, random, time, open_color, arcade
utils.check_version((3,7))
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Sprites Example"
class MyGame(arcade.Window):
def __init__(self):
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
arcade.set_background_color(open_color.white)
self.car_list = arcade.SpriteList()
def setup(self):
cars = ['bus','kart','police','buggy','ambulance','bus_school','hotdog','scooter','station','cycle']
for i in range(20):
car = random.choice(cars)
for i in range(10):
x = random.randint(0,i)
y = random.randint(0,600)
self.car_sprite = arcade.Sprite("Cars/{car}.png".format(car=car), 2)
self.car_sprite.center_x = x
self.car_sprite.center_y = y
self.car_list.append(self.car_sprite)
def on_draw(self):
arcade.start_render()
self.car_list.draw()
pass
def update(self, delta_time):
pass
def on_mouse_motion(self, x, y, dx, dy):
# move every car sprite to the mouse position (not just the last one created)
for car in self.car_list:
car.center_x = x
car.center_y = y
pass
def main():
""" Main method """
window = MyGame()
window.setup()
arcade.run()
if __name__ == "__main__":
main()
| 1,501 |
Probabillity/code.py
|
swantikag/ga-learner-dsb-repo
| 0 |
2172661
|
# --------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# code starts here
df = pd.read_csv(path)
total = len(df)
p_a = len(df[df["fico"]>700])/total
p_b = len(df[df["purpose"]=="debt_consolidation"])/total
df1 = df[df["purpose"]=="debt_consolidation"]
p_a_and_b = len(df[(df["fico"]>700) & (df["purpose"]=="debt_consolidation")])/total
p_a_b = p_a_and_b/p_a
p_b_a = p_a_and_b/p_b
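# independence check: A and B are independent when P(A|B) equals P(A)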
result = p_b_a == p_a
print(result)
# code ends here
# --------------
# code starts here
total = len(df)
new_df = df[df["paid.back.loan"]=="Yes"]
prob_lp = len(new_df)/total
df2 = df[df["credit.policy"]=="Yes"]
prob_cs = len(df2)/total
df_lp_cs = df[(df["paid.back.loan"]=="Yes") & (df["credit.policy"]=="Yes")]
prob_lp_a_cs = len(df_lp_cs)/total
# prob_pd_cs = prob_lp_a_cs/prob_cs
prob_pd_cs = new_df[new_df['credit.policy'] == 'Yes'].shape[0] / new_df.shape[0]
bayes = (prob_pd_cs * prob_lp) / prob_cs
print(bayes)
# code ends here
# --------------
# code starts here
df_purpose = df["purpose"].value_counts()
df_purpose.plot(kind="bar")
df1 = df[df["paid.back.loan"]=="No"]
df1["purpose"].value_counts().plot(kind="bar")
# code ends here
# --------------
# code starts here
inst_median = df["installment"].median()
inst_mean = df.installment.mean()
df.installment.hist()
df["log.annual.inc"].hist()
# code ends here
| 1,342 |
experiments/interferometer/coherence_length.py
|
jackerschott/AP21
| 0 |
2171343
|
import matplotlib.pyplot as plt
import numpy as np
from numpy import pi, sqrt, exp
import os
import scipy.constants as cs
from scipy.optimize import curve_fit
from scipy.signal import find_peaks_cwt
import datproc.print as dpr
import datproc.plot as dpl
## General
output = __name__ == '__main__'
def gauss(t, A, mu, sigma):
return A * exp(-0.5 * (t - mu)**2 / sigma**2) / sqrt(2 * pi * sigma**2)
## Data
lda = 532.0 * cs.nano
d_lda = 1.0 * cs.nano
v_mov = 0.1 * cs.milli
data = np.genfromtxt('data/interferometer/signal.csv', delimiter=',', skip_header=18)
t = data[:,3]
U = data[:,4]
## Evaluation
t_range = [-0.035, 0.065]
if output:
plt.subplots(num=2)
plt.xlabel(r'$t$ / s')
plt.ylabel(r'$U$ / V')
plt.xlim(*t_range)
plt.plot(t, U)
i_U_max = find_peaks_cwt(U, range(1,30), noise_perc=20)
i_U_max = i_U_max[U[i_U_max] > 0.0]
i_U_max = i_U_max[t[i_U_max] > t_range[0]]
i_U_max = i_U_max[t[i_U_max] < t_range[1]]
popt, pcov = curve_fit(gauss, t[i_U_max], U[i_U_max], p0=(2.5e-3, 0.015, 0.015))
d_popt = sqrt(np.diag(pcov))
if output:
x_fit = dpl.x_fit_like(t[i_U_max])
y_fit = gauss(x_fit, *popt)
data_pts, *_ = plt.plot(t[i_U_max], U[i_U_max], marker='o', ls='None')
plt.plot(x_fit, y_fit, color=data_pts.get_color())
if output:
print(dpr.val(popt[2], d_popt[2], name='Δt', unit='s'))
delta_s = v_mov * popt[2]
d_delta_s = v_mov * d_popt[2]
if output:
print(dpr.val(delta_s, d_delta_s, name='Δs', unit='m'))
L = 2 * pi * delta_s
d_L = L * d_delta_s / delta_s
if output:
print(dpr.val(L, d_L, name='L', unit='m'))
if output:
fig_folder_path = 'figures/interferometer'
if not os.path.exists(fig_folder_path):
os.makedirs(fig_folder_path)
for i in plt.get_fignums():
plt.figure(i).savefig(os.path.join(fig_folder_path, 'fig' + str(i) + '.pgf'), bbox_inches='tight', pad_inches=0.0)
plt.figure(i).savefig(os.path.join(fig_folder_path, 'fig' + str(i) + '.pdf'))
plt.show()
| 1,932 |
nmf/datasets/lfw_people.py
|
nssuperx/irl334-research-srcs
| 0 |
2172555
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
# lfw_people = fetch_lfw_people(data_home='./face', slice_=(slice(70, 195), slice(78, 172)))
lfw_people = fetch_lfw_people(data_home='./face', slice_=(slice(90, 170), slice(85, 165)), resize=0.2375) # 19 x 19 px
print(lfw_people.images.shape)
images_num = lfw_people.images.shape[0]
img_h = lfw_people.images[0].shape[0]
img_w = lfw_people.images[0].shape[1]
out_images = lfw_people.images.reshape(images_num, img_h * img_w).T
np.save('./face_images', out_images)
print(out_images.shape)
img = out_images.T[0].reshape(img_h, img_w)
print(img.shape)
plt.imshow(img)
plt.show()
# img = lfw_people.images[2]
# plt.imshow(img)
# plt.show()
| 741 |
pasa/dict/idiom/case.py
|
sonoisa/pasa
| 5 |
2171680
|
# -*- coding: utf-8 -*-
from pasa.utils import get_or_else
class Case(object):
def __init__(self, yaml):
self.base = get_or_else(yaml, 'base', "")
self.read = get_or_else(yaml, 'read', "")
self.pos = get_or_else(yaml, 'pos', "")
def __repr__(self):
return "{{base={}, read={}, pos={}}}".format(self.base, self.read, self.pos)
| 370 |
cerenkov3_data/edge_GTEx.py
|
ramseylab/cerenkov3
| 0 |
2173453
|
import os
from functools import reduce
import pandas as pd
from util_path import get_path
from util_ensembl import map_Ensembl_IDs_to_Entrez
def read_snp_df(fn):
return pd.read_csv(fn, header=None, sep="\t", usecols=[3], names=["rs_id"])
def list_files(directory, fn_suffix):
"""
list all files in a certain directory ending with a suffix
"""
return [f for f in os.listdir(directory) if f.endswith(fn_suffix)]
def read_eqtl_df(fn):
eqtl_df = pd.read_csv(fn, sep="\t", usecols=["gene_id", "gene_name", "rs_id_dbSNP147_GRCh37p13", "pval_perm", "pval_beta"])
eqtl_df = eqtl_df.rename({"gene_id": "gene_ensembl_id",
"rs_id_dbSNP147_GRCh37p13": "rs_id"}, axis=1)
return eqtl_df
def gen_snp_egene_map(snp_df, eqtl_dir, eqtl_suffix):
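# Yield one SNP-to-eGene mapping frame per GTEx tissue eQTL file, tagging each
# with its tissue (the file name before the suffix) as eqtl_source.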
eqtl_fn_list = list_files(eqtl_dir, eqtl_suffix)
for eqtl_fn in eqtl_fn_list:
eqtl_path = os.path.join(eqtl_dir, eqtl_fn)
eqtl_df = read_eqtl_df(eqtl_path)
snp_egene_map = snp_df.merge(eqtl_df, on="rs_id", how="inner")
eqtl_source = eqtl_fn.split(eqtl_suffix)[0]
snp_egene_map = snp_egene_map.assign(eqtl_source = eqtl_source)
yield snp_egene_map
if __name__ == "__main__":
snp_dir = get_path("vertex/SNP")
snp_fn = "osu19_SNP.bed"
snp_df = read_snp_df(os.path.join(snp_dir, snp_fn))
res_dir = get_path("resource/GTEx")
eqtl_dir = os.path.join(res_dir, "GTEx_Analysis_v7_eQTL")
eqtl_suffix = ".v7.egenes.txt"
snp_egene_map = pd.concat(gen_snp_egene_map(snp_df, eqtl_dir, eqtl_suffix), ignore_index=True)
# Get rid of version numbers
snp_egene_map.loc[:, "gene_ensembl_id"] = snp_egene_map.loc[:, "gene_ensembl_id"].apply(lambda x: x.split(".")[0])
snp_egene_map.to_csv(os.path.join(res_dir, "p1_SNP_x_GTEx.tsv"), sep="\t", index=False)
snp_egene_el = snp_egene_map.loc[:, ["rs_id", "gene_ensembl_id"]].drop_duplicates()
snp_egene_el = map_Ensembl_IDs_to_Entrez(snp_egene_el, ensembl_colname="gene_ensembl_id", new_colname="gene_id", keep_unmapped=True)
snp_egene_el.sort_values(by="gene_id", inplace=True)
output_dir = get_path("edge/snp-gene")
snp_egene_el.to_csv(os.path.join(output_dir, "SNP_x_GTEx.edgelist"), sep="\t", index=False, header=False)
| 2,308 |
scripts/element_search.py
|
mou3adb/spread_the_particle
| 4 |
2172418
|
import time, datetime
import numpy as np
import matplotlib.pyplot as pp
from interpolation_linear import psi1, psi2, psi3
#==============================================================================
def is_in_element(position, element, allNodes):
"""
This function tests the inclusion of a point in an element. It calculates
the form functions. If one is negative, it means that the point is outside.
NOTE: We subtract 1 from element.nodes since node ids start from 1, like
element ids.
example of allNodes array:
allNodes = np.array(list(zip(flow.nodes_X[0],flow.nodes_Y[0])))
"""
nodes_coords = allNodes[element.nodes - 1]
p1, p2, p3 = nodes_coords[[0, 1, 2]]
N1 = psi1(position, p1, p2, p3)
N2 = psi2(position, p1, p2, p3)
N3 = psi3(position, p1, p2, p3)
# It suffices that one interpolation function takes a negative value to
# deduce that the point is not inside the element.
if (N1 >= 0) and (N2 >= 0) and (N3 >= 0):
return True
else:
return False
def find_element(position, elements, allNodes):
for element in elements:
if is_in_element(position, element, allNodes):
return element
def find_opposite_neighbor(node, element, elements):
"""
This function returns the neighbor of 'element' that doesn't contain the
node 'node'.
NOTE: Valid only for triangular elements.
"""
neighbors = elements[element.neighbors - 1]
for neighbor in neighbors:
if node not in neighbor.nodes:
return neighbor
def find_element_partrack(position, current_element, elements, allNodes):
"""
This function implements the particle tracer algorithm of
Lohner and Ambrosiano (1990)
"""
nodes_coords = allNodes[current_element.nodes - 1]
p1, p2, p3 = nodes_coords[[0, 1, 2]]
N1 = psi1(position, p1, p2, p3)
N2 = psi2(position, p1, p2, p3)
N3 = psi3(position, p1, p2, p3)
if (N1 >= 0) and (N2 >= 0) and (N3 >= 0):
# In this case, the particle is still inside 'current_element'.
return current_element
else:
# e.g.
# If N3 has the most negative value, then index_smallest = 2.
# Thus point_id_smallest (>= 0) is the farthest node from 'position'.
index_smallest = np.argmin([N1, N2, N3])
point_id_smallest = current_element.nodes[index_smallest]
next_element = find_opposite_neighbor(point_id_smallest,
current_element,
elements)
return find_element_partrack(position, next_element, elements, allNodes)
def find_neighbors_aux(element, elements):
"""
This function finds the neighbors of 'element'.
"""
nodes = element.nodes[[0, 1, 2]]
for e in elements:
temp_nodes = e.nodes[[0, 1, 2]]
# check = [node1_in_or_out?, node2_in_or_out?, node3_in_or_out?]
check = []
for node in nodes:
if node in temp_nodes:
check.append(True)
else:
check.append(False)
# When an edge is shared by two elements, they have two nodes in
# common.
if check.count(True) == 2:
element.neighbors = np.concatenate([element.neighbors, [e.id]])
def find_neighbors(elements):
# This function may take a lot of time, since it explores the whole domain,
# and checks element by element.
t1 = time.time()
print('Searching neighbors of every element in the domain.')
for e in elements:
find_neighbors_aux(e, elements)
cpu_time = time.time() - t1
print('Done!')
print('CPU_TIME = %.2f SECONDS = %s (HH:MM:SS)' \
% (cpu_time, datetime.timedelta(seconds=cpu_time)))
#==============================================================================
# Section relating to element drawing
def draw_element(element, allNodes, color='olive'):
nodes_coords = allNodes[element.nodes - 1]
print(nodes_coords)
plot_params = {'linestyle': '-',
'color': color}
pp.plot(nodes_coords[[0,1,2,0],0], nodes_coords[[0,1,2,0],1], **plot_params)
def draw_elements(elements, allNodes, color='olive'):
for element in elements:
draw_element(element, allNodes, color)
| 4,302 |
masque/playground/pwa.py
|
dfdx/masque
| 3 |
2173250
|
from operator import itemgetter
import itertools as it
import random
import matplotlib.delaunay as triang
from masque.datasets import CKDataset
from masque.utils import implot, ij2xy
import pwa
def get_random_faces(datadir='data/CK', n_samples=4):
dataset = CKDataset(datadir)
data = [series for series in dataset.data if series.label != -1]
random_idxs = random.sample(range(len(data)), n_samples)
random_series = itemgetter(*random_idxs)(data)
images = [series.images[-1] for series in random_series]
shapes = [series.landmarks[-1] for series in random_series]
mean_shape = data[10].landmarks[0]
return images, shapes, mean_shape
def pwa_demo(datadir='data/CK'):
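# Warp a few random CK faces onto the mean shape with piecewise-affine warping
# and display the originals followed by the warped results.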
images, shapes, mean_shape = get_random_faces(datadir)
transformed = [pwa.warp(im, shape, mean_shape)
for im, shape in zip(images, shapes)]
# implot(zip(images, transformed))  # interleave, not zip!
implot(images + transformed)
| 971 |
tests/test_pdf.py
|
MartinThoma/edapy
| 17 |
2173497
|
# Third party
import pkg_resources
# First party
import edapy.pdf
def test_make_path_absolute():
path = "examples/book.pdf" # always use slash
filepath = pkg_resources.resource_filename("edapy", path)
edapy.pdf.get_pdf_info(filepath)
| 250 |
lib/python/qmk/tests/test_qmk_path.py
|
jskelcy/qmk_toolbox
| 0 |
2169688
|
import os
import qmk.path
def test_keymap_onekey_pytest():
path = qmk.path.keymap('handwired/onekey/pytest')
assert path == 'keyboards/handwired/onekey/keymaps'
def test_normpath():
path = qmk.path.normpath('lib/python')
assert path == os.path.join(os.environ['ORIG_CWD'], 'lib/python')
| 308 |
swiper/user/apis.py
|
Han-spros/swiper
| 0 |
2173271
|
from common import errors
from common.utils import is_phone_num
from libs.http import render_json
from user import logics
def verify_phone(request):
"""
验证手机号码
生成验证码
保存验证码
发送
"""
phone_num = request.POST.get('phone_num')
if is_phone_num(phone_num):
# 号码格式正确
if logics.send_verify_code(phone_num):
# 验证码生成发送
return render_json()
else:
return render_json(code = errors.SMS_SEND_ERR)
# 返回错误码
else:
return render_json(code=errors.PHONE_NUM_ERR)
# 返回错误码
| 574 |
piprot/piprot.py
|
emichal/piprot
| 0 |
2172539
|
import asyncio
import logging
from dataclasses import astuple
from datetime import timedelta, date
from itertools import chain
from piprot.models import Requirement, PackageInfo, Messages
from piprot.utils.pypi import PypiPackageInfoDownloader
from piprot.utils.requirements_parser import RequirementsParser
from typing import Optional, Tuple, List
logger: logging.Logger = logging.getLogger(__name__)
loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()
class Piprot:
def __init__(self, req_files: List[str], delay_in_days: int = 5) -> None:
self.pypi = PypiPackageInfoDownloader()
self.delay_timedelta = timedelta(days=delay_in_days)
self.requirements = list(
chain.from_iterable([RequirementsParser(req_file).parse() for req_file in req_files])
)
def main(self) -> int:
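# Check every requirement concurrently; exit code 1 if any package is outdated.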
tasks = [self._handle_single_requirement(requirement) for requirement in self.requirements]
has_outdated_packages = loop.run_until_complete(asyncio.gather(*tasks))
if any(has_outdated_packages):
return 1
return 0
async def _handle_single_requirement(self, requirement: Requirement) -> bool:
current_version, current_release_date = await self.pypi.version_and_release_date(
requirement
)
latest_version, latest_release_date = await self.pypi.version_and_release_date(
Requirement(requirement.package)
)
package_info = PackageInfo(
name=requirement.package,
latest_version=latest_version,
latest_release_date=latest_release_date,
current_version=current_version,
current_release_date=current_release_date,
)
is_outdated, message = self.__handle_single_requirement(package_info, requirement)
logger.error(message)
return is_outdated
def __handle_single_requirement(
self, package: PackageInfo, requirement: Requirement
) -> Tuple[bool, str]:
package_name, latest_version, _, current_version, _ = astuple(package)
if requirement.ignore:
message = Messages.IGNORED.format(package=requirement.package)
return False, message
if not all([latest_version, current_version]):
message = Messages.CANNOT_FETCH.format(
package=package_name, version=requirement.version
)
return False, message
if latest_version > current_version:
return self._is_rotten(package)
message = Messages.NOT_ROTTEN.format(
package=requirement.package, version=str(current_version)
)
return False, message
def _is_rotten(self, package: PackageInfo) -> Tuple[bool, str]:
if not package.latest_version.is_direct_successor(package.current_version):
return self._is_not_direct_successor_rotten(package)
return self._is_direct_successor_rotten(package)
def _is_direct_successor_rotten(self, package: PackageInfo) -> Tuple[bool, str]:
rotten_time = self.calculate_rotten_time(package.latest_release_date)
if rotten_time > self.delay_timedelta:
message = Messages.ROTTEN_DIRECT_SUCCESSOR.format(
package=package.name,
current_version=str(package.current_version),
rotten_days=rotten_time.days,
latest_version=str(package.latest_version),
)
return True, message
message = Messages.NOT_ROTTEN.format(
package=package.name, version=str(package.current_version)
)
return False, message
def _is_not_direct_successor_rotten(self, package: PackageInfo) -> Tuple[bool, str]:
if not all([package.latest_release_date, package.current_release_date]):
# since we cannot calculate if it's actually rotten, we assume it is
message = Messages.NO_DELAY_INFO.format(
package=package.name,
current_version=str(package.current_version),
latest_version=str(package.latest_version),
)
return True, message
rotten_time = self.calculate_rotten_time(
package.latest_release_date, package.current_release_date
)
if rotten_time > self.delay_timedelta:
timedelta_since_last_release = self.calculate_rotten_time(package.latest_release_date)
message = Messages.ROTTEN_NOT_DIRECT_SUCCESSOR.format(
package=package.name,
current_version=str(package.current_version),
rotten_days=rotten_time.days,
latest_version=str(package.latest_version),
days_since_last_release=timedelta_since_last_release.days,
)
return True, message
message = Messages.NOT_ROTTEN.format(
package=package.name, version=str(package.current_version)
)
return False, message
@staticmethod
def calculate_rotten_time(
latest_release_date: date, current_release_date: Optional[date] = None
) -> timedelta:
if current_release_date:
return latest_release_date - current_release_date
return date.today() - latest_release_date
| 5,283 |
sensor/bme280_sensor.py
|
alrock/meteopi
| 0 |
2173340
|
from datetime import datetime
import smbus2
import bme280
from sensor.sensor import Sensor
class BME280Sensor(Sensor):
CACHE_UPDATE_INTERVAL = 5 # in seconds
def __init__(self, port=1, address=0x76):
super().__init__('BME280')
self.address = address
self.bus = smbus2.SMBus(port)
self.calibration_params = bme280.load_calibration_params(self.bus, self.address)
self.data_cache = None
@property
def sample(self):
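# Read the sensor at most once every CACHE_UPDATE_INTERVAL seconds and
# serve cached values in between.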
if not self.data_cache or (datetime.now() - self.data_cache['timestamp']).seconds > self.CACHE_UPDATE_INTERVAL:
sample = bme280.sample(self.bus, self.address, self.calibration_params)
self.data_cache = {'temperature': sample.temperature, 'humidity': sample.humidity,
'pressure': sample.pressure, 'timestamp': sample.timestamp}
return self.data_cache
@property
def temperature(self):
return self.sample['temperature']
@property
def humidity(self):
return self.sample['humidity']
| 1,060 |
buildList1.py
|
prodotiscus/llsh-posts
| 0 |
2171197
|
#!/usr/bin/python3
import re, json
members_html = open('List1.html', 'r').read()
members_json = {'List1Members': []}
members = re.finditer(
r'<li><b>\s*([А-ЯЁа-яё]+)\s*<\/b>\s+([А-ЯЁа-яё]+)\s+\(\s*([А-ЯЁа-яё\-]+)\s*\)(\s+\[\*\])?<\/li>',
members_html
)
for member in members:
members_json['List1Members'].append({
"name": member.group(2) + " " + member.group(1),
"location": member.group(3)
})
with open('List1.json', 'w') as list1:
list1.write(json.dumps(members_json))
list1.close()
| 525 |
palsbet/urls.py
|
denis254/xpredict
| 0 |
2171961
|
from django.urls import path, include
from palsbet import views
from . views import rolloverh, play, information, register, punterpick, homeviptips, payment, homevip, timeofsending, modeofsending, home, viewolderesults, androidapp, rollover, viptips, guide, howmanyodds, privacy
urlpatterns = [
path('', home),
path('play/', play),
path('privacy/', privacy),
path('home/', home),
path('information/', information),
path('register/', register),
path('crafttechsolution/', views.optout, name = "optout"),
path('viewolderesults/', viewolderesults),
path('androidapp/', androidapp),
path('rollover/', rollover),
path('viptips/', viptips),
path('guide/', guide),
path('howmanyodds/', howmanyodds),
path('modeofsending/', modeofsending),
path('timeofsending/', timeofsending),
path('accounts/', include('django.contrib.auth.urls')),
path('home_vip/', homevip),
path('payment/', payment),
path('homevip/', homevip),
path('homeviptips/', homeviptips),
path('homepunterpicks/', punterpick),
path('homerollover/', rolloverh),
]
| 1,131 |
2017/d04.py
|
m1el/advent-of-code
| 0 |
2172911
|
from collections import defaultdict
with open('04.txt') as fd:
data = [line.strip().split(' ') for line in fd.readlines()]
valid = 0
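# a passphrase is valid when no two words are anagrams of each other
# (words are compared by their sorted letters)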
for l in data:
counts = defaultdict(int)
for w in l:
counts[''.join(sorted(w))] += 1
if not [w for w in counts.values() if w > 1]:
valid += 1
print(valid)
| 319 |
behave_django/runner.py
|
richardARPANET/behave-django
| 0 |
2172644
|
try:
from django.test.runner import DiscoverRunner
except ImportError:
from django.test.simple import DjangoTestSuiteRunner as DiscoverRunner
from behave_django.environment import BehaveHooksMixin
from behave_django.testcase import (BehaviorDrivenTestCase,
ExistingDatabaseTestCase)
class BehaviorDrivenTestRunner(DiscoverRunner, BehaveHooksMixin):
"""Test runner that uses the BehaviorDrivenTestCase"""
testcase_class = BehaviorDrivenTestCase
class ExistingDatabaseTestRunner(DiscoverRunner, BehaveHooksMixin):
"""Test runner that uses the ExistingDatabaseTestCase
This test runner nullifies Django's test database setup methods. Using this
test runner would make your tests run with the default configured database
in settings.py.
"""
testcase_class = ExistingDatabaseTestCase
def setup_databases(*args, **kwargs):
pass
def teardown_databases(*args, **kwargs):
pass
| 978 |
mdi/migrations/0068_survey_to_models_manytomany.py
|
inclusive-design/coop-map-directory-index
| 1 |
2173469
|
# Generated by Django 3.0.3 on 2020-05-17 22:33
from django.db import migrations
from django.core.exceptions import ObjectDoesNotExist
from mdi.models import Organization, LegalStatus, Sector, Challenge
from surveys.models import Ecosystem2020
from django.contrib.auth import get_user_model
def forward(apps, schema_editor):
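# Backfill the new many-to-many fields (legal status, sectors, challenges)
# from the matching Ecosystem2020 survey row for each recently created organization.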
for organization in Organization.objects.filter(created_at__gt='2020-03-14 22:43:01.000000'):
print("\nname: {} email: {}".format(organization.name, organization.admin_email))
survey = Ecosystem2020.objects.filter(a__range=(organization.created_at,organization.created_at)).get(f=organization.name)
# LegalStatus
for legal_status in survey.y.split(', '):
if legal_status != '' and legal_status != None:
l = ''
try:
l = LegalStatus.objects.get(name=legal_status)
print(" status_id: {} status: {}".format(l.id, l.name))
organization.legal_status.add(l)
except ObjectDoesNotExist:
print(" status_huh? {}".format(legal_status))
l = LegalStatus(name=legal_status, order=9999)
l.save()
organization.legal_status.add(l)
# Sector
for sector in survey.al.split(', '):
if sector != '' and sector != None and sector != 'fundraising' and sector != 'Technology':
sector = sector.replace('Technology-', 'Technology:')
sector = sector.replace('Art/Artistic Production', 'Art')
s = ''
try:
s = Sector.objects.get(name=sector)
print(" sector_id: {} sector: {}".format(s.id, s.name))
organization.sectors.add(s)
except ObjectDoesNotExist:
print(" sector_huh? {}".format(sector))
# Challenges
challenge = survey.ae
if challenge != '' and challenge != None:
c = ''
try:
c = Challenge.objects.get(name=challenge)
print(" challenge_id: {} challenge: {}".format(c.id, c.name))
except ObjectDoesNotExist:
print(" challenge_huh? {}".format(challenge))
c = Challenge(name=challenge, order=9999)
c.save()
organization.challenges.add(c)
class Migration(migrations.Migration):
dependencies = [
('mdi', '0067_auto_20200517_0503'),
]
operations = [
migrations.RunPython(forward)
]
| 2,603 |
early-immature-ideas/idea-1/step01_try_train_with_elegantrl.py
|
guang384/my-hft-thesis-2022
| 0 |
2173057
|
import gym
from elegantrl.agent import AgentModSAC
from elegantrl.config import get_gym_env_args, Arguments
from elegantrl.run import train_and_evaluate, train_and_evaluate_mp
import os
'''
Install ElegantRL:
pip install git+https://github.com/AI4Finance-LLC/ElegantRL.git
To fix the "swig"-related error during installation:
conda install swig
'''
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
gym.logger.set_level(40) # Block warning
get_gym_env_args(gym.make("LunarLanderContinuous-v2"), if_print=True)
env_func = gym.make
env_args = {
"env_num": 1,
"env_name": "LunarLanderContinuous-v2",
"max_step": 1000,
"state_dim": 8,
"action_dim": 2,
"if_discrete": False,
"target_return": 200,
"id": "LunarLanderContinuous-v2",
}
args = Arguments(AgentModSAC, env_func=env_func, env_args=env_args)
args.target_step = args.max_step
args.gamma = 0.99
args.eval_times = 2 ** 5
args.if_remove = False
if __name__ == '__main__':
train_and_evaluate_mp(args)
| 947 |
rook/catalog/__init__.py
|
roocs/roocs-wps-demo
| 0 |
2172746
|
from roocs_utils.exceptions import InvalidCollection
from .db import DBCatalog
from rook import CONFIG
def get_catalog(project):
if CONFIG[f"project:{project}"].get("use_catalog"):
try:
catalog = DBCatalog(project)
return catalog
except Exception:
raise InvalidCollection()
__all__ = [
"get_catalog",
"DBCatalog",
]
| 385 |
evaluate/previous_works/HoHoNet/lib/model/horizon_refinement/linear.py
|
Syniez/Joint_360depth
| 11 |
2172903
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def conv1dbnrelu(in_channels, out_channels, **kwargs):
return nn.Sequential(
nn.Conv1d(in_channels, out_channels, **kwargs),
nn.BatchNorm1d(out_channels),
nn.ReLU(inplace=True),
)
class Linear(nn.Module):
def __init__(self, c_mid, base_ch=256):
super(Linear, self).__init__()
self.conv_1x1 = conv1dbnrelu(c_mid, base_ch*4, kernel_size=1, bias=False)
self.out_channels = base_ch*4
def forward(self, feat):
feat = feat['1D']
feat = self.conv_1x1(feat)
return {'1D': feat}
| 629 |
other/export.py
|
2xx4ever/cmdb
| 1 |
2172641
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
from import_export import resources
from app01.models import Host, Asset
class HostResource(resources.ModelResource):
class Meta:
model = Host
fields = ('ip_pub', 'ip_prv', 'pwd_<PASSWORD>', '<PASSWORD>')
class AssetResource(resources.ModelResource):
class Meta:
model = Asset
fields = ('ip_pub', 'hostname', 'os', 'cpu_model', 'cpu', 'mem', 'disk', 'update_time')
| 477 |
utils/patrons.py
|
varghesejose2020/2021
| 6 |
2173181
|
import sys, csv, json
print("Generating patrons.json from Eventyay.com order list csv file. Source: "+sys.argv[1])
json_out = []
with open(sys.argv[1], 'r', encoding="utf-8") as csvfile:
datareader = csv.reader(csvfile)
for row in datareader:
if(row[2]=='completed' and row[5]=='Donation Ticket'):
json_out.append({
"name": "{} {}".format(row[15], row[16]),
"org_and_title": "{} @ {}".format(row[20], row[18]),
"donated_at": row[1]
})
output = json.dumps(json_out)
with open('patrons.json', 'w', encoding="utf-8") as f:
f.write(output)
print("Done, Check out patrons.json")
| 667 |
scripts/android/devices.py
|
appcelerator-archive/titanium_mobile_tooling
| 2 |
2172064
|
#!/usr/bin/env python
import os, sys
from androidsdk import *
if len(sys.argv) == 1:
print "Usage: %s <android-sdk>" % sys.argv[0]
sys.exit(1)
sdk = AndroidSDK(sys.argv[1])
devices = sdk.list_devices()
json = "["
for device in devices:
json += "{\"name\": \"%s\", \"port\": %d, \"is_emulator\": %s, \"is_offline\": %s}" % (device.get_name(), device.get_port(), str(device.is_emulator()).lower(), str(device.is_offline()).lower())
json += "]"
print json
| 462 |
HR.py
|
mauriciomani/HRM-and-Machine-Learning
| 1 |
2173427
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 14 21:19:20 2018
@author: <NAME>
"""
"""I am using scikit-learn library to find DECISION RULES from the decision trees.
So we can make decisions"""
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import tree
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('bmh')
# This is a simulated dataset
df = pd.read_csv("C:/Users/mauri/Desktop/Big Data/kaggle/HR_kaggle.csv")
df.head()
# Size of the dataset
df.shape
df.dtypes
# There are no null values
df.isnull().sum()
# create dummy variables
""" When modeling is very important to preprocess the data.
However right know I will just use the model I was given with. """
df = pd.get_dummies(df, columns = ['salary', 'sales'], drop_first = True)
x = df.drop(labels = 'left', axis = 1)
y = df['left']
feature_name = x.columns
#Train and test to check out the score of our model
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.35)
""" When modeling, picking an easy algorithm is way better than a more colpex.
Just follow the Occam's Razor or lex parsimonaie law. In other words, the simplest solution tends
to be the right one.
So if we have a simple hypothesis we should select one with simple assumptions.
This of course it is not an irrefutable principle.
That is why we will select as less complexity as possible.
In other words, a small tree (few nodes) might work well. It will also be more simple to extract some rules."""
# Fit the decision tree algorithm
clf = DecisionTreeClassifier(max_depth = 5)
clf.fit(x_train, y_train)
# Print the score of the model
print(clf.score(x_test, y_test))
tree.export_graphviz(clf, out_file='C:/Users/mauri/Desktop/d_tree.dot', feature_names=feature_name, class_names = ["left", "in"])
""" It will also be relevant to understant how important is each feature to the model we built.
We can say that those variables are important to the decision making of the employees. """
importances = clf.feature_importances_
# sort features by importance and keep only those above a small threshold,
# so the indices line up with both `importances` and `feature_name`
indices = np.argsort(importances)
indices = indices[importances[indices] > 0.001]
plt.figure()
plt.title('Importance of the features', fontdict = {'fontsize': 20, 'weight': 'bold','alpha': 0.67})
plt.barh(range(len(indices)), importances[indices], color='#5CD1BD', align='center')
plt.yticks(range(len(indices)), feature_name[indices])
plt.xlabel('Relative Importance')
| 2,441 |
TrippyMain/Garrisson/warehousemain.py
|
py-ranoid/Trippy
| 0 |
2172963
|
from Warehouse.Scripts.deets import generate_place_details
from Warehouse.Scripts.graphgen import mutualGen
from Warehouse.Scripts.normalizedata import normalize
from Warehouse.Scripts.bing_image import BingWebSearch
from Warehouse.Scripts.AudioRektnika import withPickle, withText
from wardogs import City, Attraction
import pandas as pd
def pusher(data):
images = BingWebSearch(search = data["CityName"])
desc = data["Description"]
helplines = dict()
helplines['police'] = data["HelplinePolice"]
helplines['fire'] = data["HelplineFire"]
helplines['ambulance'] = data["HelplineAmbulance"]
helplines['tourism'] = data["HelplineTourism"]
if pd.notna(desc):
# note: use the city name from the row; `city` is only defined further below
(en, enurl, hin, hinurl, spa, spaurl) = withText(desc, data["CityName"] + "Desc")
else:
desc = None
city = City(name = data["CityName"], helplines = helplines, lat = data["Latitude"], lng = data["Longitude"], images = images, desc = desc)
city.fire()
def addCity(city="Chennai"):
cities = pd.read_excel("Cities.xlsx")
cities = cities.set_index("CityCode")
cities = cities[pd.notnull(cities.index)]
cities = cities.set_index("CityName")
try:
data = cities.loc[city]
except KeyError as e:
print "That city is not yet supported"
print "Stacktrace: "
print e
quit()
pusher(data)
'''
mutualGen(city)
generate_place_details(city)
normalize(city) This script requires attraction descriptions
withPickle(city) This script requires attraction descriptions
'''
def addAllCities():
cities = pd.read_excel("Cities.xlsx")
cities = cities.set_index("CityCode")
cities = cities[pd.notnull(cities.index)]
for i in cities.index:
print "LOG: Adding city:", data["CityName"]
data = cities.loc[i]
pusher(data)
'''
mutualGen(city)
generate_place_details(city)
normalize(city) This script requires attraction descriptions
withPickle(city) This script requires attraction descriptions
'''
| 2,076 |
examples/errors2/components/filemode/component.py
|
risclog-solution/batou
| 34 |
2172324
|
from batou.component import Component
from batou.lib.file import File
class FileMode(Component):
def configure(self):
self += File('new-file.txt', mode='wrongmode')
| 180 |
src/generate_snapshot_data/Corpus.py
|
saheel1115/szz-
| 9 |
2173402
|
#!/usr/bin/python
import argparse
import os, sys, inspect
import os.path
import shutil
import logging
import datetime
from Config import Config
from DbEdits import DbEdits
from GitRepo import GitRepo
from OutDir import OutDir
from SnapShot import SnapShot
sys.path.append("src/util")
import Log
from Util import cd
import Util
class Corpus:
def __init__(self, projectPath, language, outDir, configFile, debug=True):
self.src_path = projectPath
self.language = language
self.out_path = outDir
self.cfg = configFile
self.debug = debug
proj_path = self.src_path.rstrip(os.sep)
self.project_name = proj_path.split(os.sep)[-1]
logging.info("project = %s\n", self.project_name)
self.snapshots = []
self.changed_files_per_date = {} #commit_date -> set(file_name)
self.edit_to_snapshot = {}
self.snapshot2edit = {}
#self.initEdits()
self.edits = self.fetchEdits()
self.initSnapshots()
def __str__(self):
retStr = "project : " + self.project_name + "\n"
retStr += "SnapShots : \n"
for s in self.snapshots:
retStr += str(s) + " "
return retStr
def printSnapshots(self):
for s in self.snapshots:
print s
def initEdits(self):
self.edits = self.fetchEdits()
# for e in self.edits:
# #print e.file_name, e.sha, e.commit_date
# file_name = e.file_name.replace(os.sep,'_')
# if not self.changed_files_per_date.has_key(e.commit_date):
# self.changed_files_per_date[e.commit_date] = set()
# self.changed_files_per_date[e.commit_date].add(file_name)
# if self.debug:
# for key in sorted(self.changed_files_per_date):
# print key, self.changed_files_per_date[key]
def fetchEdits(self):
logging.info("Going to fetch edits for project : %s", self.project_name)
db_config = self.cfg.ConfigSectionMap("Database")
logging.debug("Database configuration = %r\n", db_config)
db_edits = DbEdits(self.project_name, self.language)
db_edits.connectDb(db_config['database'], db_config['user'], db_config['host'], db_config['port'])
db_edits.fetchEditsFromTable(db_config['table'])
#logging.debug(db_edits)
return db_edits.edits
@staticmethod
def minKey(commitDate, snapshot):
if commitDate >= snapshot.date:
return (commitDate - snapshot.date).days
else:
return 9999
def mapEditToSnapshot(self):
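# Assign each edit to the most recent snapshot taken on or before its commit
# date; edits older than the first snapshot are skipped.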
for e in self.edits:
cd = e.commit_date
snap = min(self.snapshots, key=lambda sd : self.minKey(cd,sd))
if cd < snap.date:
print("---> skipping: commit_date %s: snapshot %s" % (cd, snap.date))
continue
self.edit_to_snapshot[e] = snap
snap.addEdit(e)
logging.debug("mapEditToSnapshot : <edit> : <snapshot>")
for key in self.edit_to_snapshot:
logging.debug("%s:%s" % (key, self.edit_to_snapshot[key]))
def initSnapshots(self):
snaps = [snap for snap in os.listdir(self.src_path)]
# 'ss_sha_info.txt' is not a snapshot directory, hence can be removed
# ...it actually contains some metadata about the commit SHAs of each snapshot
# ...which is used by `src/generate_asts_and_type_data/gather_typedata_into_csv.py`
snaps.remove('ss_sha_info.txt')
snaps.sort()
for snap in snaps:
s = SnapShot(self.src_path, snap, self.out_path)
self.snapshots.append(s)
self.mapEditToSnapshot()
def dump(self):
Util.cleanup(self.out_path + "/*")
for snap in self.snapshots:
snap.dumpTestFiles()
snap.dumpTrainFiles()
| 3,912 |
main.py
|
islandhuynh/snake
| 0 |
2172215
|
from scoreboard import Scoreboard
from turtle import Screen
from snake import Snake
from food import Food
import time
screen = Screen()
screen.setup(width=600, height=600)
screen.bgcolor("black")
screen.title("Snake Game by <NAME>")
screen.tracer(0)
snake = Snake()
food = Food()
score = Scoreboard()
screen.onkey(snake.move_up, "w")
screen.onkey(snake.move_down, "s")
screen.onkey(snake.move_left, "a")
screen.onkey(snake.move_right, "d")
screen.listen()
game_is_on = True
while game_is_on:
if snake.head.xcor() > 290 or snake.head.xcor() < -290 or snake.head.ycor() > 290 or snake.head.ycor() < -290:
score.reset()
snake.reset()
screen.update()
time.sleep(0.1)
snake.move()
if snake.head.distance(food) < 20:
food.refresh()
snake.extend()
score.increase_score()
for seg in snake.segments[1:]:
if snake.head.distance(seg) < 10:
score.reset()
snake.reset()
screen.exitonclick()
| 931 |
ratelib/nucleus.py
|
kompoth/raterlib
| 0 |
2173237
|
import os
import re
import numpy as np
# Create element vs Z dictionary
PYRLIB_PATH = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(PYRLIB_PATH, "data")
EL_VS_Z = {}
Z_VS_EL = ["n"]
with open(os.path.join(DATA_PATH, "elements.txt")) as fd:
for line in fd:
Z, element = line.split()
EL_VS_Z[element.lower()] = int(Z)
Z_VS_EL.append(element.lower())
class MetaStableError(ValueError):
"""
Exception for meta stable states that are temporarily unsupported
"""
pass
class Nucleus:
"""
Class for nucleus participating in reaction
"""
def __init__(self, name=None, A=None, Z=None, N=None):
self._A = np.nan
self._Z = np.nan
if name is not None:
# Default initialisation by name string
self.__init_by_descr(name)
elif A is not None or Z is not None or N is not None:
# Initialization with numbers
self.__init_by_numbers(A=A, Z=Z, N=N)
else:
raise ValueError("Provide nucleus name or nuclear numbers.")
def __init_by_descr(self, descr):
re_str = r'^([A-Za-z]{1,2})([-\*]*)(\d{0,3})$'
descr = descr.replace(' ', '')
parse_results = re.search(re_str, descr)
if not parse_results:
# Undiscovered elements
re_str = r'^([Cc]\d)([-\*]*)(\d{3})'
parse_results = re.search(re_str, descr)
if not parse_results:
raise ValueError("Failed to parse: '{}'".format(descr))
if parse_results.group(2) != "":
msg = "Meta stable states are not supported: '{}'.".format(descr)
raise MetaStableError(msg)
element = parse_results.group(1).lower()
# TODO: refactor
if parse_results.group(3):
self._Z = EL_VS_Z[element]
self._A = int(parse_results.group(3))
elif element == "n":
self._Z = 0
self._A = 1
elif element == "p":
self._Z = 1
self._A = 1
elif element == "d":
self._Z = 1
self._A = 2
elif element == "t":
self._Z = 1
self._A = 3
else:
raise ValueError("A was not provided: '{}'".format(descr))
def __init_by_numbers(self, A=None, Z=None, N=None):
none_num = (A, Z, N).count(None)
if none_num > 1:
raise ValueError("Provide at least 2 values: A, Z or N")
self._A = A if A is not None else Z + N
self._Z = Z if Z is not None else A - N
if self._A < self._Z or self.Z < 0:
raise ValueError("Mass number too small:"
"A = {}, Z = {}".format(self._A, self._Z))
def __repr__(self):
if self.Z > 1 or self.A > 3:
return "{}{}".format(Z_VS_EL[self.Z], self.A)
elif self.A == 1:
return "n" if self.Z == 0 else "p"
elif self.A == 2 and self.Z == 1:
return "d"
elif self.A == 3 and self.Z == 1:
return "t"
else:
raise ValueError("Unknown nucleus with A = {} and "
"Z = {}.".format(self.A, self.Z))
def __hash__(self):
return hash(self.__repr__())
def __eq__(self, other):
return (self.A == other.A) and (self.Z == other.Z)
def __lt__(self, other):
if not self.Z == other.Z:
return self.Z < other.Z
else:
return self.A < other.A
@property
def A(self):
return self._A
@property
def Z(self):
return self._Z
@property
def N(self):
return self.A - self.Z
@property
def element(self):
return Z_VS_EL[self.Z]
@property
def name(self):
return self.__repr__()
def relative(self, dZ=0, dN=0):
"""
Return Nucleus relative to self.
"""
Z = self.Z + dZ
N = self.N + dN
return Nucleus(Z=Z, N=N)
def neighbours(self):
"""
Get array of 8 nearest nuclei
"""
rez = []
for dZ in range(-1, 2):
for dN in range(-1, 2):
if dZ == 0 and dN == 0:
continue
rez.append(self.relative(dZ=dZ, dN=dN))
return np.array(rez)
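# Minimal usage sketch (added for illustration; assumes the bundled
# data/elements.txt maps chemical symbols to atomic numbers in the usual way).
if __name__ == "__main__":
    fe56 = Nucleus("fe56")
    print(fe56.A, fe56.Z, fe56.N)   # 56 26 30
    print(fe56.relative(dZ=1))      # co57, one proton more
    print(len(fe56.neighbours()))   # 8 nearest (Z, N) neighbours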
| 4,335 |
argentina/util/functions.py
|
emibap/dataviz
| 0 |
2170051
|
from datetime import datetime
from datetime import date
# Returns the percentage of the year that passed given a date string in YYYY-mm-dd format
# Min and max percentages are optional parameters (to improve visuals)
def get_date_year_percentage(date_str, min_pcn = 0.12, max_pcn = 0.88):
date_time_obj = datetime.strptime(date_str, "%Y-%m-%d")
year = date_time_obj.year
date_year = datetime.combine(date(year, 1, 1), datetime.min.time())
date_year_plus = datetime.combine(date(year + 1, 1, 1), datetime.min.time())
days_passed = (date_time_obj - date_year).days
return min(max((days_passed / (date_year_plus - date_year).days), min_pcn), max_pcn)
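# Worked example (added for illustration): for "2020-07-01", 182 of the 366
# days in the leap year have passed, so the raw fraction 182/366 ~= 0.497 lies
# inside the [0.12, 0.88] clamp and is returned unchanged, while "2020-01-05"
# gives 4/366 ~= 0.011 and is clamped up to min_pcn = 0.12.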
def get_year_from_datestr(date_str):
date_time_obj = datetime.strptime(date_str, "%Y-%m-%d")
return date_time_obj.year
# https://www.geeksforgeeks.org/insert-row-at-given-position-in-pandas-dataframe/
# Function to insert row in the dataframe
def insert_row(row_number, df, row_value):
# Starting value of upper half
start_upper = 0
# End value of upper half
end_upper = row_number
# Start value of lower half
start_lower = row_number
# End value of lower half
end_lower = df.shape[0]
# Create a list of upper_half index
upper_half = [*range(start_upper, end_upper, 1)]
# Create a list of lower_half index
lower_half = [*range(start_lower, end_lower, 1)]
# Increment the value of lower half by 1
lower_half = [x.__add__(1) for x in lower_half]
# Combine the two lists
index_ = upper_half + lower_half
# Update the index of the dataframe
df.index = index_
# Insert a row at the end
df.loc[row_number] = row_value
# Sort the index labels
df = df.sort_index()
# return the dataframe
return df
# Let's create a row which we want to insert
# row_number = 2
# row_value = ['11/2/2011', 'Wrestling', 12000]
# if row_number > df.index.max()+1:
# print("Invalid row_number")
# else:
## Let's call the function and insert the row
## at the second position
#df = Insert_row(row_number, df, row_value)
# Print the updated dataframe
#print(df)
| 2,103 |
tests/test_parsing.py
|
chriscardillo/gusty
| 103 |
2173395
|
import pytest
from airflow import DAG
from datetime import datetime, timedelta
from airflow.utils.dates import days_ago
from gusty import create_dag
from gusty.parsing import parse
##############
## FIXTURES ##
##############
@pytest.fixture(scope="session")
def parsing_dag_dir():
return "tests/dags/parsing"
@pytest.fixture(scope="session")
def dag(parsing_dag_dir):
dag = create_dag(
parsing_dag_dir,
description="A dag with some custom parsing functions.",
schedule_interval="0 0 * * *",
default_args={
"owner": "gusty",
"depends_on_past": False,
"start_date": days_ago(1),
"email": "<EMAIL>",
"email_on_failure": False,
"email_on_retry": False,
"retries": 3,
"retry_delay": timedelta(minutes=5),
},
ignore_subfolders=True,
parse_hooks={
".py": lambda file_path: {
"operator": "airflow.operators.python.PythonOperator",
"python_callable": lambda: "this was custom",
},
},
)
return dag
@pytest.fixture(scope="session")
def custom_task(dag):
custom_task = dag.task_dict["a_parse_hook_task"]
return custom_task
@pytest.fixture(scope="session")
def sql_task(dag):
custom_task = dag.task_dict["sql_task"]
return custom_task
###########
## Tests ##
###########
def test_read_yaml_spec():
yaml_spec = parse("tests/dags/no_metadata/top_level_task.yml")
assert yaml_spec["task_id"] == "top_level_task"
assert yaml_spec["file_path"] == "tests/dags/no_metadata/top_level_task.yml"
assert "operator" in yaml_spec.keys()
assert "bash_command" in yaml_spec.keys()
def test_parse_hooks(custom_task):
callable = custom_task.__dict__["python_callable"]
res = callable()
assert res == "this was custom"
def test_sql_parse(sql_task):
assert sql_task.sql == "SELECT *\nFROM gusty_table"
| 1,972 |
ll/environment.bzl
|
qogecoin/rules_ll
| 5 |
2171495
|
"""# `//ll:environment.bzl`
Action environments.
"""
def compile_object_environment(ctx, toolchain_type):
if toolchain_type == "//ll:toolchain_type":
return {
"LLVM_SYMBOLIZER_PATH": ctx.toolchains[toolchain_type].symbolizer.path,
"LINK": ctx.toolchains[toolchain_type].bitcode_linker.path,
"LLD": ctx.toolchains[toolchain_type].linker.path,
"PATH": "$PATH:" + ctx.toolchains[toolchain_type].linker_executable.dirname,
"LD_LIBRARY_PATH": "$LD_LIBRARY_PATH:" + "/usr/local/cuda/lib64",
}
elif toolchain_type == "//ll:heterogeneous_toolchain_type":
return {
"LLVM_SYMBOLIZER_PATH": ctx.toolchains[toolchain_type].symbolizer.path,
"CLANG_OFFLOAD_BUNDLER": ctx.toolchains[toolchain_type].offload_bundler.path,
"LINK": ctx.toolchains[toolchain_type].bitcode_linker.path,
"LLD": ctx.toolchains[toolchain_type].linker.path,
"PATH": "$PATH:" + ctx.toolchains[toolchain_type].linker_executable.dirname,
"LD_LIBRARY_PATH": "$LD_LIBRARY_PATH:" + "/usr/local/cuda/lib64",
}
elif toolchain_type == "//ll:bootstrap_toolchain_type":
return {
"CPLUS_INCLUDE_PATH": Label("@llvm-project").workspace_root + "/libcxx/src",
}
else:
fail("Unregognized toolchain type. rules_ll supports " +
"//ll:toolchain_type and //ll:bootstrap_toolchain_type.")
| 1,458 |
tests/components/broadlink/test_helpers.py
|
tbarbette/core
| 30,023 |
2172651
|
"""Tests for Broadlink helper functions."""
import pytest
import voluptuous as vol
from homeassistant.components.broadlink.helpers import data_packet, mac_address
async def test_padding(hass):
"""Verify that non padding strings are allowed."""
assert data_packet("Jg") == b"&"
assert data_packet("Jg=") == b"&"
assert data_packet("Jg==") == b"&"
async def test_valid_mac_address(hass):
"""Test we convert a valid MAC address to bytes."""
valid = [
"A1B2C3D4E5F6",
"a1b2c3d4e5f6",
"A1B2-C3D4-E5F6",
"a1b2-c3d4-e5f6",
"A1B2.C3D4.E5F6",
"a1b2.c3d4.e5f6",
"A1-B2-C3-D4-E5-F6",
"a1-b2-c3-d4-e5-f6",
"A1:B2:C3:D4:E5:F6",
"a1:b2:c3:d4:e5:f6",
]
for mac in valid:
assert mac_address(mac) == b"\xa1\xb2\xc3\xd4\xe5\xf6"
async def test_invalid_mac_address(hass):
"""Test we do not accept an invalid MAC address."""
invalid = [
None,
123,
["a", "b", "c"],
{"abc": "def"},
"a1b2c3d4e5f",
"a1b2.c3d4.e5f",
"a1-b2-c3-d4-e5-f",
"a1b2c3d4e5f66",
"a1b2.c3d4.e5f66",
"a1-b2-c3-d4-e5-f66",
"a1b2c3d4e5fg",
"a1b2.c3d4.e5fg",
"a1-b2-c3-d4-e5-fg",
"a1b.2c3d4.e5fg",
"a1b-2-c3-d4-e5-fg",
]
for mac in invalid:
with pytest.raises((ValueError, vol.Invalid)):
mac_address(mac)
| 1,434 |
students/K33402/Krivoshapkina_Aitalina/LR_3/hospital_app/admin.py
|
aytakr/ITMO_ICT_WebDevelopment_2021-2022
| 0 |
2171249
|
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Patient)
admin.site.register(Specialization)
# admin.site.register(Schedule)
admin.site.register(Doctor)
admin.site.register(ScheduleOfDoctor)
admin.site.register(Cabinet)
admin.site.register(CabinetOfficer)
admin.site.register(ScheduleOfCabinet)
admin.site.register(PriceList)
admin.site.register(Visit)
# admin.site.register(VisitOfPatient)
admin.site.register(MedicalCard)
| 481 |
zanza/__init__.py
|
fogasl/zanza
| 0 |
2173536
|
"""
This script obfuscates the input string by the following algorithm:
- Takes the first character of the input string, and gets its Unicode code
point (character code)
- Sets this char code as a reference value
- Initializes a *list* in which stores the digits of the previous value as a
*list*
- Continues to the next character by calculating its code point and the delta
to the previous character's code point.
- Sets the current code point as the next reference value
- Adds this delta value to the return list and repeats the steps above to the
input strings length.
"""
def zanza(source):
"""Obfuscate the input string.
Args:
source (str): Input string
"""
if len(source) == 0:
raise ValueError("Invalid input: zero-length string")
ret = []
prev = ord(source[0])
first = map(int, str(prev))
ret.append(list(first))
for s in source[1:]:
curr = ord(s)
ret.append(curr - prev)
prev = curr
return ret
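# Worked example (added for illustration): "abc" starts at code point 97 ('a'),
# stored as its digits [9, 7]; 'b' (98) and 'c' (99) each add a delta of 1.
if __name__ == "__main__":
    print(zanza("abc"))  # [[9, 7], 1, 1]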
| 1,000 |
pneumonia_xray_testing.py
|
alleetw101/pneumonia-xray
| 1 |
2173531
|
# Name: pneumonia_xray_testing
# Author: <NAME> <<EMAIL>>
# Version: Testing
import pathlib
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
from datetime import datetime
import time
import matplotlib.pyplot as plt
# Importing data
train_data_dir = pathlib.Path('train')
test_data_dir = pathlib.Path('test')
# Verifying datasets were loaded correctly. Counts number of jpeg files in folder/directory
train_image_count = len(list(train_data_dir.glob('*/*.jpeg')))
test_image_count = len(list(test_data_dir.glob('*/*.jpeg')))
print(train_image_count, test_image_count)
# Testing Variables
img_width = 720
img_height = 540
val_size = int(train_image_count * 0.2)
test_size = int(train_image_count * 0.1)
BATCH_SIZE = 64
ACTIVATION = 'relu'
regularizer = tf.keras.regularizers.l2(0.01)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
callback = [tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)]
EPOCHS = 50 # Max Epoch w/ EarlyStopping
STEPS_PER_EPOCH = 16
# Variable names for log file
REGULARIZER = 'l2(0.01)'
OPTIMIZER = 'adam'
CALLBACK = 'EarlyStopping(val_loss, 2)'
COMMENTS = '(training) shuffle, repeat, batch'
# Obtaining categorical names with folder names
class_names = np.array(sorted([item.name for item in train_data_dir.glob('*') if item.name != '.DS_Store']))
print(class_names)
# Functions to create image, label pair in dataset
def get_label(file_path):
parts = tf.strings.split(file_path, sep='/')
temp = parts[-2] == class_names
return tf.argmax(temp)
def decode_img(img):
img = tf.io.decode_jpeg(img, channels=1)
return tf.image.resize_with_pad(img, img_height, img_width)
def process_path(file_path):
label = get_label(file_path)
img = tf.io.read_file(file_path)
img = decode_img(img)
return img, label
# Configures datasets for use in models
def configure_performance(ds, dataset=''):
ds = ds.shuffle(1000)
if dataset == 'train':
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE)
ds = ds.cache()
ds = ds.prefetch(AUTOTUNE)
return ds
average_acc = []
average_loss = []
for _ in range(8):
# Time variables for log file
date = datetime.now().strftime("%Y%m%d_%H%M")
starttime = time.time()
# Loads datasets into tensorflow dataset with verification
list_ds = tf.data.Dataset.list_files(str(train_data_dir / '*/*'), shuffle=True)
# list_ds = list_ds.shuffle(train_image_count, reshuffle_each_iteration=False)
test_ds = tf.data.Dataset.list_files(str(test_data_dir / '*/*'), shuffle=False)
# for i in list_ds.take(1):
# print(i.numpy())
# example = i
# Splitting training dataset into train and validation datasets
train_ds = list_ds.skip(val_size)
val_ds = list_ds.take(val_size)
print(tf.data.experimental.cardinality(train_ds).numpy(), tf.data.experimental.cardinality(val_ds).numpy())
# Creates image/label pair for datasets with verification
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = train_ds.map(process_path, num_parallel_calls=AUTOTUNE)
val_ds = val_ds.map(process_path, num_parallel_calls=AUTOTUNE)
test_ds = test_ds.map(process_path, num_parallel_calls=AUTOTUNE)
# for image, label in train_ds.take(1):
# print("Image shape: ", image.numpy().shape)
# print("Label: ", label.numpy())
# Configure datasets for model
train_ds = configure_performance(train_ds, dataset='train')
val_ds = configure_performance(val_ds)
test_ds = configure_performance(test_ds)
# Create and compile model
model = tf.keras.Sequential([
layers.experimental.preprocessing.Rescaling(1. / 255),
layers.Conv2D(32, 3, activation=ACTIVATION, kernel_regularizer=regularizer),
layers.MaxPooling2D(),
layers.Dropout(0.2),
layers.Conv2D(32, 3, activation=ACTIVATION, kernel_regularizer=regularizer),
layers.MaxPooling2D(),
layers.Dropout(0.2),
layers.Conv2D(32, 3, activation=ACTIVATION, kernel_regularizer=regularizer),
layers.MaxPooling2D(),
layers.Dropout(0.2),
layers.Conv2D(32, 3, activation=ACTIVATION, kernel_regularizer=regularizer),
layers.MaxPooling2D(),
layers.Dropout(0.2),
layers.Flatten(),
layers.Dense(512, activation=ACTIVATION, kernel_regularizer=regularizer),
layers.Dropout(0.2),
layers.Dense(512, activation=ACTIVATION, kernel_regularizer=regularizer),
layers.Dropout(0.2),
layers.Dense(128, activation=ACTIVATION, kernel_regularizer=regularizer),
layers.Dense(2)
])
model.compile(optimizer=optimizer, loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy']) # Update log file variables if metric is changed
# Train model on testing dataset
train_history = model.fit(train_ds, validation_data=val_ds, epochs=EPOCHS, verbose=1, steps_per_epoch=STEPS_PER_EPOCH,
callbacks=callback)
# Time variables for log file
endtime = time.time()
traintime = (endtime-starttime)
# Evaluate trained model on test dataset
loss, acc = model.evaluate(test_ds, verbose=1)
# Saves entire model in SavedModel format
model.save(f'PneSavedModels/Pne{date}', overwrite=False)
# Update log file
with open('PneSavedModels/PneLog.txt', 'r') as f:
tempfile = f.read()
with open('PneSavedModels/PneLog.txt', 'w') as f:
f.write('###############\n')
f.write(f'Run: {date}, train_time: {traintime}\n')
f.write(f'Img_width: {img_width}, Img_height: {img_height}, Val_size: {val_size}\n')
f.write(f'Batch_size: {BATCH_SIZE}, Activation: {ACTIVATION}, Optimizer: {OPTIMIZER}, Regularizer: {REGULARIZER}\n')
f.write(f'Epochs: {EPOCHS}, Steps_per_epoch: {STEPS_PER_EPOCH}, Callback(s): {CALLBACK}\n\n')
for runs in range(len(train_history.history['loss'])):
f.write(f'Epoch {runs+1}- Loss: {train_history.history["loss"][runs]}, ')
f.write(f'Acc: {train_history.history["accuracy"][runs]}, ')
f.write(f'Val_loss: {train_history.history["val_loss"][runs]}, ')
f.write(f'Val_acc: {train_history.history["val_accuracy"][runs]}\n')
f.write(f'\nComments: {COMMENTS}\n\n')
f.write(f'Test_loss: {loss}, Test_accuracy: {acc}\n\n')
model.summary(print_fn=lambda x: f.write(x + '\n'))
f.write('###############\n\n')
f.write(tempfile)
average_acc.append(acc)
average_loss.append(loss)
print(f'Average accuracy: {np.average(average_acc)}')
print(f'Average loss: {np.average(average_loss)}')
print(average_acc)
print(average_loss)
| 6,725 |
purchase/models.py
|
FreeGodCode/store
| 0 |
2173296
|
import datetime
from django.db import models
class PurchaseContract(models.Model):
"""采购合同"""
PC_STATUS_CHOICES = (
(0, '草稿'),
(1, '已审批')
)
id = models.AutoField(primary_key=True)
pc_identify = models.CharField(max_length=15, verbose_name='合同编号')
pc_serial = models.CharField(max_length=4, verbose_name='合同流水号')
organization = models.ForeignKey('base.Organization', verbose_name='组织', related_name='org_pc', on_delete=models.CASCADE)
pc_name = models.CharField(max_length=20, verbose_name='合同名称')
supplier = models.ForeignKey('base.Supplier', verbose_name='供应商', related_name='supplier_pc', on_delete=models.CASCADE)
pc_date = models.DateTimeField(default=datetime.datetime.now, verbose_name='合同签订日期')
pc_sum = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='合同总额')
pc_remarks = models.TextField(max_length=400, verbose_name='合同备注', null=True)
pc_status = models.IntegerField(choices=PC_STATUS_CHOICES, default=0, verbose_name='合同状态')
pc_creator = models.CharField(max_length=20, verbose_name='合同创建者名字')
pc_creator_identify = models.CharField(max_length=20, verbose_name='合同创建者工号')
pc_created_at= models.DateTimeField(auto_now_add=True, verbose_name='合同创建时间')
class Meta:
db_table = 'db_purchase_contract'
verbose_name = "采购合同"
verbose_name_plural = verbose_name
def __str__(self):
return self.pc_name
class PurchaseContractDetail(models.Model):
"""合同物料明细"""
PCD_USE_STATUS_CHOICES = (
(0, '未使用'),
(1, '已使用')
)
id = models.AutoField(primary_key=True)
purchase_contract = models.ForeignKey('PurchaseContract', verbose_name='采购合同', related_name='pc_pcd', on_delete=models.CASCADE)
material = models.ForeignKey('base.Material', verbose_name='物料', related_name='material_pcd', on_delete=models.CASCADE)
pcd_num = models.IntegerField(verbose_name='物料数量')
pcd_taxRate = models.IntegerField(default=13, verbose_name='税率', null=True)
pcd_tax_unitPrice = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='含税单价', null=True)
pcd_unitPrice = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='无税单价', null=True)
pcd_tax_sum = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='含税总额', null=True)
pcd_sum = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='无税总额', null=True)
pcd_tax_price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='税额', null=True)
pcd_pr_identify = models.CharField(max_length=15, verbose_name='请购单编号')
pcd_prd_remarks = models.TextField(max_length=400, verbose_name='物料备注', null=True)
pcd_used = models.IntegerField(choices=PCD_USE_STATUS_CHOICES, default=0, verbose_name='明细单是否使用')
class Meta:
db_table = 'db_purchase_contract_detail'
verbose_name = "合同物料明细"
verbose_name_plural = verbose_name
def __str__(self):
        return str(self.pcd_num)
class PurchaseContractPayDetail(models.Model):
"""合同付款协议"""
PAY_PREPAY_CHOICES = (
(0, '否'),
(1, '是')
)
id = models.AutoField(primary_key=True)
purchase_contract = models.ForeignKey('PurchaseContract', verbose_name='采购合同', related_name='pc_pay', on_delete=models.CASCADE)
pay_batch = models.IntegerField(verbose_name='付款批次')
pay_rate = models.IntegerField(verbose_name='付款比率')
pay_price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='付款金额')
pay_planDate = models.DateField(verbose_name='计划付款日期')
pay_prepay = models.IntegerField(choices=PAY_PREPAY_CHOICES, verbose_name='是否预付款')
pay_remarks = models.TextField(max_length=400, verbose_name='付款备注', null=True)
class Meta:
db_table = 'db_purchase_contract_pay_detail'
verbose_name = "合同付款协议"
verbose_name_plural = verbose_name
def __str__(self):
        return str(self.pay_batch)
class PurchaseOrder(models.Model):
"""采购订单"""
PO_STATUS_CHOICES = (
(0, '草稿'),
(1, '已审批')
)
id = models.AutoField(primary_key=True)
po_identify = models.CharField(max_length=15, verbose_name='采购订单编号')
po_serial = models.CharField(max_length=4, verbose_name='采购订单流水号')
organization = models.ForeignKey('base.Organization', verbose_name='组织', related_name='org_po', on_delete=models.CASCADE)
supplier = models.ForeignKey('base.Supplier', verbose_name='供应商', related_name='supplier_po', on_delete=models.CASCADE)
po_date = models.DateTimeField(default=datetime.datetime.now,verbose_name='采购订单生效日期')
po_sum = models.IntegerField(verbose_name='采购订单总额')
po_remarks = models.TextField(max_length=400, verbose_name='采购订单备注')
purchase_contract = models.ForeignKey('PurchaseContract', verbose_name='采购合同', related_name='pc_po', on_delete=models.CASCADE)
    purchase_request = models.ForeignKey('purchaseRequest.PurchaseRequest', verbose_name='请购单', related_name='pr_po', on_delete=models.CASCADE)
pc_identify = models.CharField(max_length=15, verbose_name='采购合同编号', null=True)
po_status = models.IntegerField(choices=PO_STATUS_CHOICES, default=0, verbose_name='采购订单状态')
po_creator = models.CharField(max_length=20, verbose_name='采购订单创建者名字')
po_creator_identify = models.CharField(max_length=20, verbose_name='采购订单创建者编号')
po_created_at= models.DateTimeField(auto_now_add=True, verbose_name='采购订单创建时间')
class Meta:
db_table = 'db_purchase_order'
verbose_name = "采购订单"
verbose_name_plural = verbose_name
def __str__(self):
return self.po_identify
class OrderDetail(models.Model):
"""采购订单明细"""
id = models.AutoField(primary_key=True)
purchase_order = models.ForeignKey('PurchaseOrder', verbose_name='采购订单', related_name='po_od', on_delete=models.CASCADE)
# pr_detail = models.ForeignKey('purchaseRequest.PurchaseRequest', verbose_name='请购单物料明细', related_name='pr_od', on_delete=models.CASCADE)
# pcd_detail = models.ForeignKey('PurchaseContractDetail', verbose_name='合同物料明细', related_name='pcd_od', on_delete=models.CASCADE)
material = models.ForeignKey('base.Material', verbose_name='物料', related_name='material_od', on_delete=models.CASCADE)
od_num = models.IntegerField(verbose_name='采购数量')
od_taxRate = models.IntegerField(default=13, verbose_name='税率')
od_tax_unitPrice = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='含税单价')
od_unitPrice = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='无税单价')
od_tax_sum = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='含税总额')
od_tax_price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='税额')
od_sum = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='无税总额')
od_pr_identify = models.CharField(max_length=15, verbose_name='请购单编号')
od_prd_remarks = models.TextField(max_length=400, verbose_name='物料备注')
class Meta:
db_table = 'db_order_detail'
verbose_name = "采购订单详情"
verbose_name_plural = verbose_name
| 7,081 |
app/requester.py
|
mateuszbaranczyk/portfolio
| 0 |
2172814
|
import datetime
import json
import requests
import typing as T
class ExchangeRateRequester:
def __init__(self):
self.api_url = "http://api.nbp.pl/api/exchangerates/rates/c/{currency_code}/{start_date}/{end_date}/"
def get_rate(
self,
currency_code="EUR",
start_date=str(datetime.date.today()),
end_date=str(datetime.date.today()),
    ) -> dict:
api_url = self.api_url.format(currency_code=currency_code, start_date=start_date, end_date=end_date)
response = requests.get(api_url).text
data = json.loads(response)
return data
@staticmethod
def extract_todays_bid(rest_response: dict) -> float:
todays_rates = rest_response["rates"][0]
todays_bid = todays_rates["bid"]
return todays_bid
@staticmethod
def extract_historical_bids(rest_response: dict) -> T.Dict[str, float]:
historical_rates = rest_response["rates"]
historical_bids = {}
for rate in historical_rates:
historical_rate = rate["bid"]
historical_date = rate["effectiveDate"]
historical_bids[historical_date] = historical_rate
return historical_bids
def get_todays_rate(self) -> float:
rest_response = self.get_rate()
todays_bid = self.extract_todays_bid(rest_response)
return todays_bid
def get_historical_bids(self, start_date: str, end_date: str = str(datetime.date.today())) -> T.Dict[str, float]:
rest_response = self.get_rate(start_date=start_date, end_date=end_date)
        historical_bids = self.extract_historical_bids(rest_response)
        return historical_bids
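# Usage sketch (added for illustration): requires network access to the public
# NBP API at api.nbp.pl; requests for non-trading days return no quotes and
# will fail while decoding the response.
if __name__ == "__main__":
    requester = ExchangeRateRequester()
    print(requester.get_todays_rate())                                 # today's EUR bid
    print(requester.get_historical_bids("2021-01-04", "2021-01-08"))   # date -> bid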
| 1,720 |
PyKeydown/keydown.py
|
aheadlead/PyTetris
| 2 |
2173530
|
# coding=utf-8
__author__ = 'weiyulan'
from threading import Thread
import termios
import fcntl
import sys
import os
from select import select
from time import sleep
import keyvalue
class Keydown(object):
def __init__(self, callback):
self.callback = callback
self.oldflags = None
self.oldterm = None
self.daemon_thread = Thread(target=self.daemon)
self.daemon_thread.setDaemon(True)
self.fsm = {}
self.state = self.fsm
self.stop_flag = False
        # Build the key-sequence automaton from the key values in keyvalue.
        # This relies on Python's introspection facilities.
        for key in dir(keyvalue):
            if key[:2] == "__":  # skip module attributes such as __author__, __builtins__, etc.
                continue
last_pointer = None
pointer = self.fsm
# print '$' + str(getattr(keyvalue, key))
value = None
for value in getattr(keyvalue, key):
if value not in pointer:
pointer[value] = {}
last_pointer = pointer
pointer = pointer[value]
# print '#'+str(self.fsm)
last_pointer[value] = getattr(keyvalue, key)
# print '*' + str(self.fsm)
def start(self):
self.daemon_thread.start()
def daemon(self):
fd = sys.stdin.fileno()
self.oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
self.oldflags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
fcntl.fcntl(fd, fcntl.F_SETFL, self.oldflags | os.O_NONBLOCK)
while 1:
if self.stop_flag is True:
self.stop_flag = False
break
try:
while 1:
c = sys.stdin.read(1)
                    print(ord(c))
if isinstance(self.state, dict):
if ord(c) in self.state:
# print '-> ', ord(c)
self.state = self.state[ord(c)]
else:
                            # unsupported key
# print 'no support'
self.state = self.fsm
# print self.state
if isinstance(self.state, tuple):
# print "hehe"
if not hasattr(self.callback, '__call__'):
raise Exception("回调函数不能被调用。")
self.callback(self.state)
self.state = self.fsm
break
except IOError:
pass
    def stop(self):
        # Restore the terminal attributes and flags saved in daemon().
        fd = sys.stdin.fileno()
        termios.tcsetattr(fd, termios.TCSAFLUSH, self.oldterm)
        fcntl.fcntl(fd, fcntl.F_SETFL, self.oldflags)
        self.stop_flag = True
| 2,849 |
auto_trapezium.py
|
fjchange/Data_Analyze
| 0 |
2173327
|
import math
class auto_trap:
def __init__(self,a,b,func,limit=0.00001):
self.a=a
self.b=b
self.h=(b-a)/2.0
self.limit=limit
self.func=func
self.T1=(func(a)+func(b))*self.h
def cal(self):
n=2
while (1):
self.T0=self.T1
S=0
for i in range(1,n):
t=self.a+(2*i-1)*self.h/n
S=S+self.func(t)
self.T1=self.T0/2.0+S*self.h/n
if abs(self.T1-self.T0)<3*self.limit:
return self.T1
else:n=2*n
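# Usage sketch (added for illustration): integrate sin(x) over [0, pi] with the
# iterative trapezium refinement above; successive estimates should approach 2.0
# before the stopping tolerance is met.
if __name__ == "__main__":
    print(auto_trap(0.0, math.pi, math.sin).cal())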
| 581 |
run_model.py
|
wfclark5/hamlet
| 2 |
2172647
|
import sys
from subprocess import call, Popen
call("python get_cyclone_members.py", shell = True)
call("python geoprocess_members.py", shell = True)
call("python drop_members.py", shell = True)
call("python exposure_summary.py", shell = True)
| 251 |
merge_py.py
|
bhavaniravi/mergepy
| 1 |
2172463
|
import ast
from collections import namedtuple
import sys, inspect, importlib
import astunparse
import glob
imports = []
classes = []
class_names = []
def get_imports(path):
with open(path) as fh:
root = ast.parse(fh.read(), path)
for node in ast.iter_child_nodes(root):
print (node)
if isinstance(node, ast.Import):
module = []
elif isinstance(node, ast.ImportFrom):
module = node.module.split('.')
elif isinstance(node, ast.ClassDef):
class_names.append(node.name)
source = astunparse.unparse(node)
classes.append(source)
continue
else:
continue
for n in node.names:
if not module:
statement = f"import {n.name}"
else:
statement = f"from {'.'.join(module)} import {n.name}"
            if n.asname:
                statement = statement + f" as {n.asname}"
imports.append(statement)
return classes
input_folder_path = input("Input folder path: ") or "example/input"
output_file_path = input("Output file path with .py extension: ") or "example/output/eg.py"
print (glob.glob(f"{input_folder_path}/*.py"))
for f in glob.glob(f"{input_folder_path}/*.py"):
print (f"Processing file :: {f}")
get_imports(f)
with open(output_file_path, "w") as f:
for i in imports:
print(i, file=f)
for c in classes:
print(c.replace("\\n", "\n"), file=f)
| 1,539 |
src/main/resources/spigot/event.py
|
True-cc/SPYgotUtils
| 4 |
2169998
|
from org.bukkit.event import EventPriority
from org.bukkit.event.player import AsyncPlayerChatEvent
def test(event):
"""
:type event: AsyncPlayerChatEvent
"""
msg = event.getMessage() # type: str
if msg.startswith("p!"):
event.setCancelled(True)
event.getPlayer().sendMessage("Hewwo fwom jythwon!")
# Events registered on enable, not load.
def on_enable():
register_event(AsyncPlayerChatEvent, EventPriority.MONITOR, test)
| 468 |
problem0391.py
|
kmarcini/Project-Euler-Python
| 0 |
2172600
|
###########################
#
# #391 Hopping Game - Project Euler
# https://projecteuler.net/problem=391
#
# Code by <NAME>
#
###########################
| 154 |
sorting_algs/cocktail_shaker_sort.py
|
shrijaltamrakar/Descent_py
| 2 |
2173486
|
from __future__ import print_function
from inspect.TimeIt import timeit
@timeit
def cocktail_shaker_sort(unsorted):
"""
implementation of cocktail shaker sort algo in pure python
:param unsorted: unsorted list
:return: sorted list
"""
for i in range(len(unsorted)-1,0,-1):
swapped = False
for j in range(i,0,-1):
if unsorted[j] < unsorted[j-1]:
unsorted[j], unsorted[j-1] = unsorted[j-1], unsorted[j]
swapped = True
for j in range(i):
if unsorted[j] > unsorted[j+1]:
unsorted[j], unsorted[j+1] = unsorted[j+1], unsorted[j]
swapped = True
        if not swapped:
            return unsorted
    return unsorted
if __name__=="__main__":
try:
raw_input # python 2
except NameError:
raw_input = input
    user_input = raw_input('Enter numbers separated by commas: \n').strip()
unsorted = [int(item) for item in user_input.split(',')]
cocktail_shaker_sort(unsorted)
print(unsorted)
| 1,047 |
vectorize.py
|
jmkinder1/code-samples
| 5 |
2173364
|
# vectorize.py
# -------------------------------------------------------------------------
# Use vectorized operations to generate multiple solutions to the
# quadratic equation.
# -------------------------------------------------------------------------
import numpy as np
b, c = 2, -1
a = np.arange(-1, 2, 0.3)
x = (-b + np.sqrt(b**2 - 4*a*c)) / (2*a)
| 356 |
guests/migrations/0018_auto_20191124_0058.py
|
pop/wedding-website
| 0 |
2172793
|
# Generated by Django 2.2.6 on 2019-11-24 00:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('guests', '0017_auto_20191123_2318'),
]
operations = [
migrations.RemoveField(
model_name='party',
name='category',
),
migrations.RemoveField(
model_name='party',
name='save_the_date_opened',
),
migrations.RemoveField(
model_name='party',
name='save_the_date_sent',
),
migrations.RemoveField(
model_name='party',
name='type',
),
]
| 658 |
pycoder/model/inference.py
|
himanshu-dutta/pycoder
| 1 |
2173174
|
from pycoder.utils import formatter
from pycoder.model.transformer import load_transformers, save_transformers
from pycoder.imports import (
pipeline,
Union,
Path,
List,
GPT2LMHeadModel,
AutoTokenizer,
rmtree,
)
class CodeInference:
def __init__(
self,
model_path: Union[Path, str],
tokenizer_path: Union[Path, str],
control_tokens: dict,
max_length: int,
cuda: bool = False,
verbose: bool = False,
) -> None:
check_load_from_model_hub(model_path, tokenizer_path)
model, tokenizer = load_transformers(model_path, tokenizer_path, verbose)
model.config.task_specific_params["text-generation"]["max_length"] = max_length
self.coder = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
config={"max_length": max_length},
device=0 if cuda else -1,
)
self.control_tokens = control_tokens
def __call__(
self, topics: Union[List[str], str], description: str, code_prefix: str = ""
) -> List[str]:
"""
sends in input:
<|TOP|>TOPICS<|DES|>DESCRIPTION<|CODE|>
hopes for the output as:
<|TOP|>TOPICS<|DES|>DESCRIPTION<|CODE|>CODE<|EOS|>
"""
if isinstance(topics, list):
topics = ",".join(topics)
inp = (
self.control_tokens["topics_token"]
+ topics
+ self.control_tokens["description_token"]
+ description
+ self.control_tokens["code_token"]
+ code_prefix
)
out = self.coder(inp)
out = list(
map(
lambda x: x["generated_text"].split(self.control_tokens["code_token"])[
1
],
out,
)
)
return out
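# Usage sketch (added for illustration; the paths and control-token values
# below are hypothetical placeholders, not values shipped with the package):
#   coder = CodeInference(
#       model_path="models/pycoder-model",
#       tokenizer_path="models/pycoder-tokenizer",
#       control_tokens={"topics_token": "<|TOP|>",
#                       "description_token": "<|DES|>",
#                       "code_token": "<|CODE|>"},
#       max_length=256,
#   )
#   print(coder(["sorting"], "bubble sort a list of integers")[0])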
def check_load_from_model_hub(
model_path: Union[Path, str], tokenizer_path: Union[Path, str]
) -> None:
from pycoder.config import HF_HUB_NAME, CACHE_DIR
model_path = Path(model_path)
tokenizer_path = Path(tokenizer_path)
if not model_path.exists() or not tokenizer_path.exists():
print(
formatter(
"\nModel and Tokenizer being downloaded and saved from ModelHub (once only)...\n",
color="g",
)
)
model = GPT2LMHeadModel.from_pretrained(
HF_HUB_NAME, cache_dir=CACHE_DIR / "tokenizer"
)
tokenizer = AutoTokenizer.from_pretrained(
HF_HUB_NAME, cache_dir=CACHE_DIR / "model"
)
save_transformers(model_path, tokenizer_path, model, tokenizer, verbose=False)
print(
formatter("\nModel and Tokenizer saved.", color="g", bold=True, tick=True)
+ "\n"
)
rmtree(CACHE_DIR)
| 2,876 |
Ago-Dic-2017/Enrique Castillo/Práctica1/Cliente.py
|
Andremm303/DAS_Sistemas
| 0 |
2173083
|
from Persona import Persona
class Cliente(Persona):
def __init__(self, nombre, apellidos, edad, direccion, telefono, idCliente):
Persona.__init__(self, nombre, apellidos, edad, direccion, telefono)
self.idCliente = idCliente
def getIdCliente(self):
return self.idCliente
    def setidCliente(self, idCliente):
self.idCliente = idCliente
def atribCliente(self):
return "Nombre: {}\nApellidos: {}\nEdad: {}\nDirección {}\nTeléfono: {}\nNúmero de Cliente: {}\n".format(self.nombre,
self.apellidos, self.edad, self.direccion, self.telefono, self.idCliente)
| 604 |
test/eutilities_test/e_utilities_test.py
|
BrianPulfer/AuthorNameDisambiguation
| 8 |
2172659
|
import unittest
from main.eutilities import e_utilities
class TestQueries(unittest.TestCase):
def test_query_class(self):
"""Tests all the Query class methods: construction, parameters settings/adding and 'to string' method"""
# Creating an empty query and filling it afterwards
q1 = e_utilities.Query()
q1.add_mesh_term('mt1')
q1.add_mesh_term('mt2')
q1.set_pubblication_date('2011')
# Testing that the query is correctly created for the REST request
self.assertEqual('mt1[mesh]+AND+mt2[mesh]+AND+2011[pdat]', q1.to_string())
# Testing that the set_mesh_terms method overwrites the older ones
q1.set_mesh_terms(['mt3', 'mt4'])
self.assertEqual('mt3[mesh]+AND+mt4[mesh]+AND+2011[pdat]', q1.to_string())
# Testing query creation with all parameters
q2 = e_utilities.Query('2009', ['cancer', 'asthma'])
self.assertEqual('cancer[mesh]+AND+asthma[mesh]+AND+2009[pdat]', q2.to_string())
q2.add_any_terms(20180383)
self.assertEqual('20180383+AND+cancer[mesh]+AND+asthma[mesh]+AND+2009[pdat]', q2.to_string())
def test_search(self):
"""Tests that a valid result is returned for a valid GET request."""
        # IMPORTANT: This test fails when executed together with the other tests. Run it alone to check whether it succeeds.
q = e_utilities.Query()
q.set_pubblication_date('2009')
q.add_mesh_term('asthma')
q.add_mesh_term('leukotrienes')
result = e_utilities.search(e_utilities.DATABASES.PubMed, q)
self.assertIsNotNone(result)
# Checking that at least 10 articles are found
self.assertTrue(10 < len(result.content.decode('utf-8').split('<Id>')))
def test_fetch(self):
"""Tests that data is fetched giving only a query"""
# Creating the query
q = e_utilities.Query(pdat="2009", mesh=['asthma', 'leukotrienes'])
# Fetching result from PubMed database based on the query
result = e_utilities.fetch(e_utilities.DATABASES.PubMed, q)
        # Checking that the fetched data's content is longer than 109'000 characters (109'286 when checked manually)
self.assertTrue(len(str(result.content)) > 109000)
# Creating a query that only specifies the PMID
q2 = e_utilities.Query(any_terms=["20113659"])
# Fetching result with new query
result2 = e_utilities.fetch(e_utilities.DATABASES.PubMed, q2)
# Checking that the result's content only contains one publication
self.assertEqual(2, len(str(result2.content).split('[Indexed for MEDLINE]')))
        # Checking that the result is what it is expected to be
self.assertTrue("OBJECTIVE: Cysteinyl leukotriene (CysLTs) plays an important role in airway" in str(result2.content).split('[Indexed for MEDLINE]')[0])
def test_fetch_articles(self):
"""Tests that article can be retrieved correctly"""
# Creating a query that only contains the PubMed ID
q = e_utilities.Query(any_terms=["20113659"])
# Fetching articles
articles = e_utilities.fetch_articles(e_utilities.DATABASES.PubMed, q)
# Checking that fetched article is what expected (checked manually via browser)
self.assertEqual(1, len(articles))
self.assertTrue("Zhongguo Dang Dai Er Ke Za Zhi" in articles[0].get_raw_text())
def test_search_by_pmid(self):
"""Tests that method 'search_with_pmid(pmid)' returns the correct data"""
# Retrieving the response from searching an article given it's ID
response = e_utilities.search_by_pmid(20180383)
# Verifying that the response only contains one result
self.assertTrue("<eSearchResult><Count>1</Count>" in str(response.content))
# Verifying that the response's content only contains the selected response's ID
self.assertTrue("<IdList>\n<Id>20180383</Id>\n</IdList>" in response.content.decode('utf-8'))
def test_extract_ids(self):
"""Tests that 'extract_ids(response)' correctly returns a list of denotation IDs given a request's response"""
# Creating the query
q = e_utilities.Query('2009', ['cancer', 'asthma'])
# Getting the response
response = e_utilities.search(e_utilities.DATABASES.PubMed, q)
# Extracting the IDs given the response's content
ids = e_utilities.extract_ids(str(response.content))
# Testing that the response contains all expected ids (verified manually via browser)
expected_response = [20180383, 20128434, 20110007, 20044861, 20016028, 19995140, 19960035, 19926424, 19917947,
19863293, 19858390, 19851534, 19831405, 19817310, 19812684, 19808918, 19757309, 19737788,
19735105]
for elem in expected_response:
self.assertTrue(elem in ids)
if __name__ == '__main__':
unittest.main()
| 4,969 |
ia870/iaregmax.py
|
rdenadai/ia870p3
| 5 |
2172844
|
# -*- encoding: utf-8 -*-
# Module iaregmax
from numpy import *
from ia870.iasecross import iasecross
def iaregmax(f, Bc=iasecross()):
from ia870.iasubm import iasubm
from ia870.iahmax import iahmax
from ia870.iabinary import iabinary
from ia870.iaregmin import iaregmin
from ia870.ianeg import ianeg
y = iasubm(f, iahmax(f,1,Bc))
return iabinary(y)
#return iaregmin( ianeg(f),Bc)
| 417 |
hw3/asp_ex.py
|
D-denHeijer/KRR-course
| 0 |
2172859
|
import clingo
def print_answer_sets(program):
# Load the answer set program, and call the grounder
control = clingo.Control();
control.add("base", [], program);
control.ground([("base", [])]);
# Define a function that will be called when an answer set is found
# This function sorts the answer set alphabetically, and prints it
def on_model(model):
sorted_model = [str(atom) for atom in model.symbols(shown=True)];
sorted_model.sort();
print("Answer set: {{{}}}".format(", ".join(sorted_model)));
# Ask clingo to find all models (using an upper bound of 0 gives all models)
control.configuration.solve.models = 0;
# Call the clingo solver, passing on the function on_model for when an answer set is found
answer = control.solve(on_model=on_model)
# Print a message when no answer set was found
if answer.satisfiable == False:
print("No answer sets");
print_answer_sets("""
step(1..9).
block(1..9).
init(3,2). init(6,5). init(9,8).
init(2,1). init(5,4). init(8,7).
init(1,0). init(4,0). init(7,0).
goal(8,6). goal(5,7).
goal(6,4). goal(7,3).
goal(4,2). goal(3,9).
goal(2,1).
location(0).
location(B) :- block(B).
{ move(B,L,T) } :- block(B), location(L), step(T), B != L.
object(B,T) :- move(B,_,T).
target(B,T) :- move(_,B,T).
:- step(T), 2 #count { object(B,T) : object(B,T) }.
:- step(T), 2 #count { target(B,T) : target(B,T) }.
on(B,L,0) :- init(B,L).
on(B,L,T) :- move(B,L,T).
on(B,L,T) :- on(B,L,T-1), step(T), not object(B,T).
blocked(B,T) :- on(_,B,T), block(B), step(T+1).
:- object(B,T), blocked(B,T -1).
:- target(B,T), blocked(B,T -1).
:- goal(B,L), step(T), not step(T+1), not on(B,L,T).
#show move /3.
""")
| 1,814 |
src/08_multiprocessing/child_cpu.py
|
rurumimic/concurrency-python
| 0 |
2171772
|
import multiprocessing
import os
class WorkerProcess(multiprocessing.Process):
def __init__(self):
super(WorkerProcess, self).__init__()
def run(self):
print(f'Child PID: {multiprocessing.current_process().pid}')
def main():
print(f'Main PID: {multiprocessing.current_process().pid}')
processes = []
count = os.cpu_count()
if count == None:
return
print(f'os.cpu: {count}')
for i in range(count):
process = WorkerProcess() # 0
processes.append(process)
process.start()
for process in processes:
process.join()
if __name__ == '__main__':
main()
'''
Main PID: 77701
os.cpu: 8
Child PID: 77714
Child PID: 77712
Child PID: 77713
Child PID: 77715
Child PID: 77716
Child PID: 77717
Child PID: 77718
Child PID: 77719
'''
| 824 |
setup.py
|
wikimedia/research-recommendation-api
| 3 |
2173483
|
from setuptools import setup, find_packages
setup(
name='recommendation',
version='0.3.2',
url='https://github.com/wikimedia/research-recommendation-api',
license='Apache Software License',
maintainer='Wikimedia Research',
maintainer_email='<EMAIL>',
description='Provide recommendations in Wikimedia projects',
long_description='',
packages=find_packages(exclude=['test', 'test.*', '*.test']),
install_requires=['flask',
'flask-restplus',
'requests',
'numpy',
'scipy',
'sklearn',
# https://github.com/noirbizarre/flask-restplus/issues/777
'Werkzeug==0.16.0'],
package_data={'recommendation.web': ['static/*.*',
'static/i18n/*',
'static/images/*',
'static/suggest-searches/*',
'templates/*'],
'recommendation': ['data/*']},
zip_safe=False,
setup_requires=['pytest-runner',
'setuptools_scm < 2.0.0'],
tests_require=['pytest',
'responses',
'memory_profiler',
'psutil'])
| 1,338 |
forte/data/base_store.py
|
KGerring/forte
| 0 |
2173222
|
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import List, Iterator, Tuple, Any
__all__ = ["BaseStore"]
class BaseStore:
r"""The base class which will be used by :class:
`~forte.data.data_store.DataStore`."""
def __init__(self):
r"""
This is a base class for the efficient underlying data structure. A
current implementation of `BaseStore` is `DataStore`.
A `BaseStore` contains a collection of Forte entries.
Each entry type contains some subtypes, which could have
various fields stored in entry lists.
"""
@abstractmethod
def add_annotation_raw(self, type_name: str, begin: int, end: int) -> int:
r"""This function adds an annotation entry with `begin` and `end`
indices to the `type_name` sortedlist in `self.__elements`,
returns the `tid` for the inserted entry.
Args:
type_name (str): The index of Annotation sortedlist in `self.__elements`.
begin (int): Begin index of the entry.
end (int): End index of the entry.
Returns:
`tid` of the entry.
"""
raise NotImplementedError
@abstractmethod
def add_link_raw(
self, type_name: str, parent_tid: int, child_tid: int
) -> Tuple[int, int]:
r"""This function adds a link entry with `parent_tid` and `child_tid`
to the `type_name` list in `self.__elements`, returns the `tid` and the
`index_id` for the inserted entry in the list. This `index_id` is the
index of the entry in the `type_name` list.
Args:
type_name (str): The index of Link list in `self.__elements`.
parent_tid (int): `tid` of the parent entry.
child_tid (int): `tid` of the child entry.
Returns:
`tid` of the entry and its index in the `type_name` list.
"""
raise NotImplementedError
@abstractmethod
def add_group_raw(
self, type_name: str, member_type: str
) -> Tuple[int, int]:
r"""This function adds a group entry with `member_type` to the
`type_name` list in `self.__elements`, returns the `tid` and the
`index_id` for the inserted entry in the list. This `index_id` is the
index of the entry in the `type_name` list.
Args:
type_name (str): The index of Group list in `self.__elements`.
member_type (str): Fully qualified name of its members.
Returns:
`tid` of the entry and its index in the `type_name` list.
"""
raise NotImplementedError
@abstractmethod
def set_attribute(self, tid: int, attr_name: str, attr_value: Any):
r"""This function locates the entry data with `tid` and sets its
`attr_name` with `attr_value`.
Args:
tid (int): Unique Id of the entry.
attr_name (str): Name of the attribute.
attr_value (any): Value of the attribute.
"""
raise NotImplementedError
@abstractmethod
def set_attr(self, tid: int, attr_id: int, attr_value: Any):
r"""This function locates the entry data with `tid` and sets its
attribute `attr_id` with value `attr_value`.
Called by `set_attribute()`.
Args:
tid (int): Unique id of the entry.
attr_id (int): Id of the attribute.
attr_value: value of the attribute.
"""
raise NotImplementedError
@abstractmethod
def get_attribute(self, tid: int, attr_name: str):
r"""This function finds the value of `attr_name` in entry with
`tid`.
Args:
tid (int): Unique id of the entry.
attr_name (str): Name of the attribute.
Returns:
The value of `attr_name` for the entry with `tid`.
"""
raise NotImplementedError
@abstractmethod
def get_attr(self, tid: int, attr_id: int):
r"""This function locates the entry data with `tid` and gets the value
of `attr_id` of this entry. Called by `get_attribute()`.
Args:
tid (int): Unique id of the entry.
attr_id (int): Id of the attribute.
Returns:
The value of `attr_id` for the entry with `tid`.
"""
raise NotImplementedError
@abstractmethod
def delete_entry(self, tid: int):
r"""This function removes the entry with `tid` from the data store.
Args:
tid (int): Unique id of the entry.
"""
raise NotImplementedError
@abstractmethod
def get_entry(self, tid: int) -> Tuple[List, int, int]:
r"""Look up the entry_dict with key `tid`. Return the entry,
its `type_name`, and its `index_id` in the `entry_type` list.
Args:
tid (int): Unique id of the entry.
Returns:
The entry which `tid` corresponds to, its `type_name` and its index
in the `entry_type` list.
"""
raise NotImplementedError
@abstractmethod
def get(self, type_name: str, include_sub_type: bool) -> Iterator[List]:
r"""This function fetches entries from the data store of
type `type_name`.
Args:
type_name (str): The index of the list in `self.__elements`.
include_sub_type: A boolean to indicate whether get its subclass.
Returns:
An iterator of the entries matching the provided arguments.
"""
raise NotImplementedError
@abstractmethod
def next_entry(self, tid: int) -> List:
r"""Get the next entry of the same type as the `tid` entry.
Args:
tid (int): Unique id of the entry.
Returns:
The next entry of the same type as the `tid` entry.
"""
raise NotImplementedError
@abstractmethod
def prev_entry(self, tid: int) -> List:
r"""Get the previous entry of the same type as the `tid` entry.
Args:
tid (int): Unique id of the entry.
Returns:
The previous entry of the same type as the `tid` entry.
"""
raise NotImplementedError
| 6,796 |
schoolport/app_core/migrations/0009_tb_price_standards_price_currency.py
|
yotink522/schoolport
| 0 |
2172987
|
# Generated by Django 3.1.7 on 2021-05-09 13:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_core', '0008_tb_price_standards'),
]
operations = [
migrations.AddField(
model_name='tb_price_standards',
name='price_currency',
field=models.CharField(max_length=20, null=True),
),
]
| 416 |
AssertGen.py
|
sardarr/AutoGrade
| 0 |
2172526
|
import os
def main(arg):
ind=1
runner=''
asserts=open('Asserts/'+arg+'.txt').readlines()
for lines in asserts:
if 'assert' not in lines:
runner+=lines[:-1]+'\n'
print(lines[:-1])
else:
runner+='try {\n\t'+lines+'\tSystem.out.println(\"Passed '+str(ind)+'"); \n}\n'+'catch(Exception |AssertionError e ) {\n\tSystem.out.println(\"Failed '+str(ind)+'\");\n}'
print('try {\n\t'+lines+'\tSystem.out.println(\"Passed '+str(ind)+'"); \n}\n'+'catch(Exception |AssertionError e ) {\n\tSystem.out.println(\"Failed '+str(ind)+'\");\n}')
ind+=1
if not os.path.isdir(os.path.join('Resources', arg)):
os.makedirs(os.path.join('Resources', arg))
with open(os.path.join('Resources',arg)+'/runner.java','w') as file:
file.writelines(runner)
file.close()
if __name__ == '__main__':
HWID='hw5'
main(HWID)
| 921 |
src/mabot/ui/progressbar.py
|
jussimalinen/robotframework-mabot
| 8 |
2173391
|
# Copyright 2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from Tkinter import *
import thread
import time
class ProgressBar(Toplevel):
def __init__(self, parent, title):
self._parent = parent
self._title = title
self.not_created = False
self._init_progress_bar()
def _init_progress_bar(self):
if os.name != 'nt':
self.not_created = True
return
Toplevel.__init__(self, self._parent)
self.title(self._title)
self.protocol("WM_DELETE_WINDOW", lambda: True)
self.width = 200
self.height = 10
self.geometry(self._get_location(self._parent))
self.progress_bar_view = ProgressBarView(self, self.width, self.height)
self.progress_bar_view.pack()
self._parent.update()
self._running = thread.allocate_lock()
thread.start_new_thread(self._update, ())
def add_ask_method(self, method):
self._ask_method = method
def call_ask_method(self, *args):
self.destroy()
result = self._ask_method(*args)
self._init_progress_bar()
return result
def _get_location(self, parent):
x = parent.winfo_rootx() + parent.winfo_width()/2 - self.width/2
y = parent.winfo_rooty() + parent.winfo_height()/2 - self.height/2
return "+%d+%d" % (x, y)
def _update(self):
while self._running.acquire(0):
self.progress_bar_view.update_progress()
time.sleep(0.1)
self._running.release()
def destroy(self):
if self.not_created:
return
self._running.acquire()
Toplevel.destroy(self)
class ProgressBarView:
def __init__(self, master, width, height):
self.master=master
self.value = 0
self.width = width
self.height = height
#TODO: Test with Ubuntu
self.fill_color = 'blue'
self.background = 'white'
self.frame = Frame(self.master, bd=2, width=self.width, height=self.height)
self.canvas = Canvas(self.frame, background=self.background,
width=self.width, height=self.height)
self.scale = self.canvas.create_rectangle(0, 0, self.width, self.height,
fill=self.fill_color)
self.canvas.pack(fill=BOTH, expand=YES)
self.update()
def update_progress(self):
self.value += self.width / 20
if self.value > self.width:
self.value -= self.width
self.update()
def pack(self, *args, **kw):
self.frame.pack(*args, **kw)
def update(self):
start = (float(self.value) / self.width * self.width) - self.width / 4
end = float(self.value) / self.width * self.width
self.canvas.coords(self.scale, start, 0, end, self.height)
self.canvas.update_idletasks()
| 3,451 |
examples/basic/merge_instance.py
|
souviksaha97/spydrnet-physical
| 0 |
2172551
|
"""
===================================
Merging two instances in the design
===================================
This example demonstrate how to merge two instance of the design
"""
from os import path
import spydrnet as sdn
import spydrnet_physical as sdnphy
# TODO
print("NotImplemented")
| 293 |
train_mnist.py
|
yjhuangcd/local-lipschitz
| 15 |
2172366
|
### basic modules
import numpy as np
import time, pickle, os, sys, json, PIL, tempfile, warnings, importlib, math, copy, shutil, setproctitle
from datetime import datetime
### torch modules
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR, MultiStepLR
import torch.nn.functional as F
import data_load
import utils
import Local_bound as Local
if __name__ == "__main__":
args = utils.argparser(data='mnist',epochs=300,warmup=0,rampup=150,batch_size=256,epsilon=1.58,epsilon_train=1.58)
print(datetime.now())
print(args)
print('saving file to {}'.format(args.prefix))
setproctitle.setproctitle(args.prefix)
train_log = open(args.prefix + "_train.log", "w")
test_log = open(args.prefix + "_test.log", "w")
train_loader, test_loader = data_load.data_loaders(args.data, args.batch_size, args.test_batch_size, augmentation=args.augmentation, normalization=args.normalization, drop_last=args.drop_last, shuffle=args.shuffle)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
best_err = 1
err = 1
model = utils.select_model(args.data, args.model, args.init)
# compute the feature size at each layer
input_size = []
depth = len(model)
x = torch.randn(1,1,28,28).cuda()
for i, layer in enumerate(model.children()):
if i < depth-1:
input_size.append(x.size()[1:])
x = layer(x)
# create u on cpu to store singular vector for every input at every layer
u_train = []
u_test = []
for i in range(len(input_size)):
print(i)
if not model[i].__class__.__name__=='ReLU_x' and not model[i].__class__.__name__=='Flatten' and not isinstance(model[i], nn.ReLU):
u_train.append(torch.randn((len(train_loader.dataset), *(input_size[i])), pin_memory=True))
u_test.append(torch.randn((len(test_loader.dataset), *(input_size[i])), pin_memory=True))
else:
u_train.append(None)
u_test.append(None)
if args.opt == 'adam':
opt = optim.Adam(model.parameters(), lr=args.lr)
elif args.opt == 'sgd':
opt = optim.SGD(model.parameters(), lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
print(opt)
if args.lr_scheduler == 'step':
lr_scheduler = optim.lr_scheduler.StepLR(opt, step_size=args.step_size, gamma=args.gamma)
elif args.lr_scheduler =='multistep':
lr_scheduler = MultiStepLR(opt, milestones=args.wd_list, gamma=args.gamma)
elif (args.lr_scheduler == 'exp'):
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
opt, lr_lambda=lambda step: utils.lr_exp(args.lr, args.end_lr, step, args.epochs))
print(lr_scheduler)
eps_schedule = np.linspace(args.starting_epsilon,
args.epsilon_train,
args.schedule_length)
kappa_schedule = np.linspace(args.starting_kappa,
args.kappa,
args.kappa_schedule_length)
u_list = None
for t in range(args.epochs):
# set up epsilon and kappa scheduling
if t < args.warmup:
epsilon = 0
epsilon_next = 0
elif args.warmup <= t < args.warmup+len(eps_schedule) and args.starting_epsilon is not None:
epsilon = float(eps_schedule[t-args.warmup])
epsilon_next = float(eps_schedule[np.min((t+1-args.warmup, len(eps_schedule)-1))])
else:
epsilon = args.epsilon_train
epsilon_next = args.epsilon_train
if t < args.warmup:
kappa = 1
kappa_next = 1
elif args.warmup <= t < args.warmup+len(kappa_schedule):
kappa = float(kappa_schedule[t-args.warmup])
kappa_next = float(kappa_schedule[np.min((t+1-args.warmup, len(kappa_schedule)-1))])
else:
kappa = args.kappa
kappa_next = args.kappa
print('%.f th epoch: epsilon: %.7f - %.7f, kappa: %.4f - %.4f, lr: %.7f'%(t,epsilon,epsilon_next,kappa,kappa_next,opt.state_dict()['param_groups'][0]['lr']))
# begin training
if t < args.warmup:
utils.train(train_loader, model, opt, t, train_log, args.verbose)
_ = utils.evaluate(test_loader, model, t, test_log, args.verbose)
elif args.warmup <= t:
st = time.time()
u_list, u_train, robust_losses_train, robust_errors_train, losses_train, errors_train = Local.train(train_loader, model, opt, epsilon, kappa, t, train_log, args.verbose, args, u_list, u_train)
print('Taken', time.time()-st, 's/epoch')
u_test, err, robust_losses_test, losses_test, errors_test = Local.evaluate(test_loader, model, epsilon_next, t, test_log, args.verbose, args, u_list, u_test)
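        # learning-rate decay: the 'step' scheduler waits until the warmup + rampup phase has finished, while 'multistep'/'exp' step every epoch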
if args.lr_scheduler == 'step':
if max(t - (args.rampup + args.warmup - 1) + 1, 0):
print("LR DECAY STEP")
lr_scheduler.step(epoch=max(t - (args.rampup + args.warmup - 1) + 1, 0))
elif args.lr_scheduler =='multistep' or args.lr_scheduler =='exp':
print("LR DECAY STEP")
lr_scheduler.step()
else:
raise ValueError("Wrong LR scheduler")
# Save the best model after epsilon has been the largest
if t>=args.warmup+len(eps_schedule):
if err < best_err and args.save:
print('Best Error Found! %.3f'%err)
best_err = err
torch.save({
'state_dict' : model.state_dict(),
'err' : best_err,
'epoch' : t
}, args.prefix + "_best.pth")
torch.save({
'state_dict': model.state_dict(),
'err' : err,
'epoch' : t
}, args.prefix + "_checkpoint.pth")
args.print = True
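    # reload the best checkpoint and evaluate it three ways: clean test error, PGD adversarial error, and verified robust error at args.epsilon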
trained = torch.load(args.prefix + "_best.pth")['state_dict']
model_eval = utils.select_model(args.data, args.model, args.init)
model_eval.load_state_dict(trained)
print('std testing ...')
std_err = utils.evaluate(test_loader, model_eval, t, test_log, args.verbose)
print('pgd testing ...')
pgd_err = utils.evaluate_pgd(test_loader, model_eval, args)
print('verification testing ...')
u_test, last_err, robust_losses_test, losses_test, errors_test = Local.evaluate(test_loader, model_eval, args.epsilon, t, test_log, args.verbose, args, u_list, u_test)
print('Best model evaluation:', std_err.item(), pgd_err.item(), last_err.item())
| 6,867 |
healthyways/maps_api.py
|
frederiksemmel/euvsvirus-healthyways
| 0 |
2171175
|
import googlemaps
import os
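# Thin wrappers around the Google Maps Directions API for transit routes;
# the API key is read from the GOOGLE_API_KEY environment variable.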
def dir_deptime(start, dest, dep_time):
api = os.environ["GOOGLE_API_KEY"]
gmaps = googlemaps.Client(key=api)
route = gmaps.directions(
start, dest, mode="transit", departure_time=dep_time, alternatives=True
)
return route
def dir_arrtime(start, dest, arr_time):
api = os.environ["GOOGLE_API_KEY"]
gmaps = googlemaps.Client(key=api)
route = gmaps.directions(
start, dest, mode="transit", arrival_time=arr_time, alternatives=True
)
return route
| 536 |
whois/rest_api/router.py
|
we-race-here/wrh-bot
| 0 |
2173309
|
from rest_framework.routers import DefaultRouter
from whois.rest_api import viewset
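# DefaultRouter generates the standard list/detail routes for the WRHDiscordServers viewset.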
router = DefaultRouter()
router.register('WRHDiscordServers', viewset.WRHDiscordServersView)
| 177 |
otter/plugins/common/ClickableLabel.py
|
andrsd/otter
| 0 |
2173575
|
from PyQt5 import QtWidgets, QtCore
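# QLabel that behaves like a hyperlink: it underlines its text on hover
# and emits `clicked` when the mouse button is released over it.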
class ClickableLabel(QtWidgets.QLabel):
clicked = QtCore.pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
def enterEvent(self, event):
f = self.font()
f.setUnderline(True)
self.setFont(f)
def leaveEvent(self, event):
f = self.font()
f.setUnderline(False)
self.setFont(f)
def mouseReleaseEvent(self, event):
self.clicked.emit()
| 476 |
data_tools/validators.py
|
AfricasVoices/Experimental-UnitChart
| 0 |
2172569
|
from datetime import datetime
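# Assertion-based validators: each checks its argument's type (and basic shape)
# and returns it unchanged so calls can be chained inline.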
def validate_string(s, variable_name=""):
assert isinstance(s, str), "{} not a string".format(variable_name)
assert s != "", "{} is empty".format(variable_name)
return s
def validate_int(i, variable_name=""):
assert isinstance(i, int), "{} not an int".format(variable_name)
return i
def validate_double(d, variable_name=""):
assert isinstance(d, float), "{} not a double".format(variable_name)
return d
def validate_bool(b, variable_name=""):
assert isinstance(b, bool), "{} not a bool".format(variable_name)
return b
def validate_list(l, variable_name=""):
assert isinstance(l, list), "{} not a list".format(variable_name)
return l
def validate_hexcolor(c, variable_name=""):
assert isinstance(c, str), "{} not a color".format(variable_name)
assert c.startswith("#")
assert len(c) == 7
return c
def validate_datetime(d, variable_name=""):
assert isinstance(d, datetime), "{} not a datetime".format(variable_name)
return d
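# Minimal usage sketch (the field names and values below are made up for illustration):
if __name__ == "__main__":
    validate_string("episode_1", "episode_name")
    validate_int(3, "message_count")
    validate_hexcolor("#ff8800", "bar_colour")
    validate_datetime(datetime(2020, 4, 1), "start_date")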
| 1,035 |