max_stars_repo_path (string, 4-182) | max_stars_repo_name (string, 6-116) | max_stars_count (int64, 0-191k) | id (string, 7) | content (string, 100-10k) | size (int64, 100-10k) |
---|---|---|---|---|---|
1info1 - logica/logicadados.py
|
fernandapiresnanda/logica-
| 0 |
2026131
|
from random import randint
print("Jogo de Dados")
dados1 = randint(1, 6)
dados2 = randint(1, 6)
dados3 = randint(1, 6)
dados4 = randint(1, 6)
print("dado 1:", dados1)
print("dado 2:", dados2)
print("dado 3:", dados3)
print("dado 4:", dados4)
| 245 |
aux/configure.py
|
gnzlbg/nmp
| 21 |
2026144
|
#!/usr/bin/env python
"""Configures Hom3's build.
Usage:
configure.py [<mode>] [options] [<build_path>]
configure.py -h | --help
configure.py --version
<mode> Build mode [default: release (simulation ready)].
<build_path> Build directory [default: repository/build].
Modes:
asan Builds with Address Sanitizer.
coverage Builds with code-coverage analysis.
debug Builds in optimized debug mode.
debug-unopt Builds in unoptimized debug mode.
release Builds in release mode.
Options:
-h --help Show this screen.
--version Show version.
--verbose Verbose output.
--asan With Address Sanitizer.
--assertions With assertions.
--debug Debug mode.
--coverage Generates code coverage information.
--debug-info With debug information.
--warnings-as-errors Warnings are treated as errors.
"""
from docopt import docopt
import os
import shutil
import subprocess
import copy
flags = {'--asan' : 'NMP_WITH_ASAN',
'--assertions' : 'NMP_WITH_ASSERTIONS',
'--debug' : 'NMP_WITH_DEBUG',
'--coverage' : 'NMP_WITH_COVERAGE',
'--debug-info' : 'NMP_WITH_DEBUG_INFO',
'--warnings-as-errors' : 'NMP_WITH_WARNINGS_AS_ERRORS',
'--verbose' : 'NMP_VERBOSE_CONFIGURE'
}
# Release mode flags:
release = { '--asan' : False,
'--assertions' : False,
'--debug' : False,
'--coverage' : False,
'--debug-info' : False,
'--warnings-as-errors' : False,
'--verbose' : False,
}
# Debug mode flags:
debug = { '--asan' : False,
'--assertions' : True,
'--debug' : False,
'--coverage' : False,
'--debug-info' : True,
'--warnings-as-errors' : False,
'--verbose' : False,
}
# Debug unopt mode flags:
debug_unopt = copy.deepcopy(debug)
debug_unopt['--debug'] = True
# Asan mode flags:
asan = copy.deepcopy(debug)
asan['--asan'] = True
# Commit mode flags:
commit = copy.deepcopy(asan)
commit['--warnings-as-errors'] = True
# Coverage mode flags:
coverage = copy.deepcopy(debug_unopt)
coverage['--coverage'] = True
mode_flags = { 'asan' : asan,
'commit' : commit,
'coverage' : coverage,
'debug' : debug,
'debug-unopt' : debug_unopt,
'release' : release
}
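# For example, value('--assertions', False, 'debug') resolves to 'On', because the
# debug preset above enables assertions even when the flag is not passed explicitly.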
def value(k, v, mode):
if mode is None:
return 'On' if v else 'Off'
else:
        return 'On' if v or mode_flags.get(mode, release).get(k, v) else 'Off'
def cmake_options(args):
mode = args['<mode>']
options=''
for k,v in args.iteritems():
try:
options += ' -D' + flags[k] + '=' + value(k, v, mode)
except Exception:
pass
return options
if __name__ == '__main__':
# Read environment variables
src_dir = os.getcwd() + '/'
version = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])
# Read cmd line arguments
args = docopt(__doc__, version='NMP ' + version)
verbose = args['--verbose']
print('Configuring NMP...')
# Set up the build dir: use either user provided path or default path
if args['<build_path>'] is None:
build_dir = src_dir + 'build'
else:
build_dir = args['<build_path>']
if not os.path.exists(build_dir):
os.makedirs(build_dir)
else:
shutil.rmtree(build_dir)
os.makedirs(build_dir)
# Move to the build dir:
os.chdir(build_dir)
# Build up the cmake command:
cmake_cmd = ('cmake ' + src_dir)
cmake_cmd = cmake_cmd + cmake_options(args)
# Run cmake
if verbose:
print('[I] CMake CMD: ' + cmake_cmd)
p = subprocess.Popen(cmake_cmd, universal_newlines=False, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate('foo\nfoofoo\n')
if verbose:
print out
print err
if p.returncode == 0:
print('... done!')
else:
print('[E] Configuring failed!')
exit(p.returncode)
| 4,383 |
dashboard_generator.py
|
allannad/exec-dash
| 0 |
2025013
|
# dashboard_generator.py
import pandas as pd
import os
import numpy
import tkinter as tk
from tkinter import filedialog
import re
import matplotlib as plot
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import altair as alt
from altair import Chart, X, Y, Axis, SortField
def to_usd(my_price):
"""
Converts a numeric value to usd-formatted string, for printing and display purposes.
Source: https://github.com/prof-rossetti/intro-to-python/blob/master/notes/python/datatypes/numbers.md#formatting-as-currency
Param: my_price (int or float) like 4000.444444
Example: to_usd(4000.444444)
Returns: $4,000.44
"""
return f"${my_price:,.2f}" #> $12,000.71
#prompt the user for a file see https://www.youtube.com/watch?v=H71ts4XxWYU
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename()
#validate the file and quit if does not meet sales file regex condition
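# e.g. a valid selection ends in something like "sales-201803.csv" (the last 16 characters are checked)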
validatethis = file_path[-16:]
if re.match(r"^sales-[0-9]{6}\.csv$", validatethis):
print()
else:
print('This is not a valid file. Please select a monthly sales file.')
exit()
#create dataframe from selected file
df = pd.read_csv(file_path, error_bad_lines=False)
#create column for month and year of sales report
df['year'] = pd.DatetimeIndex(df['date']).year
df['month'] = pd.DatetimeIndex(df['date']).month
df['monthname'] = pd.to_datetime(df['month'], format='%m').dt.month_name()
#get month name as variable
month = df.at[0,'monthname']
#get year as variable
year = df.at[0,'year']
#calculate revenue
revenue = df["sales price"].sum()
df["formattedrevenue"] = to_usd(revenue)
#print(df)
#print beginning of report
print("-----------------------")
print("MONTH:" + ' ' + str(month) + ' ' + str(year))
print("-----------------------")
print("CRUNCHING THE DATA...")
print("-----------------------")
#print("TOTAL MONTHLY SALES: $12,000.71")
print("TOTAL MONTHLY SALES:",to_usd(revenue))
#identify Top selling products:
#create new df of items with the max units sold
pdsales = df.groupby(['product'], as_index=False).sum()
pdsales["revenue"] = pdsales["sales price"]
#make the column be in USD
pdsales["formattedrevenue"] = pdsales["revenue"].apply(to_usd)
#print(pdsales)
#sort them by top sellers
pdsalessorted = pdsales.sort_values(by=['revenue'], ascending=False)
#add column with number of rows, to list out later. plus 1 is necessary as lists start at 0. :)
pdsalessorted["number"] = numpy.arange(len(pdsalessorted)) + 1
#iterate through and print the number in popularity, product name and total revenue
print("-----------------------")
print("TOP SELLING PRODUCTS:")
for index, row in pdsalessorted.iterrows():
print(row['number'],row['product'],row['formattedrevenue'])
#print(pdsalessorted)
#print(type(pdsalessorted))
#DATA VISUALIZATION
print("-----------------------")
print("VISUALIZING THE DATA...")
newdf = pdsalessorted.filter(['product','revenue','formattedrevenue'], axis=1)
#print(newdf)
rev = newdf.at[0,'revenue']
#print(type(rev))
productname = newdf['product'].tolist()
revenueamt = newdf['revenue'].tolist()
formattedrevenueamt = newdf['formattedrevenue'].tolist()
#productname = []
#revenueamt = []
#for x in newdf:
# productname.append(x['product'])
# revenueamt.append(x['revenue'])
#print(productname)
#print(revenueamt)
#print(formattedrevenueamt)
x = [i for i in productname]
rev = [i for i in revenueamt]
formattedrev = [i for i in formattedrevenueamt]
x_pos = [i for i, _ in enumerate(x)]
fig, ax = plt.subplots()
#for i, v in enumerate(formattedrev):
#ax.text(v + 1, i + .25, str(v), color='black')
#ax.text(v + 1, i + .25, v, color='blue')
formatter = ticker.FormatStrFormatter('$%1.2f')
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_ticks_position(position='bottom')
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_visible(True)
tick.label2.set_visible(False)
tick.label2.set_color('black')
plt.barh(x_pos, rev, color='green')
plt.ylabel("Product")
plt.xlabel("Revenue (USD)")
plt.title("Top Selling Products" + ' ' + str(month) + ' ' + str(year))
plt.yticks(x_pos, x)
plt.show()
#plt.savefig(os.path.join(os.path.dirname(__file__),"figures","chart.png"))
#plt.savefig('chart.png')
| 4,257 |
client/creature.py
|
richchurcher/dread-snarfle
| 1 |
2025863
|
from asyncio import get_event_loop
from client.utils import get_api_client, get_key_manager
class Creature:
def __init__(self, address):
self.client = get_api_client()
self.key_manager = get_key_manager()
loop = get_event_loop()
self.network_id = loop.run_until_complete(self.client.get_network_id())
if (address):
self.address = address
else:
self.address = self.key_manager.generate()
self.key_manager.set_nonce(self.address, self.network_id, 0)
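# Usage sketch (assumes the API client and key manager are configured elsewhere):
#   creature = Creature(address=None)   # a falsy address makes the key manager generate one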
| 541 |
dfm/data/dagw/dagw.py
|
martbern/danish-foundation-models
| 8 |
2025021
|
"""TODO(DAGW): Add a description here."""
import os
import datasets
from datasets.utils import metadata
import ndjson
from .licenses import *
_CITATION = """\
@inproceedings{dagw,
title = {{The Danish Gigaword Corpus}},
author = {<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
year = 2021,
booktitle = {Proceedings of the 23rd Nordic Conference on Computational Linguistics},
publisher = {NEALT}
}
"""
_DESCRIPTION = """\
The Danish Gigaword Corpus contains raw text spanning several different domains and forms. The dataset is available under the Creative Commons Attribution-ShareAlike
License.
"""
_HOMEPAGE = "https://gigaword.dk"
_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
_DATA_URL = "https://bit.ly/danishgigaword10"
_INDIVIDUAL_LICENSES = {
"adl": cc0,
"botxt": cc0,
"cc": cc0,
"danavis": cc0,
"dannet": dannet_license,
"datwitter": twitter_license,
"depbank": att_sharealike_4,
"ep": cc0,
"ft": cc0,
"gutenberg": gutenberg_license,
"hest": cc0,
"jvj": att_sharealike_4,
"naat": cc0,
"opensub": opensub_license,
"relig": cc0,
"retsinformationdk": retsinformationdk_license,
"retspraksis": cc0,
"skat": cc0,
"spont": cc0,
"synne": cc0,
"tv2r": tv2r_license,
"twfv19": twitter_license,
"wiki": cc0,
"wikibooks": cc0,
"wikisource": cc0,
}
class DAGWConfig(datasets.BuilderConfig):
"""BuilderConfig for DAGW."""
def __init__(self, data_url, **kwargs):
"""BuilderConfig for DAGW
Args:
data_url: `string`, url to the dataset
**kwargs: keyword arguments forwarded to super.
"""
super(DAGWConfig, self).__init__(
version=datasets.Version(
"1.0.0",
),
**kwargs,
)
self.data_url = data_url
class DAGW(datasets.GeneratorBasedBuilder):
"""The Danish Gigaword Corpus contains raw text spanning several different domains and forms. The dataset is available under the Creative Commons Attribution-ShareAlike
License."""
BUILDER_CONFIGS = [
DAGWConfig(
name="dagw",
data_url=_DATA_URL,
description="Document level dataset. Each row contains one document (of greatly varying length)",
)
]
def _info(self):
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
features=datasets.Features(
{
"text": datasets.Value("string"),
"source": datasets.Value("string"),
"doc_id": datasets.Value("string"),
"LICENSE": datasets.Value("string"),
"uri": datasets.Value("string"),
"date_built": datasets.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# dl_manager is a datasets.download.DownloadManager that can be used to
# download and extract URLs
        if self.config.name == "dagw":
data_file = dl_manager.download_and_extract(self.config.data_url)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_file": os.path.join(data_file, "dagw", "sektioner"),
"split": "train",
},
)
]
def _generate_examples(self, data_file, split):
"""Yields examples."""
def _get_filepaths(dagw_sektion_path):
"""Gets the path to all text files in DAGW
Returns:
                Dict[str, List[str]] -- Dictionary with sektion as key, and list of filepaths as strings
"""
sections = os.listdir(dagw_sektion_path)
filepaths = {}
for p in sections:
subpath = os.path.join(dagw_sektion_path, p)
filepaths[p] = [
os.path.join(subpath, p)
for p in os.listdir(subpath)
if p != "LICENSE"
and not p.endswith(".json")
and not p.endswith(".jsonl")
and not p.startswith("README")
]
def handle_subdir(section):
return [
os.path.join(filepaths[section][0], p)
for p in os.listdir(filepaths[section][0])
if p != "LICENSE"
and not p.endswith(".json")
and not p.endswith(".jsonl")
and not p.startswith("README")
]
filepaths["twfv19"] = handle_subdir("twfv19")
filepaths["datwitter"] = handle_subdir("datwitter")
return filepaths
def _read_metadata(section):
if section not in ["datwitter", "twfv19"]:
with open(os.path.join(data_file, section, section + ".jsonl")) as f:
meta = ndjson.load(f)
if section not in ["danavis", "depbank", "botxt"]:
# convert list of dicts to dict with doc_id as key
try:
metadict = {
d["doc_id"]: {
"uri": d["uri"],
"date_built": d["date_built"],
}
for d in meta
}
except KeyError:
# not all sections have an uri
metadict = {
d["doc_id"]: {
"uri": "NA",
"date_built": d["date_built"],
}
for d in meta
}
# no uri or date_built for danavis
if section in ["danavis", "botxt"]:
metadict = {
d["doc_id"]: {
"uri": "NA",
"date_built": "NA",
}
for d in meta
}
if section == "depbank":
metadict = {
d["doc_id"]: {
"uri": d["uri"],
"date_built": "NA",
}
for d in meta
}
return metadict
# for twitter corpora
else:
return {
"da_all_150420-260520.txt": {"uri": "NA", "date_built": "NA"},
"da_fv19.txt": {"uri": "NA", "date_built": "NA"},
}
filepaths = _get_filepaths(data_file)
row_n = 0
for section in filepaths.keys():
section_metadata = _read_metadata(section)
section_license = _INDIVIDUAL_LICENSES[section]
for path in filepaths[section]:
with open(path) as f:
text = f.read()
doc_id = path.split("/")[-1]
yield row_n, {
"text": text,
"source": section,
"doc_id": doc_id,
"LICENSE": section_license,
"uri": section_metadata[doc_id]["uri"],
"date_built": section_metadata[doc_id]["date_built"],
}
row_n += 1
| 8,144 |
run.py
|
dgaston/kvasir
| 0 |
2025619
|
#!/usr/bin/env python
import os
from kvasir import app
port = int(os.environ.get('PORT', 5000))
app.run(host='localhost', port=port, debug = True)
| 147 |
src/util.py
|
person594/fotaro
| 0 |
2026608
|
from datetime import datetime, timedelta, timezone
from typing import Any
def escape(x: Any) -> str:
if x is None:
return "NULL"
elif isinstance(x, str):
return "'" + str_escape(x) + "'"
elif isinstance(x, int):
return str(x)
else:
return escape(repr(x))
def str_escape(s: str) -> str:
return s.replace("'", "''")
def escape_identifier(s: str) -> str:
return '"' + s.replace('"', '""') + '"'
def now_timestamp() -> int:
return utc_to_timestamp(datetime.now(timezone.utc))
def utc_to_timestamp(utc: datetime) -> int:
return int((utc - datetime.fromtimestamp(0, timezone.utc)).total_seconds())
def timestamp_to_utc(timestamp: int) -> datetime:
return datetime.fromtimestamp(timestamp, timezone.utc)
| 772 |
api/app_settings.py
|
pca/pca-web
| 3 |
2026351
|
import os
from django.conf import settings
WCA_DEFAULT_CALLBACK_URL = getattr(
settings,
"WCA_DEFAULT_CALLBACK_URL",
os.getenv(
"WCA_DEFAULT_CALLBACK_URL",
"http://localhost:8080/wca-callback",
),
)
WCA_ALLOWED_CALLBACK_URLS = getattr(
settings, "WCA_ALLOWED_CALLBACK_URLS", os.getenv("WCA_ALLOWED_CALLBACK_URLS", "")
).split(",")
FB_PAGE_TOKEN = getattr(settings, "FB_PAGE_TOKEN", os.getenv("FB_PAGE_TOKEN"))
FB_PAGE_ID = getattr(settings, "FB_PAGE_ID", os.getenv("FB_PAGE_ID"))
FB_PAGE_FEED_LIMIT = getattr(
settings, "FB_PAGE_FEED_LIMIT", os.getenv("FB_PAGE_FEED_LIMIT", 5)
)
| 622 |
gibson2/objects/shapenet_object.py
|
i-m-vivek/iGibson
| 3 |
2026176
|
from gibson2.objects.object_base import Object
import pybullet as p
import numpy as np
class ShapeNetObject(Object):
"""
ShapeNet object
Reference: https://www.shapenet.org/
"""
def __init__(self, path, scale=1., position=[0, 0, 0], orientation=[0, 0, 0]):
super(ShapeNetObject, self).__init__()
self.filename = path
self.scale = scale
self.position = position
self.orientation = orientation
self._default_mass = 3.
self._default_transform = {
'position': [0, 0, 0],
'orientation_quat': [1. / np.sqrt(2), 0, 0, 1. / np.sqrt(2)],
}
pose = p.multiplyTransforms(positionA=self.position,
orientationA=p.getQuaternionFromEuler(
self.orientation),
positionB=self._default_transform['position'],
orientationB=self._default_transform['orientation_quat'])
self.pose = {
'position': pose[0],
'orientation_quat': pose[1],
}
def _load(self):
"""
Load the object into pybullet
"""
collision_id = p.createCollisionShape(p.GEOM_MESH,
fileName=self.filename,
meshScale=self.scale)
body_id = p.createMultiBody(basePosition=self.pose['position'],
baseOrientation=self.pose['orientation_quat'],
baseMass=self._default_mass,
baseCollisionShapeIndex=collision_id,
baseVisualShapeIndex=-1)
return body_id
| 1,780 |
tabla/tabla/simulation/pu.py
|
ziqingzeng/public
| 6 |
2025584
|
from .pe import PE
from .bus import PENB, PEGB
from .buffer import Buffer
from .bus_arbiter import PEGBArbiter
from .defaults import DEFAULT_NAMESPACE_BUFFER_SIZE, DEFAULT_BUS_BUFFER_SIZE, DEFAULT_INPUT_BITWIDTH, DEFAULT_INTERIM_BITWIDTH, DEFAULT_BUS_BITWIDTH
"""
PU has PEs, buses, bus arbiter.
"""
class PU(object):
def __init__(self, id,
pes_per_pu,
pe_buffer_size=DEFAULT_NAMESPACE_BUFFER_SIZE,
buffer_interim_size=DEFAULT_NAMESPACE_BUFFER_SIZE,
input_bitwidth=DEFAULT_INPUT_BITWIDTH,
interim_bitwidth=DEFAULT_INTERIM_BITWIDTH,
bus_bitwidth=DEFAULT_BUS_BITWIDTH,
bus_buffer_size=DEFAULT_BUS_BUFFER_SIZE,
debug=False):
self.id = id
self.pes_per_pu = pes_per_pu
# Size of namespace buffers for the PEs that belong to this PU
self.pe_buffer_size = pe_buffer_size
        # Size of NI (namespace interim) buffer for the PEs that belong to this PU
self.buffer_interim_size = buffer_interim_size
self.input_bitwidth = input_bitwidth
self.interim_bitwidth = interim_bitwidth
# Create PEs for this PU
self.pes = []
for i in range(pes_per_pu):
relative_id = i
absolute_id = pes_per_pu * id + relative_id
pe = PE(absolute_id, relative_id, self.pe_buffer_size,
self.buffer_interim_size,
self.input_bitwidth,
self.interim_bitwidth,
debug=debug)
self.pes.append(pe)
# Set Head PE of this PU
self.head_pe = self.pes[0]
self.head_pe.is_head_pe = True
# Set PENB's for each pair of PEs in this PU
for i, pe in enumerate(self.pes[:-1]):
source_pe = pe
dest_pe = self.pes[i + 1]
penb = PENB(source_pe, dest_pe, debug=debug)
# print(penb)
source_pe.set_penb(penb)
# Set last PE's neighbor to be first PE
last_pe = self.pes[-1]
first_pe = self.pes[0]
penb = PENB(last_pe, first_pe, debug=debug)
# print(penb)
last_pe.set_penb(penb)
self.bus_bitwidth = bus_bitwidth
self.bus_buffer_size = bus_buffer_size
# PE Global Bus for the PEs that belong to this PU
self.pegb = PEGB(self.pes, self.bus_bitwidth, debug=debug)
# PE Global Bus Arbiter
self.bus_arbiter = PEGBArbiter(self.pegb, debug=debug)
self.cycle = 0
self.debug = debug
def __str__(self):
pe_str = ''
for pe in self.pes:
pe_str += 'PE ' + str(pe.absolute_id) + ', '
s = f'PU {self.id}\n' + \
f'\t{pe_str}\n' + \
f'\t{self.pegb.__str__()}'
return s
def buffer_sizes(self):
sizes = {}
for pe in self.pes:
sizes[f'PE{pe.relative_id}'] = pe.buffer_sizes()
sizes[f'PEGB'] = int(self.pegb.new_data_written)
return sizes
# Probably won't be used much
def load_instructions_to_pe(self, pe_id_relative, instructions):
pe = self.pes[pe_id_relative]
pe.load_instructions(instructions)
def run_one_cycle(self):
# if self.debug:
# print(f'Cycle {self.cycle}')
self.accessed = False
# Dictionary to hold access counts for each on-chip memory component in this PE
# Example format: {"PE0": pe access stats dictionary...}
# Use PE absolute id for this
pu_access_stats = {}
self.bus_arbiter.run_one_cycle()
self.accessed = self.bus_arbiter.accessed
for pe in self.pes:
if self.debug:
print(f'PE {pe.relative_id}')
pe_access_stats = pe.run_one_cycle()
pu_access_stats[f'PE_{pe.absolute_id}'] = pe_access_stats
if self.debug:
if pe.done_processing:
print(f'\tPE {pe.relative_id}: DONE PROCESSING')
else:
program_counter = pe.program_counter
num_insts = pe.instruction_memory.num_instructions
progress_percentage = int(program_counter / num_insts * 100)
print(f'\tPE {pe.relative_id} PC: {program_counter} out of {num_insts} total ({progress_percentage} %)')
self.accessed = self.accessed or pe.accessed
if self.debug:
print()
if self.done_processing:
print(f'\t*** PU {self.id} DONE PROCESSING ***')
elif self.accessed is False:
print(f'\t*** PU {self.id}: Nothing happened in cycle {self.cycle} ***')
self.cycle += 1
if self.debug:
print()
return pu_access_stats
def run_cycles(self, cycles):
for i in range(cycles):
self.run_one_cycle()
@property
def done_processing(self):
"""
        Returns True if all PEs in this PU have completed processing all instructions.
"""
status = True
for pe in self.pes:
if not pe.done_processing:
# if self.debug:
# print(f'\tPE {pe.absolute_id} did not complete processing all insts')
return False
return status
def set_punb(self, punb):
"""
Set the PUNB of Head PE.
"""
self.head_pe.set_punb(punb)
# TODO (Not important) use this in write_to_pu_read_buffer() function and test it
@property
def pugb_read_buffer(self):
return self.head_pe.pugb_read_buffer
# TODO (Not important) use this in read_from_pu_write_buffer() function and test it
@property
def pugb_write_buffer(self):
return self.head_pe.pugb_write_buffer
if __name__ == '__main__':
pu = PU(1, 8)
print(pu)
pe = pu.pes[1]
print(pe)
| 5,931 |
check_environment.py
|
jonasvdd/DS-python-data-analysis
| 65 |
2026468
|
# This script is adapted from <NAME>:
# https://github.com/amueller/scipy-2018-sklearn/blob/master/check_env.ipynb
# and glemaitre: https://github.com/glemaitre/pyparis-2018-sklearn/blob/master/check_environment.py
from __future__ import print_function
from distutils.version import LooseVersion as Version
import sys
try:
import curses
curses.setupterm()
assert curses.tigetnum("colors") > 2
OK = "\x1b[1;%dm[ OK ]\x1b[0m" % (30 + curses.COLOR_GREEN)
FAIL = "\x1b[1;%dm[FAIL]\x1b[0m" % (30 + curses.COLOR_RED)
except:
OK = '[ OK ]'
FAIL = '[FAIL]'
try:
import importlib
except ImportError:
print(FAIL, "Python version 3.4 is required,"
" but %s is installed." % sys.version)
def import_version(pkg, min_ver, fail_msg=""):
mod = None
try:
mod = importlib.import_module(pkg)
if pkg in {'PIL'}:
ver = mod.VERSION
elif pkg in {'xlrd'}:
ver = mod.__VERSION__
else:
ver = mod.__version__
if Version(ver) < min_ver:
print(FAIL, "%s version %s or higher required, but %s installed."
% (lib, min_ver, ver))
else:
print(OK, '%s version %s' % (pkg, ver))
except ImportError:
print(FAIL, '%s not installed. %s' % (pkg, fail_msg))
return mod
# first check the python version
print('Using python in', sys.prefix)
print(sys.version)
pyversion = Version(sys.version)
if pyversion >= "3":
if pyversion < "3.6":
print(FAIL, "Python version 3.6 is required,"
" but %s is installed." % sys.version)
else:
print(FAIL, "Python 3 is required, but %s is installed." % sys.version)
print()
requirements = {'numpy': "1.9", 'matplotlib': "2.0",
'pandas': "0.25", 'notebook': "5",
'plotnine': '0.6',
'pyproj': '1.9.5.1', 'requests': '2.18.0',
'seaborn': '0.9.0', 'xlrd': '1.1.0'}
# now the dependencies
for lib, required_version in list(requirements.items()):
import_version(lib, required_version)
# mplleaflet has no option to derive __version__
try:
import mplleaflet
print(OK, '%s can be loaded' % ('mplleaflet'))
except:
print(FAIL, '%s can not be loaded.' % ('mplleaflet'))
| 2,281 |
examples/gng_iris.py
|
ChristophRaab/prototorch_models
| 4 |
2026376
|
"""Growing Neural Gas example using the Iris dataset."""
import argparse
import prototorch as pt
import pytorch_lightning as pl
import torch
if __name__ == "__main__":
# Command-line arguments
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
# Reproducibility
pl.utilities.seed.seed_everything(seed=42)
# Prepare the data
train_ds = pt.datasets.Iris(dims=[0, 2])
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=64)
# Hyperparameters
hparams = dict(
num_prototypes=5,
input_dim=2,
lr=0.1,
)
# Initialize the model
model = pt.models.GrowingNeuralGas(
hparams,
prototypes_initializer=pt.initializers.ZCI(2),
)
# Compute intermediate input and output sizes
model.example_input_array = torch.zeros(4, 2)
# Model summary
print(model)
# Callbacks
vis = pt.models.VisNG2D(data=train_loader)
# Setup trainer
trainer = pl.Trainer.from_argparse_args(
args,
max_epochs=100,
callbacks=[vis],
weights_summary="full",
)
# Training loop
trainer.fit(model, train_loader)
| 1,217 |
astro/21cm/get_slice_zfreq.py
|
liweitianux/atoolbox
| 4 |
2025871
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
# MIT License
#
"""
Get the slices at the specified redshifts/frequencies from the HI
light-cone cube (created by `make_lightcone.py`), and use linear
interpolation.
"""
import sys
import argparse
import logging
from datetime import datetime, timezone
import numpy as np
from astropy.io import fits
from astropy.cosmology import FlatLambdaCDM
from z2freq import z2freq, freq2z
logging.basicConfig(level=logging.INFO,
format="[%(levelname)s:%(lineno)d] %(message)s")
logger = logging.getLogger()
cosmo = FlatLambdaCDM(H0=71, Om0=0.27)
class LightCone:
"""
Light-cone cube mimic the observation of HI signal.
"""
def __init__(self, infile):
with fits.open(infile) as f:
self.data = f[0].data
self.header = f[0].header
logger.info("Loaded light-cone cube: %dx%d (cells) * %d (slices)" %
(self.Nside, self.Nside, self.Nslice))
@property
def Nslice(self):
ns, __, __ = self.data.shape
return ns
@property
def Nside(self):
return self.header["Nside"]
@property
def slices_Dc(self):
"""
The comoving distances of each slice in the light-cone cube.
The slices are evenly distributed along the LoS with equal
comoving step. [Mpc]
"""
Dc_step = self.header["Dc_step"]
Dc_min = self.header["Dc_min"]
Dc = np.array([Dc_min + Dc_step*i for i in range(self.Nslice)])
return Dc
def get_slice(self, z):
Dc = cosmo.comoving_distance(z).value # [Mpc]
slices_Dc = self.slices_Dc
if Dc < slices_Dc.min() or Dc > slices_Dc.max():
raise ValueError("requested redshift out of range: %.2f" % z)
i2 = (slices_Dc <= Dc).sum()
i1 = i2 - 1
Dc1, s1 = slices_Dc[i1], self.data[i1, :, :]
Dc2, s2 = slices_Dc[i2], self.data[i2, :, :]
slope = (s2 - s1) / (Dc2 - Dc1)
return s1 + slope * (Dc - Dc1)
def write_slice(self, outfile, data, z, clobber=False):
freq = z2freq(z)
Dc = cosmo.comoving_distance(z).value # [Mpc]
header = fits.Header()
header["BUNIT"] = (self.header["BUNIT"],
self.header.comments["BUNIT"])
header["Lside"] = (self.header["Lside"],
self.header.comments["Lside"])
header["Nside"] = (self.header["Nside"],
self.header.comments["Nside"])
header["REDSHIFT"] = (z, "redshift of this slice")
header["FREQ"] = (freq, "[MHz] observed HI signal frequency")
header["Dc"] = (Dc, "[cMpc] comoving distance")
header["DATE"] = (datetime.now(timezone.utc).astimezone().isoformat(),
"File creation date")
header.add_history(" ".join(sys.argv))
hdu = fits.PrimaryHDU(data=data, header=header)
try:
hdu.writeto(outfile, overwrite=clobber)
except TypeError:
hdu.writeto(outfile, clobber=clobber)
logger.info("Wrote slice to file: %s" % outfile)
def main():
outfile_pattern = "{prefix}_f{freq:06.2f}_z{z:06.3f}.fits"
outfile_prefix = "deltaTb"
parser = argparse.ArgumentParser(
description="Get slices at requested redshifts/frequencies " +
"from light-cone cube")
parser.add_argument("-C", "--clobber", dest="clobber",
action="store_true",
help="overwrite existing files")
parser.add_argument("-i", "--infile", dest="infile", required=True,
help="input light-cone cube")
parser.add_argument("-o", "--outfile", dest="outfile",
default=outfile_pattern,
help="output image slice filename pattern FITS " +
"(default: %s)" % outfile_pattern)
parser.add_argument("-p", "--prefix", dest="prefix",
default=outfile_prefix,
help="prefix of output slices (default: %s)" %
outfile_prefix)
exgrp = parser.add_mutually_exclusive_group(required=True)
exgrp.add_argument("-z", "--redshifts", dest="redshifts", nargs="+",
help="redshifts where to interpolate slices")
exgrp.add_argument("-f", "--freqs", dest="freqs", nargs="+",
help="21cm frequencies [MHz] to interpolate slices")
args = parser.parse_args()
if args.redshifts:
redshifts = [float(z) for z in args.redshifts]
freqs = z2freq(redshifts, print_=False)
else:
freqs = [float(f) for f in args.freqs]
redshifts = freq2z(freqs, print_=False)
lightcone = LightCone(args.infile)
for z, f in zip(redshifts, freqs):
outfile = args.outfile.format(prefix=args.prefix, z=z, freq=f)
logger.info("z=%06.3f, freq=%06.2f MHz : %s ..." % (z, f, outfile))
data = lightcone.get_slice(z)
lightcone.write_slice(outfile, data=data, z=z, clobber=args.clobber)
if __name__ == "__main__":
main()
| 5,138 |
slippi/game.py
|
davisdude/py-slippi
| 0 |
2026194
|
from slippi.event import ParseEvent
from slippi.parse import parse
from slippi.util import *
class Game(Base):
"""Replay data from a game of Super Smash Brothers Melee."""
def __init__(self, input):
"""Parses Slippi replay data from `input` (stream or path)."""
self.start = None
""":py:class:`slippi.event.Start`: Information about the start of the game"""
self.frames = []
"""list(:py:class:`slippi.event.Frame`): Every frame of the game, indexed by frame number"""
self.end = None
""":py:class:`slippi.event.End`: Information about the end of the game"""
self.metadata = None
""":py:class:`slippi.metadata.Metadata`: Miscellaneous data not directly provided by Melee"""
self.metadata_raw = None
"""dict: Raw JSON metadata, for debugging and forward-compatibility"""
handlers = {
ParseEvent.START: lambda x: setattr(self, 'start', x),
ParseEvent.FRAME: lambda x: self.frames.append(x),
ParseEvent.END: lambda x: setattr(self, 'end', x),
ParseEvent.METADATA: lambda x: setattr(self, 'metadata', x),
ParseEvent.METADATA_RAW: lambda x: setattr(self, 'metadata_raw', x)}
parse(input, handlers)
def _attr_repr(self, attr):
if attr == 'frames':
return 'frames=[...](%d)' % len(self.frames)
elif attr != 'metadata_raw':
return super()._attr_repr(attr)
else:
return None
| 1,512 |
Dataset_Generation/Umwandlungsskript_v2.py
|
simi48/Ef-If_Jassen
| 4 |
2026248
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# This script modifies textfiles in order to work with YOLOv2
#
# Author: <NAME>
# Date : 29.04.2019
#
# History:
# Version Date Who Changes
# 1.0 29.04.2019 M7ma created
#
# Copyright © <NAME>, <NAME>, <NAME>, <NAME>
# Frauenfeld, Switzerland. All rights reserved
# -----------------------------------------------------------------------------
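# Note: coordinates are divided by 1000, presumably the source image side length,
# so that the values land in the [0, 1] range expected by YOLOv2.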
for idx in range(49431):
f = open("D:\Deep_Jass\\" + str(idx)+ "_scene.txt", "r")
lines = f.readlines()
for i in range(3):
c, x, y, h, w = [int(s) for s in lines[i].split() if s.isdigit()]
lines[i] = str(c) + " " + str(x/1000) + " " + str(y/1000) + " " + str(h/1000) + " " + str(w/1000) + "\n"
f.close()
f = open("D:\Deep_Jass\\" + str(idx) + "_scene.txt", "w")
f.writelines(lines)
f.close()
| 953 |
knowledge_graph/crawler/runner/xywy/drugs_list.py
|
Syhen/knowledge-graph
| 2 |
2026588
|
# -*- coding: utf-8 -*-
"""
Author: @heyao
Created On: 2019/6/25 1:19 PM
"""
from knowledge_graph.crawler.runner.base import BaseRunner
class XYWYDrugListRunner(BaseRunner):
URL = 'http://yao.xywy.com/class.htm'
def __init__(self, redis_connection, redis_key='drug:list:xywy', mongo_db=None, query=None):
super(XYWYDrugListRunner, self).__init__(redis_connection, redis_key, mongo_db, query)
def run(self):
"""
:return:
"""
self.queue.push({
'url': self.URL,
})
if __name__ == '__main__':
from knowledge_graph.pools import redis_connection
runner = XYWYDrugListRunner(redis_connection)
runner.run()
| 690 |
server/data/azstorage/blobs.py
|
Neoteroi/Torino
| 7 |
2024915
|
import urllib.parse
from datetime import datetime, timedelta
from typing import List, cast
from azure.core.exceptions import ResourceExistsError
from azure.storage.blob import (
BlobSasPermissions,
BlobServiceClient,
ContainerSasPermissions,
generate_blob_sas,
generate_container_sas,
)
from core.errors import ConflictError
from core.pools import PoolClient
from domain.blobs import BlobsService, Container
from domain.settings import Settings
from .logs import log_blob_dep
def _list_containers(blob_client: BlobServiceClient) -> List[Container]:
containers: List[Container] = []
for item in blob_client.list_containers():
containers.append(Container(id=item.name, name=item.name, etag=item.etag))
return containers
def _create_container(blob_client: BlobServiceClient, name: str) -> None:
try:
blob_client.create_container(name=name)
except ResourceExistsError:
raise ConflictError("A container with the given name already exists")
class AzureStorageBlobsService(BlobsService, PoolClient):
def __init__(self, blob_client: BlobServiceClient, settings: Settings) -> None:
super().__init__()
self.blob_client = blob_client
self.settings = settings
@log_blob_dep()
async def get_containers(self) -> List[Container]:
return await self.run(_list_containers, self.blob_client)
@log_blob_dep()
async def create_container(self, name: str) -> None:
return await self.run(_create_container, self.blob_client, name)
def get_read_blob_sas(
self,
container_name: str,
file_name: str,
display_name: str,
) -> str:
escaped_name = urllib.parse.quote(display_name)
token = generate_blob_sas(
account_name=self.settings.storage_account_name,
account_key=self.settings.storage_account_key,
container_name=container_name,
blob_name=file_name,
permission=BlobSasPermissions(read=True, create=False, write=False),
expiry=datetime.utcnow() + timedelta(hours=2),
content_disposition=f'attachment;filename="{escaped_name}"',
)
return cast(str, token)
def get_read_container_sas(self, container_name: str) -> str:
token = generate_container_sas(
account_name=self.settings.storage_account_name,
account_key=self.settings.storage_account_key,
container_name=container_name,
permission=ContainerSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=24),
)
return cast(str, token)
def get_admin_blob_sas(self, container_name: str, file_name: str) -> str:
token = generate_blob_sas(
account_name=self.settings.storage_account_name,
account_key=self.settings.storage_account_key,
container_name=container_name,
blob_name=file_name,
permission=BlobSasPermissions(read=True, create=True, write=True),
expiry=datetime.utcnow() + timedelta(hours=2),
)
return cast(str, token)
| 3,146 |
src/modules/comments/application/search/comments_searcher.py
|
bertini36/serverless-comments-engine
| 0 |
2024531
|
from dataclasses import asdict
from typing import List
from .search_comments_query import SearchCommentsQuery
from ...domain.comment import Comment
from ...domain.comments_repository import CommentsRepository
class CommentsSearcher:
def __init__(self, repository: CommentsRepository):
self.repository = repository
def search(self, query: SearchCommentsQuery) -> List[Comment]:
comments = self.repository.get_comments(**asdict(query))
comments = Comment.sort(comments)
return comments
| 528 |
doughnut/settings.py
|
vaginessa/doughnut
| 0 |
2025190
|
import os
CONFIG_FILE_NAME = "doughnut.json"
SCHEMA_FILE_NAME = "schema.json"
RESOURCE_DIRECTORY = os.path.abspath("resources")
UPLOAD_DIRECTORY = os.path.abspath("upload")
BUILD_DIRECTORY = os.path.abspath("build")
STATIC_DIRECTORY = os.path.abspath("static")
CSS_FILE = os.path.join(RESOURCE_DIRECTORY, "css", "style.css")
TEMPLATE_DIRECTORY = os.path.join(RESOURCE_DIRECTORY, "templates")
SCHEMA_FILE = os.path.join(STATIC_DIRECTORY, SCHEMA_FILE_NAME)
PDF_OPTIONS = {
'encoding': 'UTF-8',
'page-size': 'A4',
'margin-top': '0.5in',
'margin-bottom': '0.3in',
'margin-left': '0in',
'margin-right': '0in',
'header-spacing': '3.8',
'footer-spacing': '1.8',
'no-outline': None,
'disable-local-file-access': None,
'disable-external-links': None
}
ASTYLE_OPTIONS = "--mode={mode} --style=google --indent=spaces=4 -xC50 -xG -C -S -K -N -L -xW -w -xw -Y -p -U -xe -k3 -W3 -j -xp -c -xy -xL -F"
PYGMENT_HTML_LINE_SPAN_PREFIX = "doughnut-span"
ASTYLE_BLACKLIST_EXTENSIONS = ['lex', 'l', 'html', 'jsp', 'hbm', 'cfg']
| 1,062 |
python/145_binary_tree_postorder_traversal.py
|
liaison/LeetCode
| 17 |
2026146
|
from collections import deque
from typing import List, Optional

# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def postorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
output = []
def dfs(node):
if not node:
return
dfs(node.left)
dfs(node.right)
output.append(node.val)
dfs(root)
return output
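# Example: for the tree [1, null, 2, 3] the postorder traversal is [3, 2, 1].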
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class SolutionStack:
def postorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
output = deque([])
stack = [root]
while stack:
node = stack.pop()
if node:
stack.append(node.left)
stack.append(node.right)
output.appendleft(node.val)
return output
| 1,070 |
pylsdj/speech_instrument.py
|
alexras/pylsdj
| 25 |
2026423
|
from .vendor.six.moves import range
# Max. length of a word name
WORD_NAME_LENGTH = 4
def word_cleanup(word):
return ''.join([chr(x) for x in word if x != 0]).ljust(WORD_NAME_LENGTH)
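# Example: word_cleanup([65, 66, 0, 0]) returns 'AB  ' (zero bytes dropped, padded to WORD_NAME_LENGTH).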
class Word(object):
def __init__(self, song, index):
self._song = song
"""the word's parent song"""
self._index = index
"""the word's index within the speech instrument"""
@property
def song(self):
return self._song
@property
def index(self):
return self._index
@property
def name(self):
"""the word's name"""
return self._song.song_data.word_names[self._index]
@name.setter
def name(self, value):
self._song.song_data.word_names[self._index] = value
@property
def sounds(self):
"""a list of the sounds that make up the word; each sound has
an ``allophone`` and a ``length``"""
return self._song.song_data.words[self._index]
class SpeechInstrument(object):
def __init__(self, song):
self._song = song
self._words = [
Word(self._song, i) for i in
range(len(self._song.song_data.words))]
@property
def song(self):
"""the speech instrument's parent song"""
return self._song
@property
def words(self):
"""a list of the speech instrument's defined words, as Word objects"""
return self._words
| 1,426 |
imageKit/use_model.py
|
Littel-Laboratory/homes-dataset-tools
| 12 |
2023078
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import numpy as np
from PIL import Image
import six
import six.moves.cPickle as pickle
import chainer
from chainer import serializers
import nin
parser = argparse.ArgumentParser(
description='predict bukken photo type')
parser.add_argument('img_path', help='Path to bukken image')
parser.add_argument('--mean', '-m', default='mean.npy',
help='Path to the mean file (computed by compute_mean.py)')
parser.add_argument('--model', default='model',
help='the model that generated by train_imagenet.py')
parser.add_argument('--labels', default='image_label.csv',
help='csv file of bukken image labels')
args = parser.parse_args()
# Load the mean image
mean_image = pickle.load(open(args.mean, 'rb'))
# Load the model
model = nin.NIN()
serializers.load_hdf5(args.model, model)
cropwidth = 256 - model.insize
def resize_image(raw_image):
    # Resize to the training data size
if (raw_image.size[0] > 120 or raw_image.size[1] > 120) :
raw_image.thumbnail([120, 120], Image.ANTIALIAS)
    # Convert to an ndarray
image_array = np.asarray(raw_image)
    # Tile the image (by mirroring) up to 256x256
image_array = crop_image(image_array.copy())
    # Then crop to the NIN model's input size
image = image_array.transpose(2, 0, 1)
    top = left = cropwidth // 2
bottom = model.insize + top
right = model.insize + left
image = image[:, top:bottom, left:right].astype(np.float32)
image -= mean_image[:, top:bottom, left:right]
image /= 255
return image
# Mirror and crop the image
def crop_image(src) :
target_shape = (256, 256)
# mirror image
while src.shape[0] < target_shape[0] or src.shape[1] < target_shape[1]:
# print(src.shape)
src = np.concatenate((np.fliplr(src), src), axis=1)
src = np.concatenate((np.flipud(src), src), axis=0)
src = src[:target_shape[0], :target_shape[1]]
return src
# Organize the prediction results
def format_scores(scores_array) :
res = []
for x, scores in enumerate(scores_array) :
scores = scores_array[x][:23]
elements = []
for line in open(args.labels):
pair = line.rstrip().split(",")
elements.append(pair[1])
scoreDic = {}
for y, score in enumerate(scores) :
scoreDic[elements[y]] = score
rank = []
i = 0
for k, v in sorted(scoreDic.items(), key=lambda x:x[1], reverse=True) :
rank.append((k,v))
i += 1
res.append(rank)
return res
# Load the image
raw_image = Image.open(args.img_path)
fixed_image = resize_image(raw_image)
x = chainer.Variable(np.asarray([fixed_image]), volatile='on')
scores = model.predict(x)
formated_score = format_scores(scores.data)
for tupple in formated_score[0][:10]:
print("%-12s\t %.3f%% " %(tupple[0], tupple[1]*100.0 ))
| 2,884 |
demo_data/converter.py
|
isaac868/SENG475-Neural-Network-Project
| 0 |
2026411
|
import numpy as np
import sys
from PIL import Image
img = np.asarray(Image.open(str(sys.argv[1])).convert("L").resize((28,28)))
st = ""
for row in img:
for val in row:
st += str(255 - val)
st += ","
print(st)
| 229 |
tools/geodistribution.py
|
koulevprime/GeoLifeReader
| 5 |
2026134
|
#Input: Database with time-homogenized records.
#Output: ONE simulator ready files.
#Parameters: Number of users to output.
# Weekday of interest.
# Area of interest.
# Time delta between records.
import logging
logging.basicConfig(
level=logging.DEBUG,
filename='/tmp/geolife.geostats.log',
filemode='w'
)
logger = logging.getLogger("geolife")
stdout = logging.StreamHandler()
stdout.setLevel(logging.DEBUG)
logger.addHandler(stdout)
from schema import HomogenizedRecord
from config import getEngine
import argparse
from utils import timerange
from utils import ETACalculator
from utils import num_elements_in_time_range
from datetime import time
from datetime import timedelta
from sqlalchemy.orm import sessionmaker
import os
Session = sessionmaker()
engine = getEngine()
Session.configure(bind=engine)
# Parse the command-line arguments.
def get_arguments():
parser = argparse.ArgumentParser(
description='Write out files for simulation.'
)
parser.add_argument(
'-n', '--num-users',
dest='num_users',
help="Number of users to select from db",
type=int,
default=None,
)
parser.add_argument(
'-d', '--time-delta',
dest='time_delta',
help="Number of seconds that should be between any two consecutive records",
type=lambda x: timedelta(seconds=int(x)),
default=timedelta(seconds=5),
)
parser.add_argument(
'-o', '--output_directory',
dest="output_directory",
help='Directory to store created files (default: ./out)',
default="./out",
type=os.path.abspath,
)
args = parser.parse_args()
return args
if __name__ == "__main__":
session = Session()
args = get_arguments()
delta = args.time_delta
output_directory = args.output_directory
n = num_elements_in_time_range(start=time.min, end=time.max, step=delta)
eta_til_completed = ETACalculator(n, "Geographic distribution over time")
for t in timerange(time.min, time.max, delta):
records = session.query(HomogenizedRecord).filter(
HomogenizedRecord.time == t,
).count()
print(records)
eta_til_completed.checkpoint()
logger.info(eta_til_completed.eta())
| 2,187 |
project1/change_file_name.py
|
h-mayorquin/camp_india_2016
| 3 |
2026100
|
import os
# This imports all the file names
location = './data/'
filenames = os.listdir(location)
for filename in filenames:
aux = filename.split('.')
name = aux[0]
extension = aux[1]
new_name = location + name.upper() + '.' + extension
os.rename(location + filename, new_name)
| 300 |
cs161-intro-programming-solving-problem/HW1/hw1-2_AlvesSilva-OtavioAugusto-Mac.py
|
tavioalves/computerscience-psu
| 0 |
2026497
|
# turtle_example.py - example program for turtle graphics
# (c) 2015, <NAME>, 902840168
#
import turtle
turtle.color('red','yellow')
turtle.begin_fill()
while True:
turtle.forward(200)
turtle.left(170)
if abs(turtle.pos()) < 1:
break
turtle.end_fill()
turtle.done()
| 284 |
aiorate_limiter/storage/redis.py
|
theruziev/rate_limiter
| 1 |
2023783
|
import hashlib
import time
from aioredis import Redis
from ..base import RateLimiterAbstract, RateLimiterResult, RateLimiterOpts
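# Token-bucket in Lua: tokens are refilled in proportion to the time elapsed since the
# last update (capped at max_points), the requested points are deducted when available,
# and the script returns the points remaining after the deduction together with the
# last-update timestamp.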
REDIS_TOKEN_BUCKET_SCRIPT = """
local tokens_key = KEYS[1]
local timestamp_key = KEYS[2]
local duration = tonumber(ARGV[1])
local max_points = tonumber(ARGV[2])
local now = tonumber(ARGV[3])
local points = tonumber(ARGV[4])
local data = redis.call("mget", tokens_key, timestamp_key)
local value = tonumber(data[1])
local last_update = tonumber(data[2])
if value == nil then
value = max_points
end
if last_update == nil then
last_update = now
end
local refill_count = (now - last_update) / duration
value = value + (refill_count * max_points)
last_update = last_update + (refill_count * duration)
if value >= max_points then
value = max_points
last_update = now
end
local available_points = math.floor(value - points)
if value >= points then
value = available_points
end
local ttl = math.floor((last_update * 2) / 1000)
redis.call("setex", tokens_key, ttl, value)
redis.call("setex", timestamp_key, ttl, last_update)
return { available_points, last_update }
""" # noqa
REDIS_SCRIPT_HASH = hashlib.sha1(REDIS_TOKEN_BUCKET_SCRIPT.encode()).hexdigest() # noqa
class RedisRateLimiter(RateLimiterAbstract):
def __init__(self, opts: RateLimiterOpts, redis: Redis):
self.opts = opts
self.redis = redis
async def init(self):
await self._load_script()
async def _load_script(self):
"""
Load lua token-bucket implementation
"""
script_exist = bool((await self.redis.script_exists(REDIS_SCRIPT_HASH))[0])
if not script_exist:
await self.redis.script_load(REDIS_TOKEN_BUCKET_SCRIPT)
async def consume(self, key: str, points: int = 1) -> RateLimiterResult:
build_key = self._build_key(key)
keys = [f"{build_key}:tokens", f"{build_key}:ts"]
value, last_update = await self.redis.evalsha(
REDIS_SCRIPT_HASH,
keys=keys,
args=[self.opts.duration, self.opts.points, self._get_time(), points],
)
return RateLimiterResult(
remaining_points=value,
ms_before_next=self._get_time() - last_update,
consumed_points=points,
)
@classmethod
def _get_time(cls) -> int:
"""
returning milliseconds of timestamp
:return:
"""
return int(time.time() * 1000)
def _build_key(self, key: str) -> str:
return f"{self.opts.key_prefix}:{key}"
| 2,539 |
arametrics-linkedin/api-test.py
|
AravinthPanch/araMetrics
| 0 |
2026298
|
#!/usr/bin/env python
# Author : <NAME>
# Description : API Tester
import requests
import pprint
import csv
import json
from config.config import *
# urn:li:fs_miniProfile:ACoAAArvscgBnXp5J2T_ipLQXmXy73eyNWs9zcE
# urn:li:member:183480776
# profileUpdatesV2
LI_REQUEST_URL = 'https://www.linkedin.com/voyager/api/identity/profileUpdatesV2?count=5&includeLongTermHistory=true&moduleKey=member-shares%3Aphone&numComments=0&numLikes=0&profileUrn=urn%3Ali%3Afsd_profile%3AACoAAArvscgBnXp5J2T_ipLQXmXy73eyNWs9zcE&q=memberShareFeed'
# socialUpdateAnalyticsHeader
LI_REQUEST_URL = 'https://www.linkedin.com/voyager/api/identity/socialUpdateAnalyticsHeader/urn:li:activity:6749841001667465216'
# LI_REQUEST_URL = 'https://www.linkedin.com/voyager/api/identity/profiles/aravinthpanch/following?count=1&entityType=INFLUENCER&q=followedEntities'
# LI_REQUEST_URL = 'https://www.linkedin.com/voyager/api/identity/profiles/me/networkinfo?shouldIncludeFollowingCount=true'
# LI_REQUEST_URL = 'https://www.linkedin.com/voyager/api/identity/profiles/ACoAAANoAaQBWD4a3F1sZL97KZ659lr_0SGzbKo/posts'
# LI_REQUEST_URL = 'https://www.linkedin.com/voyager/api/feed/social/urn:li:activity:6749841001667465216'
# LI_REQUEST_URL = 'https://www.linkedin.com/voyager/api/feed/updates/urn:li:activity:6749841001667465216'
with requests.session() as session:
session.cookies['li_at'] = LI_ACCESS_TOKEN
session.cookies["JSESSIONID"] = LI_CSRF_TOKEN
session.headers = LI_REQUEST_HEADER
session.headers["csrf-token"] = session.cookies["JSESSIONID"].strip('"')
response = session.get(LI_REQUEST_URL).json()
print(json.dumps(response))
| 1,643 |
monascaclient/tests/v2_0/shell/test_alarm_definitions.py
|
openstack/python-monascaclient
| 20 |
2026528
|
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslotest import base
from monascaclient.osc import migration as migr
from monascaclient.v2_0 import alarm_definitions as ad
from monascaclient.v2_0 import shell
class FakeV2Client(object):
def __init__(self):
super(FakeV2Client, self).__init__()
self.alarm_definitions = mock.Mock(
spec=ad.AlarmDefinitionsManager)
class TestAlarmDefinitionShellV2(base.BaseTestCase):
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_update(self, mc):
mc.return_value = c = FakeV2Client()
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
ad_name = 'alarm_name'
ad_desc = 'test_alarm_definition'
ad_expr = 'avg(Test_Metric_1)>=10'
ad_action_id = '16012650-0b62-4692-9103-2d04fe81cc93'
ad_action_enabled = 'True'
ad_match_by = 'hostname'
ad_severity = 'CRITICAL'
raw_args = [
ad_id, ad_name, ad_desc, ad_expr,
ad_action_id, ad_action_id, ad_action_id, ad_action_enabled,
ad_match_by, ad_severity
]
name, cmd_clazz = migr.create_command_class(
'do_alarm_definition_update',
shell
)
cmd = cmd_clazz(mock.Mock(), mock.Mock())
parser = cmd.get_parser(name)
parsed_args = parser.parse_args(raw_args)
cmd.run(parsed_args)
c.alarm_definitions.update.assert_called_once_with(
actions_enabled=True,
alarm_actions=[ad_action_id],
alarm_id=ad_id,
description=ad_desc,
expression=ad_expr,
match_by=[ad_match_by],
name=ad_name,
ok_actions=[ad_action_id],
severity=ad_severity,
undetermined_actions=[ad_action_id]
)
@mock.patch('monascaclient.osc.migration.make_client')
def test_alarm_definitions_list(self, mc):
mc.return_value = c = FakeV2Client()
c.alarm_definitions.list.return_value = [{
"name": "ntp_sync_check",
"id": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",
"expression": "(max(ntp.offset{}, deterministic)>=1)",
"match_by": ['hostname'],
"description": "NTP time sync check",
"actions_enabled": True,
"deterministic": True,
"alarm_actions": ['aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'],
"ok_actions": [],
"undetermined_actions": [],
"severity": "HIGH",
}]
name, cmd_class = migr.create_command_class(
'do_alarm_definition_list',
shell
)
cmd = cmd_class(mock.Mock(), mock.Mock())
parser = cmd.get_parser(name)
raw_args = []
parsed_args = parser.parse_args(raw_args)
cmd.run(parsed_args)
c.alarm_definitions.list.assert_called_once()
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_patch_name(self, mc):
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
ad_name = 'patch_name'
raw_args = '{0} --name {1}'.format(ad_id, ad_name).split(' ')
self._patch_test(mc, raw_args, alarm_id=ad_id, name=ad_name)
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_patch_actions(self, mc):
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
ad_action_id = '16012650-0b62-4692-9103-2d04fe81cc93'
actions = ['alarm-actions', 'ok-actions',
'undetermined-actions']
for action in actions:
raw_args = ('{0} --{1} {2}'.format(ad_id, action, ad_action_id)
.split(' '))
self._patch_test(mc, raw_args, **{
'alarm_id': ad_id,
action.replace('-', '_'): [ad_action_id]
})
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_patch_severity(self, mc):
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
severity_types = ['LOW', 'MEDIUM', 'HIGH', 'CRITICAL']
for st in severity_types:
raw_args = ('{0} --severity {1}'.format(ad_id, st)
.split(' '))
self._patch_test(mc, raw_args, alarm_id=ad_id, severity=st)
@mock.patch('monascaclient.osc.migration.make_client')
def test_should_not_patch_unknown_severity(self, mc):
ad_id = '0495340b-58fd-4e1c-932b-5e6f9cc96490'
st = 'foo'
raw_args = ('{0} --severity {1}'.format(ad_id, st)
.split(' '))
self._patch_test(mc, raw_args, called=False)
@staticmethod
def _patch_test(mc, args, called=True, **kwargs):
mc.return_value = c = FakeV2Client()
name, cmd_clazz = migr.create_command_class(
'do_alarm_definition_patch',
shell
)
cmd = cmd_clazz(mock.Mock(), mock.Mock())
parser = cmd.get_parser(name)
parsed_args = parser.parse_args(args)
cmd.run(parsed_args)
if called:
c.alarm_definitions.patch.assert_called_once_with(**kwargs)
else:
c.alarm_definitions.patch.assert_not_called()
| 5,757 |
isimip_data/search/viewsets.py
|
ISI-MIP/isimip-data
| 3 |
2026225
|
from rest_framework.viewsets import ReadOnlyModelViewSet
from .models import Facet
from .serializers import FacetSerializer
class FacetViewSet(ReadOnlyModelViewSet):
serializer_class = FacetSerializer
queryset = Facet.objects.all()
| 244 |
analyzeWmGmData.py
|
bacaron/athlete-brain-study
| 0 |
2026615
|
#!/usr/bin/env python3
import os,sys,glob
from matplotlib import colors as mcolors
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import json
### setting up variables and adding paths. to use, update topPath, scripts_dir, utils_dir, and data_dir
## open config.json with specific paths and variables
with open('config.json') as config_f:
config = json.load(config_f)
## paths
print("setting up variables")
# grab paths
topPath = config['topPath']
if os.path.exists(topPath):
    os.chdir(topPath)
data_dir = topPath+'/data/' # output data directory
if not os.path.exists(data_dir):
os.mkdir(data_dir)
img_dir = topPath+'/img/'
if not os.path.exists(img_dir):
os.mkdir(img_dir)
# set up scripts file path. This is the path to the downloaded github repository. this will set up the other scripts based off this path
scripts_dir = config['scriptsPath'] # path to github repo
utils_dir = scripts_dir+'/utils/' # scripts
configs_dir = scripts_dir+'/configs/' # configuration files with corresponding track and cortical functional domain groupings
# appending paths to environment
sys.path.insert(0,scripts_dir)
sys.path.insert(1,utils_dir)
## groups, colors, measures, domains, and covariates
groups = config['groups'].split()
colors_array = config['colors'].split()
diff_measures = config['diffusion_measures'].split()
if 'functional_tracks' in config:
functional_tracks = config['functional_tracks'].split()
if 'lobes' in config:
lobes = config['lobes'].split()
covariates = ['mass','b0_snr','Total Brain Volume','Total Cortical Gray Matter Volume','Total White Matter Volume','Total Cortical Thickness']
## loop through groups and identify subjects and set color schema for each group
colors = {}
subjects = {}
# this is based on group identifiers in the subject numbers: 1_ = football, 2_ = cross country, 3_ = non-athlete
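# e.g. for g == 0 the glob '*1_0*' would match hypothetical subject folders such as <topPath>/1_001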
for g in range(len(groups)):
# set subjects array
subjects[groups[g]] = [f.split(topPath+'/')[1] for f in glob.glob(topPath+'/*'+str(g+1)+'_0*')]
subjects[groups[g]].sort()
# set colors array
colors_name = colors_array[g]
colors[groups[g]] = colors_array[g]
print("setting up variables complete")
#### create subjects.csv
print("creating subjects.csv")
from compile_data import collectSubjectData
subjects_data = collectSubjectData(topPath,data_dir,configs_dir,groups,subjects,colors)
print("creating subjects.csv complete")
#### generate snr plot
print("plotting snr data")
# grab data
from compile_data import collectSNRData
snr = collectSNRData(topPath,data_dir,groups,subjects)
# merge subject data to make easier for anova computing
snr_subjects = pd.merge(subjects_data,snr,on='subjectID')
# plot data
from plot_track_data import plotSNR
plotSNR(list(snr['snr']),list(snr['subjectID']),list(subjects_data['colors']),dir_out=img_dir)
print("plotting snr data complete")
#### generate wholebrain plots
print("plotting whole brain stats")
# grab data
from compile_data import collectWholeBrainStats
wholebrain = collectWholeBrainStats(topPath,data_dir,groups,subjects)
# merge subject data to make easier for anova computing
wholebrain_subjects = pd.merge(subjects_data,wholebrain,on='subjectID')
# plot data
from plot_cortex_data import plotWholeBrainData
for dc in ['subjectID','Total Brain Volume','Total Cortical Gray Matter Volume','Total White Matter Volume','Total Cortical Thickness']:
plotWholeBrainData(groups,colors,dc,wholebrain,dir_out=img_dir)
print("plotting whole brain stats complete")
# compute anovas for whole brain, snr, and mass data
from compile_data import computeAnovas
for cov in covariates:
if cov == 'b0_snr':
data_frame = snr_subjects
else:
data_frame = wholebrain_subjects
computeAnovas(cov,'classID',data_frame,'bonf','cohen',data_dir)
#### group average white matter analyses
print("computing group average white matter track analyses")
## create data structures
# macro measures (length, volume, streamline count)
from compile_data import collectTrackMacroData
[track_names,track_macro] = collectTrackMacroData(topPath,data_dir,groups,subjects)
# micro measures (DTI, NODDI tract profile data)
from compile_data import collectTrackMicroData
track_micro = collectTrackMicroData(topPath,data_dir,groups,subjects,180)
# combine the two
from compile_data import combineTrackMacroMicro
[track_data,track_mean_data] = combineTrackMacroMicro(data_dir,track_macro[track_macro['structureID'] != 'wbfg'],track_micro)
# functional-specific track measures (associative, projection, commissural)
if 'functional_tracks' in config:
from compile_data import compileFunctionalData
functional_track_data = compileFunctionalData(data_dir,track_mean_data,functional_tracks,labelsPath=configs_dir)
## length, volume, streamline count of tracks
from plot_track_data import plotTrackMacroData
for dc in ['volume','length','count']:
plotTrackMacroData(groups,colors,dc,track_mean_data,diff_measures,dir_out=img_dir)
## DTI/NODDI tract profiles (SD error bars)
from plot_track_data import plotTrackMicrostructureProfiles
plotTrackMicrostructureProfiles(groups,colors,track_names,track_data,diff_measures,dir_out=img_dir)
## DTI/NODDI categorical scatter plots (group averages)
from compile_data import computeRankOrderEffectSize
from plot_track_data import plotTrackMicrostructureAverage
rank_order_tracks = computeRankOrderEffectSize(groups,subjects,'tracks',diff_measures,track_mean_data,[diff_measures[0:4],diff_measures[4:]],data_dir)
plotTrackMicrostructureAverage(groups,colors,rank_order_tracks['tensor'],track_mean_data,diff_measures[0:4],dir_out=img_dir)
plotTrackMicrostructureAverage(groups,colors,rank_order_tracks['noddi'],track_mean_data,diff_measures[4:],dir_out=img_dir)
## group difference histograms
from plot_track_data import plotDifferenceHistograms
plotDifferenceHistograms(groups,subjects,track_mean_data,diff_measures,colors,dir_out=img_dir)
## h0 bootstrapping test
from plot_track_data import plotBootstrappedH0TrackAverageDifference
plotBootstrappedH0TrackAverageDifference(groups,subjects,track_mean_data,diff_measures,colors,10000,img_dir)
## bootstrapped histograms
from plot_track_data import plotBootstrappedDifference
plotBootstrappedDifference(groups,subjects,track_mean_data,diff_measures,colors,10000,0.05,img_dir,data_dir+"/tracks_boostrapped")
print("computing group average white matter track analyses complete")
#### group average cortex mapping analyses
print("computing group average gray matter parcel analyses")
## create data structures
# cortical measures
from compile_data import collectCorticalParcelData
cortical = collectCorticalParcelData(topPath,data_dir,groups,subjects)
# subcortical measures
from compile_data import collectSubCorticalParcelData
subcortical = collectSubCorticalParcelData(topPath,data_dir,groups,subjects)
# combine the two
from compile_data import combineCorticalSubcortical
[graymatter_names,graymatter] = combineCorticalSubcortical(data_dir,cortical,subcortical)
# lobe-specific measures (frontal, temporal, occipital, parietal, insular, limbic, motor, somatosensory)
if 'lobes' in config:
from compile_data import compileFunctionalData
functional_lobe_data = compileFunctionalData(data_dir,cortical,lobes,labelsPath=configs_dir)
## volume, cortical thickness analyses
# cortical thickness/volume by diffusion measure per cortical parcel
from plot_cortex_data import plotCorticalParcelData
for dc in ['volume','thickness']:
plotCorticalParcelData(groups,colors,dc,cortical,diff_measures,dir_out=img_dir)
## lobe or parcel averages
from plot_cortex_data import plotMicrostructureAverage
if 'lobes' in config:
    data = functional_lobe_data
    label = 'lobes'
else:
data = cortical
label = 'cortical'
rank_order_cortex = computeRankOrderEffectSize(groups,subjects,label,diff_measures,data,[diff_measures[0:4],diff_measures[4:]],data_dir)
plotMicrostructureAverage(groups,colors,label,rank_order_cortex['tensor'],data,diff_measures[0:4],dir_out=img_dir)
plotMicrostructureAverage(groups,colors,label,rank_order_cortex['noddi'],data,diff_measures[4:],dir_out=img_dir)
## subcortical averages
rank_order_subcortex = computeRankOrderEffectSize(groups,subjects,'subcortex',diff_measures,subcortical,[diff_measures[0:4],diff_measures[4:]],data_dir)
plotMicrostructureAverage(groups,colors,'subcortex',rank_order_subcortex['tensor'],subcortical,diff_measures[0:4],dir_out=img_dir)
plotMicrostructureAverage(groups,colors,'subcortex',rank_order_subcortex['noddi'],subcortical,diff_measures[4:],dir_out=img_dir)
## group difference histograms
from plot_cortex_data import plotDifferenceHistograms
plotDifferenceHistograms(groups,subjects,"cortical",cortical,diff_measures,colors,dir_out=img_dir)
plotDifferenceHistograms(groups,subjects,"subcortical",subcortical,diff_measures,colors,dir_out=img_dir)
## h0 bootstrapping test
from plot_cortex_data import plotBootstrappedH0PooledParcelAverageDifference
plotBootstrappedH0PooledParcelAverageDifference(groups,subjects,cortical,'cortical',diff_measures,colors,10000,img_dir)
plotBootstrappedH0PooledParcelAverageDifference(groups,subjects,subcortical,'subcortical',diff_measures,colors,10000,img_dir)
## bootstrapped histograms
from plot_cortex_data import plotBootstrappedDifference
plotBootstrappedDifference(groups,subjects,cortical,"cortical",diff_measures,colors,10000,0.05,img_dir,data_dir+"/cortex_boostrapped")
plotBootstrappedDifference(groups,subjects,subcortical,"subcortical",diff_measures,colors,10000,0.05,img_dir,data_dir+"/subcortex_boostrapped")
print("computing group average gray matter parcel analyses complete")
print("project data has been generated and plotted!")
| 9,627 |
ex09_two_arr_proccesses.py
|
yinchi/simpy-examples
| 3 |
2023346
|
# Example 09: two arrival processes
'''
So far, we have modelled systems with a single arrival
process. Here, we model two arrival processes. We see
that the Simpy environment automatically triggers events
from both processes in the correct order.
This behavior has already been seen in the previous queue
examples, where *each* arrival spawns its own process to:
1. Queue for service (if the queue is full, but has a buffer).
2. Obtain the required resource.
3. Wait for the designated service time.
4. Release the obtained resource.
Here, since there are only two processes and all events
occur at *known* times, it should be easier to understand
the internal workings of the Simpy simulation.
Note that events can be scheduled for the same time. Simpy
does not guarantee which event will be triggered first.
'''
import simpy
def generator(env, t, s):
while True:
yield env.timeout(t)
print('Generator', s, 'at time:', env.now)
env = simpy.Environment()
# Create two arrival processes
env.process(generator(env, 1, 'A')) # generate arrivals every second
env.process(generator(env, 5, 'B')) # generate arrivals every 5 seconds
env.run(until=20)
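# For reference: generator A prints at t = 1, 2, ..., 19 and generator B at
# t = 5, 10, 15 before the run stops at t = 20. At t = 5, 10 and 15 both events
# are scheduled for the same time, so their relative print order is not
# guaranteed, e.g.:
#   Generator A at time: 1
#   ...
#   Generator A at time: 5
#   Generator B at time: 5
#   ...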
| 1,178 |
fedot/core/composer/cache_db.py
|
vishalbelsare/FEDOT
| 0 |
2026127
|
import pickle
import sqlite3
import uuid
from contextlib import closing
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Tuple
from fedot.core.utils import default_fedot_data_dir
if TYPE_CHECKING:
from .cache import CachedState
class OperationsCacheDB:
def __init__(self, db_path: str):
self.db_path = db_path or Path(default_fedot_data_dir(), f'tmp_{str(uuid.uuid4())}')
self._db_suffix = '.cache_db'
self.db_path = Path(self.db_path).with_suffix(self._db_suffix)
self._del_prev_temps()
self._effectiveness_keys = ['pipelines_hit', 'nodes_hit', 'pipelines_total', 'nodes_total']
self._eff_table = 'effectiveness'
self._op_table = 'operations'
self._init_db()
def get_effectiveness(self) -> Tuple[int, int, int, int]:
with closing(sqlite3.connect(self.db_path)) as conn:
with conn:
cur = conn.cursor()
cur.execute(f'SELECT {",".join(self._effectiveness_keys)} FROM {self._eff_table};')
return cur.fetchone()
def reset(self):
with closing(sqlite3.connect(self.db_path)) as conn:
with conn:
cur = conn.cursor()
self._reset_eff(cur)
self._reset_ops(cur)
def _del_prev_temps(self):
for file in self.db_path.parent.glob(f'tmp_*{self._db_suffix}'):
file.unlink()
def _init_db(self):
with closing(sqlite3.connect(self.db_path)) as conn:
with conn:
cur = conn.cursor()
eff_type = ' INTEGER DEFAULT 0'
fields = f'{eff_type},'.join(self._effectiveness_keys) + eff_type
cur.execute((
f'CREATE TABLE IF NOT EXISTS {self._eff_table} ('
'id INTEGER PRIMARY KEY CHECK (id = 1),' # noqa better viewed like that
f'{fields}' # noqa
');'
))
cur.execute(f'INSERT INTO {self._eff_table} DEFAULT VALUES;')
with conn:
cur = conn.cursor()
cur.execute((
f'CREATE TABLE IF NOT EXISTS {self._op_table} ('
'id TEXT PRIMARY KEY,' # noqa better viewed like that
'operation BLOB' # noqa
');'
))
def _inc_eff(self, cur: sqlite3.Cursor, col: str, inc_val: int = 1):
cur.execute(f'UPDATE {self._eff_table} SET {col} = {col} + {inc_val};')
def _reset_eff(self, cur: sqlite3.Cursor):
cur.execute(f'DELETE FROM {self._eff_table};')
cur.execute(f'INSERT INTO {self._eff_table} DEFAULT VALUES;')
def _reset_ops(self, cur: sqlite3.Cursor):
cur.execute(f'DELETE FROM {self._op_table};')
@staticmethod
def _create_temp_for_ordered_select(cur: sqlite3.Cursor, uids: List[str]):
_, *other = uids
tmp_name = 'tmp'
cur.execute(f'DROP TABLE IF EXISTS {tmp_name};') # TODO: make truly temp table, not like that
query = (
f'CREATE TABLE {tmp_name} AS '
'SELECT 1 as id1, ? as id2'
)
for num, _ in enumerate(other, 2):
query += (
' union '
f'SELECT {num} as id1, ? as id2'
)
cur.execute(query, uids)
return tmp_name
def get_operations(self, uids: List[str]) -> List[Optional['CachedState']]:
with closing(sqlite3.connect(self.db_path)) as conn:
with conn:
cur = conn.cursor()
tmp_name = self._create_temp_for_ordered_select(cur, uids)
cur.execute((
f'SELECT operation FROM {tmp_name} '
f'LEFT JOIN {self._op_table} ON {self._op_table}.id = {tmp_name}.id2 '
f'ORDER BY {tmp_name}.id1;'
))
retrieved = cur.fetchall()
non_null = [x for (x,) in retrieved if x is not None]
self._inc_eff(cur, 'nodes_hit', len(non_null))
if len(non_null) == len(uids):
self._inc_eff(cur, 'pipelines_hit')
retrieved = [pickle.loads(x) if x is not None else None for (x,) in retrieved]
self._inc_eff(cur, 'nodes_total', len(uids))
self._inc_eff(cur, 'pipelines_total')
return retrieved
def add_operation(self, conn: sqlite3.Connection, uid: str, val: 'CachedState'):
with conn:
cur = conn.cursor()
pdata = pickle.dumps(val, pickle.HIGHEST_PROTOCOL)
cur.execute(f'INSERT OR IGNORE INTO {self._op_table} VALUES (?, ?);',
[uid, sqlite3.Binary(pdata)])
def add_operations(self, uid_val_lst: List[Tuple[str, 'CachedState']]):
with closing(sqlite3.connect(self.db_path)) as conn:
with conn:
cur = conn.cursor()
pickled = [
(uid, sqlite3.Binary(pickle.dumps(val, pickle.HIGHEST_PROTOCOL)))
for uid, val in uid_val_lst
]
cur.executemany(f'INSERT OR IGNORE INTO {self._op_table} VALUES (?, ?);', pickled)
def __len__(self):
with closing(sqlite3.connect(self.db_path)) as conn:
with conn:
cur = conn.cursor()
cur.execute(f'SELECT id FROM {self._op_table};')
all_rows = cur.fetchall()
return len(all_rows)
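# A minimal usage sketch (not part of FEDOT): any picklable object can stand in
# for a 'CachedState' here, and the database path below is a hypothetical name.
if __name__ == '__main__':
    cache = OperationsCacheDB(db_path='demo_cache')    # stored as demo_cache.cache_db
    cache.add_operations([('node_1', {'fitted': 'op_1'}),
                          ('node_2', {'fitted': 'op_2'})])
    print(len(cache))                                  # -> 2
    print(cache.get_operations(['node_1', 'node_3']))  # -> [{'fitted': 'op_1'}, None]
    print(cache.get_effectiveness())                   # (pipelines_hit, nodes_hit, pipelines_total, nodes_total)
    cache.reset()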
| 5,530 |
RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/isissrv6locatorentrylist.py
|
ralfjon/IxNetwork
| 0 |
2024877
|
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class IsisSRv6LocatorEntryList(Base):
"""The IsisSRv6LocatorEntryList class encapsulates a required isisSRv6LocatorEntryList node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the IsisSRv6LocatorEntryList property from a parent instance.
The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
"""
_SDM_NAME = 'isisSRv6LocatorEntryList'
def __init__(self, parent):
super(IsisSRv6LocatorEntryList, self).__init__(parent)
@property
def IsisSRv6EndSIDList(self):
"""An instance of the IsisSRv6EndSIDList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.isissrv6endsidlist.IsisSRv6EndSIDList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.isissrv6endsidlist import IsisSRv6EndSIDList
return IsisSRv6EndSIDList(self)._select()
@property
def Active(self):
"""Activate/Deactivate Configuration
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('active')
@property
def AdvertiseLocatorAsPrefix(self):
"""Advertise Locator as Prefix
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('advertiseLocatorAsPrefix')
@property
def Algorithm(self):
"""Algorithm
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('algorithm')
@property
def Count(self):
"""Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
Returns:
number
"""
return self._get_attribute('count')
@property
def DBit(self):
"""D Bit
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('dBit')
@property
def DescriptiveName(self):
"""Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but maybe offers more context
Returns:
str
"""
return self._get_attribute('descriptiveName')
@property
def Locator(self):
"""Locator
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('locator')
@property
def LocatorName(self):
"""Locator Name
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('locatorName')
@property
def LocatorSize(self):
"""Locator Size
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('locatorSize')
@property
def Metric(self):
"""Metric
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('metric')
@property
def Name(self):
"""Name of NGPF element, guaranteed to be unique in Scenario
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def PrefixLength(self):
"""Prefix Length
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('prefixLength')
@property
def Redistribution(self):
"""Redistribution
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('redistribution')
@property
def ReservedFlags(self):
"""Reserved (Flags)
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('reservedFlags')
@property
def RouteMetric(self):
"""Route Metric
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('routeMetric')
@property
def RouteOrigin(self):
"""Route Origin
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('routeOrigin')
@property
def SidCount(self):
"""SID Count
Returns:
number
"""
return self._get_attribute('sidCount')
@SidCount.setter
def SidCount(self, value):
self._set_attribute('sidCount', value)
| 5,481 |
rollbar/test/__init__.py
|
arthurio/pyrollbar
| 81 |
2025336
|
import unittest2
SNOWMAN = b'\xe2\x98\x83'
SNOWMAN_UNICODE = SNOWMAN.decode('utf8')
class BaseTest(unittest2.TestCase):
pass
| 133 |
quex/output/core/TEST/test-pseudo_ambiguous_post_conditions.py
|
smmckay/quex-mirror
| 0 |
2026175
|
#! /usr/bin/env python
import generator_test
import sys
if "--hwut-info" in sys.argv:
print "Pseudo Ambgiguous Post Condition: Part I"
print "CHOICES: ANSI-C-PlainMemory, ANSI-C, ANSI-C-CG, Cpp, Cpp_StrangeStream, Cpp-Template, Cpp-Template-CG, Cpp-Path, Cpp-Path-CG, ANSI-C-PathTemplate;"
print "SAME;"
sys.exit(0)
choice = sys.argv[1]
pattern_list = [
    # -- pre-conditioned expressions need to precede the same (non-preconditioned) expressions,
# otherwise, the un-conditional expressions gain precedence and the un-conditional
# pattern is never matched.
#
# -- post-conditioned patterns do not need to appear before the same non-postconditioned
# patterns, since they are always longer.
#
# normal repetition (one or more) of 'x'
'x+/x',
# other characters
'[a-z]+',
# whitespace
'[ \\t\\n]+'
]
pattern_action_pair_list = map(lambda x: [x, x.replace("\\", "\\\\")], pattern_list)
test_str = "xxx x xx x"
generator_test.do(pattern_action_pair_list, test_str, {}, choice, QuexBufferSize=10)
| 1,079 |
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_1/_pkg0_1_1_0/_pkg0_1_1_0_0/__init__.py
|
jnthn/intellij-community
| 2 |
2026537
|
from ._mod0_1_1_0_0_0 import *
from ._mod0_1_1_0_0_1 import *
from ._mod0_1_1_0_0_2 import *
from ._mod0_1_1_0_0_3 import *
from ._mod0_1_1_0_0_4 import *
| 154 |
Bot.py
|
personal-army-of-4o/stack_bot
| 0 |
2024690
|
#state format:
#{
# "jobs": [
# {
# "jobname": "<name>",
# "stack": {
# "task": "taskname",
# "prev_task": {...}
# }
# }
# ]
#}
import json
import os
class Bot:
def __init__(self, cfg):
self._state={}
self._state={"jobs": []}
self._filename=cfg["state_file_name"]
try:
self._load_state()
except:
print("fixing state file")
self._dump_state()
self._load_state()
def Handle(self, message):
msg=(None, False)
if message.content.startswith(".stack"):
msg=self._handle_stack(message.content[6:] + " (by " + message.author.name + "#" + message.author.discriminator + ")")
elif message.content.startswith(".help"):
msg=(".help - print this help.\n.state - print jobs\n" + self._job_usage() + self._stack_usage(), False)
elif message.content.startswith(".state"):
msg=(self.Dump(), False)
if msg[1]:
self._dump_state()
return msg
def Dump(self):
msg=""
jobs=self._state["jobs"]
for i in range(0, len(jobs)):
j=jobs[i]
msg+="stack "+ str(i) + ": " + j["jobname"] + "\n" + self._dump_stack(j["stack"], " ")
if msg == "":
msg="empty stack"
return msg
def _dump_stack(self, stack, indent):
if stack:
return indent + stack["task"] + "\n" + self._dump_stack(stack["prev_task"], indent+" ")
else:
return ""
def _job_add(self, dsc):
self._state["jobs"].append({"jobname": dsc, "stack": None})
return ("ack", True)
def _job_rm(self, jn):
if jn < 0 or jn >= len(self._state["jobs"]):
return ("nack: invalid stack number", False)
if self._state["jobs"][jn]["stack"] != None:
return ("nack: stack is not empty", False)
self._state["jobs"].pop(jn)
return ("ack", True)
def _job_usage(self):
return ".stack new <stack description> - start new stack\n.stack rm <stack number> - remove stack at given number\n"
def _handle_stack(self, msg):
words=msg.split()
if len(words) < 2:
return ("too little args. usage:\n" + self._stack_usage(), False)
cmd=words[0]
if cmd == "push":
try:
jn=int(words[1])
except:
return ("failed to parse stack number", False)
if jn < 0 or jn > len(self._state["jobs"])-1:
return ("invalid stack number", False)
if len(words) < 3:
return (self._stack_usage(), False)
task=" ".join(words[2:])
return self._stack_push(jn, task)
elif cmd == "pop":
try:
jn=int(words[1])
except:
return ("failed to parse stack number", False)
return self._stack_pop(jn)
elif cmd == "new":
dsc=" ".join(words[1:])
return self._job_add(dsc)
elif cmd == "rm":
try:
jn=int(words[1])
except:
return ("failed to parse stack number", False)
return self._job_rm(jn)
else:
return ("invalid stack op " + words[0] + ". usage:\n" + self._stack_usage(), False)
def _stack_usage(self):
return ".stack push <stack number> <task description> - add task to stack\n.stack pop <stack number> - pop task from stack\n"
def _stack_push(self, jn, task):
self._state["jobs"][jn]["stack"]={"task": task, "prev_task": self._state["jobs"][jn]["stack"]}
return ("ack", True)
def _stack_pop(self, jn):
if self._state["jobs"][jn]["stack"]:
self._state["jobs"][jn]["stack"]=self._state["jobs"][jn]["stack"]["prev_task"]
return ("ack", True)
else:
return ("nack: stack empty", False)
def _load_state(self):
with open(self._filename, "r+") as read_file:
self._state = json.load(read_file)
def _dump_state(self):
with open(self._filename, "w+") as write_file:
json.dump(self._state, write_file)
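# A minimal offline usage sketch (not part of the bot); the message object below
# is a stand-in for a discord.py Message with only the attributes Handle() reads,
# and the state file name is hypothetical.
if __name__ == "__main__":
    from types import SimpleNamespace
    bot = Bot({"state_file_name": "state_demo.json"})
    msg = SimpleNamespace(content=".stack new groceries",
                          author=SimpleNamespace(name="alice", discriminator="0001"))
    reply, state_changed = bot.Handle(msg)
    print(reply)       # -> "ack"
    print(bot.Dump())  # -> "stack 0: groceries (by alice#0001)"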
| 4,305 |
rllib/value_function/nn_ensemble_value_function.py
|
shenao-zhang/DCPU
| 8 |
2026317
|
"""Value and Q-Functions parametrized with ensembles of Neural Networks."""
import torch
import torch.nn as nn
from .nn_value_function import NNQFunction, NNValueFunction
class NNEnsembleValueFunction(NNValueFunction):
"""Implementation of a Value Function implemented with a Neural Network.
Parameters
----------
dim_state: Tuple
dimension of state.
num_states: Tuple, optional
number of discrete states (None if state is continuous).
layers: list, optional
width of layers, each layer is connected with a non-linearity.
tau: float, optional
when a new parameter is set, tau low-passes the new parameter with the old one.
biased_head: bool, optional
flag that indicates if head of NN has a bias term or not.
"""
def __init__(self, num_heads=2, *args, **kwargs):
assert num_heads > 0
self.num_heads = num_heads
super().__init__(*args, **kwargs)
self.nn = nn.ModuleList(
[NNValueFunction(*args, **kwargs) for _ in range(num_heads)]
)
@classmethod
def from_value_function(cls, value_function, num_heads: int):
"""Create ensemble form value_function."""
out = cls(
dim_state=value_function.dim_state,
num_heads=num_heads,
num_states=value_function.num_states,
tau=value_function.tau,
input_transform=value_function.input_transform,
)
out.nn = nn.ModuleList(
[
value_function.__class__.from_other(value_function, copy=False)
for _ in range(num_heads)
]
)
return out
def forward(self, state, action=torch.tensor(float("nan"))):
"""Get value of the value-function at a given state."""
return torch.stack(
[value_function(state, action) for value_function in self.nn], dim=-1
)
@torch.jit.export
def embeddings(self, state):
"""Get embeddings of the value-function at a given state."""
return torch.stack(
[value_function.embeddings(state) for value_function in self.nn], dim=-1
)
@classmethod
def default(cls, environment, *args, **kwargs):
"""See AbstractValueFunction.default."""
return super().default(environment)
class NNEnsembleQFunction(NNQFunction):
"""Implementation of a Q-Function implemented with a Neural Network.
Parameters
----------
dim_state: Tuple
dimension of state.
dim_action: Tuple
dimension of action.
num_states: int, optional
number of discrete states (None if state is continuous).
num_actions: int, optional
number of discrete actions (None if action is continuous).
layers: list, optional
width of layers, each layer is connected with a Tanh non-linearities.
tau: float, optional
when a new parameter is set, tau low-passes the new parameter with the old one.
biased_head: bool, optional
flag that indicates if head of NN has a bias term or not.
"""
def __init__(self, num_heads=2, *args, **kwargs):
self.num_heads = num_heads
assert num_heads > 0
super().__init__(*args, **kwargs)
self.nn = nn.ModuleList(
[NNQFunction(*args, **kwargs) for _ in range(self.num_heads)]
)
@classmethod
def from_q_function(cls, q_function, num_heads: int):
"""Create ensemble form q-funciton."""
out = cls(
dim_state=q_function.dim_state,
dim_action=q_function.dim_action,
num_heads=num_heads,
num_states=q_function.num_states,
num_actions=q_function.num_actions,
tau=q_function.tau,
input_transform=q_function.input_transform,
)
out.nn = nn.ModuleList(
[
q_function.__class__.from_other(q_function, copy=False)
for _ in range(num_heads)
]
)
return out
def forward(self, state, action=torch.tensor(float("nan"))):
"""Get value of the q-function at a given state-action pair."""
return torch.stack(
[q_function(state, action) for q_function in self.nn], dim=-1
)
@classmethod
def default(cls, environment, *args, **kwargs):
"""See AbstractQFunction.default."""
return super().default(environment)
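# A minimal sketch (assumes an already-constructed NNQFunction `q_function`;
# its construction is omitted here):
#
#   ensemble_q = NNEnsembleQFunction.from_q_function(q_function, num_heads=5)
#   values = ensemble_q(state, action)   # heads stacked along the last dimension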
| 4,449 |
src/test_clustering.py
|
samatix/ml-asset-managers
| 2 |
2026200
|
import unittest
import numpy as np
import numpy.testing as npt
from src.testing.fixtures import CorrelationFactory
from src.cluster import KMeansBase, KMeansHL
class KmeansBaseTestCase(unittest.TestCase):
def test_clustering(self):
corr = np.array(
[
[1, 0.9, -0.4, 0, 0],
[0.9, 1, -0.3, 0.1, 0],
[-0.4, -0.3, 1, -0.1, 0],
[0, 0.1, -0.1, 1, 0],
[0, 0, 0, 0, 1],
]
)
kmeans = KMeansBase(max_n_clusters=4, random_state=0).fit(corr)
# Assert the best quality calculation
npt.assert_almost_equal(kmeans.quality, 1.188441935313023)
# TODO: Review the Silhouette Calculation
# Assert that the optimal number of clusters is 2
self.assertEqual(len(set(kmeans.labels_)), 2)
# Assert that the 1 and 2 belong to the same cluster as
# they are both correlated
self.assertEqual(kmeans.labels_[0], kmeans.labels_[1])
class KmeansHLTestCase(unittest.TestCase):
def test_clustering(self):
corr0 = CorrelationFactory(
n_cols=20,
n_blocks=4,
seed=13
).random_block_corr()
cluster = KMeansHL(n_init=1, random_state=13)
cluster.fit(corr=corr0)
npt.assert_equal(cluster.labels_,
[1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 4, 3, 3, 3, 3, 2,
2, 2, 2])
if __name__ == '__main__':
unittest.main()
| 1,507 |
terra_sdk/protobuf/cosmos/evidence/v1beta1/evidence_pb2.py
|
sejalsahni/terra.py
| 24 |
2025321
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cosmos/evidence/v1beta1/evidence.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="cosmos/evidence/v1beta1/evidence.proto",
package="cosmos.evidence.v1beta1",
syntax="proto3",
serialized_options=b"Z-github.com/cosmos/cosmos-sdk/x/evidence/types\250\342\036\001",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n&cosmos/evidence/v1beta1/evidence.proto\x12\x17\x63osmos.evidence.v1beta1\x1a\x14gogoproto/gogo.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xa8\x01\n\x0c\x45quivocation\x12\x0e\n\x06height\x18\x01 \x01(\x03\x12\x32\n\x04time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x08\xc8\xde\x1f\x00\x90\xdf\x1f\x01\x12\r\n\x05power\x18\x03 \x01(\x03\x12\x37\n\x11\x63onsensus_address\x18\x04 \x01(\tB\x1c\xf2\xde\x1f\x18yaml:"consensus_address":\x0c\x98\xa0\x1f\x00\x88\xa0\x1f\x00\xe8\xa0\x1f\x00\x42\x33Z-github.com/cosmos/cosmos-sdk/x/evidence/types\xa8\xe2\x1e\x01\x62\x06proto3',
dependencies=[
gogoproto_dot_gogo__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
],
)
_EQUIVOCATION = _descriptor.Descriptor(
name="Equivocation",
full_name="cosmos.evidence.v1beta1.Equivocation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="height",
full_name="cosmos.evidence.v1beta1.Equivocation.height",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="time",
full_name="cosmos.evidence.v1beta1.Equivocation.time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\310\336\037\000\220\337\037\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="power",
full_name="cosmos.evidence.v1beta1.Equivocation.power",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="consensus_address",
full_name="cosmos.evidence.v1beta1.Equivocation.consensus_address",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b'\362\336\037\030yaml:"consensus_address"',
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\230\240\037\000\210\240\037\000\350\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=123,
serialized_end=291,
)
_EQUIVOCATION.fields_by_name[
"time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name["Equivocation"] = _EQUIVOCATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Equivocation = _reflection.GeneratedProtocolMessageType(
"Equivocation",
(_message.Message,),
{
"DESCRIPTOR": _EQUIVOCATION,
"__module__": "cosmos.evidence.v1beta1.evidence_pb2"
# @@protoc_insertion_point(class_scope:cosmos.evidence.v1beta1.Equivocation)
},
)
_sym_db.RegisterMessage(Equivocation)
DESCRIPTOR._options = None
_EQUIVOCATION.fields_by_name["time"]._options = None
_EQUIVOCATION.fields_by_name["consensus_address"]._options = None
_EQUIVOCATION._options = None
# @@protoc_insertion_point(module_scope)
| 5,454 |
setup.py
|
fabiansinz/xibaogou
| 0 |
2024727
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
long_description = "Spike triggered mixture model (STM) based learning algorithm to detect cells in stacks."
setup(
name='xibaogou',
version='0.1.0.dev1',
description="Cell detection algorithm.",
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
license="MIT License",
url='https://github.com/fabiansinz/xibaogou',
keywords='machine learning, computational biology',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=['numpy','theano'],
classifiers=[
        'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3 :: Only',
        'License :: OSI Approved :: MIT License',
'Topic :: Database :: Front-Ends',
],
)
| 930 |
AnalyzeAllStudentResultData/IdentifyDesiredStudent.py
|
angelagongli/DataScrapingToolkit
| 0 |
2025506
|
# We want to arrive at only one StudentResult for every student in Menaka's dataset,
# => narrow the set of StudentResults based on exact First Name and Last Name, as well as
#    Middle Name/Middle Initial, Expected Age, and City where we can
from selenium import webdriver
ChromeOptions = webdriver.ChromeOptions()
ChromeOptions.add_experimental_option("excludeSwitches", ["enable-automation"])
ChromeOptions.add_experimental_option('useAutomationExtension', False)
ChromeOptions.add_argument("--disable-blink-features=AutomationControlled")
driver = webdriver.Chrome(executable_path=r'C:\Program Files (x86)\ChromeDriver\chromedriver.exe', options=ChromeOptions)
# Advice from Menaka: the WebDriver must sleep for a randomly generated interval of 20-25 seconds
# before every scrape, waiting long enough to be able to handle a CAPTCHA <- TODO
from random import randint
from time import sleep
from bs4 import BeautifulSoup
import re
import datetime
import mysql.connector
cnx = mysql.connector.connect(user='root',
password=<PASSWORD>,
host='127.0.0.1',
database='Student_DB')
outerCursor = cnx.cursor(buffered=True)
innerCursor = cnx.cursor(buffered=True)
pull_studentinformation = ("SELECT cohort, firstName, middleName, lastName, "
"city, state, country FROM Students WHERE id=%(student_id)s")
pull_allresultsforstudentatstep = ("SELECT id, resultName, resultAge, resultCity, resultCityHistory, resultType, resultData "
"FROM StudentResults WHERE student_id=%(student_id)s AND identificationStep=%(identificationStep)s")
update_studentresultidentificationstep = ("UPDATE StudentResults SET identificationStep=%s WHERE id=%s")
insert_studentrelativeresult = ("INSERT INTO StudentRelativeResults "
"(student_id, relativeResultName, relativeResultVoterRecordURL, relativeResultSource) "
"VALUES (%(student_id)s, %(relativeResultName)s, %(relativeResultVoterRecordURL)s, "
"%(relativeResultSource)s)")
for i in range(1, 100):
student_data = {
'student_id': i,
'identificationStep': 'Not Identified'
}
outerCursor.execute(pull_studentinformation, student_data)
for (cohort, firstName, middleName, lastName, city, state, country) in outerCursor:
desiredStudentAge = datetime.datetime.now().year - cohort + 22
outerCursor.execute(pull_allresultsforstudentatstep, student_data)
for (id, resultName, resultAge, resultCity, resultCityHistory, resultType, resultData) in outerCursor:
if (firstName in resultName.upper() and
lastName in resultName.upper()):
innerCursor.execute(update_studentresultidentificationstep, ('First Pass', id))
cnx.commit()
student_data['identificationStep'] = 'First Pass'
outerCursor.execute(pull_allresultsforstudentatstep, student_data)
for (id, resultName, resultAge, resultCity, resultCityHistory, resultType, resultData) in outerCursor:
if middleName and (middleName in resultName.upper() or
f" {middleName[0:1]} " in resultName.upper()):
innerCursor.execute(update_studentresultidentificationstep, ('Refined', id))
cnx.commit()
elif resultAge and (resultAge >= desiredStudentAge - 1 and
resultAge <= desiredStudentAge + 1):
innerCursor.execute(update_studentresultidentificationstep, ('Refined', id))
cnx.commit()
elif city and city in resultCityHistory.upper():
innerCursor.execute(update_studentresultidentificationstep, ('Refined', id))
cnx.commit()
student_data['identificationStep'] = 'Refined'
outerCursor.execute(pull_allresultsforstudentatstep, student_data)
for (id, resultName, resultAge, resultCity, resultCityHistory, resultType, resultData) in outerCursor:
if ((middleName and (middleName in resultName.upper() or
f" {middleName[0:1]} " in resultName.upper())) and
(resultAge and (resultAge >= desiredStudentAge - 1 and
resultAge <= desiredStudentAge + 1)) and
(city and city in resultCityHistory.upper())):
innerCursor.execute(update_studentresultidentificationstep, ('Identified', id))
cnx.commit()
break
student_data['identificationStep'] = 'Identified'
outerCursor.execute(pull_allresultsforstudentatstep, student_data)
# Once the desired Student is identified and when the desired Student's StudentResult
# Contains the Student's Relative data, we generate all of the Student's StudentRelativeResults,
# Remembering that the results shown are only a designation of relatedness as accurate as TruthFinder.com
for (id, resultName, resultAge, resultCity, resultCityHistory, resultType, resultData) in outerCursor:
if resultType == "StudentRelatives":
IdentifiedStudentRelativeNames = resultData.split("*")
for StudentRelativeName in IdentifiedStudentRelativeNames:
print(f"Now Looking Up {StudentRelativeName}...")
StudentRelativeNameArr = StudentRelativeName.split(" ")
if len(StudentRelativeNameArr) == 2:
                        studentRelativeFirstName = StudentRelativeNameArr[0]
                        studentRelativeMiddleName = ""  # no middle name; avoids reusing a stale value / NameError below
                        studentRelativeLastName = StudentRelativeNameArr[1]
elif len(StudentRelativeNameArr) == 3:
studentRelativeFirstName = StudentRelativeNameArr[0]
studentRelativeMiddleName = StudentRelativeNameArr[1]
studentRelativeLastName = StudentRelativeNameArr[2]
elif len(StudentRelativeNameArr) > 3:
studentRelativeFirstName = StudentRelativeNameArr[0]
studentRelativeMiddleName = StudentRelativeNameArr[1]
for namePiece in StudentRelativeNameArr[2:-1]:
studentRelativeMiddleName = studentRelativeMiddleName + namePiece
studentRelativeLastName = StudentRelativeNameArr[-1]
if studentRelativeMiddleName.strip():
queryString = f"{studentRelativeFirstName}-{studentRelativeMiddleName}-{studentRelativeLastName}"
else:
queryString = f"{studentRelativeFirstName}-{studentRelativeLastName}"
driver.get(f"https://voterrecords.com/voters/{queryString}/1")
soup = BeautifulSoup(driver.page_source, 'html.parser')
header = soup.find(attrs={"class":"BottomMargin10 TopH1"}).contents[1]
headerRegex = re.compile("( has) ([\d|,]+) Voter Record")
headerRegexMatch = headerRegex.match(header)
voterRecordNumber = headerRegexMatch.group(2)
if voterRecordNumber != "0":
print(f"Pulling All from {StudentRelativeName}'s First Page...")
StudentRelativeResults = []
for person in soup.find_all('tr'):
if person.find_next().name == "th":
print("Header")
continue
driverCursor = person.find_next()
resultName = driverCursor.span.span.a.get_text().strip()
# Keep Only Exact First Name/Last Name Because the Relative's Name is All We Have to Go On
if not (studentRelativeFirstName.upper() in resultName.upper() and
studentRelativeLastName.upper() in resultName.upper()):
continue
resultVoterRecordURL = driverCursor.span.span.a['href']
if not resultVoterRecordURL:
continue
resultAge = ""
resultAgeExistingTag = driverCursor.span.find('strong', text=re.compile(".*Age.*"))
if resultAgeExistingTag is not None:
resultAge = str(resultAgeExistingTag.next_element.next_element)
driverCursor = driverCursor.find_next_sibling()
resultCity = ""
resultCityExistingTag = driverCursor.find('strong', text=re.compile(".*Residential Address.*"))
if resultCityExistingTag is not None:
resultCity = resultCityExistingTag.find_next('span').get_text()
StudentRelativeResult = {
'student_id': i,
'relativeResultName': resultName,
'relativeResultVoterRecordURL': resultVoterRecordURL,
'relativeResultSource': 'StudentRelatives'
}
StudentRelativeResults.append(StudentRelativeResult)
innerCursor.executemany(insert_studentrelativeresult, StudentRelativeResults)
cnx.commit()
sleep(randint(20,25))
innerCursor.close()
outerCursor.close()
cnx.close()
| 9,245 |
Python3-Learn/TwoSum.py
|
nikhiljain-413/Hacktoberfest2021_beginner
| 65 |
2025919
|
#AUTHOR: <NAME>
#Python3 Concept: Twosum in Python
#GITHUB: https://github.com/gympohnpimol
from typing import List

class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
ls = []
for i in range(0, len(nums)):
item = target - nums[i]
nums[i] = "done"
if item in nums:
ls.append(i)
ls.append(nums.index(item))
return ls
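# Hypothetical driver (not part of the original snippet), shown only to
# illustrate the expected behaviour:
if __name__ == "__main__":
    print(Solution().twoSum([2, 7, 11, 15], 9))  # -> [0, 1]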
| 412 |
solutions/day15/test_day2.py
|
NunoMCSilva/My-Advent-of-Code-2017-Solutions
| 0 |
2026126
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
import solutions.day15.day2 as day2
@pytest.mark.parametrize('test_initial_value, test_factor, test_div, test_output', [
(65, day2.FACTOR_A, 4, [1352636452, 1992081072, 530830436, 1980017072, 740335192]),
(8921, day2.FACTOR_B, 8, [1233683848, 862516352, 1159784568, 1616057672, 412269392])
])
def test_generators(test_initial_value, test_factor, test_div, test_output):
# arrange
generator = day2.generator(test_initial_value, test_factor, test_div)
# act
output = []
for value in generator:
output.append(value)
if len(output) == len(test_output):
break
# assert
assert output == test_output
def test_judge():
# arrange
generator_a = day2.generator(65, day2.FACTOR_A, 4)
generator_b = day2.generator(8921, day2.FACTOR_B, 8)
# act
output = day2.judge(generator_a, generator_b, rounds=1056)
# assert
assert list(output) == [0] * 1055 + [1]
# remove test due to amount of time it takes
"""
# takes a bit to run, and as such is not an unit test -- check types (integration, etc.)
def test_run():
assert day2.run(65, 8921) == 309
"""
| 1,198 |
tests/pydantic/test_dynamic_validator.py
|
skalarsystems/fhir-zeug
| 10 |
2026077
|
"""Test add dynamic validator to FHIRAbstractBase Model."""
import pydantic
import pytest
from fhirzeug.generators.python_pydantic.templates.resource_header import (
FHIRAbstractBase,
)
class ExampleModel(FHIRAbstractBase):
"""Simple resource with a boolean field."""
field_a: bool
class ChildModel(ExampleModel):
"""Child model."""
class AnotherExampleModel(FHIRAbstractBase):
"""Another resource that has a field of type ChildModel."""
field_example: ChildModel
def test_dynamic_validator():
"""Test dynamic validators with different inheritance use cases."""
# 1. First case : field_a can take any boolean values in any classes.
ExampleModel(field_a=True)
ExampleModel(field_a=False)
ChildModel(field_a=True)
ChildModel(field_a=False)
AnotherExampleModel(field_example={"field_a": True})
AnotherExampleModel(field_example={"field_a": False})
# 2. Second case : field_a can only take True values in all classes.
# Starting from now, field_a value must be True
ExampleModel._add_post_root_validator(_must_be_true)
ExampleModel(field_a=True)
with pytest.raises(pydantic.ValidationError):
ExampleModel(field_a=False)
ChildModel(field_a=True)
with pytest.raises(pydantic.ValidationError):
ChildModel(field_a=False)
AnotherExampleModel(field_example={"field_a": True})
with pytest.raises(pydantic.ValidationError):
AnotherExampleModel(field_example={"field_a": False})
# 3. Third use case : field_a has no valid values for class ChildExample
# Class AnotherExampleModel is also concerned but not ExampleModel.
# Previous rule still apply on all classes.
ChildModel._add_post_root_validator(_must_be_false)
ExampleModel(field_a=True)
with pytest.raises(pydantic.ValidationError):
ExampleModel(field_a=False)
with pytest.raises(pydantic.ValidationError):
ChildModel(field_a=True)
with pytest.raises(pydantic.ValidationError):
ChildModel(field_a=False)
with pytest.raises(pydantic.ValidationError):
AnotherExampleModel(field_example={"field_a": True})
with pytest.raises(pydantic.ValidationError):
AnotherExampleModel(field_example={"field_a": False})
def _must_be_true(values):
"""Root validator to be added to ExampleModel."""
assert values.get("field_a")
return values
def _must_be_false(values):
"""Root validator to be added to ChildModel."""
assert not values.get("field_a")
return values
| 2,537 |
project/urls_development.py
|
pmoran13800/rhgamestation-manager
| 0 |
2025651
|
"""rhgamestation-manager URL Configuration for development environment
Inherit from the default 'urls.py'
"""
from .urls import *
# Debug
#if settings.DEBUG:
urlpatterns = patterns('',
(r'^icomoon/', include('icomoon.urls', namespace='icomoon')),
) + urlpatterns
| 269 |
mne/realtime/__init__.py
|
kalenkovich/mne-python
| 1 |
2026414
|
"""Realtime MEG data processing with servers and clients."""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from .client import RtClient
from .epochs import RtEpochs
from .lsl_client import LSLClient
from .mock_lsl_stream import MockLSLStream
from .mockclient import MockRtClient
from .fieldtrip_client import FieldTripClient
from .stim_server_client import StimServer, StimClient
| 504 |
tools/ext/PyGTKCodeBuffer-1.0-RC2/examples/pascal.py
|
automenta/trex-autonomy
| 0 |
2025986
|
#!/usr/bin/python
import gtk
from gtkcodebuffer import CodeBuffer, SyntaxLoader, add_syntax_path
#
# This example shows how to use the CodeBuffer.
# Note: You need to have PyGTKCodeBuffer installed!
txt = """fsd sdfsd sdfsd is fsdfdf"""
lang = SyntaxLoader("pascal")
buff = CodeBuffer(lang=lang)
win = gtk.Window(gtk.WINDOW_TOPLEVEL)
scr = gtk.ScrolledWindow()
win.add(scr)
scr.add(gtk.TextView(buff))
win.set_default_size(300,200)
win.show_all()
win.connect("destroy", lambda w: gtk.main_quit())
buff.set_text(txt)
gtk.main()
| 569 |
src/cbpromarketbbo/cbpromarketbbo.py
|
ikendra/cbpromarketbbo
| 0 |
2026518
|
__all__ = ["CBProMarketBBO", "timer"]
__author__ = "<NAME>"
import websocket
import time
import logging
import urllib.request
import json
import math
import functools
from typing import Dict, List, Callable
URL_WS = "wss://ws-feed.pro.coinbase.com/"
URL_PRODUCTS = "https://api.pro.coinbase.com/products"
URL_USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0"
MSG_SNAPSHOT = "snapshot"
MSG_L2UPDATE = "l2update"
MSG_SUBSCRIPTIONS = "subscriptions"
BOOK_BIDS = "bids"
BOOK_ASKS = "asks"
BID_PRICE = "bid_price"
BID_SIZE = "bid_size"
ASK_PRICE = "ask_price"
ASK_SIZE = "ask_size"
def timer(method):
@functools.wraps(method)
def wrapper(*args):
t_start = time.time()
method_return = method(*args)
t_finish = time.time()
logging.debug("Time spent in {0}: {1:.6f} s".format(method.__name__, t_finish - t_start))
return method_return
return wrapper
class CBProMarketBBO:
def __init__(
self, products: List, output_file_name: str = None,
callback_func: Callable[[], None] = None, debug: bool = False) -> None:
self.book: Dict = {}
self.bbo: Dict = {}
self.ws = None
self.event_count: int = 0
self.logfile_enabled: bool = False
self.callback_func = callback_func
for key in products:
self.bbo[key] = {
BID_PRICE: None,
BID_SIZE: None,
ASK_PRICE: None,
ASK_SIZE: None
}
if output_file_name is None:
logging.disable()
else:
if debug:
_logging_level = logging.DEBUG
else:
_logging_level = logging.INFO
self.logfile_enabled = True
logging.basicConfig(format='%(asctime)s - %(message)s', level=_logging_level, filename=output_file_name)
self.products = self._initialize_products(products)
@timer
def _initialize_products(self, products: List) -> Dict[str, int]:
"""Retrieve relevant details about the products from Coinbase Pro
We need to know at least the quote increment for the products so that we display the prices
with the correct number of decimal places
"""
product_request = urllib.request.Request(url=URL_PRODUCTS, headers={'User-Agent': URL_USER_AGENT})
product_response = urllib.request.urlopen(product_request)
all_products = json.load(product_response)
product_details = {}
for product in products:
for cbpro_product in all_products:
if cbpro_product["id"] == product:
quote_increment = float(cbpro_product["quote_increment"])
num_decimal_places = int(math.log10(1 / quote_increment))
product_details[product] = num_decimal_places
logging.debug(
"Retrieved quote increment for {}: {} = {} decimal places".
format(product, quote_increment, num_decimal_places))
return product_details
def start(self) -> None:
self.ws = websocket.WebSocketApp(
URL_WS,
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close,
on_open=self.on_open)
if self.ws:
self.ws.run_forever()
def on_open(self) -> None:
"""Subscribe to events from Coinbase Pro
"""
channel = [{"name": "level2", "product_ids": list(self.products.keys())}]
msg_subscribe = {"type": "subscribe", "channels": channel}
subscribe_payload = json.dumps(msg_subscribe)
self.ws.send(subscribe_payload)
def on_error(self, msg) -> None:
logging.error(msg)
def on_close(self) -> None:
logging.debug("Websocket closed")
logging.debug("Total events received: {} ".format(self.event_count))
def on_message(self, msg) -> None:
"""Receive and process an event from Coinbase Pro
On MSG_SUBSCRIPTIONS we receive acknowledgement that we're successfully subscribed. On MSG_SNAPSHOT we
        receive the entire order book for a given product. On MSG_L2UPDATE we receive updates to the book, which we
have to put on top of the snapshot. (Level 2 means aggregated updates for up to 50 levels of the order book)
"""
decoded_msg = json.loads(msg)
message_type = decoded_msg["type"]
        if message_type == MSG_SUBSCRIPTIONS:
product_ids = decoded_msg["channels"]
logging.debug("Subscriptions: {}".format(product_ids))
elif message_type == MSG_SNAPSHOT:
product_id = decoded_msg["product_id"]
self._snapshot(decoded_msg)
# Old best bid and ask doesn't exist yet, this will always set a new bbo
self.set_if_new_bbo(product_id)
elif message_type == MSG_L2UPDATE:
product_id = decoded_msg["product_id"]
self.update(decoded_msg)
self.set_if_new_bbo(product_id)
self.event_count += 1
@timer
def _snapshot(self, msg) -> None:
"""Process a snapshot message
        Store the snapshot of the order book in memory. The snapshot is stored in a data structure that is easy
        to update with subsequent incremental order book update events.
"""
product_id = msg["product_id"]
logging.debug("Received snapshot for {}".format(product_id))
price_precision = "%.{}f".format(self.products[product_id])
self.book[product_id] = {}
for book_side in [BOOK_BIDS, BOOK_ASKS]:
self.book[product_id][book_side] = \
{(price_precision % float(level[0])): float(level[1]) for level in msg[book_side]}
def update(self, msg) -> None:
product_id = msg["product_id"]
price_precision = "%.{}f".format(self.products[product_id])
for change in msg["changes"]:
side = change[0]
price = float(change[1])
size = float(change[2])
book_side = BOOK_BIDS if side == "buy" else BOOK_ASKS
if size == 0:
self.book[product_id][book_side].pop((price_precision % price))
else:
self.book[product_id][book_side][(price_precision % price)] = size
def set_if_new_bbo(self, product_id) -> bool:
"""Sets the new best bid and ask, if found in the order book
If no new best bid and ask is detected in the order book, no data structure is modified
"""
bbo_has_changed = False
max_bid_price = max(self.book[product_id][BOOK_BIDS], key=lambda x: float(x))
max_bid_size = self.book[product_id][BOOK_BIDS][max_bid_price]
min_ask_price = min(self.book[product_id][BOOK_ASKS], key=lambda x: float(x))
min_ask_size = self.book[product_id][BOOK_ASKS][min_ask_price]
current_bbo = {
BID_PRICE: max_bid_price,
BID_SIZE: max_bid_size,
ASK_PRICE: min_ask_price,
ASK_SIZE: min_ask_size}
if current_bbo != self.bbo[product_id]:
if self.callback_func:
self.callback_func(product_id, current_bbo)
self.bbo[product_id] = current_bbo
logging.info("{}: {} @ {} -- {} @ {}".
format(product_id, max_bid_size, max_bid_price, min_ask_size, min_ask_price))
bbo_has_changed = True
return bbo_has_changed
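# A minimal usage sketch (product ids and log file name are illustrative; a live
# connection to the Coinbase Pro websocket feed is required, and feed.start()
# blocks because it runs the websocket loop forever):
if __name__ == "__main__":
    def print_bbo(product_id, bbo):
        print(product_id, bbo[BID_SIZE], "@", bbo[BID_PRICE], "--", bbo[ASK_SIZE], "@", bbo[ASK_PRICE])
    feed = CBProMarketBBO(products=["BTC-USD", "ETH-USD"],
                          output_file_name="bbo.log",
                          callback_func=print_bbo)
    feed.start()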
| 7,590 |
gpjax/parameters/transforms.py
|
thomaspinder/GPJax
| 44 |
2025611
|
from collections.abc import KeysView
from typing import Callable, Tuple
import jax.numpy as jnp
from ml_collections import ConfigDict
def build_unconstrain(keys: KeysView, config: ConfigDict) -> Callable:
"""
    Build the transformation map that will transform a set of parameters such that each parameter is now defined on the
entire real line.
:param keys: A set of dictionary keys that represent parameters name-key.
:param config: A configuration dictionary that informs which transformation should be applied to which parameter.
:return: A callable that will apply the desired transformation(s).
"""
transforms = {k: config.transformations[config.transformations[k]] for k in keys}
def unconstrain(params: dict) -> dict:
return {k: jnp.array(transforms[k].inverse(v)) for k, v in params.items()}
return unconstrain
def build_constrain(keys: KeysView, config: ConfigDict) -> Callable:
"""
    Build the transformation map that will transform a set of parameters such that each unconstrained parameter
representation is now defined on the parameter's original, possibly constrained, space.
:param keys: A set of dictionary keys that represent parameters name-key.
:param config: A configuration dictionary that informs which transformation should be applied to which parameter.
:return: A callable that will apply the desired transformation(s).
"""
transforms = {k: config.transformations[config.transformations[k]] for k in keys}
def constrain(params: dict) -> dict:
return {k: jnp.array(transforms[k].forward(v)) for k, v in params.items()}
return constrain
def build_all_transforms(keys: KeysView, config: ConfigDict) -> Tuple[Callable, Callable]:
"""
Build both the constraining and unconstraining function mappings.
:param keys: A set of dictionary keys that represent parameters name-key.
:param config: A configuration dictionary that informs which transformation should be applied to which parameter.
:return: A tuple of callables that will apply the desired transformation(s) to both the original and the unconstrained parameter values, in this order.
"""
unconstrain = build_unconstrain(keys, config)
constrain = build_constrain(keys, config)
return constrain, unconstrain
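# A minimal sketch of the expected `config` layout (the parameter name, transform
# name and bijector object below are illustrative stand-ins, not GPJax defaults):
# `config.transformations` maps each parameter name to a transform name, and each
# transform name to an object exposing `forward`/`inverse`.
#
#   config = ConfigDict()
#   config.transformations = {"lengthscale": "softplus",
#                             "softplus": SomeBijector()}   # hypothetical bijector
#   params = {"lengthscale": jnp.array(1.0)}
#   constrain, unconstrain = build_all_transforms(params.keys(), config)
#   unconstrained = unconstrain(params)
#   constrained = constrain(unconstrained)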
| 2,323 |
python_builds/pawn_rig.py
|
S0nic014/luna_sample_files
| 0 |
2025156
|
import luna_rig
from luna_rig.core import pybuild
from luna_rig import importexport
class RigBuild(pybuild.PyBuild):
SPACE_MATRIX_METHOD = False
def run(self):
self.base = luna_rig.components.SimpleComponent.create(name="body")
self.root_ctl = self.base.add_control("c_body_00_jnt", "root", as_hook=True, bind_joint="c_body_00_jnt", orient_axis="y")
self.barel_ctl = self.base.add_control("c_barel_00_jnt",
"barel",
bind_joint="c_barel_00_jnt",
parent=self.root_ctl,
shape="arrow",
orient_axis="y")
self.muzzle_ctl = self.base.add_control("c_barel_01_jnt",
"muzzle",
bind_joint="c_barel_01_jnt",
parent=self.barel_ctl,
attributes=["tx"])
self.barel_ctl.add_orient_switch(self.character.world_locator, local_parent=self.root_ctl)
self.left_front_leg = luna_rig.components.FKIKComponent.create(name="front_leg",
side="l",
start_joint="l_front_leg_00_jnt",
end_joint="l_front_leg_02_jnt",
ik_world_orient=True,
meta_parent=self.base,
hook=0)
self.right_front_leg = luna_rig.components.FKIKComponent.create(name="front_leg",
side="r",
start_joint="r_front_leg_00_jnt",
end_joint="r_front_leg_02_jnt",
ik_world_orient=True,
meta_parent=self.base,
hook=0)
self.left_back_leg = luna_rig.components.FKIKComponent.create(name="front_leg",
side="l",
start_joint="l_back_leg_00_jnt",
end_joint="l_back_leg_02_jnt",
ik_world_orient=True,
meta_parent=self.base,
hook=0)
self.right_back_leg = luna_rig.components.FKIKComponent.create(name="front_leg",
side="r",
start_joint="r_back_leg_00_jnt",
end_joint="r_back_leg_02_jnt",
ik_world_orient=True,
meta_parent=self.base,
hook=0)
for leg_comp in self.character.get_meta_children(of_type=luna_rig.components.FKIKComponent):
leg_comp.fk_controls[-1].group.hide()
# Spaces
for leg_comp in self.character.get_meta_children(of_type=luna_rig.components.FKIKComponent):
# Pole vector
leg_comp.pv_control.add_world_space(via_matrix=self.SPACE_MATRIX_METHOD)
leg_comp.pv_control.add_space(leg_comp.ik_control, "IK", via_matrix=self.SPACE_MATRIX_METHOD)
leg_comp.pv_control.add_space(self.root_ctl, "Body", via_matrix=self.SPACE_MATRIX_METHOD)
# IK
leg_comp.ik_control.add_world_space(via_matrix=self.SPACE_MATRIX_METHOD)
leg_comp.ik_control.add_space(self.root_ctl, "Body", via_matrix=self.SPACE_MATRIX_METHOD)
leg_comp.fk_controls[0].add_orient_switch(self.character.world_locator)
self.character.add_root_motion(self.root_ctl, root_joint="c_root_00_jnt")
for comp in self.character.meta_children:
comp.attach_to_skeleton()
def post(self):
importexport.CtlShapeManager().import_asset_shapes()
importexport.SkinManager().import_all()
# importexport.NgLayersManager.import_all()
self.character.set_publish_mode(True)
if __name__ == "__main__":
RigBuild("character", "Pawn")
| 5,119 |
driver/driverinterface.py
|
koltenfluckiger/pyseleniummanagement
| 0 |
2025475
|
from .options import BrowserOptions
try:
from abc import ABC
from selenium import webdriver
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
except ImportError as err:
print("Unable to import: {}".format(err))
exit()
class DriverInterface(ABC):
def factory(self) -> object:
"""Factory function returns driver object"""
class Chrome(DriverInterface):
def __init__(self, executable_path: str,
chrome_options: BrowserOptions) -> None:
self.executable_path = executable_path
self.chrome_options = chrome_options
def factory(self) -> object:
return webdriver.Chrome(
executable_path=self.executable_path, chrome_options=self.chrome_options)
class Firefox(DriverInterface):
def __init__(self, executable_path: str, firefox_options: BrowserOptions,
firefox_profile: FirefoxProfile = FirefoxProfile(), binary_path: str = None) -> None:
self.executable_path = executable_path
self.binary_path = binary_path
self.firefox_profile = firefox_profile
self.firefox_options = firefox_options
def factory(self) -> object:
try:
return webdriver.Firefox(executable_path=self.executable_path,
firefox_options=self.firefox_options, firefox_profile=self.firefox_profile, firefox_binary=self.binary_path)
except Exception as err:
print(err)
class Safari(DriverInterface):
def __init__(self, service_args: BrowserOptions, executable_path: str = "/usr/bin/safaridriver") -> None:
self.executable_path = executable_path
self.service_args = service_args
def factory(self) -> object:
try:
return webdriver.Safari(executable_path=self.executable_path, service_args=self.service_args)
except Exception as err:
print(err)
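# Usage sketch (illustrative, not part of the original module): build a driver
# through one of the concrete factories. The chromedriver path and the
# BrowserOptions construction below are assumptions; adapt them to the local setup.
#
#     options = BrowserOptions()  # assumed to produce Chrome-compatible options
#     driver = Chrome("/usr/local/bin/chromedriver", options).factory()
#     driver.get("https://example.com")
#     driver.quit()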
| 1,917 |
tests/test_loading.py
|
Pithikos/rest-framework-roles
| 19 |
2026664
|
import pytest
from rest_framework_roles.parsing import validate_config, load_roles, load_view_permissions
from rest_framework_roles.roles import is_admin, is_anon
VALID_ROLES = {'admin': is_admin, 'anon': is_anon}
VALID_VIEW_PERMISSIONS = [
{
'view': 'someview',
'permissions': {
'admin': True,
'anon': False,
}
}
]
REST_FRAMEWORK_ROLES = {}
VALID_ROLES_PATH = f'{__name__}.VALID_ROLES'
VALID_VIEW_PERMISSIONS_PATH = f'{__name__}.VALID_VIEW_PERMISSIONS'
# ---------------------------------- Basic -------------------------------------
def test_load_roles_by_dotted_path():
assert load_roles({'roles': VALID_ROLES_PATH, 'view_permissions': None})
def test_load_view_permissions_by_dotted_path():
assert load_view_permissions({'roles': None, 'view_permissions': VALID_VIEW_PERMISSIONS_PATH})
def test_validate_config():
validate_config({'roles': None})
# with pytest.raises(Exception):
# validate_config({'view_permissions': None})
validate_config({'view_permissions': None, 'roles': None})
| 1,085 |
PyTorch/1-Intorduction/test.py
|
chauhankartik/DeepLearning-EarlySteps
| 1 |
2026257
|
#external libraries
import numpy as np
import pickle
import os
import json
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
#internal utilities
import config
from model import NeuralNetwork
import data_loader
#hyper-parameter setup
hyper_params = {
"num_epochs" : config.num_epochs,
"batch_size" : config.batch_size,
"learning_rate" : config.learning_rate,
"hidden_size" : config.hidden_size,
"pretrained" : config.pretrained
}
# train on GPU if CUDA variable is set to True (a GPU with CUDA is needed to do so)
test_dataloader = data_loader.test_data_loader()
#define loss and optimizer
criterion = nn.CrossEntropyLoss()
def test(dataloader, model, loss_fn):
optimizer = torch.optim.Adadelta(model.parameters(), lr = config.learning_rate)
size = len(dataloader.dataset)
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
for X,y in dataloader:
X,y = X.to(config.device), y.to(config.device)
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= size
correct /= size
print(f"Test error :\n Accuracy : {(100*correct) : >0.1f} %, Avg loss : {test_loss: >8f}\n")
| 1,209 |
python-mxnet/mxnet-graph.py
|
ArmstrongYang/StudyShare
| 2 |
2025636
|
import logging
import numpy as np
import mxnet as mx
logging.basicConfig(level=logging.INFO)
sample_count = 1000
train_count = 900
valid_count = sample_count - train_count
feature_count = 100
category_count = 10
batch = 10
X = mx.nd.uniform(low=0, high=1, shape=(sample_count, feature_count))
print('X.shape: ', X.shape, 'X.size: ', X.size)
Y = mx.nd.empty((sample_count, ))
for i in range(sample_count):
Y[i] = np.random.randint(0, category_count)
print('Y.shape: ', Y.shape, 'Y.size: ', Y.size)
print('Y.asnumpy: ', Y[0:10].asnumpy())
X_train = mx.nd.crop(X, begin=(0, 0), end=(train_count, feature_count-1))
X_valid = mx.nd.crop(X, begin=(train_count, 0),
end=(sample_count, feature_count-1))
Y_train = Y[0:train_count]
Y_valid = Y[train_count:sample_count]
# Build the network
data = mx.sym.Variable('data')
fc1 = mx.sym.FullyConnected(data, name='fc1', num_hidden=64)
relu1 = mx.sym.Activation(fc1, name='relu1', act_type="relu")
fc2 = mx.sym.FullyConnected(relu1, name='fc2', num_hidden=category_count)
out = mx.sym.SoftmaxOutput(fc2, name='softmax')
mod = mx.mod.Module(out)
# Data iterator
train_iter = mx.io.NDArrayIter(data=X_train, label=Y_train, batch_size=batch)
# for batch in train_iter:
# print(batch.data)
# print(batch.label)
# raise EOFError("Eorror")
mod.bind(data_shapes=train_iter.provide_data,
label_shapes=train_iter.provide_label)
# Allowed, but not efficient
# mod.init_params()
# Much better
mod.init_params(initializer=mx.init.Xavier(magnitude=2.))
mod.init_optimizer(optimizer='sgd', optimizer_params=(
('learning_rate', 0.1), ))
mod.fit(train_iter, num_epoch=50)
pred_iter = mx.io.NDArrayIter(data=X_valid,label=Y_valid, batch_size=batch)
pred_count = valid_count
correct_preds = total_correct_preds = 0
for preds, i_batch, batch in mod.iter_predict(pred_iter):
label = batch.label[0].asnumpy().astype(int)
pred_label = preds[0].asnumpy().argmax(axis=1)
correct_preds = np.sum(pred_label==label)
total_correct_preds = total_correct_preds + correct_preds
print('Validation accuracy: %2.2f' % (1.0*total_correct_preds/pred_count))
| 2,126 |
apricotlib/map_domain_to_go.py
|
malvikasharan/APRICOT
| 5 |
2026159
|
#!/usr/bin/env python
# Description = Maps each domains to their corresponding domains from InterPro
import argparse
import os
__description__ = ("Maps each domains to their corresponding domains from "
"InterPro")
def main():
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("go_path")
parser.add_argument("interpro_to_go")
parser.add_argument("interpro_data")
parser.add_argument("cdd_data")
args = parser.parse_args()
map_domain_to_go = MapDomainToGo(
args.go_path, args.interpro_to_go,
args.interpro_data, args.cdd_data)
map_domain_to_go.read_obo_file()
map_domain_to_go.map_interpro_to_domains()
map_domain_to_go.read_mapped_go_data()
map_domain_to_go.map_cdd_to_go()
class MapDomainToGo(object):
def __init__(self, go_path,
interpro_to_go,
interpro_data,
cdd_data):
self._go_path = go_path
self._interpro_to_go = interpro_to_go
self._interpro_data = interpro_data
self._cdd_data = cdd_data
self._go_info_dict = {}
self._interpro_domain_dict = {}
self._domain_interpro_dict = {}
self._interpro_go_dict = {}
def read_obo_file(self):
'''parses and reads the GO obo file'''
out_fh = open(self._go_path+'/go_obo_table.csv', 'w')
for go_file in os.listdir(self._go_path):
go_fh = open(self._go_path+'/'+go_file, 'r')
if '.obo' in go_file:
for entry in go_fh.read().split('[Term]'):
if 'id: GO:' in entry:
info_dict = {}
for info in entry.split('\n'):
if info.startswith('id: GO:'):
info_dict["go_id"] = info.split(
': ')[1].strip()
if "name:" in info:
info_dict["name"] = info.split(
': ')[1].strip()
if "namespace:" in info:
if "biological_process" in info:
info_dict["namesp"] = 'bp'
elif "cellular_component" in info:
info_dict["namesp"] = 'cc'
elif "molecular_function" in info:
info_dict["namesp"] = 'mf'
out_fh.write("%s:%s\t%s\n" % (
info_dict["namesp"], info_dict["go_id"],
info_dict["name"]))
self._go_info_dict[info_dict["go_id"]] = (
"%s:%s [%s]" % (info_dict["namesp"],
info_dict["go_id"], info_dict["name"]))
go_fh.close()
out_fh.close()
return self._go_info_dict
def map_interpro_to_domains(self):
'''maps ipr ids to domain ids'''
with open(self._interpro_data, 'r') as in_fh:
for entry in in_fh:
ipr_id = entry.split('\t')[0]
domain_ids = entry.strip().split('\t')[-1]
self._interpro_domain_dict[ipr_id] = domain_ids
if '|' in domain_ids:
for domains in domain_ids.split('|'):
self._domain_interpro_dict[domains] = ipr_id
else:
self._domain_interpro_dict[domain_ids] = ipr_id
return self._interpro_domain_dict, self._domain_interpro_dict
def read_mapped_go_data(self):
'''maps domains to go id'''
with open(self._interpro_to_go, 'r') as interpro_to_go_fh:
for mapped_entry in interpro_to_go_fh:
if "InterPro:" in mapped_entry:
ipr_id = mapped_entry.split('InterPro:'
)[1].split(' ')[0]
go_id = mapped_entry.split(';')[-1].split(
'GO:')[1].strip()
self._interpro_go_dict.setdefault(
ipr_id, []).append("GO:%s" % go_id)
with open(self._go_path+'/mapped_interpro_to_go.csv',
'w') as map_out_fh:
with open(self._go_path+'/unmapped_interpro_to_go.csv',
'w') as unmap_out_fh:
for ipr in set(self._interpro_domain_dict.keys()):
if ipr in set(self._interpro_go_dict.keys()):
if len(self._interpro_go_dict[ipr]) > 1:
go_list = []
for go in self._interpro_go_dict[ipr]:
try:
go_list.append(self._go_info_dict[go])
except KeyError:
go_list.append('%s: No description' % go)
map_out_fh.write("%s\t%s\t%s\n" % (
ipr, self._interpro_domain_dict[ipr],
','.join(go_list)))
else:
try:
map_out_fh.write("%s\t%s\t%s\n" % (
ipr, self._interpro_domain_dict[ipr],
self._go_info_dict[
self._interpro_go_dict[ipr][0]]))
except:
map_out_fh.write("%s\t%s\t%s\n" % (
ipr, self._interpro_domain_dict[ipr],
'No description'))
else:
unmap_out_fh.write("%s\t%s\n" % (
ipr, self._interpro_domain_dict[ipr]))
return self._interpro_go_dict
def map_cdd_to_go(self):
'''maps cdd to go and interpro'''
map_go_fh = open(
self._go_path+'/mapped_cdd_to_go.csv', 'w')
unmap_go_fh = open(
self._go_path+'/unmapped_cdd_to_go.csv', 'w')
map_ipr_fh = open(
self._go_path+'/mapped_cdd_to_interpro.csv', 'w')
unmap_ipr_fh = open(
self._go_path+'/unmapped_cdd_to_interpro.csv', 'w')
interpro_path = '/'.join(self._interpro_to_go.split('/')[0:-1])
ipr_map_cdd_length = open(
interpro_path+'/mapped_interpro_to_cdd_length.csv', 'w')
with open(self._cdd_data, 'r') as cdd_data_fh:
for cdd_entry in cdd_data_fh:
cdd_id = cdd_entry.split('\t')[0]
cdd_db_id = cdd_entry.split('\t')[1]
cdd_length = cdd_entry.strip().split('\t')[-1]
if 'pfam' in cdd_db_id:
cdd_db_id = cdd_db_id.replace('pfam', 'PF')
if 'smart' in cdd_db_id:
cdd_db_id = cdd_db_id.replace('smart', 'SM')
if cdd_db_id in set(self._domain_interpro_dict.keys()):
ipr = self._domain_interpro_dict[cdd_db_id]
ipr_map_cdd_length.write("%s\t%s\t%s\t%s\t%s\n" % (
ipr, self._interpro_domain_dict[ipr],
cdd_id, cdd_db_id, cdd_length))
map_ipr_fh.write("%s\t%s\t%s\t%s\n" % (
cdd_id, cdd_db_id,
ipr, self._interpro_domain_dict[ipr]))
try:
if self._interpro_go_dict[ipr]:
if len(self._interpro_go_dict[ipr]) > 1:
go_list = []
for go in self._interpro_go_dict[ipr]:
go_list.append(self._go_info_dict[go])
map_go_fh.write("%s\t%s\t%s\n" % (
cdd_id, cdd_db_id, ','.join(go_list)))
else:
map_go_fh.write("%s\t%s\t%s\n" % (
cdd_id, cdd_db_id, self._go_info_dict[
self._interpro_go_dict[ipr][0]]))
except KeyError:
unmap_go_fh.write("%s\t%s\n" % (
cdd_id, cdd_db_id))
else:
unmap_ipr_fh.write("%s\t%s\n" % (cdd_id, cdd_db_id))
ipr_map_cdd_length.close()
map_go_fh.close()
unmap_go_fh.close()
map_ipr_fh.close()
unmap_ipr_fh.close()
if __name__ == '__main__':
main()
| 8,650 |
app/schemas/job.py
|
javi-cortes/linkedon
| 0 |
2026475
|
from typing import Optional, List
from pydantic import BaseModel
class Job(BaseModel):
name: str
description: Optional[str]
country: str
user_id: int
salary: int = 0
required_skills: List[str]
class Config:
orm_mode = True
class JobCreate(BaseModel):
name: str
description: str
user_id: int
salary: int = 0
required_skills: List[str]
country: str
class JobSearchCriteria(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
user_id: Optional[int] = None
salary_max: Optional[int] = 0
salary_min: Optional[int] = 0
required_skills: Optional[List[str]] = None
country: Optional[str] = None
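# Usage sketch (illustrative values, not part of the original schema module):
# build a creation payload and a search filter; pydantic validates the fields.
if __name__ == "__main__":
    job = JobCreate(
        name="Backend developer",
        description="Build and maintain the jobs API",
        user_id=1,
        salary=55000,
        required_skills=["python", "fastapi"],
        country="ES",
    )
    criteria = JobSearchCriteria(country="ES", salary_min=40000)
    print(job.dict())
    print(criteria.dict(exclude_none=True))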
| 704 |
ext_clock_test.py
|
ifurusato/ros
| 9 |
2026054
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by <NAME>. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: <NAME>
# created: 2021-02-19
# modified: 2021-02-19
#
# Uses an interrupt (event detect) set on a GPIO pin as an external Clock trigger.
#
import pytest
import sys, time
from colorama import init, Fore, Style
init()
from lib.config_loader import ConfigLoader
from lib.logger import Logger, Level
from lib.message_bus import MessageBus
from lib.ext_clock import ExternalClock
# ..............................................................................
@pytest.mark.unit
def test_external_clock():
_log = Logger("xclock", Level.INFO)
_log.info('starting test...')
# read YAML configuration
_loader = ConfigLoader(Level.INFO)
filename = 'config.yaml'
_config = _loader.configure(filename)
_message_bus = MessageBus(Level.INFO)
_clock = ExternalClock(_config, _message_bus, Level.INFO)
try:
_log.info('starting clock...')
_clock.enable()
while _clock.enabled:
time.sleep(1.0)
except KeyboardInterrupt:
_log.info("caught Ctrl-C.")
finally:
_log.info("closed.")
# main .........................................................................
def main(argv):
test_external_clock()
# call main ....................................................................
if __name__== "__main__":
main(sys.argv[1:])
#EOF
| 1,601 |
IGitt/__init__.py
|
etcher-be/igit
| 6 |
2025883
|
"""
This package contains abstractions to use git, as well as services providing
hosting for it (GitHub, GitLab and others).
"""
from os.path import dirname, join
class ElementDoesntExistError(Exception):
"""
Indicates that the desired element doesn't exist.
"""
class ElementAlreadyExistsError(Exception):
"""
Indicates that the element (that is probably to be created) already exists.
"""
with open(join(dirname(__file__), 'VERSION'), 'r') as ver:
VERSION = ver.readline().strip()
| 518 |
homebytwo/routes/tests/test_auth_pipeline.py
|
drixselecta/homebytwo
| 7 |
2026511
|
from .. import auth_pipeline
from .factories import ActivityFactory
def test_auth_pipeline_new_athlete(athlete, celery, mocker):
athlete.activities_imported = False
athlete.save(update_fields=["activities_imported"])
mock_import_task = mocker.patch(
"homebytwo.routes.tasks.import_strava_activities_task.run"
)
mock_streams_task = mocker.patch(
"homebytwo.routes.tasks.import_strava_activities_streams_task.run"
)
mock_train_task = mocker.patch(
"homebytwo.routes.tasks.train_prediction_models_task.run"
)
auth_pipeline.import_strava(user=athlete.user, is_new=True)
mock_import_task.assert_called_with(athlete_id=athlete.id)
    mock_streams_task.assert_called()
mock_train_task.assert_called_with(athlete_id=athlete.id)
def test_auth_pipeline_existing_athlete(athlete, celery, mocker):
ActivityFactory.create_batch(5, athlete=athlete, streams=None)
ActivityFactory.create_batch(5, athlete=athlete)
activities = athlete.activities.filter(
streams__isnull=True, skip_streams_import=False
)
activity_ids = list(activities.values_list("strava_id", flat=True))
assert athlete.activities.count() == 10
mock_streams_task = mocker.patch(
"homebytwo.routes.tasks.import_strava_activities_streams_task.run"
)
mock_train_task = mocker.patch(
"homebytwo.routes.tasks.train_prediction_models_task.run"
)
auth_pipeline.import_strava(user=athlete.user, is_new=False)
mock_streams_task.assert_called_with(activity_ids)
mock_train_task.assert_called_with(athlete_id=athlete.id)
| 1,612 |
awaard/urls.py
|
Uomar7/awaards
| 0 |
2026527
|
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
from . import views
urlpatterns=[
url('^$',views.landing_page, name='home'),
url(r'^edit/$', views.edit_profile, name="edit_profile"),
url(r'^profile/(\d+)', views.profile, name="profile"),
# url(r'^search/', views.search_results, name='search_results')
url(r'^api/projects/$',views.ProjectList.as_view()),
url(r'^api/profile/$',views.ProfileList.as_view()),
url(r"^new/project/$",views.new_project, name='new_project'),
url(r'^project/(\d+)',views.single_project, name="single_project"),
]
| 633 |
tests/test_random_component.py
|
RaviPandey33/gym-electric-motor-1
| 179 |
2026707
|
import pytest
import gym_electric_motor as gem
import numpy as np
class TestRandomComponent:
@pytest.fixture
def random_component(self):
return gem.RandomComponent()
def test_seed(self, random_component):
"""Test, if a predefined SeedSequence is set when it is passed."""
seed_sequence = np.random.SeedSequence()
random_component.seed(seed_sequence)
assert seed_sequence == random_component.seed_sequence
def test_default_seed(self, random_component):
"""Test, if a random SeedSequence is set when no SeedSequence is passed."""
random_component.seed()
assert isinstance(random_component.seed_sequence, np.random.SeedSequence)
assert isinstance(random_component.random_generator, np.random.Generator)
def test_reseed(self, random_component):
"""Test if the seed of the RandomComponent differs after two random seedings."""
random_component.seed()
initial_seed = random_component.seed_sequence
random_component.seed()
assert random_component.seed_sequence != initial_seed
def test_next_generator(self, random_component):
"""This test checks if the next_generator function sets the RandomComponent to defined states after each
next_generator() call, no matter how many steps have been taken before."""
# Seed the random component with a defined seed.
random_component.seed(np.random.SeedSequence(123))
# Generate a first episode of 42 random numbers
rands_first_ep = random_component.random_generator.random(42)
# Use the next generator
random_component.next_generator()
# Generate another episode of 42 steps
rands_second_ep = random_component.random_generator.random(42)
# Reseed the environment to the previous state
random_component.seed(np.random.SeedSequence(123))
# Test, if the first random numbers of the first episodes are equal
assert(np.all(rands_first_ep[:30] == random_component.random_generator.random(30))),\
'The random numbers of the initial and reseeded random component differ.'
random_component.next_generator()
        # Also the second episode has to be equal. Therefore, the next generator has to be set no matter how many steps
# have been taken in the first episode.
assert(np.all(rands_second_ep == random_component.random_generator.random(64)[:42])),\
'The random numbers of the initial and reseeded random component differ.'
| 2,557 |
py_work/data_process/normal/QED_extend.py
|
kotori-y/kotori_work
| 6 |
2026545
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 13 09:44:00 2019
@Author: CBDD Group, CSU, China
@Homepage: http://www.scbdd.com
@Mail: <EMAIL>
@Blog: https://blog.moyule.me
♥I love Megumi forever♥
"""
print (__doc__)
import pandas as pd
from math import e as ex
from math import log
from tqdm import tqdm
from math import exp
#
#def dfunc(x,a,b,c,d,e,f,dmax):
#
# part_one = (-(x-c+(d/2))/e)
# part_one = 1 + ex**part_one
# part_one = b/part_one
#
# part_two = (-(x-c-(d/2))/f)
# part_two = 1 + ex**part_two
# part_two = 1 - (1/part_two)
#
# res = a + part_one*part_two
#
# res = res/dmax
#
# return res
def ADS(x,a,b,c,d,e,f,max_val):
y = a+(b/(1+exp(-(x-c+d/2)/e)))*(1-1/(1+exp(-(x-c-d/2)/f))) #[ADS]
y = y/max_val
return y
def SDS(x,a,b,c,d,e,max_val):
y = a+b*(1+exp(-d/(2*e)))*(1+exp(d/(2*e)))*exp(-(x-c)/e)/((1+exp(-((x-c+d/2)/e)))*(1+exp(-((x-c-d/2)/e)))) #[SDS]
y = y/max_val
return y
def QED(data):
di = 0
n = 7
for key,value in data.items():
# n += 1
max_val = param['Max_val'][key]
a = param['a'][key]
b = param['b'][key]
c = param['c'][key]
d = param['d'][key]
e = param['e'][key]
f = param['f'][key]
try:
if f != 0:
di += log(ADS(value,a,b,c,d,e,f,max_val))
else:
di += log(SDS(value,a,b,c,d,e,max_val))
except:
di += 0
try:
res = exp(di/n)
except:
res = 0
return res
if '__main__' == __name__:
# file = input("Input file path:\n")
# ads_param = pd.read_csv(r"C:\Users\0720\Desktop\MATE\yzy\QED_test\20190522\ADS_param.csv",index_col='X')
# param = ads_param.to_dict()
# sds_param = pd.read_csv(r"C:\Users\0720\Desktop\MATE\yzy\QED_test\20190522\SDS_param.csv",index_col='X')
# param.update(sds_param.to_dict())
param = pd.read_csv(r"",index_col='X')
param.fillna(0,inplace=True)
param = param.to_dict()
df = pd.read_csv(r"")
df['Label'] = 1
df_i = df.copy()
df_i.drop(['Label','Smiles'],axis=1,inplace=True)
data = df_i.apply(lambda x: x.to_dict(),axis=1)
df['QED'] = data.map(QED)
#df['QEDw'] = data.map(QEDw)
    # NOTE: QEDw below depends on dfunc (commented out above) and on the
    # 'd(x)MAX'/'weight' columns of the parameter table; it is left unused here.
    def QEDw(data):
di = 0
W = 4.2
try:
for key,value in data.items():
# n += 1
a = param['a'][key]
b = param['b'][key]
c = param['c'][key]
d = param['d'][key]
e = param['e'][key]
f = param['f'][key]
dmax = param['d(x)MAX'][key]
weight = param['weight'][key]
di += weight*log(dfunc(value,a,b,c,d,e,f,dmax))
res = ex**(di/W)
except:
res = 0
return res
| 2,863 |
code/models/fc_nn.py
|
severilov/BCI-thesis
| 0 |
2026613
|
import torch
from torch import nn
import torch.nn.functional as F
class BaselineNN(nn.Module):
def __init__(self):
# call constructor from superclass
super().__init__()
# define network layers
self.fc1 = nn.Linear(4, 32)
self.fc2 = nn.Linear(32, 32)
self.fc3 = nn.Linear(32, 32)
self.fc4 = nn.Linear(32, 32)
self.fc5 = nn.Linear(32, 4)
def forward(self, x):
# define forward pass
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.relu(self.fc4(x))
x = torch.sigmoid(self.fc5(x))
return x
def init_fc_nn():
pass
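# Usage sketch (illustrative, not part of the original module): run a forward
# pass on a random batch; input and output are both 4-dimensional per sample.
if __name__ == "__main__":
    model = BaselineNN()
    batch = torch.rand(8, 4)   # 8 samples, 4 features each
    out = model(batch)         # sigmoid outputs in (0, 1)
    print(out.shape)           # torch.Size([8, 4])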
| 692 |
spider/genesis_mining.py
|
carboclan/miningPools
| 0 |
2025795
|
import requests
from .util import logger, poolItem, generate_request
import time, json
from urllib.parse import quote
from decimal import *
from parsel import Selector
s = generate_request()
merchant = "genesis_mining"
@logger.catch
def getdata():
url = "https://www.genesis-mining.com/pricing"
logger.info(f"get contract list {url}")
z1 = s.get(url, timeout=60)
response = Selector(text=z1.text)
ret = []
bitcoin_list = [
"bitcoin-mining",
"bitcoin-mining-6month",
"bitcoin-mining-radiant-zero",
"bitcoin-mining-radiant-zero-6month",
"ethereum-mining",
]
for b in bitcoin_list:
gmp_prices = (
response.xpath(f'//div[@id="{b}"]')
.xpath('.//li[@class="gmp-price"]/span/text()')
.extract()
)
ths = (
response.xpath(f'//div[@id="{b}"]')
.xpath('.//li[@class="gmp-megaw"]/text()')
.extract()
)
_sold_percent = (
response.xpath(f'//div[@id="{b}"]').xpath('.//a[@title="已售罄"]').extract()
)
if _sold_percent:
sold_percent = 100
else:
sold_percent = 10
gmp_prices = [i.replace(",", "") for i in gmp_prices]
ths = [x.split(" ")[0].strip() for x in [i for i in ths if i.strip() != ""]]
if "bitcoin" in b:
coin = "BTC"
elif "ethereum" in b:
coin = "ETH"
if "6month" in b:
duration = 6 * 30
else:
duration = 730
if "zero" in b:
zero = "00"
else:
zero = "006"
for i in range(3):
ret.append(
{
"upfront_fee": float(gmp_prices[i]),
"contract_size": float(ths[i]),
"duration": duration,
"zero": zero,
"b": b,
"sold_percent": sold_percent,
"coin": coin,
}
)
return ret
def parsedata():
data = getdata()
for i in data:
contract = i
_id = merchant + "_" + contract["b"] + "_" + str(contract["duration"])
coin = contract["coin"]
duration = contract["duration"]
issuers = merchant
contract_size = contract["contract_size"]
electricity_fee = 0.06
management_fee = 0.0
## buy url .5 fix
if ".5" not in str(contract_size):
contract_size_url = str(int(contract_size))
else:
contract_size_url = str(contract_size)
if duration == 30 * 6:
buy_url = f"https://www.genesis-mining.com/upgrade-hashpower?a=sha256_6month{contract['zero']}&p={contract_size_url}"
else:
buy_url = f"https://www.genesis-mining.com/upgrade-hashpower?a=sha256_2year{contract['zero']}&p={contract_size_url}"
upfront_fee = contract["upfront_fee"]
messari = 0.04
sold_percent = contract["sold_percent"]
p = poolItem(
_id,
coin,
duration,
issuers,
contract_size,
electricity_fee,
management_fee,
buy_url,
upfront_fee,
messari,
sold_percent,
)
p.save2db()
if __name__ == "__main__":
parsedata()
| 3,378 |
adjutant_ui/content/quota/views.py
|
elastx/adjutant-ui
| 8 |
2024914
|
# Copyright (c) 2016 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables as horizon_tables
from adjutant_ui.api import adjutant
from adjutant_ui.content.quota import forms as quota_forms
from adjutant_ui.content.quota import tables as quota_tables
class IndexView(horizon_tables.MultiTableView):
page_title = _("Quota Management")
table_classes = (quota_tables.RegionOverviewTable,
quota_tables.SizeOverviewTable,
quota_tables.QuotaTasksTable)
template_name = 'management/quota/index.html'
def get_region_overview_data(self):
try:
return adjutant.region_quotas_get(self.request)
except Exception:
exceptions.handle(self.request, _('Failed to list quota sizes.'))
return []
def get_size_overview_data(self):
try:
return adjutant.quota_sizes_get(self.request)
except Exception:
exceptions.handle(self.request, _('Failed to list quota sizes.'))
return []
def get_quota_tasks_data(self):
try:
return adjutant.quota_tasks_get(self.request)
except Exception:
exceptions.handle(self.request, _('Failed to list quota tasks.'))
return []
class RegionDetailView(horizon_tables.DataTableView):
table_class = quota_tables.RegionQuotaDetailTable
template_name = 'management/quota/region_detail.html'
page_title = _("'{{ region }}' Quota Details")
def get_data(self):
try:
return adjutant.quota_details_get(self.request,
self.kwargs['region'])
except Exception:
exceptions.handle(self.request, _('Failed to list quota sizes.'))
return []
def get_context_data(self, **kwargs):
context = super(RegionDetailView, self).get_context_data(**kwargs)
context['region'] = self.kwargs['region']
return context
class QuotaSizeView(horizon_tables.DataTableView):
table_class = quota_tables.QuotaDetailUsageTable
template_name = 'management/quota/size_detail.html'
page_title = _("'{{ size }}' Quota Details")
def get_data(self):
try:
return adjutant.size_details_get(self.request,
size=self.kwargs['size'])
except Exception:
exceptions.handle(self.request, _('Failed to list quota size.'))
return []
def get_context_data(self, **kwargs):
# request.user.services_region
context = super(QuotaSizeView, self).get_context_data(**kwargs)
context['title'] = _("%s - Quota Details") \
% self.kwargs['size'].title()
return context
class RegionUpdateView(forms.ModalFormView, horizon_tables.MultiTableView):
form_class = quota_forms.UpdateQuotaForm
table_classes = (quota_tables.ChangeSizeDisplayTable, )
submit_url = 'horizon:management:quota:update'
context_object_name = 'ticket'
template_name = 'management/quota/update.html'
success_url = reverse_lazy("horizon:management:quota:index")
page_title = _("Update Quota")
def get_change_size_data(self):
try:
return adjutant.quota_details_get(self.request,
region=self.kwargs['region'])
except Exception:
exceptions.handle(self.request, _('Failed to list quota sizes.'))
return []
def get_object(self):
return adjutant.region_quotas_get(self.request,
region=self.kwargs['region'])[0]
def get_context_data(self, **kwargs):
context = super(RegionUpdateView, self).get_context_data(**kwargs)
context['region'] = self.get_object()
args = (self.kwargs['region'],)
context['submit_url'] = reverse(self.submit_url, args=args)
context['form'] = self.get_form()
return context
def get_form_kwargs(self):
kwargs = super(RegionUpdateView, self).get_form_kwargs()
sizes = adjutant.quota_sizes_get(
self.request, region=self.kwargs['region'])
kwargs['size_choices'] = []
region = self.get_object()
for size in sizes:
if region.quota_size == size.name:
continue
if size.name not in region.preapproved_quotas:
kwargs['size_choices'].append(
[size.id, "%s (requires approval)" % size.name.title()])
else:
kwargs['size_choices'].append([size.id, size.name.title()])
return kwargs
def get_initial(self):
region = self.get_object()
data = {'id': region.id,
'region': region.region,
'quota_size': region.quota_size,
'preapproved_quotas': region.preapproved_quotas
}
return data
def post(self, request, *args, **kwargs):
        # NOTE(amelia): The MultiTableView overrides the form view's post;
# this reinstates it.
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
| 5,985 |
2021/crypto_v2/python_profit.py
|
jake-stewart/funtime_projects
| 0 |
2026422
|
#!/bin/python3
from binance.client import Client
import time
import os
api_key = "x"
api_secret = "x"
client = Client(api_key, api_secret)
investors = {
"John": {
"price": x,
"coins": x
},
"Carl": {
"price": y,
"coins": y
}
}
multiple_investors = len(investors) > 1
while True:
symbol_ticker = client.get_symbol_ticker(symbol="DOGEAUD")
if multiple_investors:
os.system("clear")
for name, investor in investors.items():
doge_price = float(symbol_ticker["price"])
new_balance = round(doge_price * investor["coins"], 2)
profit = round(new_balance - investor["price"], 2)
profit_percent = round(profit / investor["price"] * 100, 2)
if multiple_investors:
print(name + ":")
print(f" Investment: {investor['price']}")
print(f" Current value: {new_balance}")
print(f" Profit: {profit} ({profit_percent}%)")
else:
print(f"{investor['price']} -> {new_balance} = {profit} ({profit_percent}%)")
time.sleep(5)
| 1,141 |
src/main/scripts/central-events-5-or-more.py
|
nchambers/caevo
| 36 |
2026167
|
import sys
import get_central_events as gce
xmlfile = sys.argv[1]
hl = gce.HalfLinks(inputFname=xmlfile)
hl.get_halfLinks()
hl.get_halfLink_counts()
labels = "BEFORE AFTER INCLUDES IS_INCLUDED SIMULTANEOUS VAGUE".split()
outputDir = sys.argv[2]
N = sys.argv[3]
mostCommonHalfLinks = hl.halfLinkCounts.most_common()
for label in labels:
    out = open(outputDir + '/' + label + '_' + N + '.txt', 'w')
for halfLinkInfo in mostCommonHalfLinks:
if halfLinkInfo[0][1] == label and halfLinkInfo[1] > int(N):
entId = halfLinkInfo[0][0]
out.write(hl.get_text_and_sent_with_context_from_full(entId)+'\n\n')
out.close()
| 625 |
src/application-insights/azext_applicationinsights/_validators.py
|
Mannan2812/azure-cli-extensions
| 207 |
2025895
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=len-as-condition
from knack.util import CLIError
from azure.mgmt.core.tools import parse_resource_id, is_valid_resource_id
def validate_app_service(namespace):
if namespace.app_service and is_valid_resource_id(namespace.app_service):
namespace.app_service = parse_resource_id(namespace.app_service)['name']
def validate_dest_account(namespace):
if is_valid_resource_id(namespace.dest_account):
parsed_storage = parse_resource_id(namespace.dest_account)
storage_name = parsed_storage['resource_name']
namespace.dest_account = storage_name
def validate_applications(namespace):
if namespace.resource_group_name:
if isinstance(namespace.application, list):
if len(namespace.application) == 1:
if is_valid_resource_id(namespace.application[0]):
raise CLIError("Specify either a full resource id or an application name and resource group.")
else:
raise CLIError("Resource group only allowed with a single application name.")
def validate_storage_account_name_or_id(cmd, namespace):
if namespace.storage_account_id:
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
if not is_valid_resource_id(namespace.storage_account_id):
namespace.storage_account_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Storage',
type='storageAccounts',
name=namespace.storage_account_id
)
def validate_log_analytic_workspace_name_or_id(cmd, namespace):
if namespace.workspace_resource_id:
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
if not is_valid_resource_id(namespace.workspace_resource_id):
namespace.workspace_resource_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='microsoft.OperationalInsights',
type='workspaces',
name=namespace.workspace_resource_id
)
| 2,692 |
extrairdados/documentacao/testesweb.py
|
pedrograngeiro/Webcrasping-E-sports-Wiki
| 0 |
2026364
|
from bs4 import BeautifulSoup
#with open('html_doc.html') as html_doc:
# soup = BeautifulSoup(html_doc, 'html.parser')
soup = BeautifulSoup('<b class="boldest">Extremely bold</b>', 'html.parser')
tag = soup.b
print(type(tag))  # <class 'bs4.element.Tag'>
| 209 |
libs/yowsup/yowsup/yowsup/demos/contacts/layer.py
|
akshitpradhan/TomHack
| 22 |
2025796
|
from yowsup.layers.interface import YowInterfaceLayer, ProtocolEntityCallback
from yowsup.layers.protocol_contacts.protocolentities import GetSyncIqProtocolEntity, ResultSyncIqProtocolEntity
from yowsup.layers.protocol_iq.protocolentities import ErrorIqProtocolEntity
import threading
import logging
logger = logging.getLogger(__name__)
class SyncLayer(YowInterfaceLayer):
PROP_CONTACTS = "org.openwhatsapp.yowsup.prop.syncdemo.contacts"
def __init__(self):
super(SyncLayer, self).__init__()
    # Callback invoked when there is a successful connection to the WhatsApp server
@ProtocolEntityCallback("success")
def onSuccess(self, successProtocolEntity):
contacts= self.getProp(self.__class__.PROP_CONTACTS, [])
contactEntity = GetSyncIqProtocolEntity(contacts)
self._sendIq(contactEntity, self.onGetSyncResult, self.onGetSyncError)
def onGetSyncResult(self, resultSyncIqProtocolEntity, originalIqProtocolEntity):
print(resultSyncIqProtocolEntity)
raise KeyboardInterrupt()
def onGetSyncError(self, errorSyncIqProtocolEntity, originalIqProtocolEntity):
print(errorSyncIqProtocolEntity)
raise KeyboardInterrupt()
| 1,230 |
src/directory_check_for_headers.py
|
drbsmith/auto_code_commenter
| 0 |
2025569
|
"""! @file
# Directory Check For Headers
TODO_DOC
## Dependencies
* os, sys
* util.log
* header_generator
* function_documenter
* python_code.CodeBlock
## Functions
* TestModulesInDir
* main
[generated by Auto Code Commenter at 2022-03-08 20:01:01. https://github.com/drbsmith/auto_code_commenter]
@package src"""
import os, sys
p = os.path.dirname(os.path.abspath('./src'))
sys.path.append(p)
from util.log import setup_logging
logger = setup_logging()
## go through sub folders as well?
RECURSIVE = True
def TestModulesInDir(path):
if not path[-1] == '/':
path += '/'
from header_generator import CheckForHeader
from function_documenter import FindFunctions, CheckForDocumentation
from python_code.CodeBlock import CodeBlock
# get files at path
children= [os.path.join(path, child) for child in os.listdir(path)]
directories= list(filter(os.path.isdir, children))
for f in children:
if '.py' in f and not '.pyc' in f and not f in directories:
try:
with open(f, 'r') as fp:
rawcode = fp.read()
code_lines = CodeBlock.ParsePython(rawcode)
header = CheckForHeader(code_lines)
if header is None:
logger.info('Missing header docstring: {}'.format(f))
# get all lines that contain a function definition
func_lines = FindFunctions(code_lines)
func_lines.append(len(code_lines)) # stick EoF on the list
for i, j in zip(func_lines[:-1], func_lines[1:]):
has_doc = code_lines[i].hasDocumentation() # CheckForDocumentation(code_lines[i])
if not has_doc:
logger.info('Missing function docstring: {}::{}'.format(f, code_lines[i].getFunctionName()))
except:
logger.error('directory_check_for_headers: {}'.format(f), exc_info = True)
if RECURSIVE:
for direct in directories:
TestModulesInDir(direct)
def main():
if len(sys.argv) < 2:
logger.error('missing required directory path')
return
path = sys.argv[1]
TestModulesInDir(path)
if __name__ == '__main__':
main()
| 1,987 |
calvin_models/calvin_agent/utils/visualizations.py
|
nikepupu/calvin
| 70 |
2026272
|
# Force matplotlib to not use any Xwindows backend.
import matplotlib
import numpy as np
from pytorch_lightning.loggers import WandbLogger
import torch
import wandb
matplotlib.use("Agg")
import matplotlib.pyplot as plt
def visualize_temporal_consistency(max_batched_length_per_demo, gpus, sampled_plans, all_idx, step, logger, prefix=""):
"""compute t-SNE plot of embeddings os a task to visualize temporal consistency"""
labels = []
for demo in max_batched_length_per_demo:
labels = np.concatenate((labels, np.arange(demo) / float(demo)), axis=0)
# because with ddp, data doesn't come ordered anymore
labels = labels[torch.flatten(all_idx).cpu()]
colors = [plt.cm.Spectral(y_i) for y_i in labels]
assert sampled_plans.shape[0] == len(labels), "plt X shape {}, label len {}".format(
sampled_plans.shape[0], len(labels)
)
from MulticoreTSNE import MulticoreTSNE as TSNE
x_tsne = TSNE(perplexity=40, n_jobs=8).fit_transform(sampled_plans.cpu())
plt.close("all")
fig, ax = plt.subplots()
_ = ax.scatter(x_tsne[:, 0], x_tsne[:, 1], c=colors, cmap=plt.cm.Spectral)
fig.suptitle("Temporal Consistency of Latent space")
ax.axis("off")
if isinstance(logger, WandbLogger):
logger.experiment.log({prefix + "latent_embedding": wandb.Image(fig)})
else:
logger.experiment.add_figure(prefix + "latent_embedding", fig, global_step=step)
| 1,428 |
custom/ewsghana/migrations/0005_auto_20151204_2142.py
|
dborowiecki/commcare-hq
| 0 |
2025308
|
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ewsghana', '0004_sqlnotification'),
]
operations = [
migrations.AlterField(
model_name='facilityincharge',
name='location',
field=models.ForeignKey(to='locations.SQLLocation', on_delete=django.db.models.deletion.PROTECT),
preserve_default=True,
),
]
| 474 |
pynamodb_encoder/primitive_attribute_encoder.py
|
lyang/pynamodb-encoder
| 4 |
2023955
|
from pynamodb.attributes import (
Attribute,
BinaryAttribute,
BinarySetAttribute,
DiscriminatorAttribute,
JSONAttribute,
TTLAttribute,
UTCDateTimeAttribute,
)
SERIALIZABLE_TYPES = (BinaryAttribute, BinarySetAttribute, DiscriminatorAttribute, JSONAttribute)
ENCODER_MAPPING = {
SERIALIZABLE_TYPES: lambda attr, data: attr.serialize(data),
TTLAttribute: lambda _, data: data.timestamp(),
UTCDateTimeAttribute: lambda _, data: data.isoformat(),
}
class PrimitiveAttributeEncoder:
@staticmethod
def encode(attr: Attribute, data):
for types, callable in ENCODER_MAPPING.items():
if isinstance(attr, types):
return callable(attr, data)
return data
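# Usage sketch (illustrative, not part of the original module): datetimes are
# rendered as ISO strings and JSON attributes are serialized; anything without
# a registered encoder is passed through unchanged.
if __name__ == "__main__":
    from datetime import datetime, timezone
    now = datetime(2021, 1, 1, tzinfo=timezone.utc)
    print(PrimitiveAttributeEncoder.encode(UTCDateTimeAttribute(), now))  # 2021-01-01T00:00:00+00:00
    print(PrimitiveAttributeEncoder.encode(JSONAttribute(), {"a": 1}))    # {"a": 1}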
| 737 |
mwel/mwel/decompiler/xmlparser.py
|
esayui/mworks
| 0 |
2025673
|
from __future__ import division, print_function, unicode_literals
from collections import OrderedDict
from xml.etree import ElementTree
from xml.parsers import expat
class XMLElement(object):
comment_tag = object()
text = None
tail = None
lineno = None
colno = None
def __init__(self, tag, attrib):
self.tag = tag
self.attrib = attrib
self.children = []
def append(self, subelement):
self.children.append(subelement)
class XMLParser(object):
def __init__(self, error_logger):
self.error_logger = error_logger
def parse(self, src_data):
tb = ElementTree.TreeBuilder(XMLElement)
def xml_decl(version, encoding, standalone):
pass
def start_element(name, attrs):
attrs = OrderedDict(attrs[i:i+2] for i in range(0, len(attrs), 2))
elem = tb.start(name, attrs)
elem.lineno = p.CurrentLineNumber
elem.colno = p.CurrentColumnNumber
def end_element(name):
tb.end(name)
def comment(data):
tag = XMLElement.comment_tag
start_element(tag, ())
tb.data(data)
end_element(tag)
def default(data):
if data.strip():
self.error_logger('XML contains unexpected data',
lineno = p.CurrentLineNumber,
colno = p.CurrentColumnNumber)
p = expat.ParserCreate()
p.XmlDeclHandler = xml_decl
p.StartElementHandler = start_element
p.EndElementHandler = end_element
p.CommentHandler = comment
p.DefaultHandlerExpand = default
p.buffer_text = True
p.ordered_attributes = True
try:
p.Parse(src_data, True)
root = tb.close()
if root.tag == 'monkeyml':
return root
self.error_logger('XML does not contain an MWorks experiment '
'(root element is %s, not monkeyml)' % root.tag)
except expat.ExpatError:
self.error_logger(('Failed to parse XML: %s' %
expat.ErrorString(p.ErrorCode)),
lineno = p.ErrorLineNumber,
colno = p.ErrorColumnNumber)
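# Usage sketch (illustrative, not part of the original module): parse a tiny
# MWorks-style document with a simple error logger.
if __name__ == "__main__":
    def _log_error(msg, lineno=None, colno=None):
        print('error: %s (line %s, column %s)' % (msg, lineno, colno))
    xml = '<monkeyml version="1.1"><experiment tag="demo"/></monkeyml>'
    root = XMLParser(_log_error).parse(xml)
    if root is not None:
        # prints: monkeyml OrderedDict([('version', '1.1')]) 1
        print(root.tag, root.attrib, len(root.children))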
| 2,340 |
flashflood/identifiers.py
|
HumanCellAtlas/flash-flood
| 1 |
2026478
|
from uuid import uuid4
from enum import Enum
from functools import lru_cache
from flashflood.util import datetime_from_timestamp, timestamp_now
TOMBSTONE_SUFFIX = ".dead"
class JournalUpdateAction(Enum):
UPDATE = "update"
DELETE = "delete"
class JournalID(str):
DELIMITER = "--"
@classmethod
def make(cls, start_timestamp: str, end_timestamp: str, version: str, blob_id: str):
return cls(start_timestamp + cls.DELIMITER + end_timestamp + cls.DELIMITER + version + cls.DELIMITER + blob_id)
@classmethod
def from_key(cls, key):
return cls(key.rsplit("/", 1)[1])
@lru_cache()
def _parts(self):
start_timestamp, end_timestamp, version, blob_id = self.split(self.DELIMITER)
return start_timestamp, end_timestamp, version, blob_id
@property
def start_date(self):
return datetime_from_timestamp(self._parts()[0])
@property
def end_date(self):
end_date = self._parts()[1]
if "new" == end_date:
return self.start_date
else:
return datetime_from_timestamp(end_date)
@property
def version(self) -> str:
return self._parts()[2]
@property
def blob_id(self):
return self._parts()[3]
@property
def range_prefix(self):
return self.rsplit(self.DELIMITER, 2)[0]
class JournalUpdateID(str):
"""
This defines the id used to compose the object key on storage for journal updates.
"""
DELIMITER = "--"
@classmethod
def make(cls, journal_id: str, event_id: str, action: JournalUpdateAction):
reverse_journal_id = journal_id[::-1]
return cls(reverse_journal_id + cls.DELIMITER
+ event_id + cls.DELIMITER
+ timestamp_now() + cls.DELIMITER
+ action.name)
@classmethod
def from_key(cls, key):
return cls(key.rsplit("/", 1)[1])
@lru_cache()
def _parts(self):
reverse_journal_id, event_id, timestamp, action_name = self.rsplit(JournalID.DELIMITER, 3)
return JournalID(reverse_journal_id[::-1]), event_id, timestamp, JournalUpdateAction[action_name]
@property
def journal_id(self) -> JournalID:
return self._parts()[0]
@property
def event_id(self) -> str:
return self._parts()[1]
@property
def timestamp(self) -> str:
return self._parts()[2]
@property
def action(self) -> JournalUpdateAction:
return self._parts()[3]
@staticmethod
def prefix_for_journal(journal_id: JournalID):
return journal_id[::-1]
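# Usage sketch (illustrative placeholder values, not part of the original
# module): compose a journal id and read its parts back. The start_date and
# end_date properties are omitted here because they depend on the timestamp
# format expected by flashflood.util.datetime_from_timestamp.
if __name__ == "__main__":
    jid = JournalID.make("ts-start", "ts-end", "0", "blob-1234")
    print(jid)               # ts-start--ts-end--0--blob-1234
    print(jid.version)       # 0
    print(jid.blob_id)       # blob-1234
    print(jid.range_prefix)  # ts-start--ts-end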
| 2,602 |
api_token/views.py
|
selfsryo/django_api_sample
| 0 |
2025904
|
from django.http import JsonResponse
from api_token.authentications import create_token
def token_create(request):
"""APIトークン作成
"""
if not request.user.is_authenticated:
        return JsonResponse({'message': 'Please log in'}, status=403)
if hasattr(request.user, 'token'):
return JsonResponse({
            'message': 'A token already exists',
'token': request.user.token.token
})
tk = create_token(request)
return JsonResponse({
        'message': 'Token created',
'token': tk.token
}, status=201)
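# Wiring sketch (hypothetical, not part of the original app): expose the view
# in the project's urls.py, e.g.
#
#     from django.urls import path
#     from api_token import views
#
#     urlpatterns = [path("api/token/", views.token_create)]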
| 563 |
recursive-ackermann.py
|
hamidswer/recursive-ackermann
| 0 |
2026181
|
def recursive_ackermann(a,b):
ackermann_result = 0
if( a==0 ):
ackermann_result = b + 1
elif( b==0 ):
ackermann_result = recursive_ackermann(a-1,1)
else:
ackermann_result = recursive_ackermann(a-1,recursive_ackermann(a,b-1))
return ackermann_result
print(recursive_ackermann(1,1))
# 3
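# A few more illustrative values (computed with the same function); Ackermann
# numbers grow explosively, so keep the arguments small:
print(recursive_ackermann(2, 3))
# 9
print(recursive_ackermann(3, 3))
# 61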
| 330 |
src/Discord/command/utility/ping.py
|
JulesWall/S-te-manager
| 1 |
2025967
|
import discord
from Discord.command.Command import *
class Ping(Command):
def __init__(self, message, bot):
Command.__init__(self, message, bot)
async def run(self):
if not self.has_permission : return await self.not_permission()
import time
now = time.monotonic()
        msg = await self.channel.send('Bot latency:')
ping = int((time.monotonic() - now) * 1000)
        await msg.edit(content=f'Bot latency: ``{ping} ms``')
| 486 |
Python/src-archiv/using_frustum/dynamic/packer.py
|
bigov/daft-lib
| 1 |
2026288
|
#-*- coding:utf-8 -*-
import pyglet
def info_quad(window_height, width, height):
    ''' Draws a rectangular area in 2D mode
    in the upper-left corner of the screen '''
border = 10
x0 = border
x1 = x0 + width
y1 = window_height - border
y0 = y1 - height
my_quad = [x0,y0, x1,y0, x1,y1, x0,y1]
pyglet.graphics.draw(4, pyglet.gl.GL_QUADS,
('v2i', my_quad),
('c4f', (0.8, 0.9, 0.8, 0.6,
0.8, 0.9, 0.8, 0.6,
0.8, 0.9, 0.8, 0.6,
0.8, 0.9, 0.8, 0.6 ))
)
def expand_IxI(descr_list, I):
    ''' Decodes a list describing, in compressed form, the surface of a single
    tile into a square IxI (9x9) matrix of coordinates.
    If the surface has to start with gaps (zeros), the decoded list must
    begin with a leading 0:
        m[0, 3, 5] - the surface starts with 3 gaps (zeros)
    A matrix of all (81) zeros is encoded as m[0] or m[0,81], although such
    a surface is meaningless - it does not exist.
    A matrix of 81 ones is encoded as m[] or m[81].
    No position other than the first one may hold a zero.
    '''
f = 1
if descr_list:
descr_list.reverse()
Dp = descr_list.pop()
if Dp == 0:
f = 0
Dp = descr_list.pop() if descr_list else 0
else:
Dp = 0
IxI = []
m_row = []
for l in range(I):
for c in range(I):
m_row.append(f)
if Dp:
Dp -= 1
if not Dp:
f = 0 if f == 1 else 1
Dp = descr_list.pop() if descr_list else 0
IxI.append(m_row)
m_row = []
del(f, c, l, m_row, Dp)
return IxI
def glType(typ, *args):
"""return ctypes array of GLwhatever for Pyglet's OpenGL interface. (This
seems to work for all types, but it does almost no type conversion. Just
think in terms of "C without type casting".)
typ -> ctype or GL name for ctype; see pyglet.gl.GLenum through GLvoid
args -> Either vararg, or args[0] as an iterable container
Examples:
# Float
ar = gl_vec(GLfloat, 0.0, 1.0, 0.0)
ar = gl_vec(GLfloat, [0.0, 1.0, 0.0])
# Unsigned byte
ar = gl_vec(GLubyte, 'a','b','c')
ar = gl_vec(GLubyte, 'abc')
ar = gl_vec(GLubyte, ['a','b','c'])
ar = gl_vec(GLubyte, 97, 98, 99)
"""
if len(args) == 1:
if isinstance(args[0],(tuple,list)):
args = args[0]
elif isinstance(args[0],str) and len(args[0]) > 1:
args = args[0]
if isinstance(args[0], str) and typ is pyglet.gl.GLubyte:
return (typ * len(args))(*[ord(c) for c in args])
else:
return (typ * len(args))(*args)
def compress_IxI(m):
    ''' Encodes a 2D IxI matrix of coordinates into a compressed list
    for storage in a database
    '''
line = []
for r in range(len(m)):
line += m[r]
curr = line[0]
rez = [0] if curr == 0 else []
i = 0
for n in range(len(line)):
if curr == line[n]:
i += 1
else:
rez.append(i)
i = 1
curr = line[n]
return rez
# ============================================================================
# Diagnostics
# ============================================================================
if __name__ == "__main__":
print('')
descr_list = [0, 40, 1]
print(descr_list)
print('---')
    # Unpack the list
m_9x9 = expand_IxI(descr_list, 9)
for n in range(len(m_9x9)):
print(m_9x9[n])
del(n)
print('---')
    # Pack
line = compress_IxI(m_9x9)
print(line)
print('')
print('')
descr_list = [40, 1]
print(descr_list)
print('---')
    # Unpack the list
m_9x9 = expand_IxI(descr_list, 9)
for n in range(len(m_9x9)):
print(m_9x9[n])
del(n)
print('---')
    # Pack
line = compress_IxI(m_9x9)
print(line)
print('')
| 4,176 |
borsachart/charts/models.py
|
azaleas/borsachart
| 4 |
2026675
|
from django.contrib.postgres.fields import JSONField
from django.db import models
class Ticker(models.Model):
"""
Ticker model saves the data from Quandl API, cached for a day
"""
ticker = models.CharField(max_length=300, unique=True, db_index=True)
ticker_data = JSONField()
created_date = models.DateTimeField(auto_now_add=True)
updated_date = models.DateTimeField(auto_now=True, db_index=True)
class Meta:
ordering = ['ticker']
index_together = [
["ticker", "updated_date"]
]
def __str__(self):
return self.ticker
| 600 |
motmot/hook-motmot.py
|
bwoodsend/motmot
| 0 |
2026722
|
# -*- coding: utf-8 -*-
"""
Hook for PyInstaller.
"""
from motmot._slug import slug
# Put the cslug binary and its types json in a `motmot` directory.
datas = [(str(slug.path), "motmot"), (str(slug.types_map.json_path), "motmot")]
| 233 |
board/db.py
|
mohit-chawla/recboard
| 3 |
2026189
|
from mongoengine import *
from .common import exceptions
from .models import *
class RecboardDB:
config = {}
db_type = 'mongodb'
# folder = 'databases'
host = 'localhost'
port = 27017
name = 'recboard_db'
collections = {
'index' : 'index',
'user' : 'User',
'dataset': 'Dataset',
'model' : 'Model',
'workspace' : 'Workspace'
}
def __init__(self):
pass
def get_db_name(self):
"""return db_name """
        if self.db_type == "mongodb":
return self.name
def get_collection(self, collection):
"""Get the corresponding database collection/table"""
if collection not in self.collections:
            raise exceptions.InvalidCollectionName
# return Model class name eval here User, Model ...
return eval(self.collections[collection])
@property
def connection(self):
"""
Get mongo connection
Note: Every mongo client maintains a pool of connections
capped at maxPoolSize internally, so only one client is
enough.
"""
if getattr(self, '_connection', None) is None:
if self.db_type == 'mongodb':
self._connection = connect(self.name,
# host=self.host,
port=self.port
)
return self._connection
# def generate_id(self):
# return generate_id()
def select(self, colname, *args, **kwargs):
"""Get all document(s) in collection colname with filter in **kwargs"""
return self.get_collection(colname).objects(*args, **kwargs)
def count(self, colname, *args, **kwargs):
"""Get count of document in collection colname with filter in **kwargs"""
return self.get_collection(colname).objects(*args, **kwargs).count()
def get(self, colname, *args, **kwargs):
"""Get first document in collection colname with filter in **kwargs"""
col = self.get_collection(colname)
if not args and not kwargs:
return col.objects
return col.objects(*args, **kwargs)[0]
def insert(self, colname, *args, **kwargs):
"""Insert all documents in collection colname"""
for doc in args:
doc.save()
def update(self,colname, *args, **kwargs):
"""Update all documents in collection colname with filter in **kwargs"""
# doc_to_update = self.get(colname,*args)
# doc_to_update.update(kwargs)
raise NotImplementedError
def delete(self, colname, *args, **kwargs):
"""Delete ALL documents in collection colname with filter in **kwargs"""
return self.get_collection(colname).objects(*args, **kwargs).delete()
def __dir__(self):
"""
Implement when needed
"""
pass
if __name__ == "__main__":
db = RecboardDB()
conn = db.connection
# #insert example
# # kriti = User(name="Kriti")
# # db.insert('user',kriti)
# mohit = User(name="Mohit",phones=['9294714415'])
# db.insert('user',mohit)
# # get collection usage
# print(db.get_collection('user'))
# # get all
# db.select('user',name='Mohit')
# got = db.get('user',name='Mohit')
# print("name:",got.name,"phones:",got.phones)
# got.phones.append('9939')
# print(got.phones)
# db.insert('user',got)
# # print(got.phones)
# # db.update('user',name='Mohit',phones = ['9999'])
# #get first
# print(db.get('user',name='Mohit').name)
# #get count
# print(db.count('user',name='Mohit'))
# db.insert('dataset',Dataset(name="mohit_dataset",tags=["tags"]))
user = db.get('user',id="<PASSWORD>")
print(user.id)
| 3,246 |
example/model_graph.py
|
charbeljc/sqla-graphs
| 4 |
2025991
|
from datetime import datetime
from sqla_graphs import ModelGrapher
from example_model import Base
def main():
print("Generating SQLAlchemy model graph")
grapher = ModelGrapher(
show_operations=True,
style={"node_table_header": {"bgcolor": "#000088"}},
)
graph = grapher.graph(Base.__subclasses__())
graph.write_png(f"model_graph_{datetime.now():%Y-%m-%d %H:%M}.png")
if __name__ == "__main__":
main()
| 447 |
lib/condorExe.py
|
ddbox/glideinwms
| 0 |
2024983
|
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# This module implements the functions to execute condor commands
#
# Author:
# <NAME> (Sept 7th 2006)
#
import os
from . import logSupport, subprocessSupport
class CondorExeError(RuntimeError):
"""Base class for condorExe module errors"""
def __init__(self, err_str):
RuntimeError.__init__(self, err_str)
class UnconfigError(CondorExeError):
def __init__(self, err_str):
CondorExeError.__init__(self, err_str)
class ExeError(CondorExeError):
def __init__(self, err_str):
CondorExeError.__init__(self, err_str)
#
# Configuration
#
def set_path(new_condor_bin_path, new_condor_sbin_path=None):
"""Set path to condor binaries, if needed
Works changing the global variables condor_bin_path and condor_sbin_path
Args:
new_condor_bin_path (str): directory where the HTCondor binaries are located
new_condor_sbin_path (str): directory where the HTCondor system binaries are located
"""
global condor_bin_path, condor_sbin_path
condor_bin_path = new_condor_bin_path
if new_condor_sbin_path is not None:
condor_sbin_path = new_condor_sbin_path
def exe_cmd(condor_exe, args, stdin_data=None, env={}):
"""Execute an arbitrary condor command and return its output as a list of lines
Fails if stderr is not empty
Args:
condor_exe (str): condor_exe uses a relative path to $CONDOR_BIN
args (str): arguments for the command
stdin_data (str): Data that will be fed to the command via stdin
env (dict): Environment to be set before execution
Returns:
Lines of stdout from the command
Raises:
UnconfigError:
ExeError:
"""
global condor_bin_path
if condor_bin_path is None:
raise UnconfigError("condor_bin_path is undefined!")
condor_exe_path = os.path.join(condor_bin_path, condor_exe)
cmd = f"{condor_exe_path} {args}"
return iexe_cmd(cmd, stdin_data, env)
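# Usage sketch (illustrative): once set_path() or init() has located the
# HTCondor binaries, run a client command and get its stdout lines, e.g.
#
#     lines = exe_cmd("condor_q", "-totals")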
def exe_cmd_sbin(condor_exe, args, stdin_data=None, env={}):
global condor_sbin_path
if condor_sbin_path is None:
raise UnconfigError("condor_sbin_path is undefined!")
condor_exe_path = os.path.join(condor_sbin_path, condor_exe)
cmd = f"{condor_exe_path} {args}"
return iexe_cmd(cmd, stdin_data, env)
############################################################
#
# P R I V A T E, do not use
#
############################################################
def generate_bash_script(cmd, environment):
"""Print to a string a shell script setting the environment in 'environment' and running 'cmd'
If 'cmd' last argument is a file it will be printed as well in the string
Args:
cmd (str): command string
environment (dict): environment as a dictionary
Returns:
str: multi-line string with environment, command and eventually the input file
"""
script = ["script to reproduce failure:", "-" * 20 + " begin script " + "-" * 20, "#!/bin/bash"]
# FROM:migration_3_1, 3 lines
# script = ['script to reproduce failure:']
# script.append('-' * 20 + ' begin script ' + '-' * 20)
# script.append('#!/bin/bash')
script += [f"{k}={v}" for k, v in environment.items()]
script.append(cmd)
script.append("-" * 20 + " end script " + "-" * 20)
cmd_list = cmd.split()
if len(cmd_list) > 1:
last_par = cmd_list[-1]
if last_par and os.path.isfile(last_par):
script.append("-" * 20 + " parameter file: %s " % last_par + "-" * 20)
try:
with open(last_par) as f:
script += f.read().splitlines()
except OSError:
pass
script.append("-" * 20 + " end parameter file " + "-" * 20)
return "\n".join(script)
def iexe_cmd(cmd, stdin_data=None, child_env=None):
"""Fork a process and execute cmd - rewritten to use select to avoid filling
up stderr and stdout queues.
Args:
cmd (str): Sting containing the entire command including all arguments
stdin_data (str): Data that will be fed to the command via stdin
child_env (dict): Environment to be set before execution
Returns:
list of str: Lines of stdout from the command
Raises:
ExeError
"""
stdout_data = ""
try:
# invoking subprocessSupport.iexe_cmd w/ text=True (default), stdin_data and returned output are str
stdout_data = subprocessSupport.iexe_cmd(cmd, stdin_data=stdin_data, child_env=child_env)
except Exception as ex:
msg = f"Unexpected Error running '{cmd}'. Details: {ex}. Stdout: {stdout_data}"
try:
logSupport.log.error(msg)
logSupport.log.debug(generate_bash_script(cmd, os.environ))
except:
pass
raise ExeError(msg)
return stdout_data.splitlines()
#########################
# Module initialization
#
def init1():
"""Set condor_bin_path"""
global condor_bin_path
# try using condor commands to find it out
try:
condor_bin_path = iexe_cmd("condor_config_val BIN")[0].strip() # remove trailing newline
except ExeError as e:
# try to find the RELEASE_DIR, and append bin
try:
release_path = iexe_cmd("condor_config_val RELEASE_DIR")
condor_bin_path = os.path.join(release_path[0].strip(), "bin")
except ExeError as e:
# try condor_q in the path
try:
condorq_bin_path = iexe_cmd("which condor_q")
condor_bin_path = os.path.dirname(condorq_bin_path[0].strip())
except ExeError as e:
# look for condor_config in /etc
if "CONDOR_CONFIG" in os.environ:
condor_config = os.environ["CONDOR_CONFIG"]
else:
condor_config = "/etc/condor/condor_config"
try:
# BIN = <path>
bin_def = iexe_cmd('grep "^ *BIN" %s' % condor_config)
condor_bin_path = bin_def[0].strip().split()[2]
except ExeError as e:
try:
# RELEASE_DIR = <path>
release_def = iexe_cmd('grep "^ *RELEASE_DIR" %s' % condor_config)
condor_bin_path = os.path.join(release_def[0].strip().split()[2], "bin")
except ExeError as e:
pass # don't know what else to try
def init2():
"""Set condor_sbin_path"""
global condor_sbin_path
# try using condor commands to find it out
try:
condor_sbin_path = iexe_cmd("condor_config_val SBIN")[0].strip() # remove trailing newline
except ExeError as e:
        # try to find the RELEASE_DIR, and append sbin
try:
release_path = iexe_cmd("condor_config_val RELEASE_DIR")
condor_sbin_path = os.path.join(release_path[0].strip(), "sbin")
except ExeError as e:
            # try condor_advertise in the path
try:
condora_sbin_path = iexe_cmd("which condor_advertise")
condor_sbin_path = os.path.dirname(condora_sbin_path[0].strip())
except ExeError as e:
# look for condor_config in /etc
if "CONDOR_CONFIG" in os.environ:
condor_config = os.environ["CONDOR_CONFIG"]
else:
condor_config = "/etc/condor/condor_config"
try:
                    # SBIN = <path>
bin_def = iexe_cmd('grep "^ *SBIN" %s' % condor_config)
condor_sbin_path = bin_def[0].strip().split()[2]
except ExeError as e:
try:
# RELEASE_DIR = <path>
release_def = iexe_cmd('grep "^ *RELEASE_DIR" %s' % condor_config)
condor_sbin_path = os.path.join(release_def[0].strip().split()[2], "sbin")
except ExeError as e:
pass # don't know what else to try
def init():
"""Set both Set condor_bin_path and condor_sbin_path"""
init1()
init2()
# This way we know that it is undefined
condor_bin_path = None
condor_sbin_path = None
init()
| 8,447 |
machine_learning/gan/pix2pix/tf_pix2pix/pix2pix_module/trainer/train_and_eval.py
|
ryangillard/artificial_intelligence
| 4 |
2025413
|
import tensorflow as tf
from . import image_utils
from .print_object import print_obj
def get_logits_and_losses(features, generator, discriminator, mode, params):
"""Gets logits and losses for both train and eval modes.
Args:
features: dict, feature tensors from input function.
        generator: instance of `generator.Generator`.
        discriminator: instance of `discriminator.Discriminator`.
mode: tf.estimator.ModeKeys with values of either TRAIN or EVAL.
params: dict, user passed parameters.
Returns:
Real and fake logits and generator and discriminator losses.
"""
func_name = "get_logits_and_losses"
# Extract images from features dictionary.
source_images = features["source_image"]
real_target_images = features["target_image"]
print_obj("\n" + func_name, "source_images", source_images)
print_obj(func_name, "real_target_images", real_target_images)
# Get generated target image from generator network from source image.
print("\nCall generator with source_images = {}.".format(source_images))
fake_target_images = generator.get_fake_images(
source_images=source_images, params=params
)
print_obj(func_name, "fake_target_images", fake_target_images)
# Resize fake target images to match real target image sizes.
fake_target_images = image_utils.resize_fake_images(
fake_images=fake_target_images, params=params
)
print_obj(func_name, "fake_target_images", fake_target_images)
# Add summaries for TensorBoard.
tf.summary.image(
name="fake_target_images",
tensor=tf.reshape(
tensor=fake_target_images,
shape=[-1, params["height"], params["width"], params["depth"]]
),
max_outputs=5,
)
# Get fake logits from discriminator with generator's output target image.
print(
"\nCall discriminator with fake_target_images = {}.".format(
fake_target_images
)
)
fake_logits = discriminator.get_discriminator_logits(
source_image=source_images,
target_image=fake_target_images,
params=params
)
print_obj(func_name, "fake_logits", fake_logits)
# Get real logits from discriminator using real target image.
print(
"\nCall discriminator with real_target_images = {}.".format(
real_target_images
)
)
real_logits = discriminator.get_discriminator_logits(
source_image=source_images,
target_image=real_target_images,
params=params
)
print_obj(func_name, "fake_logits", fake_logits)
# Get generator total loss.
generator_total_loss = generator.get_generator_loss(
fake_target_images=fake_target_images,
real_target_images=real_target_images,
fake_logits=fake_logits,
params=params
)
# Get discriminator total loss.
discriminator_total_loss = discriminator.get_discriminator_loss(
fake_logits=fake_logits, real_logits=real_logits, params=params
)
return (real_logits,
fake_logits,
generator_total_loss,
discriminator_total_loss)
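# A hedged sketch of how a tf.estimator model_fn might consume the values
# returned above (the generator/discriminator instances and the optimizer
# wiring are assumptions, not part of this file):
#
#   real_logits, fake_logits, gen_loss, dis_loss = get_logits_and_losses(
#       features, generator, discriminator, mode, params)
#   if mode == tf.estimator.ModeKeys.TRAIN:
#       # alternate generator and discriminator updates using gen_loss/dis_loss
#       ...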
| 3,184 |
test_messages.py
|
st33fo/OrigatoCloutBotWebscraper
| 1 |
2026377
|
"""
test_messages.py - Unit test class for message container class.
@author: <NAME>
"""
import messages
import unittest
import inspect
class TestMessages(unittest.TestCase):
def test_module_creation(self):
'''
desc: Tests that the messages class is actually a class.
'''
test_item = messages.Message()
class_name = 'Message' == test_item.__class__.__name__
self.assertTrue(class_name)
def test_owner_id_getter_and_setter(self):
'''
desc: test owner id getter and setter.
'''
test_item = messages.Message()
test_item.owner_id = 2042
self.assertEqual(2042, test_item.owner_id)
def test_message_id_getter_and_setter(self):
'''
desc: test message id getter and setter.
'''
test_item = messages.Message()
test_item.message_id = 2042
self.assertEqual(2042, test_item.message_id)
def test_text_getter_and_setter(self):
'''
desc: test text id getter and setter.
'''
test_item = messages.Message()
test_text = 'Doin a lil bit of modeling population dynamics ' \
'seems fun but would need to improve my math ' \
'background a lot for that kinda shiz'
test_item.text = test_text
self.assertEqual(test_text, test_item.text)
def test_reacts_getter_and_setter(self):
'''
desc: test reacts getter and setter.
'''
test_item = messages.Message()
test_item.reacts = 3
self.assertEqual(3, test_item.reacts)
def test_replies_getter_and_setter(self):
'''
desc: test replies getter and setter.
'''
test_item = messages.Message()
test_replies = [
'I like the nature',
'''Guess confidence is one of the most important
leadership qualities lol''',
'How many days there do you have left']
test_item.replies = test_replies
self.assertEqual(test_replies, test_item.replies)
if __name__ == '__main__':
unittest.main()
| 2,121 |
monitoring/notifications/tables.py
|
okozachenko1203/monasca-ui
| 17 |
2025527
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _ # noqa
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from monitoring import api
from monitoring.notifications import constants
class DeleteNotification(tables.DeleteAction):
name = "delete_notification"
verbose_name = _("Delete Notification")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Notification",
u"Delete Notifications",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Notification",
u"Deleted Notifications",
count
)
def allowed(self, request, datum=None):
return True
def delete(self, request, obj_id):
try:
api.monitor.notification_delete(request, obj_id)
except Exception:
exceptions.handle(
request, _('Unable to delete notification.'))
class CreateNotification(tables.LinkAction):
name = "create_notification"
verbose_name = _("Create Notification")
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("notification", "notification:create"),)
ajax = True
def get_link_url(self):
url = constants.URL_PREFIX + 'notification_create'
return reverse(url)
def allowed(self, request, datum=None):
return True
class EditNotification(tables.LinkAction):
name = "edit_notification"
verbose_name = _("Edit Notification")
classes = ("ajax-modal", "btn-create")
def get_link_url(self, datum):
return reverse(constants.URL_PREFIX + 'notification_edit',
args=(datum['id'], ))
def allowed(self, request, datum=None):
return True
class NotificationsFilterAction(tables.FilterAction):
def filter(self, table, notifications, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [notif for notif in notifications
if q in notif['name'].lower()]
class NotificationsTable(tables.DataTable):
target = tables.Column('name', verbose_name=_('Name'))
type = tables.Column('type', verbose_name=_('Type'))
address = tables.Column('address', verbose_name=_('Address'))
period = tables.Column('period', verbose_name=_('Period'))
def get_object_id(self, obj):
return obj['id']
def get_object_display(self, obj):
return obj['name']
class Meta(object):
name = "notifications"
verbose_name = _("Notifications")
row_actions = (EditNotification, DeleteNotification, )
table_actions = (CreateNotification, NotificationsFilterAction,
DeleteNotification)
| 3,494 |
crayfish/pypix/attributes.py
|
CERNatschool/crayfish
| 0 |
2026258
|
# The attribute list
#
# To add an attribute define it as a standard function here.
#
# Then apply the attribute function decorator::
#
# @attribute(object_type, name, plottable, trainable)
#
# Where:
# object_type is the type of object that the attribute is applicable to,
# either Frame or Cluster, or PixelGrid for either.
#
# name is a human readable label which is used to identify it in the GUI
#
# plottable is a boolean that describes whether the attribute is plottable on
# a graph (may be omitted, defaults to false)
#
# trainable is a boolean that describes whether the attribute is usable in
# machine learning algorithms (may be omitted, defaults to the value of
# plottable)
#
#
# The attribute functions may be defined in any order. The order in which they are
# defined here is the order in which they will appear in the GUI
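#
# A minimal, hypothetical example following the pattern above (kept commented
# out so it does not alter the attribute list shown in the GUI; "Max count"
# is not part of the original module):
#
# @attribute(PixelGrid, "Max count", True)
# def max_count(self):
#     if self.number_of_hits == 0:
#         return 0
#     return max(self.counts)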
"""
.. note:: Although these functions appear in the documentation as functions, they are
converted into properties at runtime so do not need to be called with parenthesis.
"""
import hashlib
from pypix import *
# ============== Attributes begin here and maintain order ===============
@attribute(PixelGrid, "No. of hits", True)
def number_of_hits(self):
return len(self.hit_pixels)
@attribute(PixelGrid, "Volume", True)
def volume(self):
return sum(self.counts)
@attribute(PixelGrid, "Mean count", True)
def mean_count(self):
if self.number_of_hits == 0: # Don't divide by zero
return 0
return float(self.volume)/self.number_of_hits
@attribute(PixelGrid, "Count std. dev.", True)
def standard_deviation(self):
if self.number_of_hits == 0: #Don't divide by zero
return 0
mean_square = (float(sum([count**2 for count in self.counts]))
/self.number_of_hits)
square_mean = self.mean_count**2
return (mean_square - square_mean)**0.5
@attribute(Frame, "No. of clusters")
def number_of_clusters(self):
if not self.clusters:
self.calculate_clusters()
return len(self.clusters)
@attribute(Cluster, "Geo. centre")
def geometric_centre(self):
return (self.cluster_width/2.0 + self.min_x,
self.cluster_height/2.0 + self.min_y)
@attribute(Cluster, "C. of mass")
def centre_of_mass(self):
weighted_hits = [tuple([self[hit].value * coord for coord in hit])
for hit in self.hit_pixels]
x_coords, y_coords = zip(*weighted_hits)
total_weight = float(self.volume)
return (sum(x_coords)/total_weight, sum(y_coords)/total_weight)
@attribute(Cluster, "Radius", True)
def radius(self):
# Call centre of mass once to save computing multiple times
cofm_x, cofm_y = self.centre_of_mass
distances_squared = []
for pixel in self.hit_pixels:
x_diff = pixel[0] - cofm_x
y_diff = pixel[1] - cofm_y
distances_squared.append(x_diff**2 + y_diff**2)
return max(distances_squared)**0.5
@attribute(Cluster, "Most neighbours", True)
def most_neighbours(self):
return self.get_max_neighbours()[0]
@attribute(Cluster, "UUID")
def UUID(self):
"""
Return the cluster UUID
(SHA1 digest of the cluster.ascii_grid representation).
"""
return hashlib.sha1(self.ascii_grid).hexdigest()
| 3,221 |
lib/overwrite_glue.py
|
aws-samples/glue-overwrite-table-sample-cdk
| 0 |
2025665
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import sys
from datetime import datetime
import boto3
from pyspark.context import SparkContext
def get_catalog_table(glue_client, db_name, table_name):
try:
return glue_client.get_table(DatabaseName=db_name, Name=table_name)
    except Exception:  # typically the table does not exist yet
return None
def delete_partitions(glue_client, database, table, batch=25):
paginator = glue_client.get_paginator('get_partitions')
response = paginator.paginate(
DatabaseName=database,
TableName=table
)
for page in response:
partitions = page['Partitions']
for i in range(0, len(partitions), batch):
            to_delete = [{"Values": p["Values"]} for p in partitions[i:i + batch]]
glue_client.batch_delete_partition(
DatabaseName=database,
TableName=table,
PartitionsToDelete=to_delete
)
def copy_partitions(glue_client, source_database, source_table, target_database, target_table):
paginator = glue_client.get_paginator('get_partitions')
response = paginator.paginate(
DatabaseName=source_database,
TableName=source_table,
PaginationConfig={'PageSize': 100}
)
for page in response:
partitions = page['Partitions']
clean_partitions = []
for part in partitions:
clean_partition = {key: part[key] for key in part if
key not in ['DatabaseName', 'TableName', 'CreationTime']}
clean_partitions.append(clean_partition)
glue_client.batch_create_partition(
DatabaseName=target_database,
TableName=target_table,
PartitionInputList=clean_partitions
)
def get_output_path(glue_client, output_database, output_table):
response = glue_client.get_database(Name=output_database)
database_location_uri = response['Database']['LocationUri']
if database_location_uri.endswith('/'):
database_location_uri = database_location_uri[:-1]
return f"{database_location_uri}/{output_table}"
def read_from_catalog(glue_context, source_database, source_table):
return glue_context.create_dynamic_frame.from_catalog(
database=source_database,
table_name=source_table,
transformation_ctx='source_dynamic_frame'
)
def write_parquet(glue_context, dyf, output_path, partition_keys, output_database, output_table):
sink = glue_context.getSink(
connection_type="s3",
path=output_path,
enableUpdateCatalog=True,
partitionKeys=partition_keys
)
sink.setFormat(format='glueparquet')
sink.setCatalogInfo(
catalogDatabase=output_database,
catalogTableName=output_table
)
sink.writeFrame(dyf)
def write_versioned_parquet(glue_client, glue_context, dyf, output_path, partition_keys, output_database, output_table):
existing_target_table = get_catalog_table(glue_client, output_database, output_table)
if existing_target_table is None:
output_path = f"{output_path}/version_0"
write_parquet(glue_context, dyf, output_path, partition_keys, output_database, output_table)
else:
now = datetime.now()
version_tmp_suffix = f"_version_tmp_{now.strftime('%Y%m%d%H%M')}"
version_tmp_table = f"{output_table}{version_tmp_suffix}"
next_location = calculate_next_location(existing_target_table)
write_parquet(glue_context, dyf, next_location, partition_keys, output_database, version_tmp_table)
version_tmp_table_result = get_catalog_table(glue_client, output_database, version_tmp_table)['Table']
table_input = {key: version_tmp_table_result[key] for key in version_tmp_table_result if
key not in ['CreatedBy', 'CreateTime', 'UpdateTime', 'DatabaseName',
'IsRegisteredWithLakeFormation']}
table_input['Name'] = output_table
table_input['StorageDescriptor']['Location'] = next_location
delete_partitions(glue_client, output_database, output_table)
copy_partitions(glue_client, output_database, version_tmp_table, output_database, output_table)
glue_client.update_table(DatabaseName=output_database, TableInput=table_input)
glue_client.delete_table(DatabaseName=output_database, Name=version_tmp_table)
def calculate_next_location(existing_target_table):
curr_table = existing_target_table['Table']
curr_location_split = curr_table['StorageDescriptor']['Location'].split('/')
curr_version_suffix = curr_location_split[-2]
curr_version_int = int(curr_version_suffix.replace('version_', ''))
next_version_int = curr_version_int + 1
next_location_split = curr_location_split[:-2] + [f'version_{str(next_version_int)}', '']
next_location = '/'.join(next_location_split)
return next_location
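# Illustrative example (hypothetical S3 path): a current table location of
# "s3://bucket/db/table/version_3/" yields "s3://bucket/db/table/version_4/".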
def main():
# -------------------------Resolve job parameters-------------------------------
params = [
'JOB_NAME',
'output_database',
'source_database',
'source_table',
'output_table',
'region',
'partition_keys'
]
args = getResolvedOptions(sys.argv, params)
# ------------------------------------------------------------------------------
# -------------------------Extract provided args--------------------------------
output_database = args['output_database']
source_database = args['source_database']
source_table = args['source_table']
output_table = args['output_table']
region = args['region']
partition_keys = []
if 'partition_keys' in args and args['partition_keys'] != "":
partition_keys = [x.strip() for x in args['partition_keys'].split(',')]
# ------------------------------------------------------------------------------
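    # Illustrative (hypothetical) values for the arguments resolved above, as
    # AWS Glue passes them on the command line:
    #   --source_database raw_db --source_table events
    #   --output_database curated_db --output_table events_versioned
    #   --region us-east-1 --partition_keys "year,month"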
# -------------------------Initialize Glue Context-----------------------------
# Create Glue Context, Job and Spark Session
glue_context = GlueContext(SparkContext.getOrCreate())
spark = glue_context.spark_session
job = Job(glue_context)
job.init(args['JOB_NAME'], args)
glue_client = boto3.client('glue', region_name=region)
output_path = get_output_path(glue_client, output_database, output_table)
# ------------------------------------------------------------------------------
# -------------------------Read Source Dynamic Frame----------------------------
dyf = read_from_catalog(glue_context, source_database, source_table)
dyf = DynamicFrame.fromDF(dyf.toDF().sample(0.7), glue_context, "sampled")
# ------------------------------------------------------------------------------
# -------------------------Write Dynamic Frame to target -----------------------
write_versioned_parquet(glue_client, glue_context, dyf, output_path, partition_keys, output_database,
output_table)
# ------------------------------------------------------------------------------
# -------------------------------------Job Cleanup------------------------------
job.commit()
# ------------------------------------------------------------------------------
if __name__ == "__main__":
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglue.utils import getResolvedOptions
from awsglue.dynamicframe import DynamicFrame
main()
| 7,499 |
tests/util/host/conftest.py
|
spookey/git-sh-sync
| 1 |
2026320
|
from collections import namedtuple
from subprocess import PIPE, run
from pytest import fixture
@fixture(scope="session")
def hostname():
def retrieve(*cmdlines):
for cmdline in cmdlines:
cmd = run(cmdline, stdout=PIPE, universal_newlines=True)
if cmd.stdout:
return cmd.stdout.strip()
return ''
name = retrieve(
['hostname'],
['uname', '-n']
)
yield namedtuple('HostName', ('long', 'short'))(
long=name, short=name.split('.')[0]
)
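# A hypothetical test showing how the session-scoped fixture above could be
# consumed (not part of the original file):
#
#   def test_hostname_parts(hostname):
#       assert hostname.short == hostname.long.split('.')[0]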
| 537 |
rltorch/algs/q_learning/DQN/Scripts/random_tune.py
|
Jjschwartz/rltorch
| 0 |
2025999
|
from rltorch.tuner.random_tuner import RandomTuner
from rltorch.algs.q_learning.DQN.agent import DQNAgent
hyperparams = {
# constants
"training_steps": [100000],
"final_epsilon": [0.01],
"init_epsilon": [1.0],
"exploration": [1000],
"gamma": [0.999],
"start_steps": [32],
"network_update_freq": [1],
"model_save_freq": [None],
# to sample
"hidden_sizes": [[64], [64, 64], [64, 64, 64]],
"lr": [0.01, 0.001, 0.0001],
"batch_size": [1, 32, 128],
"replay_size": [1000, 10000, 100000],
"target_update_freq": [100, 1000, 10000],
}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--env_name", type=str, default='CartPole-v0')
parser.add_argument("--num_runs", type=int, default=10)
parser.add_argument("--exp_name", type=str, default=None)
parser.add_argument("--num_cpus", type=int, default=1)
args = parser.parse_args()
tuner = RandomTuner(name=args.exp_name, num_exps=args.num_runs)
hyperparams["env_name"] = [args.env_name]
for k, v in hyperparams.items():
tuner.add(k, v)
tuner.run(DQNAgent, args.num_cpus)
| 1,176 |
app/core/tests/test_models.py
|
johangenis/consumption-app-api
| 0 |
2023819
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
import datetime
def sample_user(email="<EMAIL>", password="<PASSWORD>"):
"""Create a sample user"""
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with an email is successful"""
email = "<EMAIL>"
password = "<PASSWORD>"
user = get_user_model().objects.create_user(
email=email, password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the email for a new user is normalized"""
email = "<EMAIL>"
user = get_user_model().objects.create_user(email, "test123")
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
"""Test creating user with no email raises error"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, "test123")
def test_new_superuser(self):
"""Test creating a new superuser"""
user = get_user_model().objects.create_superuser(
"<EMAIL>", "test123"
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_consumption_type_str(self):
"""Test the consumption type string representation"""
consumption_type = models.Consumption_type.objects.create(
user=sample_user(), cons_type="Electricity"
)
self.assertEqual(str(consumption_type), consumption_type.cons_type)
def test_consumption_record_str(self):
"""Test the consumption record string representation"""
consumption_record = models.Consumption_record.objects.create(
user=sample_user(),
title="Electricity reading",
date_time=datetime.date.today(),
amount=5.00,
)
self.assertEqual(str(consumption_record), consumption_record.title)
| 2,146 |
prototype/nplinker/scoring/iokr/run_iokr.py
|
louwenjjr/nplinker
| 6 |
2026532
|
# Copyright 2021 The NPLinker Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy
import os
import iokrdata as iokrdataserver
import iokr_opt
import multiprocessing
import time
def normalise_kernel(matrix):
return matrix / numpy.sqrt(numpy.outer(matrix.diagonal(), matrix.diagonal()))
def load_kernel_file(filename):
kernel = numpy.load(filename)
return normalise_kernel(kernel)
def load_kernels(kernel_files):
kernel_matrices = [load_kernel_file(x) for x in kernel_files]
kernel_sum = numpy.sum(kernel_matrices, axis=0)
return normalise_kernel(kernel_sum)
def gather_results(active_jobs, limit=25):
done_jobs = []
while len(active_jobs) > limit:
remaining_jobs = []
for line in active_jobs:
job = line[-1]
if job.ready():
res = job.get()
new_line = list(line[:-1])
new_line.append(res)
done_jobs.append(new_line)
else:
remaining_jobs.append(line)
active_jobs = remaining_jobs
time.sleep(1)
return active_jobs, done_jobs
def run_iokr(data):
p = multiprocessing.Pool()
active_jobs = []
job_limit = 25
excluded = []
missing_candidate = []
collected_rankings = []
for label in sorted(list(set(data.folds))):
print('label %s' % label)
label_indices = data.get_indices(label, complement=True)
iokr = iokr_opt.InputOutputKernelRegression(data)
iokr.set_training_indices(label_indices, _lambda=0.001)
iokr.fit()
test_indices = data.get_indices(label)
for i in test_indices:
if i not in data.test_sample_indices:
excluded.append(i)
continue
sample = data.get_sample(i)
formula = sample['formula']
sample_inchi = sample['inchi']
candidates = data.get_candidates(formula)
candidate_inchi = [x[0] for x in candidates]
correct_index = candidate_inchi.index(sample_inchi)
# TODO: Recalculate fingerprints
candidates = data.get_candidates(formula)
candidate_fingerprints = [numpy.array(x[1]) for x in candidates]
total_count = len(candidate_inchi)
print('iokr job idx %s, cand.set size %s' % (i, total_count))
# ranking = iokr.rank_candidates_opt(i, candidate_fingerprints)
# # print(ranking)
# ranking = list(ranking)
# correct_ranking = ranking.index(correct_index)
# print('ranked {} / {}'.format(correct_ranking, total_count))
latent, x_kernel_vector, latent_basis, gamma = iokr.get_data_for_candidate_ranking(i)
args = (i, candidate_fingerprints, latent, x_kernel_vector, latent_basis, gamma)
job = p.apply_async(iokr_opt.rank_candidates_opt, args)
active_jobs.append((i, formula, correct_index, label, total_count, job))
if len(active_jobs) > job_limit:
active_jobs, results = gather_results(active_jobs, job_limit)
for res_i, res_formula, res_correct_index, res_label, res_total_count, res_output in results:
res_ranking = list(res_output[0])
# print(res_ranking)
correct_ranking = res_ranking.index(res_correct_index)
collected_rankings.append((res_i, correct_ranking, res_total_count))
total = len(collected_rankings)
print(float([x[1] for x in collected_rankings].count(0)) / total, total)
# print(cr_b[res_i], cr_a[res_i], cr_a[res_i] == cr_b[res_i])
print('Clean up remaining jobs')
# clean up the last remaining jobs
active_jobs, results = gather_results(active_jobs, 0)
for res_i, res_formula, res_correct_index, res_label, res_total_count, res_output in results:
res_ranking = list(res_output[0])
correct_ranking = res_ranking.index(res_correct_index)
collected_rankings.append((res_i, correct_ranking, res_total_count))
total = len(collected_rankings)
print(float([x[1] for x in collected_rankings].count(0)) / total, total)
print('')
print('IOKR test run done!')
print('#samples: {}'.format(len(collected_rankings)))
    print('top-1 acc: {}'.format(float([x[1] for x in collected_rankings].count(0)) / total))
return collected_rankings
def main():
parser = argparse.ArgumentParser('Run IOKR test on a set')
parser.add_argument('--kernel', dest='kernel', help='Kernel files', nargs='+')
    parser.add_argument('--fp', dest='fingerprint', help='fingerprint type (substructure, cdk (default), klekota-roth)', default='cdk_default')
parser.add_argument('--data', dest='datapath', help='data path', required=True)
parser.add_argument('--output', dest='output', help='output label', required=True)
args = parser.parse_args()
# read from args
# datapath = '/home/grimur/iokr/data'
datapath = args.datapath
# kernel_files = [datapath + os.sep + 'input_kernels_gh/ppk_dag_all_normalised_shifted_nloss.npy',
# datapath + os.sep + 'input_kernels_gh/ppk_dag_all_normalised_shifted_peaks.npy']
kernel_files = args.kernel
# fingerprint = None
fingerprint = args.fingerprint
output_file = 'IOKRranking_%s.bin' % args.output
iokrdata = iokrdataserver.IOKRDataServer(datapath, kernel=None)
kernel_matrix = load_kernels(kernel_files)
iokrdata.kernel = kernel_matrix
with open(datapath + os.sep + 'ind_eval.txt') as f:
raw_data = f.read()
test_sample_indices = [int(x) - 1 for x in raw_data.strip().split()]
iokrdata.test_sample_indices = test_sample_indices
if fingerprint is not None:
iokrdata.set_fingerprint(fingerprint)
print('run iokr')
rankings = run_iokr(iokrdata)
numpy.save(output_file, rankings)
if __name__ == '__main__':
main()
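# Illustrative invocation (hypothetical paths and kernel file names; option
# names taken from the argparse definitions above):
#
#   python run_iokr.py --data /path/to/data \
#       --kernel ppk_peaks.npy ppk_nloss.npy \
#       --fp cdk_default --output eval_run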
| 6,551 |
app/app.py
|
Basselbi/hikma-health-backend
| 0 |
2024069
|
from flask import Flask, jsonify
from flask_cors import CORS
from mobile_api.mobile_api import mobile_api
from user_api.user_api import user_api
from photos.photos_api import photos_api
from admin_api.admin_api import admin_api
from web_errors import WebError
from config import FLASK_DEBUG, FLASK_DEBUG_PORT
import startup_tasks
app = Flask(__name__)
CORS(app)
app.url_map.strict_slashes = False
app.register_blueprint(mobile_api)
app.register_blueprint(user_api)
app.register_blueprint(photos_api)
app.register_blueprint(admin_api)
@app.route('/')
def hello_world():
return jsonify({'message': 'Welcome to the Hikma Health backend.',
'status': 'OK'})
@app.errorhandler(WebError)
def handle_web_error(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.errorhandler(404)
def page_not_found(_err):
response = jsonify({'message': 'Endpoint not found.'})
response.status_code = 404
return response
@app.errorhandler(405)
def method_not_found(_err):
response = jsonify({'message': 'Method not found.'})
response.status_code = 405
return response
@app.errorhandler(500)
def internal_server_error(_err):
response = jsonify({'message': 'Internal Server Error'})
response.status_code = 500
return response
if __name__ == '__main__':
app.run(debug=FLASK_DEBUG, host='0.0.0.0', port=str(FLASK_DEBUG_PORT))
| 1,441 |
tests/test_schema/test_fix_type_references.py
|
johnpaulguzman/py-gql
| 6 |
2025802
|
# -*- coding: utf-8 -*-
from typing import cast
import pytest
from py_gql.schema import (
ID,
Argument,
Field,
InputField,
InputObjectType,
Int,
InterfaceType,
ListType,
NonNullType,
ObjectType,
Schema,
String,
UnionType,
)
@pytest.fixture
def schema() -> Schema:
Object = InterfaceType("Object", fields=[Field("id", NonNullType(ID))])
Person = ObjectType(
"Person",
fields=[
Field("id", NonNullType(ID)),
Field("name", NonNullType(String)),
Field("pets", NonNullType(ListType(lambda: Animal))),
],
interfaces=[Object],
)
Animal = ObjectType(
"Animal",
fields=[
Field("id", NonNullType(ID)),
Field("name", NonNullType(String)),
Field("owner", Person),
],
interfaces=[Object],
)
LivingBeing = UnionType("LivingBeing", [Person, Animal])
CreatePersonInput = InputObjectType(
"CreatePersonInput",
[InputField("id", ID), InputField("name", NonNullType(String))],
)
return Schema(
query_type=ObjectType(
"Query",
fields=[
Field("person", Person, args=[Argument("id", ID)]),
Field("pet", Animal, args=[Argument("id", ID)]),
Field("living_being", LivingBeing, args=[Argument("id", ID)]),
],
),
mutation_type=ObjectType(
"Mutation", fields=[Field("createPerson", CreatePersonInput)]
),
)
def test_replace_interface_in_implementers(schema: Schema) -> None:
NewObject = InterfaceType(
"Object",
fields=[
Field("id", NonNullType(ID)),
Field("name", NonNullType(String)),
],
)
schema._replace_types_and_directives({"Object": NewObject})
assert (
cast(ObjectType, schema.get_type("Person")).interfaces[0]
is cast(ObjectType, schema.get_type("Animal")).interfaces[0]
is schema.types["Object"]
is NewObject
)
def test_replace_type_in_union(schema: Schema) -> None:
NewPerson = ObjectType(
"Person",
fields=(
list(cast(ObjectType, schema.types["Person"]).fields)
+ [Field("last_name", NonNullType(String))]
),
interfaces=[cast(InterfaceType, schema.types["Object"])],
)
schema._replace_types_and_directives({"Person": NewPerson})
assert cast(ObjectType, schema.get_type("Person")) is NewPerson
union_type = cast(UnionType, schema.get_type("LivingBeing"))
assert NewPerson in union_type.types
assert 2 == len(union_type.types)
def test_replace_query_type(schema: Schema) -> None:
NewQuery = ObjectType("Query", fields=[Field("some_number", Int)])
schema._replace_types_and_directives({"Query": NewQuery})
assert schema.query_type is NewQuery
def test_replace_mutation_type(schema: Schema) -> None:
NewMutation = ObjectType(
"Mutation", fields=[Field("update_some_number", Int)],
)
schema._replace_types_and_directives({"Mutation": NewMutation})
assert schema.mutation_type is NewMutation
def test_root_type_is_not_created(schema: Schema) -> None:
Subscription = ObjectType(
"Subscription", fields=[Field("some_number", Int)]
)
schema._replace_types_and_directives({"Subscription": Subscription})
assert schema.subscription_type is None
| 3,459 |
scrape/scraper.py
|
dannguyen/biden-harris-transistion-teams
| 2 |
2026346
|
#!/usr/bin/env python3
"""
Just a simple scraper that converts the index.html page's tables into CSV
- requires Python 3.5+
- writes to stdout
- run `make scrape` task to scrape and write to scrape/data.csv
To interactively debug an exception:
$ python -m pdb -c continue scrape/scraper.py
"""
import csv
from lxml.html import fromstring as hparse
from pathlib import Path
from sys import stderr, stdout
from typing import Dict as DictType, List as ListType
SRC_PATH = Path("docs/index.html")
OUT_HEADER = (
"agency",
"last_name",
"first_name",
"middle_name",
"recent_employment",
"funding_source",
"is_team_lead",
"full_name",
"row_index",
)
TEAM_LEAD_TXT = (
", Team Lead" # this is boilerplate in the name of the first person in each table
)
def parse_page(html: str) -> ListType[DictType]:
"""convert html text to a list of team member data"""
data = []
doc = hparse(html)
for hed in doc.cssselect("h2"):
agency = hed.text_content()
for i, row in enumerate(hed.getnext().cssselect("tbody tr"), 1):
cells = [r.text_content().strip() for r in row.cssselect("td")]
d = {
"agency": agency,
"full_name": cells[0].replace(TEAM_LEAD_TXT, ""),
"recent_employment": cells[1],
"funding_source": cells[2],
"is_team_lead": True if i == 1 else False,
"row_index": i,
}
# A very sloppy, lazily assuming name parsing script, but good enough for now...
d["first_name"], d["middle_name"], *lname = d["full_name"].split(" ", 2)
d["last_name"] = lname[0] if lname else d.pop("middle_name")
data.append(d)
return data
def main():
data = parse_page(SRC_PATH.read_text())
stderr.write("Parsed %s data rows from: %s\n" % (len(data), SRC_PATH))
outs = csv.DictWriter(stdout, fieldnames=OUT_HEADER)
outs.writeheader()
outs.writerows(data)
if __name__ == "__main__":
main()
| 2,065 |
cfg/gym/bipedalwalker/bipedalwalker_ddpg.py
|
shubhamrauniyar/angela
| 21 |
2025944
|
algorithm='ddpg'
env_class='Gym'
model_class='LowDim2x'
environment = {
'name': 'BipedalWalker-v2'
}
model = {
'state_size': 24,
'action_size': 4
}
agent = {
'action_size': 4,
'update_every': 2,
'batch_size': 128,
'gamma': 0.98
}
train = {
'n_episodes': 10000,
'max_t': 70,
'solve_score': 300.0
}
| 341 |