max_stars_repo_path (string, 4-182 chars) | max_stars_repo_name (string, 6-116 chars) | max_stars_count (int64, 0-191k) | id (string, 7 chars) | content (string, 100-10k chars) | size (int64, 100-10k) |
---|---|---|---|---|---|
test/conftest.py | uc-cdis/cirrus | 1 | 2169948 |
import time
from unittest.mock import MagicMock, patch
import pytest
from cirrus import GoogleCloudManager
from cirrus.google_cloud.utils import (
    get_valid_service_account_id_for_user,
    _get_proxy_group_name_for_user,  # assumed to live in the same utils module; used by the fixtures below
)
def get_test_cloud_manager():
project_id = "test_project"
manager = GoogleCloudManager(project_id)
manager._authed_session = MagicMock()
manager._admin_service = MagicMock()
manager._storage_client = MagicMock()
manager.credentials = MagicMock()
return manager
@pytest.fixture
def test_cloud_manager():
patcher = patch(
"cirrus.google_cloud.manager.ServiceAccountCredentials.from_service_account_file"
)
patcher.start()
yield get_test_cloud_manager()
patcher.stop()
@pytest.fixture
def test_cloud_manager_group_and_service_accounts_mocked():
test_cloud_manager = get_test_cloud_manager()
test_domain = "test-domain.net"
new_member_1_id = "1"
new_member_1_username = "testuser"
primary_service_account = (
get_valid_service_account_id_for_user(new_member_1_id, new_member_1_username)
+ "@"
+ test_domain
)
group_name = _get_proxy_group_name_for_user(new_member_1_id, new_member_1_username)
group_email = group_name + "@" + test_domain
mock_get_group(test_cloud_manager, group_name, group_email)
mock_get_service_accounts_from_group(test_cloud_manager, primary_service_account)
mock_get_service_account(test_cloud_manager, primary_service_account)
return test_cloud_manager
@pytest.fixture(autouse=True)
def no_backoff_delay(monkeypatch):
"""
The ``backoff`` library uses ``time.sleep`` to implement the wait. Patch this to
disable actually waiting at all in the tests.
"""
monkeypatch.setattr(time, "sleep", lambda _: None)
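# Illustrative sketch (not part of the original fixtures; assumes the ``backoff``
# package referenced in the docstring above). Because ``time.sleep`` is patched to
# a no-op, a retrying function like this one completes its retries instantly in tests.
import backoff


@backoff.on_exception(backoff.expo, ValueError, max_tries=3)
def _example_flaky_call():
    # always raises; under the fixture above the retries add no real delay
    raise ValueError("transient error")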
def mock_get_group(test_cloud_manager, group_name, group_email):
test_cloud_manager.get_group = MagicMock()
test_cloud_manager.get_group.return_value = {
"kind": "admin#directory#group",
"id": group_name,
"etag": "",
"email": group_email,
"name": "",
"directMembersCount": 0,
"description": "",
"adminCreated": False,
"aliases": [""],
"nonEditableAliases": [""],
}
def mock_get_service_accounts_from_group(test_cloud_manager, primary_service_account):
test_cloud_manager.get_service_accounts_from_group = MagicMock()
test_cloud_manager.get_service_accounts_from_group.return_value = [
primary_service_account
]
def mock_get_service_account(test_cloud_manager, primary_service_account):
test_cloud_manager.get_service_account = MagicMock()
test_cloud_manager.get_service_account.return_value = {
"name": "",
"projectId": "",
"uniqueId": "",
"email": primary_service_account,
"displayName": "",
"etag": "",
"oauth2ClientId": "",
}
| 2,885 |
shortcuts.py | nbio/nbio-django | 1 | 2169903 |
__license__ = "Apache 2.0"
__copyright__ = "Copyright 2008 nb.io"
__author__ = "<NAME> - <EMAIL>"
from django.template import loader, RequestContext
from django.http import HttpResponse
DEFAULT_CONTENT_TYPE = 'text/html;charset=UTF-8'
def render_template(request, template, dictionary=None):
    if isinstance(template, str):
template = loader.get_template(template)
context = RequestContext(request, dictionary)
return template.render(context)
def render_response(request, template, dictionary=None, content_type=DEFAULT_CONTENT_TYPE, response_class=HttpResponse):
return response_class(render_template(request, template, dictionary), content_type)
def build_url(request, is_secure=None, host=None, port=None, path=None, query_string=None):
if is_secure is None:
is_secure = request.is_secure()
host = host or request.META['SERVER_NAME']
port = port or request.META['SERVER_PORT']
path = path or request.path
scheme = is_secure and 'https' or 'http'
if (is_secure and port == '443') or (not is_secure and port == '80') or not port:
port = ''
else:
port = ':' + port
if query_string:
url = "%s://%s%s%s?%s" % (scheme, host, port, path, query_string)
else:
url = "%s://%s%s%s" % (scheme, host, port, path)
return url
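# Illustrative usage (hypothetical values, not from the original module):
#   build_url(request, is_secure=False, host="example.com", port="8080",
#             path="/about", query_string="lang=en")
#   -> "http://example.com:8080/about?lang=en"
# With port "80" (or "443" when is_secure is True) the port segment is omitted.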
| 1,344 |
statesampling/utils/io.py | delemottelab/state-sampling | 0 | 2170542 |
import os
import re
import shutil
import time
from .. import log
_log = log.getLogger("utils-io")
def sorted_alphanumeric(l):
"""
From https://arcpy.wordpress.com/2012/05/11/sorting-alphanumeric-strings-in-python/
Sorts the given iterable in the way that is expected.
Required arguments:
l -- The iterable to be sorted.
"""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(l, key=alphanum_key)
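# Illustrative usage (not part of the original module): numeric chunks are compared
# as integers, so the "natural" order is produced:
#   sorted_alphanumeric(["frame10", "frame2", "frame1"]) -> ["frame1", "frame2", "frame10"]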
def read_file(path):
with open(path, "r") as myfile:
data = myfile.read()
return data
def get_backup_name(path):
return path.strip("/") + "#" + time.strftime('%Y%m%d-%H%M')
def backup_path(oldpath):
"""Backups/Moves the path to a backup name"""
if os.path.exists(oldpath):
newpath = get_backup_name(oldpath)
if os.path.exists(newpath):
backup_path(newpath)
shutil.move(oldpath, newpath)
def makedirs(path, overwrite=False, backup=True):
if os.path.exists(path):
if overwrite:
if backup:
backup_path(path)
else:
shutil.rmtree(path) # beware, removes all the subdirectories!
os.makedirs(path)
else:
os.makedirs(path)
def make_parentdirs(filepath):
try:
os.makedirs(os.path.dirname(filepath))
except Exception as ex:
pass # ok, exists already
def copy_and_inject(infile, outfile, params, marker="$%s", start_index=0):
text = read_file(infile)
with open(outfile, "w") as out:
out.write(inject(text, params, marker=marker, start_index=start_index))
def inject(text, params, marker="$%s", start_index=0):
for i in range(start_index, len(params) + start_index):
text = text.replace(marker % i, str(params[i - start_index]))
return text
def escape_to_filename(filename):
return re.sub(r'[\W]', '', filename)
| 1,969 |
beancmd/bury.py | EasyPost/beancmd | 7 | 2170338 |
import argparse
import itertools
from . import util
import pystalk
def setup_parser(parser=None):
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument('-H', '--host', default='localhost', help='Host of beanstalk server (default %(default)s)')
parser.add_argument('-p', '--port', default=11300, type=int, help='Port of beanstalk server (default %(default)s)')
parser.add_argument('-n', '--num-jobs', default=None, type=int, help='How many jobs to bury (default all in tube)')
parser.add_argument('tubes', nargs='*', help='Tubes to bury from (if not passed, defaults to all)')
return parser
def run(args):
client = pystalk.BeanstalkClient(args.host, args.port)
tubes = util.get_tubes(client, args.tubes)
client.watch('unused-fake-tube')
for tube in tubes:
client.watch(tube)
iterator = client.reserve_iter()
if args.num_jobs is not None:
iterator = itertools.islice(iterator, args.num_jobs)
for job in iterator:
client.bury_job(job.job_id)
client.ignore(tube)
return 0
| 1,112 |
Round #589 (Div 2)/A.py | julianferres/Codeforces | 4 | 2169206 |
from collections import Counter
ans = -1
l,r = map(int, input().split())
for i in range(l,r+1):
c = Counter(str(i))
flag = True
for j in c:
if c[j]>1:
flag = False
if flag: ans= i
print(ans)
| 207 |
part2.py | FishPain/DTRA-Assignment | 0 | 2169565 |
def cal(n):
if n < 2:
return 1
elif n < 3:
return 3
else:
a = n + 1
b = n // 2
c = round((n + 1) / 2)
return int(a * b + c)
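# Worked example (illustrative): when four equal neighbours are found the helper is
# called as cal(3), and cal(3) = (3 + 1) * (3 // 2) + round((3 + 1) / 2) = 4 + 2 = 6,
# i.e. the 3 + 2 + 1 pairs described in the comments below.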
def f2(arr, c, n, t, counter):
if n <= len(arr):
# Check if the current index and the next index is the same
if ~(arr[c] ^ arr[n]) == -1:
# T increase by 1 everytime there is a match
t += 1
# Check for the next index, current index no move.
return f2(arr, c, n + 1, t, counter)
else:
# There are at least one matching index
if t != 0:
# use a loop to print out the matching index using "t" value.
# e.i. if there are 4 of the same index, t = 4, loop's range will be 3+2+1
for i in range(cal(n - c - 1)):
print(f"X={arr[c]}, Y={arr[n - 1]}")
# counter increase by 1 to log the total matches
counter += 1
# Index does not match, set this index to the current and the next index to current + 1
return f2(arr, n, n + 1, 0, counter)
# Error occurs as list index out of range.
# Catches the error. Error means this is the last index so we can return the answers alr.
else:
# use a loop to print out the matching index using "t" value.
# e.i. if there are 4 of the same index, t = 4, loop's range will be 3+2+1
if t != 0:
for i in range(cal(n - c - 1)):
print(c, n)
print(f"X={arr[c]}, Y={arr[n - 1]}")
counter += 1
# Print the total count
return f"Total match is {counter}."
def f21(arr, c, n, t, counter, loop):
# Paste it out
if loop > 0:
print(f"X={arr[c]}, Y={arr[n - 1]}")
counter += 1
loop -= 1
return f21(arr, c, n, 0, counter, loop)
if n < len(arr):
# Check if the current index and the next index is the same
if ~(arr[c] ^ arr[n]) == -1:
# T increase by 1 everytime there is a match
t += 1
# Check for the next index, current index no move.
return f21(arr, c, n + 1, t, counter, loop=0)
else:
# There are at least one matching index
if t != 0:
# use a loop to print out the matching index using "t" value.
# e.i. if there are 4 of the same index, t = 4, loop's range will be 3+2+1
# Index does not match, set this index to the current and the next index to current + 1
return f21(arr, c, n, 0, counter, loop=cal(n-c-1))
return f21(arr, n, n + 1, 0, counter, loop=0)
# Error occurs as list index out of range.
# Catches the error. Error means this is the last index so we can return the answers alr.
else:
# use a loop to print out the matching index using "t" value.
# e.i. if there are 4 of the same index, t = 4, loop's range will be 3+2+1
if t != 0:
return f21(arr, c, n, 0, counter, loop=cal(n-c-1))
# Print the total count
return f"Total match is {counter}."
def f3(arr, c, n, count):
# [1, 2, 3, 3, 4, 4, 4, 4, 7, 7, 7, 10]
if n < len(arr):
# Check if the current index and the next index is the same
if ~(arr[c] ^ arr[n]) == -1:
print(f"1) C => Index: {c}, Value: {arr[c]}\n"
f" N => Index: {n}, Value: {arr[n]}\n")
print(f"X={arr[c]}, Y={arr[n]}")
count += 1
if n + 1 >= len(arr):
return f3(arr, c + 1, c + 2, count)
return f3(arr, c, n + 1, count)
else:
print(f"2) C => Index: {c}, Value: {arr[c]}\n"
f" N => Index: {n}, Value: {arr[n]}\n")
if n + 1 != len(arr):
return f3(arr, c + 1, c + 2, count)
return f3(arr, n, n+1, count)
else:
return f"Total match is {count}."
def f4(arr, c, n, count):
# [1, 2, 3, 3, 4, 4, 4, 4, 7, 7, 7, 10]
if n < len(arr):
# if the current one and the next one the same number?
if ~(arr[c] ^ arr[n]) == -1:
print(f"1) C => Index: {c}, Value: {arr[c]}\n"
f" N => Index: {n}, Value: {arr[n]}\n")
print(f"X={arr[c]}, Y={arr[n]}")
count += 1
return f4(arr, c, n + 1, count)
# if it is not the same
else:
print(f"2) C => Index: {c}, Value: {arr[c]}\n"
f" N => Index: {n}, Value: {arr[n]}\n")
if n-c > 1:
return f4(arr, c+1, c+2, count)
pass
# If not the same then go to the next pair
return f4(arr, n, n+1, count)
else:
return f"Total match is {count}."
# Python program to show
# bitwise operators
if __name__ == "__main__":
arr = [1, 2, 3, 3, 4, 4, 4, 7, 7, 7]
# print(f2(arr, 0, 1, 0, 0))
print(f21(arr, 0, 1, 0, 0, 0))
| 5,240 |
lnbits/extensions/example/__init__.py | sidahmedabdelillah/attigoBTC | 0 | 2170323 |
from quart import Blueprint
from lnbits.db import Database
db = Database("ext_example")
example_ext: Blueprint = Blueprint(
"example", __name__, static_folder="static", template_folder="templates"
)
from .views_api import * # noqa
from .views import * # noqa
| 281 |
tests/standalone/data_products/search_data_products_synchronous.py | ucalgary-aurora/pyaurorax | 0 | 2169183 |
import pyaurorax
import datetime
import pprint
def main():
# set as staging API
pyaurorax.api.set_base_url("https://api.staging.aurorax.space")
# do search
s = pyaurorax.data_products.search(datetime.datetime(2020, 1, 1, 0, 0, 0),
datetime.datetime(
2020, 1, 1, 23, 59, 59),
programs=["auroramax"],
verbose=True)
print()
pprint.pprint(s.data)
# ----------
if (__name__ == "__main__"):
main()
| 585 |
tahoe-python/genolake/tahoe/test/__init__.py | genolake/tahoe | 0 | 2169363 |
import os
import sys
import tempfile
import unittest
from pyspark.sql import SparkSession
class SparkTestCase(unittest.TestCase):
def resourceFile(self, file):
tahoeRoot = os.path.dirname(os.getcwd())
return os.path.join(os.path.join(tahoeRoot, "tahoe-core/src/test/resources"), file)
def exampleFile(self, file):
tahoeRoot = os.path.dirname(os.getcwd())
return os.path.join(os.path.join(tahoeRoot, "example-files"), file)
def tmpFile(self):
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
return tempFile.name
def checkFiles(self, file1, file2):
f1 = open(file1)
f2 = open(file2)
try:
            self.assertEqual(f1.read(), f2.read())
finally:
f1.close()
f2.close()
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.ss = SparkSession.builder.master('local[4]').appName(class_name).getOrCreate()
self.sc = self.ss.sparkContext
def tearDown(self):
self.ss.stop()
sys.path = self._old_sys_path
| 1,165 |
ui/pages/create_customer_page.py | lukehassel/hotel_management | 0 | 2170256 |
import tkinter as tk
from domain.entities.room.double_room import DoubleRoom
from domain.entities.room.single_room import SingleRoom
from domain.entities.room.suite import Suite
from domain.usecase.reception_usecase import ReceptionUseCase
from ui.pages.page import Page
class CreateCustomerPage(Page):
roomType = None
def show_entry_fields(self):
print("Dein Name ist %s" % (self.e1.get()))
ReceptionUseCase().createReservation(self.e1.get(), self.e2.get(), self.roomType)
def __init__(self, *args, **kwargs):
Page.__init__(self, *args, **kwargs)
label = tk.Label(self, text="This is page 1")
# label.pack(side="top", fill="both", expand=True)
        self.frame = tk.Frame(self)
        self.frame.pack()
self.create_input_fields()
self.create_radios()
self.create_btn()
title = tk.Label(self.frame,
text="Neuer Kunde")
title.config(font=("Courier", 44))
title.place(x=20, y=35)
def create_btn(self):
btnFrame = tk.Frame(self, borderwidth=20)
tk.Button(btnFrame,
text='Okay', command=self.show_entry_fields).grid(column=1, row=0)
btnFrame.pack(side="bottom")
def create_radios(self):
radioFrame = tk.Frame(self.frame, borderwidth=40)
var = tk.IntVar()
        rad1 = tk.Radiobutton(radioFrame, text='EinzelZimmer', variable=var, value=1, command=lambda: self.s1())
        rad2 = tk.Radiobutton(radioFrame, text='DoppelZimmer', variable=var, value=2, command=lambda: self.s2())
        rad3 = tk.Radiobutton(radioFrame, text='Suite', variable=var, value=3, command=lambda: self.s3())
rad1.grid(sticky="W", column=0, row=0)
rad2.grid(sticky="W", column=0, row=1)
rad3.grid(sticky="W", column=0, row=2)
radioFrame.place(x=200, y=175)
def s1(self):
print("asdf")
self.roomType = SingleRoom()
def s2(self):
self.roomType = DoubleRoom()
def s3(self):
self.roomType = Suite()
def create_input_fields(self):
inputFrame = tk.Frame(self.frame)
tk.Label(inputFrame,
text="Name").grid(sticky="W", column=1, row=0)
self.e1 = tk.Entry(inputFrame)
self.e1.grid(sticky="W", column=2, row=0)
tk.Label(inputFrame,
text="Hotel Besuche").grid(sticky="W", column=1, row=1)
self.e2 = tk.Entry(inputFrame)
self.e2.grid(sticky="W", column=2, row=1)
inputFrame.place(x=20, y=200)
| 2,488 |
pyvino/model/object_detection/person_detection/person_detection_retail_0013.py | hampen2929/openvino-python | 62 | 2170610 |
from .person_detector import PersonDetector
from ....util.logger import get_logger
logger = get_logger(__name__)
class PersonDetectorRetail0013(PersonDetector):
model_name = 'person-detection-retail-0013'
model_loc = 'intel'
def __init__(self, xml_path=None, fp=None, conf=0.6, draw=False):
super().__init__(xml_path, fp, conf, draw)
| 363 |
1-2.py | skittlesvampir/adventofcode2021 | 1 | 2170466 |
# Sum three consecutive numbers of the input at a time, then continue shifted by one
eingabe = []
while True:
zeile = input()
if zeile.isdigit():
eingabe.append(int(zeile))
else:
break
summen = 0
for i in range(len(eingabe)-3):
if eingabe[i]+eingabe[i+1]+eingabe[i+2] < eingabe[i+1] + eingabe[i+2] + eingabe[i+3]:
summen += 1
print(summen)
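# Note (illustrative, not part of the original solution): the two three-element
# windows share eingabe[i+1] and eingabe[i+2], so the comparison reduces to
# eingabe[i] < eingabe[i+3]; an equivalent count would be
#   sum(1 for i in range(len(eingabe) - 3) if eingabe[i] < eingabe[i + 3])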
| 377 |
program_synthesis/naps/examples/seq2seq/executor.py | kavigupta/program_synthesis | 123 | 2168880 |
import copy
import collections
from program_synthesis.naps import uast
from program_synthesis.naps.uast import lisp_to_uast
from program_synthesis.naps.uast import uast_test_config
ExecutionResult = collections.namedtuple('ExecutionResult', ['result', 'trace'])
class ExecutorSyntaxException(Exception):
pass
class ExecutorRuntimeException(Exception):
pass
class UASTExecutor(object):
def execute(self, code, inputs):
if not isinstance(code, dict):
try:
code = lisp_to_uast.lisp_to_uast(code)
except lisp_to_uast.LispSyntaxException:
raise ExecutorSyntaxException()
e = uast.Executor(code)
trace_ = None
try:
assert isinstance(inputs, list)
inputs = copy.deepcopy(inputs)
try:
result = e.execute_func("__main__", inputs, True)
except KeyboardInterrupt:
print(code)
print(inputs)
raise
except Exception as e:
raise ExecutorRuntimeException(e)
return ExecutionResult(result, trace_)
def compare(self, gold, prediction):
return uast_test_config.test_passed(gold, prediction)
def evaluate_code(code, tests, executor_):
stats = {'tests-executed': len(tests), 'tests-passed': 0,
'result-none': 0, 'syntax-error': 0, 'runtime-exception': 0, 'exceptions': []}
if not code:
return stats
for test in tests:
try:
execution_result = executor_.execute(code, test['input'])
except ExecutorSyntaxException as e:
stats['syntax-error'] += 1
stats['exceptions'].append(str(e))
continue
except ExecutorRuntimeException as e:
stats['runtime-exception'] += 1
stats['exceptions'].append(str(e))
continue
except Exception as e:
stats['exceptions'].append(str(e))
continue
if execution_result.result is None:
stats['result-none'] += 1
if executor_.compare(test['output'], execution_result.result):
stats['tests-passed'] += 1
return stats
| 2,198 |
poc/model/time-based-dbscan.py | An-Dang/clustinator | 0 | 2169101 |
import re
import numpy as np
import pandas as pd
from sklearn import preprocessing
from scipy.sparse import csr_matrix
from sklearn.cluster import DBSCAN
import datetime
import time
states = ["INITIAL","login","View_Items","home","logout","View_Items_quantity","Add_to_Cart","shoppingcart",
"remove","deferorder","purchasecart","inventory","sellinventory","clearcart","cancelorder","$"]
#Data imports
PATH = "../data/raw/"
sessions_file = (PATH+'sessions.dat')
def session_request_dict(sessions_file):
s_r_dict = {}
# Dict of sessions
with open(sessions_file) as fn:
sessions_raw = fn.readlines()
for session in sessions_raw:
key = re.search('([^.]+)', session).group()
value = re.findall('\"(.*?)\"', session)
s_r_dict[key] = value
return s_r_dict
def transition_matrix(sessions, states):
markovchains = []
for key, value in sessions.items():
# labelEncoding
le = preprocessing.LabelEncoder()
le.fit(value)
le.transform(value)
# factorize
factorize = pd.factorize(value)[0]
# matrix
n = 1 + max(factorize) # number of states
m = [[0] * n for _ in range(n)]
for (i, j) in zip(factorize, factorize[1:]):
m[i][j] += 1
# now convert to probabilities:
for row in m:
s = sum(row)
if s > 0:
row[:] = [f / s for f in row]
# unique array in the right order
value = np.array(value)
_, idx = np.unique(value, return_index=True)
df = pd.DataFrame(data=m, index=value[np.sort(idx)],
columns=value[np.sort(idx)])
df_1 = pd.DataFrame(index=states, columns=states, dtype='float64')
df_1.update(df, join='left')
merge = pd.concat([pd.concat([df_1, df], axis=1, sort=False)], axis=0).fillna(0).round(2).iloc[:, :-n]
# convert into Vector
merge = np.array(merge.values.flatten().tolist())
# 2-D array
markovchains.append(merge)
# csr sparse matrix
csr = csr_matrix(markovchains)
return csr
data = session_request_dict(sessions_file)
print(len(data))
set_1 = {k: data[k] for k in list(data)[:100]}
set_2 = {k: data[k] for k in list(data)[10:110]}
print('load data done', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
#for X in tqdm(range(len(small_data_set))):
X = transition_matrix(set_1, states)
print('matrix done', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S \n'))
print('start clustering')
clustering = DBSCAN(eps=1.5, min_samples=10).fit(X)
labels = clustering.labels_
#print(labels)
print(np.unique(labels, return_counts=True))
print(clustering, )
print("End clustering", datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
| 2,869 |
child_management/migrations/0015_auto_20210305_1109.py | waicindia/clms-prototype | 0 | 2170432 |
# Generated by Django 3.1.2 on 2021-03-05 11:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('child_management', '0014_auto_20210305_1104'),
]
operations = [
migrations.AlterField(
model_name='child',
name='sex',
field=models.IntegerField(choices=[(1, 'Male'), (2, 'Female'), (3, 'Transgender'), (4, 'Inter-sex'), (5, 'Other')]),
),
migrations.AlterField(
model_name='childshelterhomerelation',
name='child',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='child_management.child', verbose_name='Case number'),
),
]
| 760 |
bloodhound_theme/bhtheme/tests/theme.py | beebopkim/bloodhound | 84 | 2170471 |
# -*- coding: UTF-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from trac.test import EnvironmentStub
from trac.web.chrome import Chrome
from bhdashboard.web_ui import DashboardModule
from bhtheme.theme import BloodhoundTheme
from bhtheme.tests import unittest
class ThemeTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=('trac.*', 'bhtheme.*'),
default_data=True)
self.bhtheme = BloodhoundTheme(self.env)
def tearDown(self):
self.env.reset_db()
def test_templates_dirs(self):
chrome = Chrome(self.env)
self.assertFalse(self.env.is_component_enabled(DashboardModule))
for dir in self.bhtheme.get_templates_dirs():
self.assertIn(dir, chrome.get_all_templates_dirs())
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ThemeTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 1,756 |
tests/test_add_and_delete_contact_to_group.py | elenay/python_training | 0 | 2170315 |
__author__ = 'eya'
from model.contact import Contact
from model.group import Group
from fixture.orm import OrmFixture
import random
orm_db = OrmFixture(host="127.0.0.1", name="addressbook", user="root", password="")
def test_add_contact_to_group(app, db):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="GroupForContact"))
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="NoOneWasHere"))
old_contacts = db.get_contact_list()
if app.contact.check_presence_contact_not_in_any_group() != 0:
contacts_not_in_group = app.contact.contacts_not_in_group()
added_contact = random.choice(contacts_not_in_group)
else:
added_contact = app.contact.create(Contact(firstname="EmptyGroup"))
app.contact.add_contact_by_id_to_group(added_contact.id)
assert len(old_contacts) == app.contact.count()
new_contacts = db.get_contact_list()
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
def test_delete_contact_from_group(app, db):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="GroupForContact"))
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="NoOneWasHere"))
old_contacts = db.get_contact_list()
if app.contact.check_presence_contact_at_least_in_one_group() != 0:
contacts_in_group = app.contact.contacts_in_group()
deleted_contact = random.choice(contacts_in_group)
    else:
        # no contact belongs to any group yet: create one and add it so it can be removed
        deleted_contact = app.contact.create(Contact(firstname="EmptyGroup"))
        app.contact.add_contact_by_id_to_group(deleted_contact.id)
| 1,604 |
qaoa/vertex_cover.py | GiggleLiu/QAOA | 18 | 2169357 |
from utils import get_bit
def is_vertex_cover(graph, z):
"""
checks if z (an integer) represents a valid vertex cover for graph adjacency
matrix graph, with n vertices
"""
for e in graph.es:
if get_bit(z, e.source) == 0 and get_bit(z, e.target) == 0:
return False
return True
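# Illustrative example (assuming get_bit(z, i) returns bit i of z): for a graph with
# the single edge (0, 1), z = 0b01 is a valid cover because vertex 0 is selected,
# while z = 0b00 leaves the edge uncovered and the function returns False.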
def vertex_cover_loss(z, graph, mask):
"""
the objective function to minimize: -(# of 0 in a bit string),
corresponding to maximising the number of vertices NOT in the vertex cover
"""
if not mask[z]:
return 0
n = graph.vcount()
s = 0
for i in range(n):
s += get_bit(z, i)
return s - n
def get_vertex_cover_clauses(graph):
'''
C = \sum -0.5*(Zi+1), mapping is 0->down, 1->up.
'''
raise NotImplementedError()
clause_list = []
for v in graph.vs:
        clause_list.append((-0.5, (v.index,)))
return clause_list
| 913 |
modules/feedback/migrations/0001_initial.py | heolin123/funcrowd | 0 | 2170180 |
# Generated by Django 2.0.8 on 2018-11-22 22:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('tasks', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Feedback',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='FeedbackField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='FeedbackScoreField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.AddField(
model_name='feedback',
name='fields',
field=models.ManyToManyField(to='feedback.FeedbackField'),
),
migrations.AddField(
model_name='feedback',
name='score_fields',
field=models.ManyToManyField(to='feedback.FeedbackScoreField'),
),
migrations.AddField(
model_name='feedback',
name='task',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tasks.Task'),
),
]
| 1,646 |
birl.py | ppratikcr7/BHIRL | 9 | 2170528 |
from mdp import *
from copy import deepcopy
from math import exp
# Class for Bayesian Inverse Reinforcement learning
class BIRL():
    # Currently we run BIRL for 2 iterations with a step size of 2. The reward bounds set here are +10 and -10.
def __init__(self, expert_trace, grid_size, terminals, error_func, birl_iteration=2, step_size=2, r_min=-10,
r_max=10):
self.n_rows, self.n_columns = grid_size
self.r_min, self.r_max = r_min, r_max
self.step_size = step_size
self.expert_trace = expert_trace
self.birl_iteration = birl_iteration
self.terminals = terminals
self.error_func = error_func
# Run the main BIRL algorithm to calculate the posterior and prior probability of being in a state,
# also it returns the reward error, which can be updated with each iteration.
def run_birl(self):
policy_error, reward_error = [], []
# This is the core BIRL algorithm
mdp = self.create_rewards()
pi, u = policy_iteration(mdp)
q = get_q_values(mdp, u)
posterior = calculate_posterior(mdp, q, self.expert_trace)
for _ in range(self.birl_iteration):
new_mdp = deepcopy(mdp)
new_mdp.modify_rewards_randomly(self.step_size)
new_u = policy_evaluation(pi, u, new_mdp, 1)
if pi != best_policy(new_mdp, new_u):
new_pi, new_u = policy_iteration(new_mdp)
new_q = get_q_values(new_mdp, new_u)
new_posterior = calculate_posterior(new_mdp, new_q, self.expert_trace)
if probability(min(1, exp(new_posterior - posterior))):
pi, u, mdp, posterior = new_pi, new_u, deepcopy(new_mdp), new_posterior
else:
new_q = get_q_values(new_mdp, new_u)
new_posterior = calculate_posterior(new_mdp, new_q, self.expert_trace)
if probability(min(1, exp(new_posterior - posterior))):
mdp, posterior = deepcopy(new_mdp), new_posterior
policy_error.append(0)
reward_error.append(0)
return pi, mdp, policy_error, reward_error
#------------- Reward functions ------------
#TODO move priors out of the mdp
def create_rewards(self, reward_function_to_call=None):
# If no reward function is specified, sets all rewards as 0
if reward_function_to_call is None:
return self.create_zero_rewards()
return reward_function_to_call()
def create_zero_rewards(self):
return GridMDP([[0 for _ in range(self.n_columns)] for _ in range(self.n_rows)]
, terminals=deepcopy(self.terminals))
def create_random_rewards(self):
return GridMDP(
[[random.uniform(self.r_min, self.r_max) for _ in range(self.n_columns)] for _ in range(self.n_rows)]
, terminals=deepcopy(self.terminals))
def create_gaussian_rewards(self):
mean, stdev = 0, self.r_max / 3
return GridMDP(
[[self.bound_rewards(random.gauss(mean, stdev)) for _ in range(self.n_columns)] for _ in range(self.n_rows)]
, terminals=deepcopy(self.terminals))
def bound_rewards(self, reward):
if reward > self.r_max:
reward = self.r_max
elif reward < self.r_min:
reward = self.r_min
return reward
def calculate_posterior(mdp, q, expert_pi, gamma=0.95):
z = []
e = 0
for s in mdp.states:
for a in mdp.actions(s):
z.append(gamma * q[s, a])
if s in expert_pi:
e += gamma * q[s, a]
del z[:] #Removes contents of Z
return e * calculate_prior(mdp.reward.values())
# return e
def get_q_values(mdp, U):
Q = {}
for s in mdp.states:
for a in mdp.actions(s):
for (p, sp) in mdp.T(s, a):
Q[s, a] = mdp.reward[s] + mdp.gamma * p * U[sp]
return Q
def calculate_prior(rewards):
return sum([calculate_tri_prior(R) for R in rewards]) / 10
# return 1
def calculate_beta_prior(R, Rmax=10):
R = abs(R) + 0.00001
Rmax += 0.000001
return 1 / (((R / Rmax) ** 0.5) * ((1 - R / Rmax) ** 0.5))
def calculate_tri_prior(R, Rmax= 10):
R = abs(R) + 0.00001
Rmax += 0.000001
Rmin = -Rmax
return 0.4 * exp(-0.1 * (R - Rmax) ** 2) + 0.4 * exp(-0.1 * (R - Rmin) ** 2) + exp(-0.1 * R ** 2)
def uniform_prior(_): return 1
| 4,461 |
learn_python/codewars/sudoku_solver.py | PavliukKonstantin/learn-python | 0 | 2168058 |
# Write a function that will solve a 9x9 Sudoku puzzle. The function will
# take one argument consisting of the 2D puzzle array, with the value 0
# representing an unknown square.
# The Sudokus tested against your function will be "easy" (i.e. determinable;
# there will be no need to assume and test possibilities on unknowns) and
# can be solved with a brute-force approach.
puzzle = [[5, 3, 0, 0, 7, 0, 0, 0, 0],
[6, 0, 0, 1, 9, 5, 0, 0, 0],
[0, 9, 8, 0, 0, 0, 0, 6, 0],
[8, 0, 0, 0, 6, 0, 0, 0, 3],
[4, 0, 0, 8, 0, 3, 0, 0, 1],
[7, 0, 0, 0, 2, 0, 0, 0, 6],
[0, 6, 0, 0, 0, 0, 2, 8, 0],
[0, 0, 0, 4, 1, 9, 0, 0, 5],
[0, 0, 0, 0, 8, 0, 0, 7, 9],
]
right_result = [
[5, 3, 4, 6, 7, 8, 9, 1, 2],
[6, 7, 2, 1, 9, 5, 3, 4, 8],
[1, 9, 8, 3, 4, 2, 5, 6, 7],
[8, 5, 9, 7, 6, 1, 4, 2, 3],
[4, 2, 6, 8, 5, 3, 7, 9, 1],
[7, 1, 3, 9, 2, 4, 8, 5, 6],
[9, 6, 1, 5, 3, 7, 2, 8, 4],
[2, 8, 7, 4, 1, 9, 6, 3, 5],
[3, 4, 5, 2, 8, 6, 1, 7, 9],
]
def find_solution_digits(puzzle):
solution_digits = {}
for i, row in enumerate(puzzle):
for j, elem in enumerate(row):
if elem == 0:
numbers = set(range(1, 10))
square = set()
start_row = ((i//3) * 3)
end_row = start_row + 3
start_elem = ((j//3) * 3)
end_elem = start_elem + 3
for row in puzzle[start_row:end_row]:
for elem in row[start_elem:end_elem]:
square.add(elem)
numbers -= set(puzzle[i]) | {i[j] for i in puzzle} | square
if len(numbers) == 1:
solution_digits[(i, j)] = numbers
return solution_digits
def sudoku(puzzle):
while True:
solution_digits = find_solution_digits(puzzle)
if not solution_digits:
break
for k, v in solution_digits.items():
i, j = k
puzzle[i][j] = v.pop()
return puzzle
print(sudoku(puzzle) == right_result)
| 2,132 |
project/save_as_text.py | ivanbaug/wn-scrape | 0 | 2168092 |
import os
from bs4 import BeautifulSoup
from dotenv import load_dotenv
from datetime import datetime as dt
from shared_funcs import get_page_txt
out_folder = "output/"
load_dotenv()
main_url = os.getenv("SITE_URL")
print(main_url)
def save_ch_txt(ch_num: int):
# get entire page
site_url = f"{main_url}{ch_num}"
wn_webpage = get_page_txt(site_url)
soup = BeautifulSoup(wn_webpage, "html.parser")
# get chapter data
ch_title = soup.find("p", class_="novel_subtitle")
novel_honbun = soup.find(id="novel_honbun")
novel_afterword = soup.find(id="novel_a")
with open(
f"output/CH{ch_num:03d}-{dt.now().strftime('%Y%m%d-%H%M%S')}.txt",
"w",
encoding="utf8",
) as myfile:
# Write title
myfile.write(f"CHAPTER {ch_num}: {ch_title.text}\n")
# Write paragraphs
keep_trying = 3
paragraph_id = 1
while keep_trying:
paragraph = novel_honbun.find(id=f"L{paragraph_id}")
            if paragraph is None:
keep_trying -= 1
else:
myfile.write(paragraph.text + "\n")
paragraph_id += 1
# Write afterwords
keep_trying = 3
afterword_id = 1
myfile.write("----------\n")
while keep_trying:
aw = novel_afterword.find(id=f"La{afterword_id}")
            if aw is None:
keep_trying -= 1
else:
myfile.write(aw.text + "\n")
afterword_id += 1
if __name__ == "__main__":
# Save raw chapters
# for i in range(1, 678):
for i in range(1, 3):
save_ch_txt(i)
| 1,641 |
python/074_search_a_2d_matrix.py | ufjfeng/leetcode-soln | 0 | 2170143 |
"""
Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
Integers in each row are sorted from left to right.
The first integer of each row is greater than the last integer of the previous row.
For example,
Consider the following matrix:
[
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
]
Given target = 3, return true.
"""
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if matrix is None or matrix == []:
return False
for row in matrix:
if target < row[0]:
return False
elif target > row[-1]:
continue
else:
left, right = 0, len(row)-1
while left < right - 1:
middle = (left + right) // 2
if row[middle] == target:
return True
elif row[middle] > target:
right = middle
else:
left = middle
if row[left] == target or row[right] == target:
return True
else:
return False
A=[[1,3,5,7],[10,11,16,20],[23,30,34,50]]
a=Solution()
print(a.searchMatrix(A,3))
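# Illustrative alternative (not part of the original solution): because each row
# starts after the previous row ends, the matrix behaves like one sorted list, so a
# single binary search over row-major indices also works.
def search_matrix_flat(matrix, target):
    if not matrix or not matrix[0]:
        return False
    rows, cols = len(matrix), len(matrix[0])
    lo, hi = 0, rows * cols - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        val = matrix[mid // cols][mid % cols]
        if val == target:
            return True
        if val < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return False

print(search_matrix_flat(A, 3))  # expected: True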
| 1,433 |
datasets_tools/create_pascal_voc_tf_record.py | vghost2008/wml | 6 | 2170057 |
#coding=utf-8
import os
import functools
import sys
import random
import time
import numpy as np
from multiprocessing import Pool
import tensorflow as tf
import object_detection.utils as odu
import object_detection.npod_toolkit as npod
import shutil
import xml.etree.ElementTree as ET
import sys
from iotoolkit.pascal_voc_data import *
from wml_tfutils import int64_feature,bytes_feature,floats_feature
from iotoolkit.pascal_voc_data import TEXT_TO_ID
import img_utils as wmli
import wml_utils as wmlu
SAMPLES_PER_FILES = 6000
def _category_id_filter(category_id):
return True
def _labels_text_to_labels(labels_text):
for x in labels_text:
if x not in TEXT_TO_ID:
print(f"Error \"{x}\" not in target set.")
return [TEXT_TO_ID[x] for x in labels_text]
class VOCMaker(object):
def __init__(self,filenames=None):
if filenames is not None and isinstance(filenames,str):
with open(filenames) as f:
self.filenames = [x.strip() for x in f.readlines()]
else:
self.filenames = filenames
self.category_id_filter = _category_id_filter
self.image_preprocess = None
    # input is a list of str
self.labels_text_to_labels = _labels_text_to_labels
    '''
    directory: path of the image directory
    name: image file name, without the directory path or suffix
    Returns the image data, the bboxes (expressed in [0, 1]) and the labels matching the bboxes
    '''
def _process_image(self,xml_file,img_file):
if not os.path.exists(img_file):
return None,None,None,None,None,None,None
if self.image_preprocess is not None:
img = wmli.imread(img_file)
img = self.image_preprocess(img)
image_data = wmli.encode_img(img)
else:
image_data = tf.gfile.FastGFile(img_file, 'rb').read()
shape, _bboxes, _labels_text, _difficult, _truncated,_ = odu.read_voc_xml(xml_file, adjust=None)
_labels = self.labels_text_to_labels(_labels_text)
bboxes = []
labels_text = []
difficult = []
truncated = []
labels = []
for data in zip(_bboxes,_labels,_labels_text,_difficult,_truncated):
if self.category_id_filter(data[1]):
bboxes.append(data[0])
labels.append(data[1])
labels_text.append(data[2])
difficult.append(data[3])
truncated.append(data[4])
if len(labels) == 0:
#print(f"Ignore {name}.")
return None,None,None,None,None,None,None
return image_data, shape, bboxes, labels, labels_text, difficult, truncated
def _convert_to_example(self,image_data, labels, labels_text, bboxes, shape,
difficult, truncated):
xmin = []
ymin = []
xmax = []
ymax = []
for b in bboxes:
            # every bbox must contain four elements (ymin, xmin, ymax, xmax)
assert len(b) == 4
# pylint: disable=expression-not-assigned
            # l iterates over ymin, xmin, ymax, xmax; point over b.ymin, b.xmin, b.ymax, b.xmax
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
        '''
        xmin, ymin, xmax, ymax now contain the coordinates of all boxes in bboxes
        '''
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': int64_feature(shape[0]),
'image/width': int64_feature(shape[1]),
'image/channels': int64_feature(shape[2]),
'image/shape': int64_feature(shape),
'image/object/bbox/xmin': floats_feature(xmin),
'image/object/bbox/xmax': floats_feature(xmax),
'image/object/bbox/ymin': floats_feature(ymin),
'image/object/bbox/ymax': floats_feature(ymax),
'image/object/bbox/label': int64_feature(labels),
'image/object/bbox/label_text': bytes_feature(labels_text),
'image/object/bbox/difficult': int64_feature(difficult),
'image/object/bbox/truncated': int64_feature(truncated),
'image/format': bytes_feature(image_format),
'image/encoded': bytes_feature(image_data)}))
return example
    '''
    dataset_dir: path of the image directory
    name: image file name, without the directory path or suffix
    '''
def _add_to_tfrecord(self,img_file, tfrecord_writer):
xml_file = wmlu.change_suffix(img_file,"xml")
if not os.path.exists(img_file) or not os.path.exists(xml_file):
print(f"Error file {xml_file}, {img_file}.")
return False
image_data, shape, bboxes, labels, labels_text, difficult, truncated = \
self._process_image(xml_file,img_file)
if image_data is None:
return False
example = self._convert_to_example(image_data, labels, labels_text,
bboxes, shape, difficult, truncated)
tfrecord_writer.write(example.SerializeToString())
return True
def _get_output_filename(self,output_dir, name, idx):
return '%s/%s_%04d.tfrecord' % (output_dir, name, idx)
def make_tfrecord(self,file_data,output_dir,name="train"):
fidx,files = file_data
tf_filename = self._get_output_filename(output_dir, name, fidx)
with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
for file in files:
self._add_to_tfrecord(file, tfrecord_writer)
    '''
    Save all image files into tfrecord files in batches of SAMPLES_PER_FILES
    '''
def multi_thread_to_tfrecords(self,dataset_dir, output_dir, shuffling=False,fidx=0):
files = wmlu.recurse_get_filepath_in_dir(dataset_dir,suffix=".jpg")
return self.multi_thread_to_tfrecords_by_files(files,output_dir,shuffling,fidx)
    '''
    Save all image files into tfrecord files in batches of SAMPLES_PER_FILES
    files: img file list
    '''
def multi_thread_to_tfrecords_by_files(self,files, output_dir,shuffling=False,fidx=0):
wmlu.create_empty_dir(output_dir,remove_if_exists=True,yes_to_all=True)
if shuffling:
random.seed(time.time())
random.shuffle(files)
wmlu.show_list(files[:100])
if len(files)>100:
print("...")
print(f"Total {len(files)} files.")
sys.stdout.flush()
files = wmlu.list_to_2dlist(files,SAMPLES_PER_FILES)
files_data = list(enumerate(files))
if fidx != 0:
_files_data = []
for fid,file_d in files_data:
_files_data.append([fid+fidx,file_d])
files_data = _files_data
sys.stdout.flush()
pool = Pool(13)
pool.map(functools.partial(self.make_tfrecord,output_dir=output_dir),files_data)
#list(map(functools.partial(self.make_tfrecord,output_dir=output_dir),files_data))
pool.close()
pool.join()
print('\nFinished converting the dataset total %d examples.!'%(len(files)))
if __name__ == "__main__":
dataset_dir = "/media/vghost/Linux/constantData/MachineLearning/mldata/PASCAL/VOCdevkit/VOC2012"
output_dir = "/home/vghost/ai/mldata/VOC2012_tfdata"
output_name = "train"
print('Dataset directory:', dataset_dir)
print('Output directory:',output_dir)
m = VOCMaker()
    m.multi_thread_to_tfrecords(dataset_dir, output_dir)  # note: the class defines multi_thread_to_tfrecords; shard names use the default "train" prefix
| 7,240 |
srfnef/corrections/normalization/normalization_correct.py | twj2417/srf | 0 | 2170568 |
# encoding: utf-8
'''
@author: <NAME>
@contact: <EMAIL>
@software: nef
@file: normalization_correct.py
@date: 5/8/2019
@desc:
'''
from srfnef import nef_class
from srfnef.data import Image, Listmode
from scipy import interpolate
from .amp_x import AmplitudeX
from .amp_z import AmplitudeZ
import numpy as np
@nef_class
class NormalizationCorrect:
amp_x: AmplitudeX
amp_z: AmplitudeZ
def __call__(self, image: Image) -> Image:
fr = interpolate.interp1d(self.amp_x.ux, self.amp_x.ax, fill_value = 'extrapolate')
x = np.arange(image.shape[0]) * image.unit_size[0] + image.center[0] - image.size[0] / 2
y = np.arange(image.shape[1]) * image.unit_size[1] + image.center[1] - image.size[1] / 2
x1, y1 = np.meshgrid(x, y, indexing = 'ij')
r = np.sqrt(x1 ** 2 + y1 ** 2)
mask_r = fr(r)
mask_r = mask_r / np.mean(mask_r)
fz = interpolate.interp1d(self.amp_z.uz, self.amp_z.az, fill_value = 'extrapolate')
z = np.arange(image.shape[2]) * image.unit_size[2] + image.center[2] - image.size[2] / 2
mask_z = fz(np.abs(z))
mask_z = mask_z / np.mean(mask_z)
image_out_data = image.data / mask_z ** 2
# image_out_data = (image_out_data.transpose() * mask_r).transpose()
return image.update(data = image_out_data)
@nef_class
class NormalizationAngledCorrect:
def __call__(self, listmode: Listmode) -> Listmode:
lx = np.abs(listmode.lors.data[:, 0] - listmode.lors.data[:, 3])
ly = np.abs(listmode.lors.data[:, 1] - listmode.lors.data[:, 4])
lz = np.abs(listmode.lors.data[:, 2] - listmode.lors.data[:, 5])
cos_ = np.sqrt(lx ** 2 + ly ** 2 + lz ** 2) / np.sqrt(lx ** 2 + ly ** 2)
return listmode * cos_.astype(np.float32)
| 1,779 |
Ago-Dic-2017/Ivan Carreon/OrdinarioEquipo/ResHotel/ResHotel/views.py | Arbupa/DAS_Sistemas | 41 | 2167642 |
from django.shortcuts import render, redirect
from django.http import HttpResponse
def index(request):
return render(request, 'base/index.html')
def ee(request):
return render(request, 'base/ee.html')
| 211 |
Server/src/quadradiusr_server/cron.py | kjarosh/QuadradiusR | 6 | 2170439 |
import asyncio
import logging
from quadradiusr_server.config import CronConfig
from quadradiusr_server.db.base import Lobby
from quadradiusr_server.db.repository import Repository
from quadradiusr_server.db.transactions import transaction_context
from quadradiusr_server.notification import NotificationService, Notification
class Cron:
def __init__(
self, config: CronConfig, repository: Repository,
notification_service: NotificationService) -> None:
self.config = config
self.repository = repository
self.ns = notification_service
async def register(self):
logging.info('Registering cron jobs')
asyncio.create_task(self._cron_purge_game_invites())
asyncio.create_task(self._cron_purge_tokens())
logging.info('Cron jobs registered')
async def _cron_purge_game_invites(self):
logging.debug('Purging game invites')
while True:
await asyncio.sleep(self.config.purge_game_invites_delay)
await self._purge_game_invites()
async def _purge_game_invites(self):
async with transaction_context(self.repository.database):
gi_repo = self.repository.game_invite_repository
old_invites = await gi_repo.get_old_invites()
for invite in old_invites:
await gi_repo.remove(invite)
for subject_id in [invite.from_id_, invite.subject_id_]:
await self.repository.synchronize_transaction_on_commit(
self.ns.notify_now(Notification(
topic='game.invite.removed',
subject_id=subject_id,
data={
'game_invite_id': invite.id_,
'reason': 'expired',
},
)))
async def _cron_purge_tokens(self):
        logging.debug('Purging tokens')
while True:
await asyncio.sleep(self.config.purge_tokens_delay)
await self._purge_tokens()
async def _purge_tokens(self):
async with transaction_context(self.repository.database):
at_repo = self.repository.access_token_repository
await at_repo.remove_old_tokens()
logging.debug('Tokens purged')
class SetupService:
def __init__(
self, repository: Repository) -> None:
self.repository = repository
async def run_setup_jobs(self):
logging.info('Running setup jobs')
await self._create_main_lobby()
logging.info('Setup jobs finished')
async def _create_main_lobby(self):
logging.debug('Creating main lobby')
async with transaction_context(self.repository.database):
lobby_repo = self.repository.lobby_repository
if await lobby_repo.get_by_id('@main') is None:
main = Lobby(
id_='@main',
name_='Main',
)
await lobby_repo.add(main)
logging.debug('Main lobby created')
| 3,113 |
ex046.py | thaisouza30/Exercicios-Python3-Curso-em-Video | 1 | 2169698 |
from time import sleep
print('Contagem regressiva para a queima de fogos ')
print('-='*15)
for contador in range(10, -1, -1):
print(contador)
sleep(1)
print('-='*15)
print('Feliz Ano Novo !!!')
| 208 |
torm/model/ModelMetaclass.py | cofepy/torm | 2 | 2170437 |
import collections
import urllib.parse
from torm.utl.Config import config
from torm.utl.Utl import to_snake_name
from torm.field import *
from torm.builder import MongoBuilder, MysqlBuilder
from torm.connection import MongoConnection, MysqlConnection
def _connection(config):
if config['db_type'] == 'mongo':
return MongoConnection(config)
if config['db_type'] == 'mysql':
return MysqlConnection(config)
class ModelMetaclass(type):
isinstance = False
isclass = True
@classmethod
def __prepare__(cls, name, bases, **kws):
return collections.OrderedDict()
    # returns the subclass
def __new__(cls, name, bases, attrs):
        '''
        name: class name
        bases: base classes
        attrs: dict of all class attributes
        '''
if name == 'Model':
            return type.__new__(cls, name, bases, attrs)  # return the Model type itself
model_fields = dict()
cls_keys = list(attrs.keys())
for k in cls_keys:
if isinstance(attrs[k], Field):
model_fields[k] = attrs[k]
field = model_fields.keys()
        attrs['__fields__'] = model_fields  # store the field attributes
        attrs['__field__'] = list(field)  # store the list of field names
        attrs['__config__'] = cls.init_config(cls, name, attrs)  # connection config
attrs['config'] = attrs['__config__']
attrs['db_name'] = attrs['__config__']['db']
# attrs['table_name'] = attrs['__config__']['table']
# attrs['connection'] = _connection(attrs['__config__'])
        # inherit the builder matching the database type
dbtype = attrs['__config__']['db_type']
bases = bases + (dict,)
if dbtype == 'mongo':
builder = MongoBuilder
elif dbtype == 'mysql':
builder = MysqlBuilder
builder.__config__ = attrs['__config__']
builder.table_name = attrs['__config__']['table']
builder.connection = _connection(attrs['__config__'])
attrs['builder'] = builder()
# attrs['model']=
# bases = bases + (builder,)
# cls.builder = builder
        # makes Model also inherit from the builder
return type.__new__(cls, name, bases, attrs)
def __init__(self, *args, **kwargs):
if self.__name__ != 'Model':
self.builder.model = self
super().__init__(*args, **kwargs)
def init_config(cls, name, attrs):
__config = {}
        # name of the environment-variable configuration
env_name = attrs.get("__config__", "default")
db_type = attrs.get(
"__dbtype__", config.env(env_name)('TORM_DB_TYPE'))
        # configuration name
        __config['config_name'] = env_name
        # database type
        __config['db_type'] = db_type
        # table name
__config['table'] = attrs.get(
"__tablename__",
to_snake_name(name)
)
if db_type == "mysql":
__config['charset'] = config.env(env_name)('TORM_CHARSET')
__config['host'] = config.env(env_name)('TORM_HOST')
__config['port'] = int(config.env(env_name)('TORM_PORT'))
__config['db'] = attrs.get(
"__dbname__",
config.env(env_name)('TORM_DB')
)
else:
            # database connection parameters
__config['url'] = config.env(env_name)('TORM_URL', default=None)
if not __config['url']:
__config['host'] = config.env(env_name)('TORM_HOST')
__config['port'] = int(config.env(env_name)('TORM_PORT'))
__config['db'] = attrs.get(
"__dbname__",
config.env(env_name)('TORM_DB')
)
else:
url_parts = urllib.parse.urlparse(__config['url'])
path_parts = url_parts[2].rpartition('/')
if path_parts[2]:
__config['db'] = attrs.get(
"__dbname__",
path_parts[2]
)
else:
__config['db'] = attrs.get(
"__dbname__",
config.env(env_name)('TORM_DB')
)
        # database user name and password configuration
auth = config.env(env_name)("TORM_AUTH", default="off")
if auth == "on":
__config['user'] = config.env(env_name)('TORM_USER')
__config['password'] = config.env(env_name)('<PASSWORD>')
return __config
def __getattribute__(self, key):
if key == "__new__":
# return object.__new__(self)
# print(self)
return dict.__new__(self)
try:
return object.__getattribute__(self, key)
except:
return self.builder.__getattribute__(key)
| 4,624 |
gdsort_test.py | rennomarcus/gdsort | 0 | 2170517 |
import unittest
from gdsort import SortScript
from io import StringIO
import sys
class TestSortScript(unittest.TestCase):
def setUp(self):
self.filename = 'test.gd'
self.sort_script = SortScript(self.filename)
self.function_names = ['_ready', 'func1', 'func2', 'func3', 'process']
def test_get_func_name__should_return_function_name(self):
self.assertEqual(self.sort_script.get_func_name('function1():\n'), 'function1', "Function name not returned correctly")
def test_parse_script__should_return_functions(self):
with open(self.filename, 'r') as f:
self.sort_script.parse_script(f)
functions = [f.name for f in self.sort_script.functions]
self.assertCountEqual(functions, self.function_names, "Not all functions were captured")
def test_write_script__output_should_return_as_golden_file(self):
# Sort the functions before we write. This is done in exec()
self.sort_script.sort_functions()
# Capture the output of our function
mock_output = StringIO()
self.sort_script.write_script(mock_output)
self.assertEqual(mock_output.getvalue(), self.get_golden_data())
def get_golden_data(self):
data = ''
with open('golden_file.gd', 'r') as f:
data = f.read()
return data
if __name__ == '__main__':
unittest.main()
| 1,412 |
args_det.py | idrispendisbey/cnn-surrogate | 79 | 2169464 |
import argparse
import torch
import json
import random
from pprint import pprint
from utils.misc import mkdirs
# always uses cuda if avaliable
class Parser(argparse.ArgumentParser):
def __init__(self):
super(Parser, self).__init__(description='Dense Convolutional Encoder-Decoder Networks')
self.add_argument('--exp-name', type=str, default='deterministic', help='experiment name')
self.add_argument('--exp-dir', type=str, default="./experiments", help='directory to save experiments')
self.add_argument('--post', action='store_true', default=False, help='post training analysis')
# network
self.add_argument('--blocks', type=list, default=[3, 6, 3], help='list of number of layers in each dense block')
self.add_argument('--growth-rate', type=int, default=16, help='number of output feature maps of each conv layer within each dense block')
self.add_argument('--init-features', type=int, default=48, help='number of initial features after the first conv layer')
self.add_argument('--drop-rate', type=float, default=0., help='dropout rate')
self.add_argument('--bn-size', type=int, default=8, help='bottleneck size: bn_size * growth_rate')
self.add_argument('--bottleneck', action='store_true', default=False, help='enables bottleneck design in the dense blocks')
# data
self.add_argument('--data-dir', type=str, default="./dataset", help='directory to dataset')
self.add_argument('--kle', type=int, default=4225, help='num of KLE terms')
self.add_argument('--ntrain', type=int, default=512, help="number of training data")
self.add_argument('--ntest', type=int, default=500, help="number of test data")
# training
self.add_argument('--epochs', type=int, default=200, help='number of epochs to train (default: 200)')
self.add_argument('--lr', type=float, default=3e-3, help='learnign rate')
# self.add_argument('--lr-scheduler', type=str, default='plateau', help="scheduler, plateau or step")
self.add_argument('--weight-decay', type=float, default=5e-4, help="weight decay")
self.add_argument('--batch-size', type=int, default=16, help='input batch size for training (default: 16)')
self.add_argument('--test-batch-size', type=int, default=100, help='input batch size for testing (default: 100)')
self.add_argument('--seed', type=int, default=1, help='manual seed used in Tensor')
# logging
self.add_argument('--ckpt-epoch', type=int, default=None, help='which epoch of checkpoints to be loaded in post mode')
self.add_argument('--ckpt-freq', type=int, default=200, help='how many epochs to wait before saving model')
self.add_argument('--log-freq', type=int, default=2, help='how many epochs to wait before logging training status')
self.add_argument('--plot-freq', type=int, default=100, help='how many epochs to wait before plotting test output')
self.add_argument('--plot-fn', type=str, default='contourf', choices=['contourf', 'imshow'], help='plotting method')
def parse(self):
args = self.parse_args()
args.run_dir = args.exp_dir + '/' + args.exp_name \
+ '/kle{}/ntrain{}_blocks{}_growth{}_nif{}_drop{}_batch{}_lr{}_wd{}_epochs{}'.format(
args.kle, args.ntrain, args.blocks, args.growth_rate,
args.init_features, args.drop_rate, args.batch_size,
args.lr, args.weight_decay, args.epochs
)
args.ckpt_dir = args.run_dir + '/checkpoints'
mkdirs([args.run_dir, args.ckpt_dir])
        assert args.epochs % args.ckpt_freq == 0, 'epochs must '\
            'be divisible by ckpt_freq'
# seed
if args.seed is None:
args.seed = random.randint(1, 10000)
print("Random Seed: ", args.seed)
random.seed(args.seed)
torch.manual_seed(args.seed)
print('Arguments:')
pprint(vars(args))
if not args.post:
with open(args.run_dir + "/args.txt", 'w') as args_file:
json.dump(vars(args), args_file, indent=4)
return args
# global
args = Parser().parse()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
| 4,340 |
tools/tags_validator_main.py | vasudevgupta7/tfhub.dev | 48 | 2170230 |
# Copyright 2021 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""YAML validator for tag definition files.
1) To validate all tag definition files, run:
$ python tools/tags_validator.py
2) To validate selected files, pass their relative paths:
$ python tools/tags_validator.py tags/dataset.yml [other_files]
Use the --root_dir flag to validate tag files outside of the current project.
TODO(b/182137324): Merge with test_tag_configs.py once `bazel test` can be used.
"""
import argparse
import os
import sys
from absl import app
from absl import logging
import tags_validator
FLAGS = None
def main(_):
root_dir = FLAGS.root_dir or os.getcwd()
documentation_dir = os.path.join(root_dir, "tags")
logging.info("Using %s for documentation directory.", documentation_dir)
file_to_error = dict()
if FLAGS.file:
logging.info("Going to validate files %s in documentation directory %s.",
FLAGS.file, documentation_dir)
files_to_validate = [os.path.join(documentation_dir, f) for f in FLAGS.file]
file_to_error = tags_validator.validate_tag_files(files_to_validate)
else:
logging.info("Going to validate all files in documentation directory %s.",
documentation_dir)
file_to_error = tags_validator.validate_tag_dir(documentation_dir)
if file_to_error:
logging.error("The following files contain issues: %s", file_to_error)
else:
logging.info("Successfully validated all files.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"file",
type=str,
default=None,
help=("Path to files to validate. Path is relative to `--root_dir`."),
nargs="*")
parser.add_argument(
"--root_dir",
type=str,
default=None,
help=("Root directory that contains tag definition files under "
"./tags. Defaults to current directory."))
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 2,621 |
example/migrations/0008_auto_20191125_1223.py | arturoless/django_rest | 0 | 2169989 |
# Generated by Django 2.1.14 on 2019-11-25 18:23
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('example', '0007_career'),
]
operations = [
migrations.RemoveField(
model_name='career',
name='delete',
),
migrations.RemoveField(
model_name='person',
name='delete',
),
]
| 420 |
Algorithm Analysis and Design/fibonacci.py | tolgahanakgun/School-Projects | 0 | 2170224 |
'''
Created on 31 May 2016
@author: TOLGAHAN
'''
def fib(x):
if x == 0:
return 0
if x == 1:
return 1
else:
return fib(x-1) + fib(x-2)
# Naive recursion is exponential, so fib(50) is impractical here; see the memoized sketch below.
# print fib(50)
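# Added sketch (assumes Python 3): a memoized variant that makes fib(50) practical to compute.
from functools import lru_cache
@lru_cache(maxsize=None)
def fib_memo(x):
    return x if x < 2 else fib_memo(x - 1) + fib_memo(x - 2)
print(fib_memo(50))  # 12586269025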
| 185 |
mooringlicensing/migrations/0160_auto_20210618_1609.py
|
jawaidm/mooringlicensing
| 0 |
2170150
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-06-18 08:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mooringlicensing', '0159_merge_20210617_1552'),
]
operations = [
migrations.AlterModelOptions(
name='numberofdayssetting',
options={'ordering': ['-date_of_enforcement']},
),
migrations.AlterModelOptions(
name='numberofdaystype',
options={'verbose_name': 'Number of days Settings', 'verbose_name_plural': 'Number of days Settings'},
),
]
| 651 |
Final_Project/Dashboard/py_files/plotly_bar_frequency.py
|
JKocher13/DataZCW-Final-Project
| 0 |
2168780
|
import sqlalchemy
import pymysql
from sqlalchemy import create_engine
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from dash_table import DataTable
from dash_table.FormatTemplate import Format
from matplotlib import rcParams
from plotly.subplots import make_subplots
from wordcloud import WordCloud, STOPWORDS
import collections
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.express as px
import re
app1 = dash.Dash(__name__, external_stylesheets=[dbc.themes.CYBORG])  # DARKLY, LUX, SOLAR, FLATLY, MINTY, CYBORG
# ------------------------------------------------------------------------------
# Import and clean data (importing csv into pandas)
engine = create_engine('mysql+pymysql://root:zipcoder@localhost/twitter')
twitter_df = pd.read_sql("sentiments", con = engine)
twitter_df = twitter_df.drop_duplicates()
# ------------------------------------------------------------------------------
# App layout
app1.layout = html.Div([
    html.H1("Covid-19 Sentiment Dashboard", style={'text-align': 'center'}),
dcc.Dropdown(id="select_count",
options=[
{"label": "5 Most Freq. Words", "value": 5},
{"label": "10 Most Freq. Words", "value": 10},
{"label": "25 Most Freq. Words", "value": 25},
{"label": "50 Most Freq. Words", "value": 50},
{"label": "75 Most Freq. Words", "value": 75},
{"label": "100 Most Freq. Words", "value": 100}],
multi=False,
value=5,
style={'width': "40%"}
),
html.Div(id='output_container', children=[]),
html.Br(),
dcc.Graph(id='sentiment_map', figure={})
])
# ------------------------------------------------------------------------------
# Connect the Plotly graphs with Dash Components
@app1.callback(
[Output(component_id='output_container', component_property='children'),
Output(component_id='sentiment_map', component_property='figure')],
[Input(component_id='select_count', component_property='value')]
)
def update_graph(option_select):
container = f"Current sentiment being shown: {option_select}"
dff = twitter_df.copy()
words = []
counts = []
    # number of most-common words to display
x = option_select
# gather all tweets
all_words = ' '.join(dff['text'].str.lower())
#remove links, #hashtags, @, :
cleaned_words = re.sub(r'http\S+', '', all_words)
cleaned_word1 = re.sub(r"#(\w+)", ' ', cleaned_words, flags=re.MULTILINE)
cleaned_word2 = re.sub(r"@(\w+)", ' ', cleaned_word1, flags=re.MULTILINE)
cleaned_tweets = re.sub(r" : (\w+)", ' ', cleaned_word2, flags=re.MULTILINE)
# Stop Words
stopwords = list(STOPWORDS) + ["made","-","&","covid19.", "coronavirus", "covid-19","#covid19","covid", "#coronavirus", "now", "rt", "covid19", 'to', 'say', 'sort', 'right', 'now']
    # Filter words (use the fully cleaned text so the link/hashtag/@ stripping above takes effect)
    filtered_words = [word for word in cleaned_tweets.split() if word not in stopwords]
# Counted words
counted_words = collections.Counter(filtered_words)
    # Loop to collect the most common words and their counts
for letter, count in counted_words.most_common(x):
words.append(letter)
counts.append(count)
#df to be read by px
word_freq_df = pd.DataFrame(list(zip(words, counts)),
columns =['word', 'count'])
    # most frequently occurring word
most_occuring = word_freq_df.nlargest(1, ['count'])
# string
m_o = most_occuring['word'].item()
    # container text returned through the callback
    container = f"{option_select} Most Frequently Used Words. The most frequent word was '{m_o}'"
# Bar Graph
fig = px.bar(word_freq_df, x='word', y='count',
hover_data=['count', 'word'], color='count',
labels={'words':'Words'}, height=400,
orientation='v')
return container, fig
# ------------------------------------------------------------------------------
if __name__ == '__main__':
app1.run_server(port=8054,debug=True)
| 4,350 |
Python_Heapq.py
|
BrettLampson/its-python
| 0 |
2170203
|
import heapq
from pprint import pprint
# ---------------------------------------------------------------------------------------------- #
# LIST of the largest or smallest N items in a collection
# heapq module does exactly that
nums = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2]
# print(heapq.nlargest(3, nums)) # Prints [42, 37, 23]
# print(heapq.nsmallest(3, nums)) # Prints [-4, 1, 2]
# Both functions also accept a key parameter that allows them to be used with more
# complicated data structures. For example:
# portfolio = [
# {'name': 'IBM', 'shares': 100, 'price': 91.1},
# {'name': 'AAPL', 'shares': 50, 'price': 543.22},
# {'name': 'FB', 'shares': 200, 'price': 21.09},
# {'name': 'HPQ', 'shares': 35, 'price': 31.75},
# {'name': 'YHOO', 'shares': 45, 'price': 16.35},
# {'name': 'ACME', 'shares': 75, 'price': 115.65}
# ]
# cheap = heapq.nsmallest(3, portfolio, key=lambda s: s['price'])
# expensive = heapq.nlargest(3, portfolio, key=lambda s: s['price'])
# pprint(cheap)
# print('-'*65)
# pprint(expensive)
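# One more small illustration of the key parameter (added example with made-up data):
# print(heapq.nlargest(2, ['fig', 'pear', 'banana'], key=len))    # ['banana', 'pear']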
# ---------------------------------------------------------------------------------------------- #
# MAKING A PRIORITY QUEUE
import heapq
class PriorityQueue:
def __init__(self):
self._queue = []
self._index = 0
    def push(self, item, priority):
        # Negate the priority so the min-heap pops the highest priority first; the increasing
        # index breaks ties so items with equal priority come out in insertion order.
        heapq.heappush(self._queue, (-priority, self._index, item))
        self._index += 1
def pop(self):
return heapq.heappop(self._queue)[-1]
class Item:
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Item({!r})'.format(self.name)
q = PriorityQueue()
q.push(Item('foo'), 3)
q.push(Item('bar'), 5)
q.push(Item('spam'), 4)
q.push(Item('grok'), 1)
# Only four items were pushed, so pop four times (a fifth pop would raise IndexError).
for i in range(4):
print(q.pop())
# print(q.pop())
# print(q.pop())
# print(q.pop())
# print(q.pop())
| 1,861 |
Code Forces/Problem Set/B/jeff_and_periods.py
|
mishrakeshav/Competitive-Programming
| 2 |
2169816
|
n = int(input())
a = list(map(int,input().split()))
# For each value keep (last index seen, common gap between occurrences);
# a gap of -1 marks inconsistent spacing, 0 means the value has been seen only once so far.
v = dict()
for i in range(n):
    x = a[i]
    if x in v:
        v[x] = [i, -1 if v[x][1] and i - v[x][0] != v[x][1] else i - v[x][0]]
    else:
        v[x] = (i, 0)
b = [(x,v[x][1]) for x in sorted(v.keys()) if v[x][1] >=0]
print(len(b))
for x,p in b: print(x,p)
| 319 |
classification/models/__init__.py
|
SACGF/variantgrid
| 5 |
2170561
|
from classification.models.discordance_models import *
from classification.models.classification import *
from classification.models.classification_allele_hooks import * # so we load the receivers
from classification.models.classification_discordance_hooks import * # so we load the receivers
from classification.models.classification_ref import *
from classification.models.classification_share_flag_hooks import * # so we load the receivers
from classification.models.classification_user_hooks import * # so we load the receivers
from classification.models.classification_notification_hooks import *
from classification.models.classification_variant_fields_validation import * # so we load the receivers
from classification.models.variant_models import *
from classification.models.condition_text_matching import *
from classification.models.clinvar_export_models import *
from classification.models.classification_report_models import *
from classification.models.upload_file_lab import *
from classification.models.clinvar_export_exclude_utils import * # so we load the receivers
| 1,090 |
demo2.py
|
scottslowe/2017-denver-npug-meeting
| 0 |
2170130
|
#!/usr/bin/env python
# Import the necessary modules
import napalm
import json
# Define dictionaries for devices
veos01 = {
'hostname': '127.0.0.1',
'username': 'admin',
'password': '<PASSWORD>',
'optional_args': { 'port': 14431 }
}
veos02 = {
'hostname': '127.0.0.1',
'username': 'admin',
'password': '<PASSWORD>',
'optional_args': { 'port': 14432 }
}
# Create a list of all devices
all_devices = [veos01, veos02]
# Loop over all the devices, retrieve the configuration, and
# write the configuration to a file
for a_device in all_devices:
driver = napalm.get_network_driver('eos')
device = driver(**a_device)
device.open()
output = json.dumps(device.get_config(retrieve=u'running'))
formatstr = '{ip}-{port}-{type}.json'
filename = formatstr.format(ip=a_device['hostname'], port=a_device['optional_args']['port'], type='arista_eos')
with open(filename,'w') as text_file:
text_file.write(output)
device.close()
| 989 |
jianzhi_offer_47.py
|
JasonLeeSJTU/Algorithms_Python
| 2 |
2168955
|
#!/usr/bin/env python
# encoding: utf-8
'''
@author: <NAME>
@license: (C) Copyright @ <NAME>
@contact: <EMAIL>
@file: jianzhi_offer_47.py
@time: 2019/5/15 15:04
@desc:
'''
class Solution:
def Add(self, num1, num2):
        # XOR adds the two numbers while ignoring carries;
        # AND marks the carry positions (shifted left by one place).
        while num2:
            sum = num1 ^ num2
            carry = (num1 & num2) << 1
            num1 = sum & 0xffffffff  # mask down to an unsigned 32-bit value
            num2 = carry & 0xffffffff
        # For 32-bit values the sign bit (bit 31) is 1 for negatives and 0 for positives.
        return num1 if num1 >> 31 == 0 else ~(num1 ^ 0xffffffff)  # equivalently num1 - 0xffffffff - 1, i.e. num1 - 4294967296
if __name__ == '__main__':
res = Solution()
a = res.Add(111, 899)
print(a)
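    # The 32-bit masking above also handles negative operands and results, e.g.:
    # print(res.Add(-5, 3))  # -2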
| 671 |
src/data_analysis/retrievers.py
|
otimgren/centrex-data-analysis
| 0 |
2170548
|
"""
Contains classes for retrieving data from file (or wherever it's stored)
"""
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import List, Union
import h5py
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
@dataclass
class Retriever(ABC):
"""
Abstract parent class for data retrievers
"""
@abstractmethod
def retrieve_data(self) -> pd.DataFrame:
"""
Retrieves data from file
"""
class SPARetriever(Retriever):
"""
Retriever used with SPA test data
"""
def retrieve_data(
self,
filepath: Union[Path, str],
run_name: Union[str, int],
camera_path: str = "camera_test",
NI_DAQ_path: str = "readout",
scan_param: str = None,
muwave_shutter=True,
scan_param_new_name: str = None,
) -> pd.DataFrame:
"""
        Retrieves SPA test data from file.
"""
# Retrieve camera data
df_CAM = self.retrieve_camera_data(filepath, run_name, camera_path)
# Retrieve DAQ data
df_DAQ = self.retrieve_NI_DAQ_data(
filepath, run_name, NI_DAQ_path, scan_param, muwave_shutter
)
# Merge dataframes
df = df_CAM.merge(df_DAQ, left_index=True, right_index=True)
# If needed, give scan parameter a new name
if scan_param_new_name:
df.rename(mapper={scan_param: scan_param_new_name}, inplace=True, axis=1)
# Remove lines where camera returned all zeros
df = df[~df["CameraData"].apply(lambda x: np.allclose(x, np.zeros(x.shape)))]
# Store run name in metadata of dataframe
df.attrs["run_name"] = run_name
# Return merged dataframe
return df
def retrieve_camera_data(
self, filepath: Union[Path, str], run_name: Union[str, int], camera_path: str
) -> pd.DataFrame:
"""
Loads camera data from hdf file.
"""
# Initialize containers for camera images and their timestamps
camera_data = []
camera_time = []
# If run_name given as an index, get the string version
if type(run_name) == int:
run_name = self.get_run_names(filepath)[run_name]
# Determine the path to data within the hdf file
data_path = f"{run_name}/{camera_path}/PIProEM512Excelon"
# Open hdf file
with h5py.File(filepath, "r") as f:
# Loop over camera images (1 image per molecule pulse)
for dataset_name in f[data_path]:
if "events" not in dataset_name:
n = int(dataset_name.split("_")[-1])
camera_data.append(f[data_path][dataset_name][()])
                    camera_time.append(f[data_path][dataset_name].attrs["timestamp"])
# Convert lists into a dataframe and return it
dataframe = pd.DataFrame(
data={"CameraTime": camera_time, "CameraData": camera_data}
)
return dataframe
def retrieve_NI_DAQ_data(
self,
filepath: Union[Path, str],
run_name: Union[str, int],
NI_DAQ_path: str,
scan_param: str = None,
muwave_shutter=True,
) -> pd.DataFrame:
"""
Retrieves data obtained using the NI5171 PXIe DAQ
"""
# Define which channel on DAQ corresponds to which data
yag_ch = 0 # Photodiode observing if YAG fired
abs_pd_ch = 2 # Photodiode observing absorption outside cold cell
abs_pd_norm_ch = (
3 # Photodiode to normalize for laser intensity fluctuations in absorption
)
rc_shutter_ch = 4 # Tells if rotational cooling laser shutter is open or closed
rc_pd_ch = 5 # Photodiode for checking that rotaional cooling is on
muwave_shutter_ch = 6 # Tells if SPA microwaves are on or off
# Initialize containers for data
DAQ_data = []
DAQ_time = []
DAQ_attrs = []
# If run_name given as an index, get the string version
if type(run_name) == int:
run_name = self.get_run_names(filepath)[run_name]
# Determine path to data within the hdf file
data_path = f"{run_name}/{NI_DAQ_path}/PXIe-5171"
# Open hdf file
with h5py.File(filepath, "r") as f:
# Loop over camera images (1 image per molecule pulse)
for dataset_name in f[data_path]:
if "events" not in dataset_name:
n = int(dataset_name.split("_")[-1])
DAQ_data.append(f[data_path][dataset_name][()])
DAQ_time.append(f[data_path][dataset_name].attrs["ch0 : timestamp"])
DAQ_attrs.append(
{
key: value
for key, value in f[data_path][dataset_name].attrs.items()
}
)
# Convert lists to dataframes
data_dict = {
"YAGPD": [dataset[:, yag_ch] for dataset in DAQ_data],
"AbsPD": [dataset[:, abs_pd_ch] for dataset in DAQ_data],
"AbsNormPD": [dataset[:, abs_pd_norm_ch] for dataset in DAQ_data],
"RCShutter": [dataset[:, rc_shutter_ch] for dataset in DAQ_data],
"RCPD": [dataset[:, rc_pd_ch] for dataset in DAQ_data],
"DAQTime": DAQ_time,
}
# If microwave shutter was used, need that
if muwave_shutter:
data_dict["MicrowaveShutter"] = [
dataset[:, muwave_shutter_ch] for dataset in DAQ_data
]
# If scan parameter was specified, get data for that
if scan_param:
data_dict[scan_param] = [dataset[scan_param] for dataset in DAQ_attrs]
# Convert dictionary to dataframe and return it
dataframe = pd.DataFrame(data=data_dict)
return dataframe
def get_run_names(self, filepath: Union[Path, str]) -> List[str]:
"""
Gets the names of the datasets stored in a given file
"""
with h5py.File(filepath, "r") as f:
keys = list(f.keys())
return keys
def print_run_names(self, filepath: Union[Path, str]) -> None:
"""
Prints the names of the datasets stored in the given file
"""
# Get dataset names
keys = self.get_run_names(filepath)
# Print dataset names
print("Dataset names:")
for i, key in enumerate(keys):
print(f"{i} -- {key}")
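# Example usage (illustrative only; the file path, run name and scan parameter below are assumptions):
# retriever = SPARetriever()
# retriever.print_run_names("spa_test_data.hdf5")
# df = retriever.retrieve_data("spa_test_data.hdf5", run_name=0, scan_param="SPAFrequency",
#                              scan_param_new_name="MicrowaveFrequency")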
| 6,584 |
tests/00-pymod_basics/005-add_1_to_arg/test_add_1_to_arg.py
|
nandub/nim-pymod
| 256 |
2169568
|
def test_0_compile_pymod_test_mod(pmgen_py_compile):
pmgen_py_compile(__name__)
def test_cfloatAdd1ToArg(pymod_test_mod):
arg = 1.0
res = pymod_test_mod.cfloatAdd1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_cdoubleAdd1ToArg(pymod_test_mod):
arg = 1.0
res = pymod_test_mod.cdoubleAdd1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_cshortAdd1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.cshortAdd1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_cintAdd1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.cintAdd1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_clongAdd1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.clongAdd1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_cushortAdd1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.cushortAdd1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_cuintAdd1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.cuintAdd1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_culongAdd1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.culongAdd1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_floatAdd1ToArg(pymod_test_mod):
arg = 1.0
res = pymod_test_mod.floatAdd1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_float32Add1ToArg(pymod_test_mod):
arg = 1.0
res = pymod_test_mod.float32Add1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_float64Add1ToArg(pymod_test_mod):
arg = 1.0
res = pymod_test_mod.float64Add1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_intAdd1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.intAdd1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
# TODO
#def test_int8Add1ToArg(pymod_test_mod):
# arg = 1
# res = pymod_test_mod.int8Add1ToArg(arg)
# assert res == (arg + 1)
# assert type(res) == type(arg)
def test_int16Add1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.int16Add1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_int32Add1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.int32Add1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_int64Add1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.int64Add1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_uintAdd1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.uintAdd1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_uint8Add1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.uint8Add1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_uint16Add1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.uint16Add1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_uint32Add1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.uint32Add1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_uint64Add1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.uint64Add1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
# TODO
#def test_boolAdd1ToArg(pymod_test_mod):
# arg = True
# res = pymod_test_mod.boolAdd1ToArg(arg)
# assert res == (arg + 1)
# assert type(res) == type(arg)
def test_byteAdd1ToArg(pymod_test_mod):
arg = 1
res = pymod_test_mod.byteAdd1ToArg(arg)
assert res == (arg + 1)
assert type(res) == type(arg)
def test_ccharAdd1ToArg(pymod_test_mod, python_major_version):
# Python 3 or above: bytes vs strings, yay!
arg = b"a" if python_major_version >= 3 else "a"
expectedRes = b"b" if python_major_version >= 3 else "b"
res = pymod_test_mod.ccharAdd1ToArg(arg)
assert res == expectedRes
assert type(res) == type(expectedRes)
assert type(res) == type(arg)
def test_charAdd1ToArg(pymod_test_mod, python_major_version):
# Python 3 or above: bytes vs strings, yay!
arg = b"a" if python_major_version >= 3 else "a"
expectedRes = b"b" if python_major_version >= 3 else "b"
res = pymod_test_mod.charAdd1ToArg(arg)
assert res == expectedRes
assert type(res) == type(expectedRes)
assert type(res) == type(arg)
def test_stringAdd1ToArg(pymod_test_mod):
arg = "abc"
expectedRes = arg + "def"
res = pymod_test_mod.stringAdd1ToArg(arg)
assert res == expectedRes
assert type(res) == type(expectedRes)
assert type(res) == type(arg)
# TODO
#def test_unicodeRuneAdd1ToArg(pymod_test_mod, python_major_version):
# arg = "a" if python_major_version >= 3 else u"a"
# expectedRes = "b" if python_major_version >= 3 else u"b"
# res = pymod_test_mod.unicodeRuneAdd1ToArg(arg)
# assert res == expectedRes
# assert type(res) == type(expectedRes)
# assert type(res) == type(arg)
# TODO
#def test_seqCharAdd1ToArg(pymod_test_mod, python_major_version):
# # Python 3 or above: bytes vs strings, yay!
# arg = b"abc" if python_major_version >= 3 else "abc"
# expectedRes = (arg + b"def") if python_major_version >= 3 else (arg + "def")
# res = pymod_test_mod.seqCharAdd1ToArg(arg)
# assert res == expectedRes
# assert type(res) == type(expectedRes)
# assert type(res) == type(arg)
# TODO
#def test_seqRuneAdd1ToArg(pymod_test_mod, python_major_version):
# arg = "abc" if python_major_version >= 3 else u"abc"
# expectedRes = (arg + "def") if python_major_version >= 3 else (arg + u"def")
# res = pymod_test_mod.seqRuneAdd1ToArg(arg)
# assert res == expectedRes
# assert type(res) == type(expectedRes)
# assert type(res) == type(arg)
| 5,987 |
Leetcode/hard/regular-expression-matching.py
|
jen-sjen/data-structures-basics-leetcode
| 6 |
2170093
|
"""
# REGULAR EXPRESSION MATCHING
Given an input string (s) and a pattern (p), implement regular expression matching with support for '.' and '*' where:
'.' Matches any single character.
'*' Matches zero or more of the preceding element.
The matching should cover the entire input string (not partial).
Example 1:
Input: s = "aa", p = "a"
Output: false
Explanation: "a" does not match the entire string "aa".
Example 2:
Input: s = "aa", p = "a*"
Output: true
Explanation: '*' means zero or more of the preceding element, 'a'. Therefore, by repeating 'a' once, it becomes "aa".
Example 3:
Input: s = "ab", p = ".*"
Output: true
Explanation: ".*" means "zero or more (*) of any character (.)".
Example 4:
Input: s = "aab", p = "c*a*b"
Output: true
Explanation: c can be repeated 0 times, a can be repeated 1 time. Therefore, it matches "aab".
Example 5:
Input: s = "mississippi", p = "mis*is*p*."
Output: false
Constraints:
0 <= s.length <= 20
0 <= p.length <= 30
s contains only lowercase English letters.
p contains only lowercase English letters, '.', and '*'.
It is guaranteed for each appearance of the character '*', there will be a previous valid character to match.
"""
class Solution:
def isMatch(self, s: str, p: str) -> bool:
        # table[row][col] is True iff the first `row` characters of s match the first `col` characters of p.
        table = [[None for _ in range(len(p) + 1)] for _ in range(len(s) + 1)]
        table[0][0] = True
for row in range(1, len(s) + 1):
table[row][0] = False
for col in range(1, len(p) + 1):
if p[col - 1] == "*":
table[0][col] = table[0][col - 2]
else:
table[0][col] = False
for row in range(1, len(s) + 1):
for col in range(1, len(p) + 1):
if p[col - 1] == ".":
table[row][col] = table[row - 1][col - 1]
elif p[col - 1] != "*":
table[row][col] = table[row - 1][col - 1] and s[row - 1] == p[col - 1]
else:
if p[col - 2] == ".":
table[row][col] = table[row][col - 1] or table[row][col - 2] or table[row - 1][col - 1] or table[row - 1][col]
else:
table[row][col] = table[row][col - 1] or table[row][col - 2] or (table[row - 1][col - 1] and p[col - 2] == s[row - 1])
for row in range(len(s) + 1):
print(table[row])
return table[-1][-1]
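# Quick checks against the examples in the problem statement above (isMatch also prints its DP table):
# sol = Solution()
# assert sol.isMatch("aa", "a*")
# assert not sol.isMatch("mississippi", "mis*is*p*.")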
| 2,498 |
apps/DeepFaceLive/ui/widgets/QErrorCSWError.py
|
kitiv/DeepFaceLive
| 4 |
2170635
|
from typing import Union
from PyQt6.QtCore import *
from PyQt6.QtGui import *
from PyQt6.QtWidgets import *
from resources.fonts import QXFontDB
from resources.gfx import QXImageDB
from xlib import qt as lib_qt
from xlib.mp import csw as lib_csw
from .QCSWControl import QCSWControl
class QErrorCSWError(QCSWControl):
"""
Implements lib_csw.Error control as widget
"""
def __init__(self, csw_error : lib_csw.Error.Client):
if not isinstance(csw_error, lib_csw.Error.Client):
raise ValueError('csw_error must be an instance of Error.Client')
super().__init__(csw_control=csw_error)
self._csw_error = csw_error
csw_error.call_on_error(self._on_csw_error)
label_warning = self._label_warning = lib_qt.QXLabel(image=QXImageDB.warning_outline('red'),
scaled_contents=True,
size_policy=(QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed),
fixed_size=(32,32),
)
label = self._label = lib_qt.QXLabel(font=QXFontDB.get_default_font(size=7), word_wrap=True)
bar = lib_qt.QXFrame(layout=lib_qt.QXHBoxLayout(
[ lib_qt.QXWidget(layout=lib_qt.QXHBoxLayout([label_warning]), size_policy=(QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Minimum)) ,
lib_qt.QXWidget(layout=lib_qt.QXHBoxLayout([label]))
], spacing=0) )
self.setLayout(lib_qt.QXHBoxLayout([bar]))
self.hide()
def _on_csw_state_change(self, state):
super()._on_csw_state_change(state)
if state == lib_csw.Control.State.DISABLED:
self._label.setText(None)
def _on_csw_error(self, text: Union[str,None]):
self._label.setText(text)
| 1,976 |
codeforces/contests/313/D.py
|
harry-7/mycodes
| 1 |
2169868
|
def F(s):
    # Canonical form: odd-length strings map to themselves; even-length strings map to their
    # halves' canonical forms concatenated with the smaller half first.
    if len(s) % 2 == 1: return s
s1 = F(s[:len(s)//2])
s2 = F(s[len(s)//2:])
if s1 < s2:return s1 + s2
return s2 + s1
if F(input()) == F(input()):
print("YES")
else:
print("NO")
| 207 |
src/losses.py
|
huyhoang17/kuzushiji_recognition
| 16 |
2170514
|
from tensorflow.keras import losses
from tensorflow.keras import backend as K
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred = K.cast(y_pred, 'float32')
y_pred_f = K.cast(K.greater(K.flatten(y_pred), 0.5), 'float32')
intersection = y_true_f * y_pred_f
score = 2. * K.sum(intersection) / (K.sum(y_true_f) + K.sum(y_pred_f))
return score
def dice_loss(y_true, y_pred):
smooth = 1.
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = y_true_f * y_pred_f
score = (2. * K.sum(intersection) + smooth) / \
(K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
return 1. - score
def bce_dice_loss(y_true, y_pred):
return losses.binary_crossentropy(y_true, y_pred) + \
dice_loss(y_true, y_pred)
def bce_logdice_loss(y_true, y_pred):
return losses.binary_crossentropy(y_true, y_pred) - \
K.log(1. - dice_loss(y_true, y_pred))
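# Illustrative sanity check (assumes an eager TF2/Keras environment; numbers are made up):
# import tensorflow as tf
# y_true = tf.constant([[1., 0., 1., 1.]])
# y_pred = tf.constant([[0.9, 0.1, 0.8, 0.4]])
# print(float(dice_loss(y_true, y_pred)))  # roughly 0.16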
| 936 |
client/agc_monitor/instruction_register.py
|
vandersonpc/agc_monitor
| 14 |
2170268
|
from PySide2.QtWidgets import QWidget, QFrame, QVBoxLayout, QHBoxLayout, QGridLayout, QLineEdit, QLabel
from PySide2.QtGui import QFont, QColor
from PySide2.QtCore import Qt
from collections import OrderedDict
from indicator import Indicator
import usb_msg as um
import agc
STATUS_INDS = OrderedDict([
('iip', 'IIP'),
('inhl', 'INHL'),
('inkl', 'INKL'),
('ld', 'LD'),
('chld', 'CHLD'),
('rd', 'RD'),
('chrd', 'CHRD'),
])
class InstructionRegister(QWidget):
def __init__(self, parent, usbif, color):
super().__init__(parent)
self._br_inds = []
self._st_inds = []
self._sq_inds = []
self._status_inds = {}
self._setup_ui(color)
usbif.poll(um.ReadMonRegI())
usbif.poll(um.ReadMonRegStatus())
usbif.poll(um.ReadStatusPeripheral())
usbif.listen(self)
def handle_msg(self, msg):
if isinstance(msg, um.MonRegI):
self.set_i_values(msg.br, msg.st, msg.sqext, msg.sq)
elif isinstance(msg, um.MonRegStatus):
self._status_inds['iip'].set_on(msg.iip)
self._status_inds['inhl'].set_on(msg.inhl)
self._status_inds['inkl'].set_on(msg.inkl)
elif isinstance(msg, um.StatusPeripheral):
self._status_inds['ld'].set_on(msg.ld)
self._status_inds['chld'].set_on(msg.chld)
self._status_inds['rd'].set_on(msg.rd)
self._status_inds['chrd'].set_on(msg.chrd)
def set_i_values(self, br, st, sqext, sq):
self._set_reg_value(self._br_inds, self._br_value, br)
self._set_reg_value(self._st_inds, self._st_value, st)
self._set_reg_value(self._sq_inds, self._sq_value, (sqext << 5) | sq)
self._inst_value.setText(agc.disassemble_subinst(sqext, sq, st))
def _setup_ui(self, color):
# Set up our basic layout
layout = QHBoxLayout(self)
self.setLayout(layout)
layout.setSpacing(3)
layout.setMargin(1)
# Construct register groups for BR, ST, and SQ
br_frame, self._br_value = self._create_reg(self._br_inds, 'BR', 2, color)
st_frame, self._st_value = self._create_reg(self._st_inds, 'ST', 3, color)
sq_frame, self._sq_value = self._create_reg(self._sq_inds, 'SQ', 7, color)
layout.addWidget(br_frame)
layout.addWidget(st_frame)
layout.addWidget(sq_frame)
stat_group = QWidget(self)
layout.addWidget(stat_group)
stat_layout = QGridLayout(stat_group)
stat_layout.setMargin(0)
stat_layout.setSpacing(0)
col = 0
for name, label in STATUS_INDS.items():
self._status_inds[name] = self._create_status_light(label, stat_group, stat_layout, col)
col += 1
# Create a grouping widget for the I label and decoded instruction value box
label_value = QWidget(self)
lv_layout = QHBoxLayout(label_value)
lv_layout.setSpacing(3)
lv_layout.setMargin(1)
lv_layout.setContentsMargins(0, 32, 0, 0)
label_value.setLayout(lv_layout)
layout.addWidget(label_value)
# Create a value box for displaying the overall decoded instruction
self._inst_value = QLineEdit(label_value)
self._inst_value.setReadOnly(True)
self._inst_value.setMaximumSize(65, 32)
self._inst_value.setText('TC0')
font = QFont('Monospace')
font.setStyleHint(QFont.TypeWriter)
font.setPointSize(10)
self._inst_value.setFont(font)
self._inst_value.setAlignment(Qt.AlignCenter)
lv_layout.addWidget(self._inst_value)
# Create a label to show 'I'
label = QLabel('I', label_value)
font = label.font()
font.setPointSize(14)
font.setBold(True)
label.setFont(font)
lv_layout.addWidget(label)
# Add some spacing to account for lack of parity indicators
layout.addSpacing(52)
def _set_reg_value(self, inds, value_box, x):
# Generic function to display in octal the value of a register, with the
# appropriate number of digits
for i in range(0, len(inds)):
inds[i].set_on((x & (1 << i)) != 0)
fmt_string = '%%0%oo' % int((len(inds)+2)/3)
value_box.setText(fmt_string % x)
def _create_reg(self, ind_list, name, width, color):
# Create a widget to hold the register's bits
reg_widget = QWidget(self)
reg_layout = QVBoxLayout(reg_widget)
reg_widget.setLayout(reg_layout)
reg_layout.setSpacing(0)
reg_layout.setMargin(0)
# Create a widget to hold the register's label and value textbox
label_value = QWidget(reg_widget)
lv_layout = QHBoxLayout(label_value)
label_value.setLayout(lv_layout)
lv_layout.setSpacing(1)
lv_layout.setMargin(0)
reg_layout.addWidget(label_value)
# Create a label to show the register's name
reg_label = QLabel(name, label_value)
reg_label.setAlignment(Qt.AlignCenter)
font = reg_label.font()
font.setPointSize(8)
reg_label.setFont(font)
lv_layout.addWidget(reg_label)
# Create a textbox to show the register's value in octal
n_digits = int((width+2)/3)
if n_digits == 1:
value_width = 25
elif n_digits == 2:
value_width = 30
else:
value_width = 45
reg_value = QLineEdit(label_value)
reg_value.setReadOnly(True)
reg_value.setMaximumSize(value_width, 32)
reg_value.setText(n_digits * '0')
font = QFont('Monospace')
font.setStyleHint(QFont.TypeWriter)
font.setPointSize(10)
reg_value.setFont(font)
reg_value.setAlignment(Qt.AlignCenter)
lv_layout.addWidget(reg_value)
# Create a frame to hold the register's bits
bit_frame = QFrame(reg_widget)
bit_layout = QHBoxLayout(bit_frame)
bit_layout.setSpacing(1)
bit_layout.setMargin(0)
bit_frame.setLayout(bit_layout)
bit_frame.setFrameStyle(QFrame.StyledPanel | QFrame.Raised)
# Add indicators for each bit in the register, from MSB to LSB
for i in range(width, 0, -1):
ind = Indicator(bit_frame, color)
ind.setFixedSize(20, 32)
bit_layout.addWidget(ind)
ind_list.insert(0, ind)
# Add separators between each group of 3 bits
if (i > 1) and ((i % 3) == 1):
sep = QFrame(bit_frame)
sep.setFrameStyle(QFrame.VLine | QFrame.Raised)
bit_layout.addWidget(sep)
reg_layout.addWidget(bit_frame)
return reg_widget, reg_value
def _create_status_light(self, name, parent, layout, col):
label = QLabel(name, parent)
label.setAlignment(Qt.AlignBottom | Qt.AlignCenter)
label.setFixedSize(30,20)
font = label.font()
font.setPointSize(8)
label.setFont(font)
layout.addWidget(label, 1, col)
layout.setAlignment(label, Qt.AlignBottom)
        # Add an indicator light for this status signal
ind = Indicator(parent, QColor(0, 255, 255))
ind.setFixedSize(20, 20)
layout.addWidget(ind, 2, col)
layout.setAlignment(ind, Qt.AlignCenter)
return ind
| 7,379 |
Python-For-Everyone-Horstmann/Chapter6-Lists/P6.20.py
|
islayy/Books-solutions
| 0 |
2168212
|
# Magic squares. An n × n matrix that is filled with the numbers 1, 2, 3, . . ., square n is a
# magic square if the sum of the elements in each row, in each column, and in the two
# diagonals is the same value.
# Write a program that reads in 16 values from the keyboard and tests whether they
# form a magic square when put into a 4 × 4 table. You need to test two features:
# 1. Does each of the numbers 1, 2, ..., 16 occur in the user input?
# 2. When the numbers are put into a square, are the sums of the rows, columns,
# and diagonals equal to each other?
# IMPORT
from sys import exit
# FUNCTIONS
# main
def main():
square_list = []
# input
print("Enter 16 values: ")
for i in range(16):
inputN = int(input())
square_list.append(inputN)
# check if the numbers from 1 to 16 occur exactly once
for i in range(1, 17):
found = False
for j in range(len(square_list)):
if found == False:
if square_list[j] == i:
found = True
if found == False:
print(i, "not in the matrix")
# magic square matrix
magicSquare = [[0 for x in range(4)] for x in range(4) ]
# construct a matrix from square_list
for i in range(4):
for j in range(4):
magicSquare[i][j] = square_list[i * 4 + j]
sumMatrix = 0
# sum each row
for i in range(4):
total = 0
for j in range(4):
total += magicSquare[i][j]
if i == 0:
sumMatrix = total
elif sumMatrix != total:
exit("Not a magic square")
# sum each column
for i in range(4):
total = 0
for j in range(4):
total += magicSquare[j][i]
        if sumMatrix != total:
exit("Not a magic square")
# sum first diagonal
total = 0
for i in range(4):
total += magicSquare[i][i]
if sumMatrix != total:
exit("Not a magic square")
# sum second diagonal
total = 0
for i in range(4):
total += magicSquare[i][4 - 1 - i]
if sumMatrix != total:
exit("Not a magic square")
# if nothing fails, then it's a square
print("It's a magic square")
# PROGRAM RUN
main()
| 2,251 |
software/pynguin/pynguin/ga/operators/selection/selection.py
|
se2p/artifact-pynguin-ssbse2020
| 3 |
2170416
|
# This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
"""Provide abstract selection function."""
from abc import abstractmethod
from typing import Generic, List, TypeVar
import pynguin.ga.chromosome as chrom
# pylint: disable=invalid-name
T = TypeVar("T", bound=chrom.Chromosome)
class SelectionFunction(Generic[T]):
"""Abstract base class for selection functions."""
def __init__(self) -> None:
self._maximize = True
@abstractmethod
def get_index(self, population: List[T]) -> int:
"""Provide an index within the population."""
def select(self, population: List[T], number: int = 1) -> List[T]:
"""Return N parents."""
offspring: List[T] = []
for _ in range(number):
offspring.append(population[self.get_index(population)])
return offspring
@property
def maximize(self):
"""Do we maximize fitness?"""
return self._maximize
@maximize.setter
def maximize(self, new_value: bool) -> None:
self._maximize = new_value
| 1,663 |
Datetime.py
|
code-lover636/Calendar-Analog-Clock-GUI
| 0 |
2170654
|
from tkinter import*
import turtle
from time import strftime
from tkcalendar import Calendar
# Setting up window
root = Tk()
root.config(bg="black")
root.title("My Clock")
root.iconbitmap("assets/icon.ico")
def time_now(hands):
Time = strftime("%I %M %S")
Time = Time.split()
hands[2].right(6*int(Time[2]) + 6)
hands[1].right(6*int(Time[1]))
hands[0].right(0.5*( 60*int(Time[0]) + int(Time[1]) ))
def setup_hands( length:tuple, width:tuple, shape="arrow", colour=("white", "blue", "red")):
try:
hands = []
for x in range(3):
hands.append(turtle.RawTurtle(screen,shape))
hands[x].hideturtle()
hands[x].left(90)
hands[x].color(colour[x])
hands[x].shapesize(stretch_len=length[x], stretch_wid=width[x])
hands[x].showturtle()
time_now(hands)
center.stamp()
return hands
except Exception:
print("error0")
def tick():
try:
center.stamp()
hands[2].right(6)
hands[1].right(1/10)
hands[0].right(1/120)
center.stamp()
analog_canvas.after(1000,tick)
except Exception:
print("error1")
def show_time():
try:
Time = strftime("%I:%M:%S %p")
Date = strftime("%d %h %Y")
digital_time.config(text=Time)
digital_date.config(text=Date)
        digital_time.after(1000, show_time)
        # (A second, daily reschedule here would pile up duplicate callbacks; the one-second
        # timer above already refreshes both the time and the date labels.)
except Exception:
print("error2")
def cal():
cal_win = Toplevel(root, bg="black", cursor="dotbox")
cal_win.title("My Calendar")
calendar = Calendar(cal_win, selectmode='day', year=int(strftime("%Y")), month=int(strftime("%m")) , day=int(strftime("%d")), bg='black', fg='white')
calendar.pack()
# Widgets
analog_canvas = Canvas(root, width=400, height=350, bg="black")
digital_time = Label(root, bg="black", fg="blue", font=("ROG FONTS",30,"normal"), width=10, justify="center")
digital_date = Label(root, bg="black", fg="red", font=("Algerian",30,"normal"), width=10, justify="left")
calendar_but = Button(root, bg="black",activebackground="black", fg="grey", text="📆", font=("consolas",20,"normal"), bd=0, justify="left", command=cal)
analog_canvas.grid(row=0, column=0, columnspan=2)
digital_time.grid(row=1, column=0, columnspan=2)
digital_date.grid(row=2, column=1, sticky="nw")
calendar_but.grid(row=2, column=0)
# Turtle screen
screen = turtle.TurtleScreen(analog_canvas)
screen.bgpic("assets/clock.png")
screen.bgcolor("black")
center = turtle.RawTurtle(screen,"circle")
center.color("white")
center.shapesize(0.5)
hands = setup_hands(length=(5,10,14), width=(0.2,0.3,0.3))
try:
show_time()
tick()
except Exception:
print('error3')
root.mainloop()
| 2,861 |
tools/losses/bce_loss.py
|
unademo/UNet_Nested4Tiny_Objects_Keypoints
| 6 |
2169106
|
# * Loss: BCE Loss
# Referenced the 3rd-party codes.
# Loss for heatmap (pixel-by-pixel)
#
# * Test Status: Not tested
#
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
def BCE_loss(results, labels, topk=10):
# if len(results.shape) == 1:
# results = results.view(1, -1)
# batch_size, class_num = results.shape
# labels = labels.view(-1, 1)
# # one_hot_target = torch.zeros(batch_size, class_num + 1).cuda().scatter_(1, labels, 1)[:, :5004 * 2]
# lovasz_loss = lovasz_hinge(results,labels )#one_hot_target
error = torch.abs(labels - torch.sigmoid(results))#one_hot_target
error = error.topk(topk, 1, True, True)[0].contiguous()
target_error = torch.zeros_like(error).float().cuda()
error_loss = nn.BCELoss(reduce=True)(error, target_error)
# labels = labels.view(-1)
# indexs_new = (labels != 5004 * 2).nonzero().view(-1)
# if len(indexs_new) == 0:
# return error_loss
# results_nonew = results[torch.arange(0, len(results))[indexs_new], labels[indexs_new]].contiguous()
# target_nonew = torch.ones_like(results_nonew).float().cuda()
# nonew_loss = nn.BCEWithLogitsLoss(reduce=True)(results_nonew, target_nonew)
return error_loss # nonew_loss + error_loss + lovasz_loss * 0.5
class BCELogitsLossWithMask(nn.Module):
def __init__(self, size_average=True):
super(BCELogitsLossWithMask, self).__init__()
self.size_average = size_average
def forward(self, input, target, mask=None):
        '''
        :param input: Variable of shape (N, C, H, W), per-pixel heatmap logits
        :param target: Variable of shape (N, C, 2), per-keypoint (x, y) target coordinates
        :param mask: Variable of shape (N, C) 0. or 1. float
        :return:
        '''
# print(target[0,0,1]) # target: N C 2 input: N C H W
_,C,_ = target.shape
if not (C == input.shape[1]):
raise ValueError("Target channel ({}) must be the same as input channel ({})".format(C, input.shape[1]))
N, C, H, W = input.shape
# print(input.shape)
# target_matrix = np.zeros((N, C, H, W)).astype(np.float32) #0.0~1.0
# for n in range(N):
# for c in range(C):
# target_matrix[n,c,int(target[n,c,1]/TARGET_SIZE),int(target[n,c,0]/TARGET_SIZE)] = 1.0
        # NOTE: produceTargetHeatmap must be provided by this class; the commented-out block above sketches it.
        target_matrix = self.produceTargetHeatmap(target, H, W)
# print(input, np.max(target_matrix))
target_matrix = torch.from_numpy(target_matrix)
# BCELogitsLossWithMask
max_val = (-input).clamp(min=0)
loss = - input * target_matrix + max_val + ((-max_val).exp() + (-input - max_val).exp()).log() #TODO:Need to check #input
if self.size_average:
# w, h = input.shape
return loss.sum() / (H*W)
else:
return loss.sum()
# neg_abs = - input.abs()
# loss = input.clamp(min=0) - input * target_matrix + (1 + neg_abs.exp()).log()
# return loss.mean()
| 2,970 |
nutshell_config/nutshell_app/management/commands/bootstrap_data.py
|
josephdubon/shoe-store-api
| 0 |
2169885
|
"""
Populate the ShoeType table with the following entries:
sneaker
boot
sandal
dress
other
Populate the ShoeColor table with the following entries:
Red
Orange
Yellow
Green
Blue
Indigo
Violet
White
Black
"""
from django.core.management.base import BaseCommand
from nutshell_config.nutshell_app.models import NSShoeType, NSShoeColor
class Command(BaseCommand):
help = 'Create data for database: NSShoeType and NSShowColor'
def add_arguments(self, parser):
parser.add_argument(
'style',
nargs=5,
choices=[
'SNEAKER',
'BOOT',
'SANDAL',
'DRESS',
'OTHER'
]
)
parser.add_argument(
'color_name',
nargs=9,
choices=[
'RED',
'ORANGE',
'YELLOW',
'GREEN',
'BLUE',
'INDIGO',
'VIOLET',
'BLACK',
'WHITE'
]
)
def handle(self, *args, **options):
for (k, v) in NSShoeType.TYPE_NAME_CHOICES:
NSShoeType.objects.create(style=k)
for (k, v) in NSShoeColor.COLOR_NAME_CHOICES:
NSShoeColor.objects.create(color_name=k)
self.stdout.write(
self.style.SUCCESS(
'Successfully added newly created data to project database.'
)
)
| 1,466 |
pkgname/cfg/parse.py
|
StoneT2000/rl-boilerplate
| 1 |
2168253
|
from omegaconf import OmegaConf
import os
import re
def parse_cfg(cfg_path: str = None, default_cfg_path: str = None) -> OmegaConf:
"""Parses a config file and returns an OmegaConf object."""
if default_cfg_path is not None:
base = OmegaConf.load(default_cfg_path)
else:
base = OmegaConf.create()
cli = OmegaConf.from_cli()
for k, v in cli.items():
if v is None:
cli[k] = True
base.merge_with(cli)
if cfg_path is not None:
cfg = OmegaConf.load(cfg_path)
base.merge_with(cfg)
return base
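# Example usage (illustrative paths and keys; defaults load first, then CLI values, then cfg_path):
# cfg = parse_cfg(cfg_path="cfg/train.yml", default_cfg_path="cfg/default.yml")
# print(cfg.seed)  # assuming a `seed` key exists in one of the sources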
| 575 |
python/12_sorts/merge_sort.py
|
aiorosxul/algo
| 0 |
2170552
|
def merge_sort(items):
    # Avoid shadowing the built-in `list`; lengths 0 and 1 are already sorted.
    if len(items) <= 1:
        return items
    else:
        mid = len(items) // 2
        left = items[:mid]
        right = items[mid:]
l1 = merge_sort(left)
r1 = merge_sort(right)
return merge(l1, r1)
def merge(left, right):
result = []
while len(left) > 0 and len(right) > 0:
if left[0] < right[0]:
result.append(left.pop(0))
else:
result.append(right.pop(0))
result += left
result += right
return result
print(merge_sort([5, 3, 8, 1, 9, 2]))  # [1, 2, 3, 5, 8, 9]
| 540 |
product/migrations/0004_auto_20200427_0347.py
|
Medinaaz/Vodafone-Payment
| 0 |
2170016
|
# Generated by Django 3.0.1 on 2020-04-27 03:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0003_productproperty_is_main_property'),
]
operations = [
migrations.AddField(
model_name='product',
name='description_en',
field=models.TextField(blank=True, null=True, verbose_name='Description'),
),
migrations.AddField(
model_name='product',
name='description_tr',
field=models.TextField(blank=True, null=True, verbose_name='Description'),
),
migrations.AddField(
model_name='product',
name='name_en',
field=models.CharField(max_length=255, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='product',
name='name_tr',
field=models.CharField(max_length=255, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='productcategory',
name='description_en',
field=models.TextField(blank=True, null=True, verbose_name='Description'),
),
migrations.AddField(
model_name='productcategory',
name='description_tr',
field=models.TextField(blank=True, null=True, verbose_name='Description'),
),
migrations.AddField(
model_name='productcategory',
name='name_en',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='productcategory',
name='name_tr',
field=models.CharField(max_length=100, null=True, verbose_name='Name'),
),
migrations.AddField(
model_name='productimages',
name='title_en',
field=models.CharField(max_length=255, null=True, verbose_name='Title'),
),
migrations.AddField(
model_name='productimages',
name='title_tr',
field=models.CharField(max_length=255, null=True, verbose_name='Title'),
),
]
| 2,195 |
sum_terms.py
|
adriennekarnoski/code-katas
| 0 |
2166418
|
"""Sum of the first nth term of Series
A function which returns the sum of the series upto nth term(parameter).
#1 Best Practices Solution by kevinplybon (plus 546 more warriors):
def row_sum_odd_numbers(n):
return n ** 3
"""
def series_sum(n):
"""Takes in n and returns sum of values up to nth term."""
series_values = [0.00, 1.00]
if n <= 1:
return "%.2f" % round(series_values[n], 2)
x = 4
while len(series_values) != n + 1:
series_values.append(1 / x)
x += 3
total = "%.2f" % round(sum(series_values), 2)
return total
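# Quick checks computed from the function above:
# print(series_sum(1))  # "1.00"
# print(series_sum(2))  # "1.25"
# print(series_sum(5))  # "1.57"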
| 583 |
scripts/examples/rest_example_client.py
|
lalten/kreathon2018
| 0 |
2169664
|
#! /usr/bin/python
# coding: utf8
import requests
from here_connector import calc_route, route_to_image
# url = 'http://10.13.144.90:5000/containers'
# data_dict = {"user":"asddd"}
#
# response = requests.get(url, data_dict)
# print response.json()['containers'][0]
# url = 'http://10.13.144.90:5000/feedback'
# data_dict = {"user_id": 123, "first_name": "horst", "lat": 51.312031, "lng": 6.558730, "clean": 3}
# response = requests.post(url, data_dict)
# ask for closest container and show route to it
url = 'http://10.13.144.90:5000/get_closest'
data_dict = {"lat": 51.33, 'lng': 6.6}
response = requests.post(url, data_dict)
js = response.json()['best_container']
pos = js['closest_container_pos']
dist = js['dist']
print(js)
s = ["%f,%f" % (data_dict['lat'], data_dict['lng']), pos]
route = calc_route(s, 'pedestrian')
route_to_image(route)
| 854 |
hello_world/models/rise_classification/inflow_clasification.py
|
keenan-mckenzie-lqd/test_cd_azure_appservice
| 0 |
2170304
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import re
import string
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import GradientBoostingClassifier
from joblib import dump, load
def df_run_CashInflow(json):
#loading data of csv
df = pd.json_normalize(json)
# input, name of column values looked at
x = df['Source Description']
# load model
clf = load(r'hello_world/models/rise_classification/InflowClassification_model.joblib')
# Make predictions, has to be looped through the array for more than 1 output
y_pred = clf.predict(x)
return str(y_pred)
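# Illustrative call; the field name follows the function above, the record text is made up, and the
# joblib model must exist at the hard-coded path inside df_run_CashInflow:
# sample = [{"Source Description": "ACH CREDIT PAYROLL ACME CORP"}]
# print(df_run_CashInflow(sample))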
# In[ ]:
| 740 |
pd.py
|
corder-ybh/gpst
| 0 |
2170468
|
import tushare as ts
import numpy as np
import pandas as pd
#df = ts.get_sina_dd('000799',date='2018-06-29')
df = ts.profit_data(year=2018,top=60)
print(df)
| 156 |
cart-service/service.py
|
piotrb5e3/sanic-uservices-demo
| 0 |
2169818
|
import asyncio
from functools import reduce
from sanic import Sanic
from sanic.response import json
from sanic.exceptions import NotFound
from products_service_client import get_product_price
app = Sanic('cart-service')
cart = {
    'id': 1234,
    'products': [
        {'id': 0, 'amount': 2},
        {'id': 2, 'amount': 5},
        {'id': 3, 'amount': 7},
    ],
}
async def get_product_entry_price(product):
return await get_product_price(product['id']) * product['amount']
@app.get('/<id:int>')
async def cartDetail(request, id):
if id != cart['id']:
raise NotFound("Cart not found")
return json(cart)
@app.get('/<id:int>/getTotal')
async def cartTotal(request, id):
if id != cart['id']:
raise NotFound("Cart not found")
prices = await asyncio.gather(
*map(get_product_entry_price, cart['products']))
price = reduce(lambda x, y: x + y, prices)
return json({'price': price})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8001)
| 1,116 |
time.py
|
darkless456/Python
| 0 |
2170557
|
# time.py
class Time(object):
    def __init__(self, hour=0, minute=0, second=0):
        self.hour = hour
        self.minute = minute
        self.second = second
    def __str__(self):
        return '%.2d:%.2d:%.2d' % (self.hour, self.minute, self.second)
time = Time(9, 45)
print(time)  # 09:45:00
# The original omitted __init__, so Time(9, 45) raised a TypeError; it now prints correctly.
| 165 |
utils/tuning_utils.py
|
INK-USC/Upstream-Bias-Mitigation
| 4 |
2170110
|
from .metric_utils import *
import os
def decide_checkpoints_with_early_stopping(df, key, by_lower, iter_per_epoch=None):
def best_row(group):
arr = group.loc[:, key]#.to_numpy().astype(np.float32)
#print(arr)
#print(np.isnan(arr))
#print('---')
if iter_per_epoch:
steps = []
for i, (index, row) in enumerate(arr.items()):
if int(index[-1]) // iter_per_epoch == 0:
steps.append(i)
arr = arr.iloc[steps]
arr = arr.to_numpy().astype(np.float32)
if len(arr[~np.isnan(arr)]) == 0:
best_lr_idx = 0
else:
if by_lower:
best_lr_idx = np.nanargmin(arr)
else:
best_lr_idx = np.nanargmax(arr)
# return group.iloc[best_seed_idx]
return group.iloc[best_lr_idx].name
return df.groupby(level=['method','lr','seed']).apply(best_row)
def get_early_stopping_step(df, key, early_stop):
def get_truncate_step_by_group(group):
global global_group
steps = []
rows = []
for row_idx, row in group.iterrows():
global_group = group
if row_idx[-1] != -1:
steps.append(row_idx)
rows.append(row)
prev_best = -1e10
prev_best_step = None
patience = early_stop
for step in steps:
if group.loc[step, key] < prev_best:
patience -= 1
else:
prev_best = group.loc[step, key]
prev_best_step = step
patience = early_stop
if patience <= 0:
break
return prev_best_step
return df.groupby(level=['method','lr','seed']).apply(get_truncate_step_by_group)
def decide_checkpoints_to_keep(base, *steps_tab):
ckpts_to_keep = set()
model_bases = set()
model_base_ckpt_dirs = set()
kept_model = set()
for steps in steps_tab:
model_base = steps[:3]
model_bases.add(model_base)
        # Use the `base` argument (a hard-coded 'base' literal previously left the parameter unused).
        for folder in os.listdir(os.path.join(base, model_base[0], model_base[1], model_base[2])):
            if folder.startswith('epoch'):
                model_base_ckpt_dirs.add(os.path.join(base, model_base[0], model_base[1], model_base[2], folder))
| 2,308 |
src/generative_playground/models/embedder/multi_embedder.py
|
ZmeiGorynych/generative_playground
| 9 |
2169147
|
import torch.nn as nn
import torch
class MultiEmbedder(nn.Module):
'''
Multi-language embedder, uses the first index in each sequence to determine language
'''
def __init__(self,
languages,
lang_mapping,
input_dim,
output_dim,
index_offset=1,
padding_idx=0):
super().__init__()
self.output_dim = output_dim
self.language_map = {key: value - int(index_offset) for key, value in lang_mapping.items()
if key in languages}
self.language_embed = nn.Embedding(len(lang_mapping)-2,output_dim) # no padding index needed
self.index_offset = index_offset
self.embedders = nn.ModuleDict({lang: nn.Embedding(input_dim,
output_dim,
padding_idx=padding_idx)
for lang in languages})
def forward(self, x):
'''
Embed the sequences, using the first index to pick the embedder
The output's first element embeds the language
:param x: batch_size x num_steps long
:return: batch_size x num_steps x output_dim float
'''
language_codes = x[:,0] - int(self.index_offset)
embed_languages = self.language_embed(language_codes.unsqueeze(1))
embed = torch.zeros(x.size()[0], x.size()[1]-1, self.output_dim,
dtype=torch.float32, device=x.device)
for lang, embedder in self.embedders.items():
i = self.language_map[lang]
#if in language_codes:
embed[language_codes == i, :, :] = embedder(x[:,1:][language_codes == i, :])
out = torch.cat([embed_languages, embed], dim=1)
return out
| 1,864 |
pratt.py
|
bourguet/shunting_yard
| 43 |
2170591
|
#! /usr/bin/env python3
import sys
import lexer
from tree import Node, CompositeNode
class SymbolDesc:
def __init__(self, token, lprio, rprio, evaluator):
self.token = token
self.lprio = lprio
self.rprio = rprio
self.evaluator = evaluator
def __repr__(self):
return '<Symbol {} {}/{}>'.format(self.token.lexem, self.lprio, self.rprio)
def identity_evaluator(parser, sym):
result = Node(sym.token)
return result
def unary_prefix_evaluator(parser, sym):
arg = parser.parse_to(sym.rprio)
if arg is None:
return CompositeNode(sym.token, [Node(lexer.Token('ERROR', 'MISSING VALUE'))])
else:
return CompositeNode(sym.token, [arg])
def binary_evaluator(parser, left_arg, sym):
right_arg = parser.parse_to(sym.rprio)
if right_arg is None:
return CompositeNode(sym.token, [left_arg, Node(lexer.Token('ERROR', 'MISSING VALUE'))])
else:
return CompositeNode(sym.token, [left_arg, right_arg])
def unary_postfix_evaluator(parser, left_arg, sym):
return CompositeNode('post' + sym.token, [left_arg])
class Parser:
def __init__(self):
self.lexer = None
self.cur_token = None
self.presymbols = {}
self.postsymbols = {}
def register_presymbol(self, oper, rprio, evaluator=unary_prefix_evaluator):
if type(oper) is str:
self.presymbols[oper] = SymbolDesc(oper, None, rprio, evaluator)
else:
for op in oper:
self.presymbols[op] = SymbolDesc(op, None, rprio, evaluator)
def register_postsymbol(self, oper, lprio, rprio, evaluator=binary_evaluator):
if type(oper) is str:
self.postsymbols[oper] = SymbolDesc(oper, lprio, rprio, evaluator)
else:
for op in oper:
self.postsymbols[op] = SymbolDesc(op, lprio, rprio, evaluator)
def reset(self, s):
self.lexer = lexer.tokenize(s)
self.advance()
def advance(self):
try:
self.cur_token = self.lexer.__next__()
except StopIteration:
self.cur_token = None
def prefix_sym(self):
if self.cur_token is None:
return None
elif self.cur_token.kind == 'ID':
return SymbolDesc(self.cur_token, None, None, identity_evaluator)
elif self.cur_token.kind == 'NUMBER':
return SymbolDesc(self.cur_token, None, None, identity_evaluator)
elif self.cur_token.lexem in self.presymbols:
return self.presymbols[self.cur_token.lexem]
else:
return None
def postfix_sym(self):
if self.cur_token is None:
return None
elif self.cur_token.lexem in self.postsymbols:
return self.postsymbols[self.cur_token.lexem]
else:
return None
def parse_to(self, prio):
sym = self.prefix_sym()
if sym is None:
sym = self.postfix_sym()
if sym is None:
return None
node = Node(lexer.Token('ERROR', 'MISSING VALUE'))
else:
self.advance()
node = sym.evaluator(self, sym)
while True:
sym = self.postfix_sym()
if sym is None or prio >= sym.lprio:
if sym is None:
sym = self.prefix_sym()
if sym is not None:
sym = SymbolDesc('MISSING OPERATOR', 1000, 1000, binary_evaluator)
if sym is None or prio >= sym.lprio:
break
else:
self.advance()
node = sym.evaluator(self, node, sym)
return node
def parse(self, s):
self.reset(s)
res = self.parse_to(0)
if self.cur_token is not None:
res = CompositeNode('REMAINING INPUT', [res, self.cur_token])
return res
def prefix_open_parenthesis_evaluator(parser, sym):
result = parser.parse_to(sym.rprio)
if parser.cur_token is not None:
if parser.cur_token.lexem == ')':
parser.advance()
return result
elif parser.cur_token.lexem == ']':
parser.advance()
return CompositeNode('(] ERROR', [result])
else:
return CompositeNode('( ERROR', [result])
def postfix_open_parenthesis_evaluator(parser, left_arg, sym):
if parser.cur_token is not None and parser.cur_token.lexem == ')':
parser.advance()
return CompositeNode('call '+str(left_arg), [])
else:
result = parser.parse_to(sym.rprio)
if parser.cur_token is not None:
if parser.cur_token.lexem == ')':
parser.advance()
if result.token == ',':
return CompositeNode('call ' + str(left_arg), result.children)
else:
return CompositeNode('call ' + str(left_arg), [result])
elif parser.cur_token.lexem == ']':
parser.advance()
if result.token == ',':
return CompositeNode('call (] ' + str(left_arg), result.children)
else:
return CompositeNode('call (] ' + str(left_arg), [result])
return CompositeNode('( ERROR', [result])
def postfix_close_parenthesis_evaluator(parser, left_arg, sym):
return CompositeNode(') ERROR', [left_arg])
def postfix_open_bracket_evaluator(parser, left_arg, sym):
result = parser.parse_to(sym.rprio)
if parser.cur_token is not None:
if parser.cur_token.lexem == ']':
parser.advance()
return CompositeNode('get ' + str(left_arg), [result])
elif parser.cur_token.lexem == ')':
parser.advance()
return CompositeNode('get [) ' + str(left_arg), [result])
return CompositeNode('[ ERROR', [left_arg, result])
def postfix_close_bracket_evaluator(parser, left_arg, sym):
return CompositeNode('] ERROR', [left_arg])
def coma_evaluator(parser, left_arg, sym):
args = [left_arg]
while True:
args.append(parser.parse_to(sym.rprio))
sym = parser.postfix_sym()
if sym is None or sym.token != ',':
break
parser.advance()
return CompositeNode(',', args)
def question_evaluator(parser, left_arg, sym):
true_exp = parser.parse_to(sym.rprio)
sym = parser.postfix_sym()
if sym is not None and sym.token == ':':
parser.advance()
false_exp = parser.parse_to(sym.rprio)
return CompositeNode('?', [left_arg, true_exp, false_exp])
else:
return CompositeNode('? ERROR', [left_arg, true_exp])
def colon_evaluator(parser, left_arg, sym):
return CompositeNode(': ERROR', [left_arg])
def cexp_parser():
parser = Parser()
parser.register_postsymbol(',', 2, 2, coma_evaluator)
parser.register_postsymbol(['=', '*=', '/=', '%=', '+=', '-=', '<<=', '>>=', '&=', '|=', '^='], 5, 4)
parser.register_postsymbol('?', 7, 1, question_evaluator)
parser.register_postsymbol(':', 1, 6, colon_evaluator)
parser.register_postsymbol('||', 8, 9)
parser.register_postsymbol('&&', 10, 11)
parser.register_postsymbol('|', 12, 13)
parser.register_postsymbol('^', 14, 15)
parser.register_postsymbol('&', 16, 17)
parser.register_postsymbol(['==', '!='], 18, 19)
parser.register_postsymbol(['<', '>', '<=', '>='], 20, 21)
parser.register_postsymbol(['<<', '>>'], 22, 23)
parser.register_postsymbol(['+', '-'], 24, 25)
parser.register_postsymbol(['/', '%', '*'], 26, 27)
parser.register_postsymbol('**', 29, 28)
parser.register_presymbol(['+', '-', '++', '--', '~', '!', '&', '*'], 30)
parser.register_postsymbol(['++', '--'], 32, 33, unary_postfix_evaluator)
parser.register_postsymbol(['.', '->'], 32, 33)
parser.register_presymbol('(', 1, prefix_open_parenthesis_evaluator)
parser.register_postsymbol('(', 100, 1, postfix_open_parenthesis_evaluator)
parser.register_postsymbol(')', 1, 100, postfix_close_parenthesis_evaluator)
parser.register_postsymbol('[', 100, 1, postfix_open_bracket_evaluator)
parser.register_postsymbol(']', 1, 100, postfix_close_bracket_evaluator)
return parser
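# Illustrative usage of the parser built above (a sketch, not part of the
# original file; the printed form of the tree depends on the Node/CompositeNode
# __str__ implementations defined earlier in this file):
#   parser = cexp_parser()
#   tree = parser.parse('a = b ? f(x, y) : t[i] + 1')
#   print(tree)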
def main(args):
parser = cexp_parser()
for s in args[1:]:
try:
exp = parser.parse(s)
print('{} -> {}'.format(s, exp))
except RuntimeError as run_error:
print('Unable to parse {}: {}'.format(s, run_error))
if __name__ == "__main__":
main(sys.argv)
| 8,540 |
test_course_schedule_II.py
|
brigitteunger/katas
| 0 |
2169666
|
import unittest
from typing import Dict, List, Optional
from data_course_schedule import num_courses_2, prerequisites_2, schedule_2
class CircleFound(Exception):
pass
class Solution:
def findOrder(self, numCourses: int,
prerequisites: List[List[int]]) -> List[int]:
courses_to_visit = self.collect_prerequisites_per_course(
numCourses, prerequisites)
try:
schedule = []
for course in range(numCourses):
if course in courses_to_visit:
schedule = schedule + self.scheduleForCourse(
course, courses_to_visit)
return schedule
except CircleFound:
return []
def collect_prerequisites_per_course(self, numCourses: int,
prerequisites: List[List[int]]
) -> Dict[int, List[int]]:
courses_with_prerequisites = {i: [] for i in range(numCourses)}
for prerequisite_pair in prerequisites:
course = prerequisite_pair[0]
prerequisite = prerequisite_pair[1]
courses_with_prerequisites[course].append(prerequisite)
return courses_with_prerequisites
    def scheduleForCourse(self, course: int, courses_to_visit: Dict[int, List[int]],
                          visited: Optional[List[int]] = None) -> List[int]:
        if visited is None:
            visited = []
        schedule = []
for pre in courses_to_visit[course]:
if pre in courses_to_visit:
if pre in visited:
raise CircleFound
schedule = schedule + self.scheduleForCourse(
pre, courses_to_visit, visited + [course])
schedule.append(course)
del courses_to_visit[course]
return schedule
class TestFindOrders(unittest.TestCase):
def setUp(self):
self.sol = Solution()
def testFindOrder10(self):
num_courses = 2
prerequisites = [[1, 0]]
schedule = self.sol.findOrder(num_courses, prerequisites)
self.assertEqual(schedule, [0, 1])
def testFindOrder0123(self):
num_courses = 4
prerequisites = [[1, 0], [2, 0], [3, 1], [3, 2]]
schedule = self.sol.findOrder(num_courses, prerequisites)
self.assertEqual(schedule, [0, 1, 2, 3])
def testFindOrder01234(self):
num_courses = 5
prerequisites = [[4, 1], [4, 2], [4, 3], [1, 0]]
schedule = self.sol.findOrder(num_courses, prerequisites)
self.assertEqual(schedule, [0, 1, 2, 3, 4])
def testFindOrder1(self):
num_courses = 1
prerequisites = []
schedule = self.sol.findOrder(num_courses, prerequisites)
self.assertEqual(schedule, [0])
def testFindOrder013(self):
num_courses = 4
prerequisites = [[3, 0], [0, 1]]
schedule = self.sol.findOrder(num_courses, prerequisites)
self.assertEqual(schedule, [1, 0, 2, 3])
def testFindOrderCircle(self):
num_courses = 4
prerequisites = [[1, 0], [2, 1], [3, 2], [0, 3]]
schedule = self.sol.findOrder(num_courses, prerequisites)
self.assertEqual(schedule, [])
def testFindOrderCircle_3210(self):
num_courses = 3
prerequisites = [[0, 1], [0, 2], [1, 2]]
schedule = self.sol.findOrder(num_courses, prerequisites)
self.assertEqual(schedule, [2, 1, 0])
def testFindOrderCircle_big(self):
schedule = self.sol.findOrder(num_courses_2, prerequisites_2)
self.assertEqual(schedule, schedule_2)
if __name__ == "__main__":
unittest.main()
| 3,616 |
tests/test-function/test_parametrize.py
|
qianchilang/learning-pytest
| 42 |
2168656
|
import pytest
@pytest.mark.parametrize('passwd',
['<PASSWORD>',
'<PASSWORD>',
'<PASSWORD>'])
def test_passwd_length(passwd):
assert len(passwd) >= 8
@pytest.mark.parametrize('user, passwd',
[('jack', '<PASSWORD>'),
('tom', '<PASSWORD>')])
def test_passwd_md5(user, passwd):
db = {
'jack': '<PASSWORD>',
'tom': '<PASSWORD>'
}
import hashlib
assert hashlib.md5(passwd.encode()).hexdigest() == db[user]
@pytest.mark.parametrize('user, passwd',
[pytest.param('jack', 'abcdefgh', id='User<Jack>'),
pytest.param('tom', '<PASSWORD>', id='User<Tom>')])
def test_passwd_md5_id(user, passwd):
db = {
'jack': '<PASSWORD>',
'tom': '<PASSWORD>'
}
import hashlib
assert hashlib.md5(passwd.encode()).hexdigest() == db[user]
| 966 |
KurtGroup/Kurt/__init__.py
|
TheoBuchwald/UCPH-KVM
| 1 |
2169515
|
import os, sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from . import chemical_information
from . import output_processing
from . import structures
from . import xyz
| 188 |
examples/mb_hbreps_hbvf/pole/pole.py
|
hanyas/reps
| 8 |
2169141
|
import numpy as np
import gym
import scipy as sc
from scipy.special import comb
import torch
from sds.distributions.gamma import Gamma
from sds.models import HybridController
from reps.mb_hbreps_hbvf import hbREPS_hbVf
# np.random.seed(1337)
torch.set_num_threads(1)
env = gym.make('Pole-RL-v0')
env._max_episode_steps = 5000
env.unwrapped.dt = 0.01
env.unwrapped.sigma = 1e-8
# env.seed(1337)
state_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
dyn = torch.load(open('rarhmm_pole.pkl', 'rb'))
nb_modes = dyn.nb_states
# ctl type
ctl_type = 'ard'
ctl_degree = 1
# ctl_prior
feat_dim = int(comb(ctl_degree + state_dim, ctl_degree)) - 1
input_dim = feat_dim + 1
output_dim = act_dim
likelihood_precision_prior = Gamma(dim=1, alphas=np.ones((1,)) + 1e-8,
betas=25. * np.ones((1,)))
parameter_precision_prior = Gamma(dim=input_dim, alphas=np.ones((input_dim,)) + 1e-8,
betas=1e1 * np.ones((input_dim,)))
ctl_prior = {'likelihood_precision_prior': likelihood_precision_prior,
'parameter_precision_prior': parameter_precision_prior}
ctl_kwargs = {'degree': ctl_degree}
ctl = HybridController(dynamics=dyn, ctl_type=ctl_type,
ctl_prior=ctl_prior, ctl_kwargs=ctl_kwargs)
# init controller
Ks = np.stack([np.zeros((output_dim, input_dim))] * nb_modes, axis=0)
lmbdas = np.stack([1. / 25. * np.eye(output_dim)] * nb_modes, axis=0)
ctl.controls.params = Ks, lmbdas
from reps.mb_hbreps_hbvf import RFFVfunction, PolyVfunction
# vfunc = RFFVfunction(nb_modes, state_dim, nb_feat=25,
# scale=[0.25, 1.5], mult=0.5)
vfunc = PolyVfunction(nb_modes, state_dim, degree=2)
hbreps = hbREPS_hbVf(env=env, dyn=dyn, ctl=ctl,
kl_bound=0.25, discount=0.98,
vfunc=vfunc, vf_reg=1e-8)
ctl_mstep_kwargs = {'nb_iter': 5}
hbreps.run(nb_iter=10, nb_train_samples=2500,
nb_eval_rollouts=25, nb_eval_steps=100,
ctl_mstep_kwargs=ctl_mstep_kwargs)
rollouts, _ = hbreps.evaluate(nb_rollouts=25, nb_steps=250)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=1, ncols=hbreps.state_dim + hbreps.act_dim, figsize=(12, 4))
for roll in rollouts:
for k, col in enumerate(ax[:-1]):
col.plot(roll['x'][:, k])
ax[-1].plot(roll['uc'])
plt.show()
# def beautify(ax):
# ax.set_frame_on(True)
# ax.minorticks_on()
#
# ax.grid(True)
# ax.grid(linestyle=':')
#
# ax.tick_params(which='both', direction='in',
# bottom=True, labelbottom=True,
# top=True, labeltop=False,
# right=True, labelright=False,
# left=True, labelleft=True)
#
# ax.tick_params(which='major', length=6)
# ax.tick_params(which='minor', length=3)
#
# ax.autoscale(tight=True)
# # ax.set_aspect('equal')
#
# if ax.get_legend():
# ax.legend(loc='best')
#
# return ax
#
#
# xlim = (-0.25, 0.25)
# ylim = (-1.5, 1.5)
#
# npts = 18
#
# x = np.linspace(*xlim, npts)
# y = np.linspace(*ylim, npts)
#
# X, Y = np.meshgrid(x, y)
# XYi = np.stack((X, Y))
# XYn = np.zeros((2, npts, npts))
#
# hr = 3
# XYh = np.zeros((hr, 2, npts, npts))
#
# ##
# env.reset()
# for i in range(npts):
# for j in range(npts):
# XYh[0, :, i, j] = XYi[:, i, j]
# for t in range(1, hr):
# XYh[t, :, i, j] = env.unwrapped.fake_step(XYh[t - 1, :, i, j], np.array([0.0]))
#
# env.reset()
# for i in range(npts):
# for j in range(npts):
# hist_obs, hist_act = XYh[..., i, j], np.zeros((hr, act_dim))
# u = hbreps.ctl.action(hist_obs, hist_act, False, False)[-1]
# XYn[:, i, j] = env.unwrapped.fake_step(XYh[-1, :, i, j], u)
#
# dXY = XYn - XYh[-1, ...]
#
# # re-interpolate data for streamplot
# xh, yh = XYh[-1, 0, 1, :], XYh[-1, 1, :, 0]
# xi = np.linspace(xh.min(), xh.max(), x.size)
# yi = np.linspace(yh.min(), yh.max(), y.size)
#
# from scipy.interpolate import interp2d
#
# dxh, dyh = dXY[0, ...], dXY[1, ...]
# dxi = interp2d(xh, yh, dxh)(xi, yi)
# dyi = interp2d(xh, yh, dyh)(xi, yi)
#
# import matplotlib.pyplot as plt
#
# fig = plt.figure(figsize=(5, 5), frameon=True)
# ax = fig.gca()
#
# ax.streamplot(xi, yi, dxi, dyi,
# color='magenta', linewidth=1, density=1.25,
# arrowstyle='-|>', arrowsize=1.,
# minlength=0.25)
#
# ax = beautify(ax)
# ax.grid(False)
#
# ax.set_xlim((xh.min(), xh.max()))
# ax.set_ylim((yh.min(), yh.max()))
#
# plt.show()
# #
# # from tikzplotlib import save
# # save("hbreps_pole_rl.tex")
| 4,604 |
geneticalgorithm2/classes.py
|
PasaOpasen/geneticalgorithm
| 0 |
2168730
|
from __future__ import annotations
from typing import Dict, Any, List, Optional, Union, Callable, Tuple
from dataclasses import dataclass
import warnings
import numpy as np
from .crossovers import Crossover
from .mutations import Mutations
from .selections import Selection
from .utils import can_be_prob, union_to_matrix
class DictLikeGetSet:
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, key, value):
setattr(self, key, value)
def get(self, item):
return getattr(self, item)
_algorithm_params_slots = {
'max_num_iteration',
'max_iteration_without_improv',
'population_size',
'mutation_probability',
'mutation_discrete_probability',
'elit_ratio',
'crossover_probability',
'parents_portion',
'crossover_type',
'mutation_type',
'mutation_discrete_type',
'selection_type'
}
@dataclass
class AlgorithmParams(DictLikeGetSet):
max_num_iteration: Optional[int] = None
max_iteration_without_improv: Optional[int] = None
population_size: int = 100
mutation_probability: float = 0.1
mutation_discrete_probability: Optional[float] = None
# deprecated
crossover_probability: Optional[float] = None
elit_ratio: float = 0.04
parents_portion: float = 0.3
crossover_type: Union[str, Callable[[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]] = 'uniform'
mutation_type: Union[str, Callable[[float, float, float], float]] = 'uniform_by_center'
mutation_discrete_type: Union[str, Callable[[int, int, int], int]] = 'uniform_discrete'
selection_type: Union[str, Callable[[np.ndarray, int], np.ndarray]] = 'roulette'
def _check_if_valid(self):
assert int(self.population_size) > 0, f"population size must be integer and >0, not {self.population_size}"
assert (can_be_prob(self.parents_portion)), "parents_portion must be in range [0,1]"
assert (can_be_prob(self.mutation_probability)), "mutation_probability must be in range [0,1]"
assert (can_be_prob(self.elit_ratio)), "elit_ratio must be in range [0,1]"
if self.max_iteration_without_improv is not None and self.max_iteration_without_improv < 1:
warnings.warn(f"max_iteration_without_improv is {self.max_iteration_without_improv} but must be None or int > 0")
self.max_iteration_without_improv = None
def get_CMS_funcs(self) -> Tuple[
Callable[[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]],
Callable[[float, float, float], float],
Callable[[int, int, int], int],
Callable[[np.ndarray, int], np.ndarray]
]:
"""
        returns the configured crossover, mutation, discrete mutation and selection
        as callable functions
"""
result = []
for name, value, dct in (
('crossover', self.crossover_type, Crossover.crossovers_dict()),
('mutation', self.mutation_type, Mutations.mutations_dict()),
('mutation_discrete', self.mutation_discrete_type, Mutations.mutations_discrete_dict()),
('selection', self.selection_type, Selection.selections_dict())
):
if type(value) == str:
if value not in dct:
raise Exception(
f"unknown name of {name}: '{value}', must be from {tuple(dct.keys())} or a custom function"
)
result.append(dct[value])
else:
assert callable(value), f"{name} must be string or callable"
result.append(value)
return tuple(result)
@staticmethod
def from_dict(dct: Dict[str, Any]):
result = AlgorithmParams()
for name, value in dct.items():
if name not in _algorithm_params_slots:
raise AttributeError(f"name '{name}' does not exists in AlgorithmParams fields")
setattr(result, name, value)
return result
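# Illustrative sketch (not part of the original module): constructing parameters
# from a plain dict and resolving the operator callables; the dict values below
# are arbitrary assumptions.
#   params = AlgorithmParams.from_dict({'population_size': 50,
#                                       'mutation_probability': 0.2,
#                                       'selection_type': 'roulette'})
#   params._check_if_valid()
#   crossover, mutation, mutation_discrete, selection = params.get_CMS_funcs()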
@dataclass
class Generation(DictLikeGetSet):
variables: Optional[np.ndarray] = None
scores: Optional[np.ndarray] = None
def __check_dims(self):
if self.variables is not None:
assert len(self.variables.shape) == 2, f"'variables' must be matrix with shape (objects, dimensions), not {self.variables.shape}"
if self.scores is not None:
assert len(self.scores.shape) == 1, f"'scores' must be 1D-array, not with shape {self.scores.shape}"
assert self.variables.shape[0] == self.scores.size, f"count of objects ({self.variables.shape[0]}) must be equal to count of scores ({self.scores.size})"
@property
def size(self):
return self.scores.size
@property
def dim_size(self):
return self.variables.shape[1]
def as_wide_matrix(self):
# should not be used in main code -- was needed for old versions
return union_to_matrix(self.variables, self.scores)
def save(self, path: str):
np.savez(path, population=self.variables, scores=self.scores)
@staticmethod
def load(path: str):
try:
st = np.load(path)
except Exception as err:
raise Exception(
f"if generation object is a string, it must be path to npz file with needed content, but raised exception {repr(err)}"
)
assert 'population' in st and 'scores' in st, "saved generation object must contain 'population' and 'scores' fields"
return Generation(variables=st['population'], scores=st['scores'])
@staticmethod
def from_object(
dim: int,
object: Union[
str,
Dict[str, np.ndarray],
Generation,
np.ndarray,
Tuple[
Optional[np.ndarray],
Optional[np.ndarray]
]
]
):
obj_type = type(object)
if obj_type == str:
generation = Generation.load(object)
elif obj_type == np.ndarray:
assert len(object.shape) == 2 and (object.shape[1] == dim or object.shape[1] == dim + 1), f"if start_generation is numpy array, it must be with shape (samples, dim) or (samples, dim+1), not {object.shape}"
generation = Generation(object, None) if object.shape[1] == dim else Generation.from_pop_matrix(object)
elif obj_type == tuple:
assert len(object) == 2, f"if start_generation is tuple, it must be tuple with 2 components, not {len(object)}"
variables, scores = object
assert ( (variables is None or scores is None) or (
variables.shape[0] == scores.size)), "start_generation object must contain variables and scores components which are None or 2D- and 1D-arrays with same shape"
generation = Generation(variables=variables, scores=scores)
elif obj_type == dict:
            assert ('variables' in object and 'scores' in object) and (
                    object['variables'] is None or object['scores'] is None or
                    object['variables'].shape[0] == object['scores'].size
            ), "start_generation object must contain 'variables' and 'scores' keys which are None or 2D- and 1D-arrays with same shape"
generation = Generation(variables=object['variables'], scores=object['scores'])
elif obj_type == Generation:
generation = Generation(variables=object['variables'], scores=object['scores'])
else:
raise TypeError(f"invalid type of generation! Must be in (Union[str, Dict[str, np.ndarray], Generation, np.ndarray, Tuple[Optional[np.ndarray], Optional[np.ndarray]]]), not {obj_type}")
generation.__check_dims()
if generation.variables is not None:
assert generation.dim_size == dim, f"generation dimension size {generation.dim_size} does not equal to target size {dim}"
return generation
@staticmethod
def from_pop_matrix(pop: np.ndarray):
warnings.warn("depricated! pop matrix style will be removed at version 7, use samples and scores separetly")
return Generation(
variables=pop[:, :-1],
scores=pop[:, -1]
)
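# Illustrative round trip (a sketch; the array shapes and file name are arbitrary):
#   gen = Generation(variables=np.zeros((4, 3)), scores=np.zeros(4))
#   gen.save('last_generation.npz')
#   restored = Generation.load('last_generation.npz')
#   assert restored.size == 4 and restored.dim_size == 3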
@dataclass
class GAResult(DictLikeGetSet):
last_generation: Generation
@property
def variable(self):
return self.last_generation.variables[0]
@property
def score(self):
return self.last_generation.scores[0]
@property
def function(self):
warnings.warn(f"'function' field is deprecated, will be removed in version 7, use 'score' to get best population score")
return self.score
@dataclass
class MiddleCallbackData(DictLikeGetSet):
"""
    Data object used with middle callbacks
"""
reason_to_stop: Optional[str]
last_generation: Generation
current_generation: int
report_list: List[float]
mutation_prob: float
mutation_discrete_prob: float
mutation: Callable[[float, float, float], float]
mutation_discrete: Callable[[int, int, int], int]
crossover: Callable[[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]
selection: Callable[[np.ndarray, int], np.ndarray]
current_stagnation: int
max_stagnation: int
parents_portion: float
elit_ratio: float
set_function: Callable[[np.ndarray], np.ndarray]
| 9,440 |
accounts/migrations/0002_user_phone.py
|
Naveendata-ux/tor_redesign
| 0 |
2168795
|
# Generated by Django 3.0.5 on 2020-04-10 14:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='phone',
field=models.CharField(blank=True, max_length=20, verbose_name='Contact number'),
),
]
| 414 |
src/cryptocurrencyeda/retrieve_data.py
|
flor14/cryptocurrencyeda
| 0 |
2170701
|
import requests
import json
import pandas as pd
import datetime
def retrieve_data(symbol:str="BTC-USDT",
time_period:str="1day",
start_date:str="2018-01-01",
end_date:str="2022-01-10",
):
"""
Retrieves historical data from the KuCoin API.
    Using open API address "https://openapi-v2.kucoin.com/api/v1/market/history/trade"
Parameters
----------
    symbol : str
Inputted cryptocurrency symbol.
time_period : str
Inputted time period.
1min, 3min, 5min, 15min, 30min, 1hour,
2hour, 4hour, 6hour, 8hour, 12hour, 1day, 1week
start_date : string "%Y-%m-%d"
Inputted datetime. Minimum is 2018-01-01
end_date : string "%Y-%m-%d"
Inputted time frame.
Returns
-------
pandas.DataFrame
Historical data of the cryptocurrency.
"""
if not isinstance(symbol, str):
raise TypeError("The input symbol must be of string type")
date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
start_date = int(datetime.datetime.timestamp(date))
date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
end_date = int(datetime.datetime.timestamp(date))
# Define the API URL
urllink = f"https://api.kucoin.com/api/v1/market/candles?type={time_period}&symbol={symbol}&startAt={start_date}&endAt={end_date}"
# Make the API call and convert the JSON response to a Python dictionary
response = requests.get(urllink).json()
assert type(response) == dict, "It is not a dictionary response"
# Convert the JSON response to a Python dictionary
data = response["data"]
# Create a pandas dataframe from the Python dictionary
cols = ["Date", "Open", "Close", "High", "Low", "Volume", "Turnover"]
df = pd.DataFrame(data, columns=cols)
df['Symbol'] = symbol
df['Date'] = pd.to_datetime(df['Date'], unit='s')
df['Close'] = df['Close'].astype(float)
assert len(df) >= 1, "Empty dataframe"
# Test whether output data is of pd.DataFrame type
if not isinstance(df, pd.DataFrame):
raise TypeError("The output dataframe must be of pd.DataFrame type")
# Return the dataframe
return df[['Symbol','Date','Close']]
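# Illustrative call (a sketch, not part of the original module; it needs network
# access to the KuCoin API, and the symbol/date values below are assumptions):
#   df = retrieve_data(symbol="ETH-USDT", time_period="1day",
#                      start_date="2021-01-01", end_date="2021-02-01")
#   print(df.head())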
| 2,321 |
Adversary/attacks.py
|
Ezaldeen99/artificial-adversary
| 0 |
2168725
|
import json
import os
import re
from random import choice, randint, sample, randrange
from string import punctuation
import spacy
from regex import search
from spacy import displacy
from constants import *
'''These act on a single text'''
def emojis_attack(text):
text = text.lower()
words = text.split()
# get the emojis path
script_dir = os.path.dirname(__file__)
rel_path = "assets/emoji.json"
abs_file_path = os.path.join(script_dir, rel_path)
with open(abs_file_path, 'r') as emojis:
# load emojis as lists
json_data = json.load(emojis)
for emoji in json_data:
# extract each emoji tags
tags_list = emoji['tags'] + emoji['aliases'] + [emoji['description']]
# check if there are any possible keywords in the text
emojis_in_text = set(words) & set(tags_list)
if len(emojis_in_text) > 0:
# source https://stackoverflow.com/questions/15658187/replace-all-words-from-word-list-with-another-string-in-python
big_regex = re.compile(r'\b%s\b' % r'\b|\b'.join(map(re.escape, emojis_in_text)))
text = big_regex.sub(emoji['emoji'], str(text))
return text
def advanced_emojis_attack(text):
nlp = spacy.load("en_core_web_sm")
doc = nlp(text.lower())
# get the emojis path
script_dir = os.path.dirname(__file__)
rel_path = "assets/emoji.json"
abs_file_path = os.path.join(script_dir, rel_path)
with open(abs_file_path, 'r') as emojis:
# load emojis as lists
json_data = json.load(emojis)
for emoji in json_data:
# extract each emoji tags
tags_list = emoji['tags'] + emoji['aliases'] + [emoji['description']]
for token in doc:
if (token.pos_ == "PROPN" or token.pos_ == "VERB"
or token.pos_ == "SYM" or token.pos_ == "NOUN") \
and token.text in tags_list:
# source https://stackoverflow.com/questions/15658187/replace-all-words-from-word-list-with-another-string-in-python
big_regex = re.compile(r'\b%s\b' % r'\b|\b'.join(map(re.escape, [token.text])))
text = big_regex.sub(emoji['emoji'], str(text.lower()))
# uncomment to launch analysis in browser
# displacy.serve(doc, style="dep")
return text
def surrounding_chars(text):
text = text.lower()
# get the spam texts file path
script_dir = os.path.dirname(__file__)
rel_path = "assets/spam_data.txt"
abs_file_path = os.path.join(script_dir, rel_path)
with open(abs_file_path, 'r') as spam:
spam_list = spam.read().split(",")
spam_list_lowered = list(map(str.lower, spam_list))
for spam_word in spam_list_lowered:
# start surrounding to text if it is in spam
if search(spam_word, text):
# source https://stackoverflow.com/questions/15658187/replace-all-words-from-word-list-with-another-string-in-python
big_regex = re.compile(r'\b%s\b' % r'\b|\b'.join(map(re.escape, [spam_word])))
surround_char = ' '.join('("' + item + '")' for item in spam_word if item != " ")
text = big_regex.sub(surround_char, str(text.lower()))
return text
def homophones_chars(text):
nlp = spacy.load("en_core_web_sm")
doc = nlp(text.lower())
# get the spam texts file path
script_dir = os.path.dirname(__file__)
rel_path = "assets/homophones.json"
abs_file_path = os.path.join(script_dir, rel_path)
with open(abs_file_path, 'r') as homophones:
# load homophones as list
json_data = json.load(homophones)
for token in doc:
if (token.pos_ == "PROPN" or token.pos_ == "VERB"
or token.pos_ == "SYM" or token.pos_ == "NOUN") \
and token.text in json_data:
# source https://stackoverflow.com/questions/15658187/replace-all-words-from-word-list-with-another-string-in-python
big_regex = re.compile(r'\b%s\b' % r'\b|\b'.join(map(re.escape, [token.text])))
text = big_regex.sub(json_data[token.text][0], str(text.lower()))
return text
def good_word_attack(text):
if randint(1, 2) == 1:
return text + ' ' + ' '.join(sample(NEUTRAL_WORDS, randint(5, 15)))
else:
return ' '.join(sample(NEUTRAL_WORDS, randint(2, 10))) + ' ' + text
def swap_words(text):
words = text.split()
if len(words) <= 3:
return ' '.join(words)
swapped = list(range(len(words)))
idxs = sample(range(1, len(words) - 2), randint(1, min(3, len(words) // 2 - 1)))
for i in idxs:
swapped[i], swapped[i + 1] = swapped[i + 1], swapped[i]
return ' '.join([words[i] for i in swapped])
def remove_spacing(text):
chars = list(text)
for i, c in enumerate(chars):
if c == ' ' and randint(1, 3) == 1:
chars[i] = choice(',.-'"`*")
return ''.join(chars)
'''These act on a single word within a text'''
def synonym(word):
return choice(SYNONYMS.get(word, [word]))
def letter_to_symbol(word):
return ''.join([choice(HOMOGLPYH_MAP.get(c.lower(), [c])) for c in word])
def swap_letters(word):
if len(word) < 4:
return word
swapped = list(range(len(word)))
max_swap = randint(1, min(3, len(word) // 2 - 1))
idxs = sample(range(1, len(word) - 2), max_swap)
for i in idxs:
swapped[i], swapped[i + 1] = swapped[i + 1], swapped[i]
return ''.join([word[i] for i in swapped])
def insert_punctuation(word):
word_with_punct = list(word)
for _ in range(2):
word_with_punct.insert(randrange(len(word_with_punct)), choice(punctuation))
return ''.join(word_with_punct)
def insert_duplicate_characters(word):
word_with_dupes = list(word)
for _ in range(2):
i = randrange(len(word_with_dupes))
word_with_dupes.insert(i, word_with_dupes[i])
return ''.join(word_with_dupes)
def delete_characters(word):
if len(word) < 4:
return word
max_del = 1 if len(word) <= 5 else 2
idxs_delete = sample(range(1, len(word) - 1), max_del)
return ''.join([c for i, c in enumerate(
list(word)) if i not in idxs_delete])
def change_case(word):
word_with_changed_case = list(word)
idx = sample(range(len(word)), randint(1, len(word)))
for i in idx:
c = word[i]
word_with_changed_case[i] = c.upper() if c.lower() == c else c.lower()
return ''.join(word_with_changed_case)
def num_to_word(word):
return NUM_TO_WORD.get(word, word)
'''Keeps track of all attacks and their types'''
ATTACK_MAP = {
'text': {
'good_word_attack': good_word_attack,
'emoji_attack': emojis_attack,
'swap_words': swap_words,
'remove_spacing': remove_spacing,
},
'word': {
'synonym': synonym,
'letter_to_symbol': letter_to_symbol,
'swap_letters': swap_letters,
'insert_punctuation': insert_punctuation,
'insert_duplicate_characters': insert_duplicate_characters,
'delete_characters': delete_characters,
'change_case': change_case,
'num_to_word': num_to_word
}
}
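# Illustrative sketch (not part of the original module): applying one text-level
# and one word-level attack from ATTACK_MAP; the sample sentence is an assumption.
#   noisy = ATTACK_MAP['text']['swap_words']("buy cheap pills online now")
#   noisy = ' '.join(ATTACK_MAP['word']['change_case'](w) for w in noisy.split())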
| 7,242 |
locking/tests/forms.py
|
liberation/django-locking
| 0 |
2169744
|
# -*- coding: utf-8 -*-
from locking.forms import LockableForm
import models
class StoryAdminForm(LockableForm):
class Meta:
model = models.Story
| 161 |
ScriptEngine/logging.py
|
daizhaolin/scriptengine
| 0 |
2168542
|
# -*- coding: UTF-8 -*-
'''
Created on 2020-03-08
@author: daizhaolin
'''
import sys
import logging
def create_logger(app):
logger = logging.getLogger(app.name)
if app.debug:
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(
logging.Formatter(
"[%(asctime)s] %(levelname)s in %(module)s: %(message)s"
)
)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
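# Illustrative usage (a sketch; the app object only needs `name` and `debug` attributes):
#   from types import SimpleNamespace
#   log = create_logger(SimpleNamespace(name="demo", debug=True))
#   log.debug("engine started")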
| 492 |
mira/core/training.py
|
faustomorales/odtk
| 0 |
2167706
|
import types
import typing
import random
import logging
import tqdm
try:
import timm
import timm.optim
import timm.scheduler
except ImportError:
timm = None # type: ignore
try:
import torch
except ImportError:
torch = None # type: ignore
import numpy as np
LOGGER = logging.getLogger()
DEFAULT_SCHEDULER_PARAMS = dict(
sched="cosine",
min_lr=1e-3,
warmup_lr=0,
warmup_epochs=0,
cooldown_epochs=0,
epochs=10,
lr_cycle_decay=1,
lr_cycle_limit=1e5,
lr_cycle_mul=1,
)
DEFAULT_OPTIMIZER_PARAMS = dict(lr=1e-2, opt="sgd", weight_decay=4e-5)
InputType = typing.TypeVar("InputType")
def train(
model: "torch.nn.Module",
loss: typing.Callable[[typing.List[InputType]], "torch.Tensor"],
training: typing.List[InputType],
validation: typing.List[InputType] = None,
batch_size: int = 1,
augment: typing.Callable[[typing.List[InputType]], typing.List[InputType]] = None,
epochs=100,
on_epoch_start: typing.Callable = None,
on_epoch_end: typing.Callable[[typing.List[dict]], dict] = None,
shuffle=True,
optimizer_params=None,
scheduler_params=None,
clip_grad_norm_params=None,
):
"""Run training job.
Args:
model: The model that we're training.
loss: A function to compute the loss for a batch.
training: The collection of training images
validation: The collection of validation images
batch_size: The batch size to use for training
        augment: The augmenter for generating samples
epochs: The number of epochs to train.
on_epoch_start: A callback to run when starting a new epoch.
on_epoch_end: A callback to run when finishing an epoch.
shuffle: Whether to shuffle the training data on each epoch.
optimizer_params: Passed to timm.optim.create_optimizer_v2 to build
the optimizer.
scheduler_params: Passed to timm.scheduler.create_scheduler to build
the scheduler.
"""
assert timm is not None, "timm is required for this function"
assert torch is not None, "torch is required for this function."
optimizer = timm.optim.create_optimizer_v2(
model, **(optimizer_params or DEFAULT_OPTIMIZER_PARAMS)
)
scheduler, _ = timm.scheduler.create_scheduler(
types.SimpleNamespace(**(scheduler_params or DEFAULT_SCHEDULER_PARAMS)),
optimizer=optimizer,
)
train_index = np.arange(len(training)).tolist()
summaries: typing.List[typing.Dict[str, typing.Any]] = []
terminated = False
try:
for epoch in range(epochs):
with tqdm.trange(len(training) // batch_size) as t:
model.train()
t.set_description(f"Epoch {epoch + 1} / {epochs}")
scheduler.step(
epoch=epoch,
metric=None
if not summaries
else summaries[-1].get("val_loss", summaries[-1]["loss"]),
)
if on_epoch_start:
on_epoch_start()
cum_loss = 0
for batchIdx, start in enumerate(range(0, len(training), batch_size)):
if batchIdx == 0 and shuffle:
random.shuffle(train_index)
end = min(start + batch_size, len(train_index))
batch = [training[train_index[idx]] for idx in range(start, end)]
if augment:
batch = augment(batch)
optimizer.zero_grad()
batch_loss = loss(batch)
batch_loss.backward()
if clip_grad_norm_params is not None:
torch.nn.utils.clip_grad_norm_(
model.parameters(), **clip_grad_norm_params
)
cum_loss += batch_loss.detach().cpu().numpy()
avg_loss = cum_loss / end
optimizer.step()
t.set_postfix(loss=avg_loss)
t.update()
summaries.append({"loss": avg_loss})
if validation:
summaries[-1]["val_loss"] = np.sum(
[
loss(
[
validation[idx]
for idx in range(
vstart,
min(vstart + batch_size, len(validation)),
)
]
)
.detach()
.cpu()
.numpy()
for vstart in range(0, len(validation), batch_size)
]
) / len(validation)
summaries[-1]["lr"] = next(g["lr"] for g in optimizer.param_groups)
if on_epoch_end:
try:
summaries[-1] = {**summaries[-1], **on_epoch_end(summaries)}
except StopIteration:
terminated = True
t.set_postfix(**summaries[-1])
if terminated:
break
except KeyboardInterrupt:
LOGGER.warning("Terminating early due to keyboard interrupt.")
return summaries
return summaries
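# Minimal illustrative sketch (assumptions: `my_model` is a torch.nn.Module,
# `batch_loss` maps a list of samples to a scalar torch.Tensor, and the
# train/val sample lists exist; none of these names are part of this module):
#   summaries = train(my_model, loss=batch_loss,
#                     training=train_samples, validation=val_samples,
#                     batch_size=8, epochs=3,
#                     scheduler_params={**DEFAULT_SCHEDULER_PARAMS, "epochs": 3})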
| 5,480 |
wiki_music/library/lyrics.py
|
marian-code/wikipedia-music-tags
| 5 |
2169822
|
r"""Get lyrics from.
Anime Lyrics, AZLyrics, Genius, Lyricsmode, \
Lyrical Nonsense, Musixmatch, darklyrics
"""
import logging
from typing import TYPE_CHECKING, Dict, List, Optional, Union, Tuple
import rapidfuzz.fuzz as fuzz # lazy loaded
from wiki_music.constants import GREEN, NO_LYRIS, RESET
from wiki_music.external_libraries import lyricsfinder # lazy loaded
from wiki_music.utilities import (GoogleApiKey, ThreadPool, caseless_equal,
exception, normalize)
if TYPE_CHECKING:
from wiki_music.external_libraries.lyricsfinder import LyricsManager
from typing_extensions import TypedDict
LyrData = TypedDict("LyrData", {"track": List[int], "lyrics": str,
"source_url": str})
LyrDict = Dict[str, LyrData]
from wiki_music.external_libraries.lyricsfinder.models.lyrics import (
LyricsDict)
log = logging.getLogger(__name__)
log.debug("lyrics imports done")
__all__ = ["save_lyrics"]
def save_lyrics(tracks: List[str], types: List[str], band: str, album: str,
GUI: bool, multi_threaded: bool
) -> Tuple[List[str], List[Union[str, None]]]:
"""Searches and downloads lyrics for each track.
Does some preprocessing before it starts the lyricsfinder
    module and downloads the lyrics. During preprocessing, tracks which will have
    the same lyrics are identified so the same lyrics are not downloaded twice. The
    lyrics are then downloaded asynchronously, each in a separate thread, for
speed.
See also
--------
:mod:`wiki_music.external_libraries.lyricsfinder`
module used to download lyrics
:class:`wiki_music.utilities.parser_utils.ThreadPool`
async download
Parameters
----------
tracks: List[str]
list of album tracks
types: List[str]
list of album types, to infer which tracks have same lyrics, e.g.
<track> and <track (acoustic)> are considered to have same lyrics.
Instrumental and Orchestral types are set to no lyrics
band: str
album artist name
album: str
album name
GUI: bool
whether app is running in GUI mode
multi_threaded: bool
        whether to download lyrics in parallel or in a serial fashion
Returns
-------
List[str]
list of track lyrics in same order as tracks list was passed in
List[Union[str, None]]
list of lyrics source urls
"""
log.info("starting save lyrics")
GOOGLE_API_KEY = GoogleApiKey.value(GUI)
lyrics: List[str]
sources: List[Union[str, None]]
tracks_dict: "LyrDict"
raw_lyrics: List["LyricsDict"]
lyrics = []
sources = []
for i, tp in enumerate(types):
sources.append(None)
for nl in NO_LYRIS:
if caseless_equal(nl, tp):
lyrics.append(nl)
break
else:
lyrics.append("")
log.info("Initialize duplicates")
tracks_dict = dict()
for i, (tr, lyr) in enumerate(zip(tracks, lyrics)):
# TODO might be able to use defaultdict here with custom factory
# defaultdict(lambda x: something...)
if not lyr:
for tr_k in tracks_dict.keys():
if fuzz.token_set_ratio(tr, tr_k, score_cutoff=90):
tracks_dict[tr_k]["track"].append(i)
break
else:
tracks_dict[tr] = {"track": [i], "lyrics": "",
"source_url": ""}
log.info("Download lyrics")
# manager must be initialized in main thread
manager = lyricsfinder.LyricsManager()
# run search
t = ThreadPool(target=_get_lyrics,
args=[(manager, band, album, t, GOOGLE_API_KEY)
for t in tracks_dict.keys()])
if multi_threaded:
t.run()
else:
t.run_serial()
raw_lyrics = t.results()
log.info("Assign lyrics to tracks_dict")
# report results
for i, l in enumerate(raw_lyrics):
if l["lyrics"]:
print(GREEN + "Saved lyrics for:" + RESET,
f"{l['artist']} - {l['title']} " + GREEN +
f"({l['origin']['source_name']})")
else:
print(GREEN + "Couldn't find lyrics for:" + RESET,
f"{l['artist']} - {l['title']}")
tracks_dict[l["title"]]["lyrics"] = l["lyrics"]
tracks_dict[l["title"]]["source_url"] = l["origin"]["source_url"]
for track in tracks_dict.values():
for i in track["track"]:
lyrics[i] = track["lyrics"]
sources[i] = track["source_url"]
return lyrics, sources
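# Illustrative call (a sketch; the track, band and album names are assumptions,
# and lyric downloads require network access):
#   lyrics, sources = save_lyrics(tracks=["Track A", "Track B (acoustic)"],
#                                 types=["", "Instrumental"],
#                                 band="Some Band", album="Some Album",
#                                 GUI=False, multi_threaded=True)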
@exception(log)
def _get_lyrics(manager: 'LyricsManager', artist: str, album: str, song: str,
GOOGLE_API_KEY: str
) -> "LyricsDict":
"""Find and download lyrics for specified song.
See also
--------
:meth:`wiki_music.external_libraries.lyricsfinder.LyricsManager`
low level lyricsfinding implementation
Parameters
----------
manager: lyricsfinder.LyricsManager
instance of LyricsManager which encapsulates the lyrics finding and
downloading methods
artist: str
artist name
album: str
album name
song: str
song name
Returns
-------
dict
dictionary with lyrics and information where it was downloaded from
"""
lyrics = next(manager.search_lyrics(song, album, artist,
google_api_key=GOOGLE_API_KEY), None)
if not lyrics:
log.info(f"Couldn't find lyrics for: {artist} - {song}")
return {"lyrics": "", "artist": artist, "title": song,
"release_date": None, "origin": {"source_name": "",
"query": "", "url": "",
"source_url": ""}}
else:
log.info(f"Saved lyrics for: {artist} - {song}")
response = lyrics.to_dict()
response["title"] = song
return response
| 6,079 |
quaddicted/packages/migrations/0001_initial.py
|
hemebond/quaddicted
| 8 |
2169451
|
# Generated by Django 3.1.1 on 2020-11-01 14:02
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import quaddicted.packages.models
import taggit.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('taggit', '0003_taggeditem_add_unique_index'),
]
operations = [
migrations.CreateModel(
name='Package',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(max_length=256, upload_to=quaddicted.packages.models.package_upload_to, validators=[quaddicted.packages.models.validate_package_file])),
('file_name', models.CharField(blank=True, editable=False, max_length=128)),
('file_hash', models.CharField(blank=True, editable=False, max_length=64, unique=True)),
('file_size', models.BigIntegerField(blank=True, editable=False, null=True)),
('name', models.CharField(max_length=128)),
('created', models.DateTimeField(auto_now_add=True)),
('rating', models.FloatField(blank=True, editable=False, null=True)),
('game', models.CharField(choices=[('q1', 'Quake 1'), ('q2', 'Quake 2'), ('q3', 'Quake 3')], default='q1', max_length=2)),
('description', models.TextField(blank=True)),
('type', models.IntegerField(choices=[(1, 'Single BSP File(s)'), (2, 'Partial conversion'), (3, 'Total conversion'), (4, 'Speedmapping'), (5, 'Misc. Files'), (6, 'undefined, please tell Spirit')], default=6)),
('published', models.BooleanField(default=False)),
('uploaded_on', models.DateTimeField(auto_now_add=True)),
('base_dir', models.CharField(blank=True, help_text='directory where this package should be extracted to', max_length=256, null=True)),
('command_line', models.CharField(blank=True, help_text='command-line arguments for running the package', max_length=256, null=True)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='PackageAuthor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True, verbose_name='name')),
('slug', models.SlugField(max_length=100, unique=True, verbose_name='slug')),
],
options={
'verbose_name': 'Package Author',
'verbose_name_plural': 'Package Authors',
},
),
migrations.CreateModel(
name='PackageUrl',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32)),
('url', models.URLField()),
('package', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='urls', to='quaddicted_packages.package')),
],
),
migrations.CreateModel(
name='PackageScreenshot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(help_text='A 16x9 resolution image at least 1024x576', max_length=256, upload_to=quaddicted.packages.models.screenshot_upload_to)),
('package', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='screenshots', to='quaddicted_packages.package')),
],
),
migrations.CreateModel(
name='PackageFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('last_modified', models.DateTimeField(auto_now_add=True)),
('size', models.BigIntegerField(blank=True, editable=False, null=True)),
('package', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='quaddicted_packages.package')),
],
),
migrations.AddField(
model_name='package',
name='authors',
field=models.ManyToManyField(help_text='A comma-separated list of authors.', related_name='packages', to='quaddicted_packages.PackageAuthor'),
),
migrations.AddField(
model_name='package',
name='dependencies',
field=models.ManyToManyField(to='quaddicted_packages.Package'),
),
migrations.AddField(
model_name='package',
name='tags',
field=taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='package',
name='uploaded_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='PackageRating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(blank=True, max_length=32, null=True)),
('score', models.PositiveSmallIntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),
('created', models.DateTimeField(auto_now_add=True)),
('package', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratings', to='quaddicted_packages.package')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('user', 'package'), ('username', 'package')},
},
),
]
| 6,455 |
drf_jwt_2fa/sending.py
|
trendlee/drf-jwt-2fa
| 21 |
2170385
|
from django.conf import settings
from django.core.mail import send_mail
from django.utils.translation import ugettext as _
from .settings import api_settings
class CodeSendingFailed(Exception):
pass
def send_verification_code(user, code):
sender = api_settings.CODE_SENDER
return sender(user, code)
def send_verification_code_via_email(user, code):
user_email_address = getattr(user, 'email', None)
if not user_email_address:
raise CodeSendingFailed(_("No e-mail address known"))
subject_template = _(
api_settings.EMAIL_SENDER_SUBJECT_OVERRIDE or
_("{code}: Your verification code"))
body_template = (
api_settings.EMAIL_SENDER_BODY_OVERRIDE or
_("{code} is the verification code needed for the login."))
messages_sent = send_mail(
subject=subject_template.format(code=code),
message=body_template.format(code=code),
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[user_email_address],
fail_silently=True)
if not messages_sent:
raise CodeSendingFailed(_("Unable to send e-mail"))
| 1,124 |
skml/datasets/sample_down_label_space.py
|
ChristianSch/skml
| 5 |
2170374
|
import numpy as np
from scipy.sparse import issparse
from operator import itemgetter
def sample_down_label_space(y, k, method='most-frequent'):
"""
Samples down label space, such that the returned label
space retains order of the original labels, but
removes labels which do not meet certain criteria
(see `method`).
Parameters
----------
y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]
Multi-label targets
k : number
        Number of returned labels; must not exceed the number of
        distinct labels in `y`
    method : string, default = 'most-frequent'
        Method to sample the label space down. Currently only selection
        of the top k most frequent labels is supported.
"""
if k > y.shape[1]:
raise ValueError('Cannot sample more labels than given')
if method == 'most-frequent':
# create mapping of frequencies per column (label)
if issparse(y):
# sum of sparse matrix returns a matrix. A1 holds the matrix as
# a one dimensional array, just like if y was dense
freqs = list(map(lambda x: (x[0], x[1]),
enumerate(np.sum(y, axis=0).A1)))
else:
freqs = list(map(lambda x: (x[0], x[1]),
enumerate(np.sum(y, axis=0))))
freqs.sort(key=itemgetter(1), reverse=True)
# select top k labels, restore original order
# if we wouldn't restore the original order, the labels would
        # be ordered not by original column, but by "most frequently occurring"
sampled_indices = sorted(list(map(lambda x: x[0], freqs[:k])))
return y[:, sampled_indices]
else:
raise ValueError('No such sample method {0}'.format(method))
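# Illustrative usage (a sketch with an arbitrary dense label matrix):
#   import numpy as np
#   y = np.array([[1, 0, 1], [1, 1, 0], [1, 0, 0]])
#   sample_down_label_space(y, k=2)  # keeps the two most frequent label columns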
| 1,793 |
anonboard/settings_production.sample.py
|
anehx/anonboard-backend
| 0 |
2170316
|
from settings import *
DEBUG = False
STATIC_ROOT = '/var/www/anonboard/django-static/static'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'anonboard',
'USER': 'anonboard',
'PASSWORD': '*********',
'HOST': 'localhost'
}
}
| 328 |
2019/ml/notebook/mydata/data/__init__.py
|
hyeyoom/Study-Note
| 0 |
2170581
|
__description__ = 'Dataset loader'
from .artificial import make_linear_regression_data
from .artificial import make_polynomial_regression_data
from .io import readtxt
from .io import load_pima_indians_data
from .io import load_drink_data
| 239 |
python/muonPrep.py
|
chadfreer/monoZ_Analysis
| 0 |
2170070
|
import FWCore.ParameterSet.Config as cms
'''
Produces "preppedMuons"
Uses the MET uncertainty variations from runMETCorrectionsAndUncertainties() function
See L768 of PhysicsTools/PatUtils/python/tools/runMETCorrectionsAndUncertainties.py
'''
def muonPrep(process, isMC):
# TODO: corrections
# KaMuCa? Rochester? MuscleFit?
process.preppedMuons = cms.EDProducer("MuonPrep",
muonSrc = cms.InputTag("slimmedMuons"),
vertexSrc = cms.InputTag("offlineSlimmedPrimaryVertices"),
kinematicCutAnyVariation = cms.string('pt > 10 && abs(eta) < 2.4'),
finalCut = cms.string('isLooseMuon'),
)
| 631 |
jamf/models/computer_operating_system.py
|
jensenbox/python-jamf
| 1 |
2169335
|
# coding: utf-8
"""
Jamf Pro API
    ## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used solely for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from jamf.configuration import Configuration
class ComputerOperatingSystem(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'version': 'str',
'build': 'str',
'active_directory_status': 'str',
'master_password_set': 'bool',
'file_vault2_status': 'str',
'extension_attributes': 'list[ComputerExtensionAttribute]'
}
attribute_map = {
'name': 'name',
'version': 'version',
'build': 'build',
'active_directory_status': 'activeDirectoryStatus',
'master_password_set': '<PASSWORD>',
'file_vault2_status': 'fileVault2Status',
'extension_attributes': 'extensionAttributes'
}
def __init__(self, name=None, version=None, build=None, active_directory_status=None, master_password_set=None, file_vault2_status=None, extension_attributes=None, local_vars_configuration=None): # noqa: E501
"""ComputerOperatingSystem - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._version = None
self._build = None
self._active_directory_status = None
self._master_password_set = None
self._file_vault2_status = None
self._extension_attributes = None
self.discriminator = None
if name is not None:
self.name = name
if version is not None:
self.version = version
if build is not None:
self.build = build
if active_directory_status is not None:
self.active_directory_status = active_directory_status
if master_password_set is not None:
self.master_password_set = master_password_set
if file_vault2_status is not None:
self.file_vault2_status = file_vault2_status
if extension_attributes is not None:
self.extension_attributes = extension_attributes
@property
def name(self):
"""Gets the name of this ComputerOperatingSystem. # noqa: E501
:return: The name of this ComputerOperatingSystem. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ComputerOperatingSystem.
:param name: The name of this ComputerOperatingSystem. # noqa: E501
:type name: str
"""
self._name = name
@property
def version(self):
"""Gets the version of this ComputerOperatingSystem. # noqa: E501
:return: The version of this ComputerOperatingSystem. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this ComputerOperatingSystem.
:param version: The version of this ComputerOperatingSystem. # noqa: E501
:type version: str
"""
self._version = version
@property
def build(self):
"""Gets the build of this ComputerOperatingSystem. # noqa: E501
:return: The build of this ComputerOperatingSystem. # noqa: E501
:rtype: str
"""
return self._build
@build.setter
def build(self, build):
"""Sets the build of this ComputerOperatingSystem.
:param build: The build of this ComputerOperatingSystem. # noqa: E501
:type build: str
"""
self._build = build
@property
def active_directory_status(self):
"""Gets the active_directory_status of this ComputerOperatingSystem. # noqa: E501
:return: The active_directory_status of this ComputerOperatingSystem. # noqa: E501
:rtype: str
"""
return self._active_directory_status
@active_directory_status.setter
def active_directory_status(self, active_directory_status):
"""Sets the active_directory_status of this ComputerOperatingSystem.
:param active_directory_status: The active_directory_status of this ComputerOperatingSystem. # noqa: E501
:type active_directory_status: str
"""
self._active_directory_status = active_directory_status
@property
def master_password_set(self):
"""Gets the master_password_set of this ComputerOperatingSystem. # noqa: E501
:return: The master_password_set of this ComputerOperatingSystem. # noqa: E501
:rtype: bool
"""
return self._master_password_set
@master_password_set.setter
def master_password_set(self, master_password_set):
"""Sets the master_password_set of this ComputerOperatingSystem.
:param master_password_set: The master_password_set of this ComputerOperatingSystem. # noqa: E501
:type master_password_set: bool
"""
self._master_password_set = master_password_set
@property
def file_vault2_status(self):
"""Gets the file_vault2_status of this ComputerOperatingSystem. # noqa: E501
:return: The file_vault2_status of this ComputerOperatingSystem. # noqa: E501
:rtype: str
"""
return self._file_vault2_status
@file_vault2_status.setter
def file_vault2_status(self, file_vault2_status):
"""Sets the file_vault2_status of this ComputerOperatingSystem.
:param file_vault2_status: The file_vault2_status of this ComputerOperatingSystem. # noqa: E501
:type file_vault2_status: str
"""
allowed_values = ["NOT_APPLICABLE", "NOT_ENCRYPTED", "BOOT_ENCRYPTED", "SOME_ENCRYPTED", "ALL_ENCRYPTED"] # noqa: E501
if self.local_vars_configuration.client_side_validation and file_vault2_status not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `file_vault2_status` ({0}), must be one of {1}" # noqa: E501
.format(file_vault2_status, allowed_values)
)
self._file_vault2_status = file_vault2_status
@property
def extension_attributes(self):
"""Gets the extension_attributes of this ComputerOperatingSystem. # noqa: E501
:return: The extension_attributes of this ComputerOperatingSystem. # noqa: E501
:rtype: list[ComputerExtensionAttribute]
"""
return self._extension_attributes
@extension_attributes.setter
def extension_attributes(self, extension_attributes):
"""Sets the extension_attributes of this ComputerOperatingSystem.
:param extension_attributes: The extension_attributes of this ComputerOperatingSystem. # noqa: E501
:type extension_attributes: list[ComputerExtensionAttribute]
"""
self._extension_attributes = extension_attributes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ComputerOperatingSystem):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ComputerOperatingSystem):
return True
return self.to_dict() != other.to_dict()
| 9,276 |
setup.py
|
mytab0r/RaveGen-Telegram-bot-generator
| 1 |
2170600
|
import setuptools
with open("README.rst", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="ravegen",
version="0.3.1",
scripts=['ravegen/ravegen'],
author="<NAME>",
author_email="<EMAIL>",
license='MIT',
description="Program for generate, create and deploy telegram bots readable way.",
long_description=long_description,
install_requires=[
'python-telegram-bot',
],
package_data={'ravegen': ["LICENSE", "rave_compl.bash", "ravegen/commands", "ravegen/version"]},
include_package_data=True,
url="https://github.com/ChrisChV/RaveGen-Telegram-bot-generator",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 2.7",
'License :: OSI Approved :: MIT License',
"Operating System :: OS Independent",
],
)
| 851 |
jcms/templatetags/get_object_attr.py
|
jessielaf/jcms-pip
| 0 |
2170310
|
from django import template
register = template.Library()
@register.filter
def get_object_attr(use_object, name):
return getattr(use_object, name)
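# Illustrative template usage (a sketch; `obj` and `field_name` are placeholders
# provided by the template context):
#   {% load get_object_attr %}
#   {{ obj|get_object_attr:field_name }}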
| 154 |
textvinf/modules/callbacks.py
|
vikigenius/textvinf
| 0 |
2168196
|
# -*- coding: utf-8 -*-
import logging
import random
from typing import Any, Dict
from allennlp.common.util import END_SYMBOL, START_SYMBOL
from allennlp.training.trainer import EpochCallback, GradientDescentTrainer
logger = logging.getLogger(__name__)
@EpochCallback.register('print_reconstruction_example')
class PrintReconstructionExample(EpochCallback):
"""Callback that prints an example reconstruction."""
def __call__(
self,
trainer: GradientDescentTrainer,
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
):
"""Callback call implementation."""
batch = next(iter(trainer._validation_data_loader))
outputs = trainer.model.make_output_human_readable(
trainer.batch_outputs(batch, for_training=False),
)['predicted_sentences']
idx = random.randrange(0, len(outputs))
vocab = trainer.model.vocab
removal_tokens = {START_SYMBOL, END_SYMBOL, vocab._padding_token}
pred_sentence = outputs[idx]
source_sentence = ' '.join(
[
vocab.get_token_from_index(tidx.item())
for tidx in batch['source_tokens']['tokens']['tokens'][idx]
if vocab.get_token_from_index(tidx.item()) not in removal_tokens
],
)
logger.info('{0} -> {1}'.format(source_sentence, pred_sentence))
@EpochCallback.register('print_generation_example')
class PrintGenerationExample(EpochCallback):
"""Callback that prints an example reconstruction."""
def __call__(
self,
trainer: GradientDescentTrainer,
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
):
"""Callback call implementation."""
pred_sentence = random.choice(trainer.model.generate()['predicted_sentence'])
logger.info(pred_sentence)
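# Illustrative configuration sketch (an assumption about how these registered
# callbacks are wired in; the exact config keys depend on the AllenNLP version):
#   "trainer": {
#       "epoch_callbacks": [
#           {"type": "print_reconstruction_example"},
#           {"type": "print_generation_example"}
#       ]
#   }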
| 1,883 |
neolight/api/migrations/0001_initial.py
|
Bleno/django-light
| 0 |
2169349
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-24 01:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='LBBase',
fields=[
('id_base', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=120, unique=True)),
('struct', models.CharField(max_length=120)),
('dt_base', models.DateTimeField()),
('idx_exp', models.BooleanField()),
('idx_exp_url', models.CharField(max_length=120, null=True)),
('idx_exp_time', models.IntegerField()),
('file_ext', models.BooleanField()),
('file_ext_time', models.IntegerField(null=True)),
('txt_mapping', models.CharField(max_length=120, null=True)),
],
options={
'db_table': 'lb_base',
},
),
]
| 1,109 |
Console_Interface/variants_generator.py
|
El-Dios/exam-paper-variants-generator-
| 0 |
2169952
|
import random
from colorama import init, Fore, Back, Style
from os import system
def generate_unique_variants(students_count, exam_papers_count):
"""
Случайным образом генерирует и выводит на экран
номера билетов для каждого студента
"""
# check if exam papers number more than students amount
if exam_papers_count < students_count:
for val in range(students_count):
            print(f'Student {val+1:2} - variant No. {random.randrange(exam_papers_count)+1}')
return None
# define list of exam papers numbers
number_list = []
# fulfill list by numbers
for i in range(1, exam_papers_count+1):
number_list.append(i)
# randomly shuffle fulfilled list
random.shuffle(number_list)
# formatting print exam papers number in order of students number
for val in range(students_count):
print(f'{val+1:2}й студент - вариант № {number_list[val]}')
return None
init()
flag = True
# console interface in while cycle
while(flag):
system('cls')
print(Back.GREEN, Style.BRIGHT, Fore.WHITE + "Здравствуйте! Добро пожаловать в генератор билетов!")
# Input values for function
print(Back.CYAN)
count_stud = int(input('Введите количество студентов: '))
count_exam_papers = int(input('Введите количество билетов: '))
# Execute function
print(Back.RESET, Fore.GREEN)
generate_unique_variants(count_stud, count_exam_papers)
decision = input("\nПовторить?[Да/Нет]")
if (decision.lower() != 'да'):
flag = False
| 1,531 |
yoi/storage_factory.py
|
zhzLuke96/Yoi
| 0 |
2170277
|
from .utils import getId
import time
class factory_simple(dict):
def __init__(self, MaxAge=3600):
self.max_age = MaxAge
def is_alive(self, id):
obj, t = self.get(id, (None, 0))
if time.time() - t > self.max_age:
return False
return True
    def save(self, obj, id=None):
        # generate a fresh id per call; a default of getId() would be evaluated only once
        if id is None:
            id = getId()
        self[id] = (obj, int(time.time()))
        return id
def load(self, id):
if self.is_alive(id):
return self.get(id, (None,None))[0]
return None
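# Usage sketch (illustrative, not part of the original module):
#   cache = factory_simple(MaxAge=60)
#   token = cache.save({"user": "alice"})
#   cache.load(token)   # -> {"user": "alice"} while the entry is still alive, else None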
| 514 |
seriouslylib/cp437.py
|
Mego/Seriously
| 104 |
2170287
|
#!/usr/bin/env python3
class CP437():
table = (
'\x00\u263a\u263b\u2665\u2666\u2663\u2660\u2022\u25d8\u25cb\u25d9'
'\u2642\u2640\u266a\u266b\u263c\u25ba\u25c4\u2195\u203c\xb6\xa7'
'\u25ac\u21a8\u2191\u2193\u2192\u2190\u221f\u2194\u25b2\u25bc !"'
+r"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]"
+'^_`abcdefghijklmnopqrstuvwxyz{|}~\u2302\xc7\xfc\xe9\xe2\xe4\xe0'
'\xe5\xe7\xea\xeb\xe8\xef\xee\xec\xc4\xc5\xc9\xe6\xc6\xf4\xf6\xf2'
'\xfb\xf9\xff\xd6\xdc\xa2\xa3\xa5\u20a7\u0192\xe1\xed\xf3\xfa\xf1'
'\xd1\xaa\xba\xbf\u2310\xac\xbd\xbc\xa1\xab\xbb\u2591\u2592\u2593'
'\u2502\u2524\u2561\u2562\u2556\u2555\u2563\u2551\u2557\u255d'
'\u255c\u255b\u2510\u2514\u2534\u252c\u251c\u2500\u253c\u255e'
'\u255f\u255a\u2554\u2569\u2566\u2560\u2550\u256c\u2567\u2568'
'\u2564\u2565\u2559\u2558\u2552\u2553\u256b\u256a\u2518\u250c'
'\u2588\u2584\u258c\u2590\u2580\u03b1\xdf\u0393\u03c0\u03a3\u03c3'
'\xb5\u03c4\u03a6\u0398\u03a9\u03b4\u221e\u03c6\u03b5\u2229\u2261\xb1'
'\u2265\u2264\u2320\u2321\xf7\u2248\xb0\u2219\xb7\u221a\u207f\xb2\u25a0\xa0'
)
@staticmethod
def ord(c):
return CP437.table.find(c)
@staticmethod
def chr(o):
if not 0 <= o < 256:
raise ValueError
return CP437.table[o]
@staticmethod
def from_Unicode(s):
res = []
for c in s:
if c in CP437.table:
res.append(CP437.ord(c))
else:
res.extend(c.encode('utf-8'))
return res
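# Quick self-check (illustrative example, not part of the original file):
if __name__ == "__main__":
    assert CP437.ord("A") == 65                  # the ASCII range maps to itself
    assert CP437.chr(CP437.ord("é")) == "é"      # round-trip for a character in the table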
| 1,550 |
answerdiff/serializers.py
|
saganshul/django-portals
| 0 |
2170139
|
from rest_framework import serializers
from .models import Question, Submission, Profile, Comment
class QuestionSerializer(serializers.ModelSerializer):
class Meta:
model = Question
fields = ('question_level', 'question_number', 'question_title', 'question_desc', 'question_image')
class SubmissionSerializer(serializers.ModelSerializer):
class Meta:
model = Submission
fields = ('id','submission_string','submission_storage')
class ScoreboardSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = ('user_nick','user_score')
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = ('comment_message',)
class SubmissionSerializerforMySubmission(serializers.ModelSerializer):
class Meta:
model = Submission
fields = ('submission_user',
'submission_question',
'submission_state'
)
depth = 2
| 1,045 |
Text2Handwriting.py
|
pavitra14/Text2Handwriting
| 0 |
2170601
|
from flask import Flask, render_template, flash, request, redirect
from TokenManagement import TokenManager
import utils
from werkzeug.utils import secure_filename
import os
app = Flask(__name__)
app.config.from_object(__name__)
app.config['SECRET_KEY'] = "<KEY>"
UPLOAD_FOLDER = './static/uploads/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#WSGI Module
application = app
tk = TokenManager()
@app.route("/", methods=["GET"])
def home():
handwriting_list = utils.get_handwriting_list()
hw_json = utils.list_to_json(handwriting_list)
return render_template('index.html', handwriting_list=hw_json)
@app.route("/custom_handwriting", methods=['GET','POST'])
def showCustomHandwritingPage():
if request.method == 'GET':
return render_template('custom_handwriting.html')
form = request.form
token = form['token']
if not tk.checkToken(token):
return render_template('custom_handwriting.html', msg="Invalid Token")
print(request.files)
if 'file' not in request.files:
return render_template('custom_handwriting.html', msg="No file selected")
file = request.files['file']
    if (not file) or not utils.allowed_file(file.filename):
return render_template('custom_handwriting.html', msg="Invalid File selected")
filename = secure_filename(file.filename)
path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(path)
# Start Training
image = utils.load_image(path)
if image is False:
return render_template('custom_handwriting.html', msg="Some Error Occured with image, try again")
boxes = utils.get_boxes(image)
b64 = utils.boxes_web(boxes, image)
return render_template('custom_progress.html',img=b64, token=token, filename=path)
@app.route("/extract_handwriting", methods=["POST"])
def extractHandwriting():
form = request.form
filename = form['filename']
token = form['token']
correct = form['correct']
if correct != "Yes":
return redirect("/custom_handwriting")
if not tk.checkToken(token):
return "Invalid token"
# Extract each letter and save it in it's folder
print(filename)
image = utils.load_image(filename)
boxes = utils.get_boxes(image)
processed = {}
for char,status in utils.extract_letters(image,boxes,token):
processed[char] = status
print("Processing {} ".format(char))
utils.add_custom_handwriting(tk.getTokenName(token))
return processed
@app.route("/ajax/load_token", methods=["POST"])
def load_token():
token = request.form['token']
if tk.checkToken(token):
key = tk.getTokenName(token)
return {"status":"valid","hw_name":key}
return {"status":"invalid","hw_name":""}
if __name__ == '__main__':
app.run(debug=True)
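# Example call to the token-check endpoint (sketch; assumes the dev server above runs on localhost:5000):
#   curl -X POST -d "token=<token>" http://localhost:5000/ajax/load_token
#   -> {"status": "valid", "hw_name": "..."} for a known token, {"status": "invalid", "hw_name": ""} otherwise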
| 2,790 |
data_load.py
|
zhouliupku/LGtagging_LSTM
| 3 |
2170409
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 22:59:19 2019
Load from unstructured data and produce datasets
@author: Zhou
"""
import os
import pickle
import numpy as np
from bs4 import BeautifulSoup as BS
import lg_utils
import config
from config import NULL_TAG, PADDING_CHAR, MAX_LEN
from DataStructures import Page, Record
class DataLoader(object):
def __init__(self, size="small"):
'''
size: "small" or "full"
'''
self.datapath = os.path.join(os.getcwd(), "logart_html", size)
def load_data(self, interested_tags,
train_perc=0.6, cv_perc=0.2):
'''
return a tuple containing lists of pages or records, depending on mode
'''
pages = []
records = []
for file in self.get_file():
try:
ps, rs = self.load_section(file, interested_tags)
pages.extend(ps)
records.extend(rs)
except ValueError:
print("VALUE ERROR!")
print(file)
pages_train, pages_cv, pages_test = lg_utils.random_separate(pages,
[train_perc, cv_perc])
records_train, records_cv, records_test = lg_utils.random_separate(records,
[train_perc, cv_perc])
print("Loaded {} pages for training.".format(len(pages_train)))
print("Loaded {} pages for cross validation.".format(len(pages_cv)))
print("Loaded {} pages for testing.".format(len(pages_test)))
return pages_train, pages_cv, pages_test, records_train, records_cv, records_test
def load_section(self, files, interested_tags):
"""
return a list of Page instances and Record instances
"""
html_filename = files
with open(html_filename, 'r', encoding = "utf8") as file:
lines = file.readlines()
pages = []
records = []
all_text, all_tags = self.format_raw_data(lines)
        rest_tags = all_tags  # tags for the whole section, consumed page by page below
        page_texts = all_text.split('【')  # list of str; every piece after the first begins with a page id followed by '】'
rest_tags = rest_tags[len(page_texts[0]):]
for page_text in page_texts[1:]:
candi = page_text.split('】')
if len(candi) != 2:
raise ValueError
if len(candi[1]) == 0:
# print("Page {} is empty!".format(candi[0]))
continue
if len(candi[1]) >= MAX_LEN:
print("Page {} is too long!".format(candi[0]))
continue
pid, txt = int(candi[0]), candi[1]
page_tags = rest_tags[(len(candi[0]) + 2):(len(candi[0]) + 2 + len(txt))]
rest_tags = rest_tags[(len(candi[0]) + 2 + len(txt)):]
eos_idx = []
for i, tag in enumerate(page_tags):
                # EOS is the index just before a name's beginning
if i == 0 and tag == self.get_person_begin_tag():
continue
if tag == self.get_person_begin_tag() \
and not self.is_person_tag(page_tags[i-1]):
# avoid consecutive person names
eos_idx.append(i-1)
eos_idx.append(len(txt)-1)
page = Page(pid, txt, eos_idx)
record_txt_len = page.get_sep_len()
head_char_idx = 0
records_in_page = []
for sent_len in record_txt_len:
text = page.txt[head_char_idx : (head_char_idx + sent_len)]
tags = page_tags[head_char_idx : (head_char_idx + sent_len)]
# substitution
record_tag = []
for tag in tags: # None means take all
if (interested_tags is None) or (tag in interested_tags):
record_tag.append(tag)
else:
record_tag.append(NULL_TAG)
records_in_page.append(Record(text, record_tag))
head_char_idx += sent_len
records.extend(records_in_page)
pages.append(page)
return pages, records
def get_file(self):
input_path = os.path.join(self.datapath, "train")
tagged_filelist = [os.path.join(input_path, x) for x in os.listdir(input_path)]
return tagged_filelist
def format_raw_data(self, lines):
"""
return (all_text, all_tags) for a given section represented as list of str
all_text: a long str representing whole text for the section
all_tags: list of tags (each tag as a str) with same len as all_text
"""
Xs, Ys = [], []
for line in lines:
line = line.replace(PADDING_CHAR, '')
# line = line.replace(' ', '')
# line = line.replace('[T4]', '')
soup = BS(line, "html.parser")
with_tag_item = list(soup.find_all(recursive=False))
result = []
without_tag_item = []
rest_contents = str(soup)
for item in with_tag_item:
item_str = str(item)
title_flag = '〉' in item.text or '〈' in item.text
item_start_idx = rest_contents.find(item_str)
if not title_flag:
# white space around non-tagged string
str_before_tag = rest_contents[:item_start_idx].strip()
without_tag_item.append(str_before_tag)
# Step 1. if there is non-trivial string before tag, add as null tag
if len(str_before_tag) > 0:
null_tag = soup.new_tag(NULL_TAG)
null_tag.string = str_before_tag.replace(' ', '')
result.append(null_tag)
# Step 2. add the tag itself to result, with modification w.r.t. spaces
item.string = item.text.replace(' ', '')
result.append(item)
# Step 3. update rest_contents so that it contains the part after this tag
rest_contents = rest_contents[(item_start_idx + len(item_str)):]
# Lastly, if there is anything left, these should be with null tag. Add it
rest_contents = rest_contents.strip().replace(' ', '')
if len(rest_contents) > 0:
null_tag = soup.new_tag(NULL_TAG)
null_tag.string = rest_contents
result.append(null_tag)
without_tag_item.append(rest_contents)
X = ''.join([t.text for t in result])
Y = lg_utils.concat_lists([self.get_bio(t) for t in result])
# hierarchy: section -> page -> record -> char
Xs.append(X) # Xs is list of str, each str: record
Ys.append(Y) # Ys is list of list of tag
return ''.join(Xs), lg_utils.concat_lists(Ys)
def get_bio(self, t):
"""
input t: BS tag instance
output: BIO style tags as list of string
special treatment: due to historical reasons, entry_addr should be biog_addr
"""
if len(t.text) == 0:
return []
elif t.name == NULL_TAG:
return [NULL_TAG] * len(t.text)
else:
tag_name = "biog_addr" if t.name == "entry_addr" else t.name
return [config.BEG_PREFIX + tag_name] + [config.IN_PREFIX + tag_name] * (len(t.text) - 1)
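    # Illustrative output (assuming config.BEG_PREFIX == "B-" and config.IN_PREFIX == "I-"):
    # a <person> tag wrapping three characters yields ["B-person", "I-person", "I-person"],
    # and an <entry_addr> tag is re-labelled as biog_addr before the prefixes are applied.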
def get_person_begin_tag(self):
return config.BEG_PREFIX + "person"
def is_person_tag(self, tag):
return tag in [config.BEG_PREFIX + "person", config.IN_PREFIX + "person"]
def dump_data_to_pickle(d, filename, size):
path = os.path.join(os.getcwd(), "data", size)
if not os.path.exists(path):
os.mkdir(path)
pickle.dump(d, open(os.path.join(path, filename), "wb"))
if __name__ == "__main__":
np.random.seed(0)
for size in ["small", "medium", "full"]:
loader = DataLoader(size)
# Model hyper-parameter definition
interested_tags = ["person", "post_time", "jiguan", "entry_way",
"post_type", "office", "entry_addr", "next_office",
"prev_office", "zi", "kins", "entry_time", "post_address",
"source_tag", "othername", "hao", "biog_addr"]
interested_tags = lg_utils.concat_lists([[config.BEG_PREFIX + t, config.IN_PREFIX + t] for t in interested_tags])
data = loader.load_data(interested_tags, train_perc=0.6, cv_perc=0.2)
dump_data_to_pickle(data[0], "pages_train.p", size)
dump_data_to_pickle(data[1], "pages_cv.p", size)
dump_data_to_pickle(data[2], "pages_test.p", size)
dump_data_to_pickle(data[3], "records_train.p", size)
dump_data_to_pickle(data[4], "records_cv.p", size)
dump_data_to_pickle(data[5], "records_test.p", size)
| 9,307 |
problems/meeting_room.py
|
smartdolphin/recommandation-tutorial
| 1 |
2167834
|
# 252. Meeting Rooms
# https://leetcode.com/problems/meeting-rooms
import unittest
# Definition for an interval.
class Interval:
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution:
def canAttendMeetings(self, intervals):
"""
:type intervals: List[Interval]
:rtype: bool
"""
if not intervals or len(intervals) <= 1:
return True
intervals = sorted(intervals, key=lambda x: x.start)
size = len(intervals)
for i in range(size - 1):
if intervals[i].end > intervals[i + 1].start:
return False
return True
class TestMeetingRoom(unittest.TestCase):
def test(self):
sol = Solution()
intervals = [Interval(i, j) for i, j in [[0, 30], [5, 10], [15, 20]]]
self.assertFalse(sol.canAttendMeetings(intervals))
intervals = [Interval(i, j) for i, j in [[7, 10], [2, 4]]]
self.assertTrue(sol.canAttendMeetings(intervals))
if __name__ == '__main__':
    unittest.main()
| 1,065 |
edx_6.00.1x_python/problem2_2.py
|
TimothyDJones/learn-python
| 0 |
2170153
|
# problem2_2.py
# Paying debt off in a year
import math
def balance_remaining(bal, ir, pmt):
"""
bal: float, starting account balance
ir: float, annual interest rate
pmt: int, minimum fixed monthly payment
    Returns the remaining balance as a float after applying the minimum
    monthly payment and one month of interest.
"""
unpaid_bal = bal - pmt
return (1 + (ir / 12.0)) * unpaid_bal
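# Worked example: balance_remaining(1000, 0.2, 100) leaves an unpaid balance of 900,
# then adds one month of interest at 0.2/12, giving (1 + 0.2/12) * 900, about 915.0.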
balance = 3926
annualInterestRate = 0.2
# Strategy: Divide total balance by 12 to find lower bound for monthly payment.
# Then increment monthly payment by 10 until we find payment amount that
# results in zero or negative unpaid balance after 12 months.
min_payment = math.floor(balance/(12 * 10)) * 10 - 10
initial_balance = balance
while balance >= 0.0:
balance = initial_balance
min_payment += 10
for m in range(0, 12):
balance = balance_remaining(balance, annualInterestRate, min_payment)
print("With minimum payment of " + str(min_payment) + ", remaining balance: " + str(round(balance, 2)))
print("Lowest Payment: " + str(min_payment))
| 1,105 |
python/oneflow/nn/modules/distributed_partial_fc_sample.py
|
L-Net-1992/oneflow
| 1 |
2170303
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
import oneflow as flow
import oneflow._oneflow_internal._C as _C
from oneflow.nn.module import Module
class DistributedPariticalFCSample(Module):
def __init__(self, num_sample):
super().__init__()
self.num_sample = num_sample
self._op = (
flow.stateful_op("distributed_partial_fc_sample")
.Input("weight")
.Input("label")
.Output("mapped_label")
.Output("sampled_label")
.Output("sampled_weight")
.Build()
)
def forward(self, weight, label):
res = _C.dispatch_distributed_partial_fc_sample(
self._op, weight=weight, label=label, num_sample=self.num_sample
)
return res
def distributed_partial_fc_sample_op(weight, label, num_sample):
warnings.warn(
"oneflow.distributed_partial_fc_sample is deprecated. Please use nn.DistributedPariticalFCSample module instead.",
DeprecationWarning,
)
return DistributedPariticalFCSample(num_sample)(weight, label)
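# Migration sketch (assumes the class is re-exported under oneflow.nn, as the warning suggests):
#   sampler = flow.nn.DistributedPariticalFCSample(num_sample)
#   outputs = sampler(weight, label)   # the three outputs declared above:
#                                      # mapped_label, sampled_label, sampled_weight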
| 1,648 |
openpype/plugins/publish/extract_jpeg_exr.py
|
Tilix4/OpenPype
| 0 |
2170229
|
import os
import pyblish.api
from openpype.lib import (
get_ffmpeg_tool_path,
get_oiio_tools_path,
is_oiio_supported,
run_subprocess,
path_to_subprocess_arg,
execute,
)
class ExtractThumbnail(pyblish.api.InstancePlugin):
"""Create jpg thumbnail from sequence using ffmpeg"""
label = "Extract Thumbnail"
order = pyblish.api.ExtractorOrder
families = [
"imagesequence", "render", "render2d",
"source", "plate", "take"
]
hosts = ["shell", "fusion", "resolve"]
enabled = False
# presetable attribute
ffmpeg_args = None
def process(self, instance):
self.log.info("subset {}".format(instance.data['subset']))
# skip crypto passes.
# TODO: This is just a quick fix and has its own side-effects - it is
# affecting every subset name with `crypto` in its name.
# This must be solved properly, maybe using tags on
# representation that can be determined much earlier and
# with better precision.
if 'crypto' in instance.data['subset'].lower():
self.log.info("Skipping crypto passes.")
return
# Skip if review not set.
if not instance.data.get("review", True):
self.log.info("Skipping - no review set on instance.")
return
filtered_repres = self._get_filtered_repres(instance)
for repre in filtered_repres:
repre_files = repre["files"]
if not isinstance(repre_files, (list, tuple)):
input_file = repre_files
else:
file_index = int(float(len(repre_files)) * 0.5)
input_file = repre_files[file_index]
stagingdir = os.path.normpath(repre["stagingDir"])
full_input_path = os.path.join(stagingdir, input_file)
self.log.info("input {}".format(full_input_path))
filename = os.path.splitext(input_file)[0]
if not filename.endswith('.'):
filename += "."
jpeg_file = filename + "jpg"
full_output_path = os.path.join(stagingdir, jpeg_file)
thumbnail_created = False
# Try to use FFMPEG if OIIO is not supported (for cases when
# oiiotool isn't available)
if not is_oiio_supported():
thumbnail_created = self.create_thumbnail_ffmpeg(full_input_path, full_output_path) # noqa
else:
# Check if the file can be read by OIIO
oiio_tool_path = get_oiio_tools_path()
args = [
oiio_tool_path, "--info", "-i", full_output_path
]
returncode = execute(args, silent=True)
                # If the input can be read by OIIO then use the OIIO method
                # for conversion, otherwise fall back to ffmpeg
if returncode == 0:
self.log.info("Input can be read by OIIO, converting with oiiotool now.") # noqa
thumbnail_created = self.create_thumbnail_oiio(full_input_path, full_output_path) # noqa
else:
self.log.info("Converting with FFMPEG because input can't be read by OIIO.") # noqa
thumbnail_created = self.create_thumbnail_ffmpeg(full_input_path, full_output_path) # noqa
# Skip the rest of the process if the thumbnail wasn't created
if not thumbnail_created:
self.log.warning("Thumbanil has not been created.")
return
new_repre = {
"name": "thumbnail",
"ext": "jpg",
"files": jpeg_file,
"stagingDir": stagingdir,
"thumbnail": True,
"tags": ["thumbnail"]
}
# adding representation
self.log.debug("Adding: {}".format(new_repre))
instance.data["representations"].append(new_repre)
def _get_filtered_repres(self, instance):
filtered_repres = []
src_repres = instance.data.get("representations") or []
for repre in src_repres:
self.log.debug(repre)
tags = repre.get("tags") or []
valid = "review" in tags or "thumb-nuke" in tags
if not valid:
continue
if not repre.get("files"):
self.log.info((
"Representation \"{}\" don't have files. Skipping"
).format(repre["name"]))
continue
filtered_repres.append(repre)
return filtered_repres
def create_thumbnail_oiio(self, src_path, dst_path):
self.log.info("outputting {}".format(dst_path))
oiio_tool_path = get_oiio_tools_path()
oiio_cmd = [oiio_tool_path, "-a",
src_path, "-o",
dst_path
]
subprocess_exr = " ".join(oiio_cmd)
self.log.info(f"running: {subprocess_exr}")
try:
run_subprocess(oiio_cmd, logger=self.log)
return True
except Exception:
self.log.warning(
"Failed to create thubmnail using oiiotool",
exc_info=True
)
return False
def create_thumbnail_ffmpeg(self, src_path, dst_path):
self.log.info("outputting {}".format(dst_path))
ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")
ffmpeg_args = self.ffmpeg_args or {}
jpeg_items = []
jpeg_items.append(path_to_subprocess_arg(ffmpeg_path))
# override file if already exists
jpeg_items.append("-y")
# flag for large file sizes
max_int = 2147483647
jpeg_items.append("-analyzeduration {}".format(max_int))
jpeg_items.append("-probesize {}".format(max_int))
# use same input args like with mov
jpeg_items.extend(ffmpeg_args.get("input") or [])
# input file
jpeg_items.append("-i {}".format(
path_to_subprocess_arg(src_path)
))
# output arguments from presets
jpeg_items.extend(ffmpeg_args.get("output") or [])
# we just want one frame from movie files
jpeg_items.append("-vframes 1")
# output file
jpeg_items.append(path_to_subprocess_arg(dst_path))
subprocess_command = " ".join(jpeg_items)
try:
run_subprocess(
subprocess_command, shell=True, logger=self.log
)
return True
except Exception:
self.log.warning(
"Failed to create thubmnail using ffmpeg",
exc_info=True
)
return False
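# With the default (empty) presets, create_thumbnail_ffmpeg assembles roughly this command (sketch):
#   ffmpeg -y -analyzeduration 2147483647 -probesize 2147483647 -i <src> -vframes 1 <dst>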
| 6,752 |
controller_node/reset_db.py
|
tsm55555/NCU-HASS
| 1 |
2168680
|
import pymysql
dbIP = "192.168.4.14" # IP address of the MySQL database server
dbUserName = "root" # User name of the database server
dbUserPassword = "<PASSWORD>" # Password for the database user
databaseForDeletion = "hass" # Name of the database that is to be deleted
charSet = "utf8mb4" # Character set
cursorType = pymysql.cursors.DictCursor
connection = pymysql.connect(host=dbIP, user=dbUserName, password=<PASSWORD>, charset=charSet, cursorclass=cursorType)
try:
# Create a cursor object
dbCursor = connection.cursor()
# SQL Statement to delete a database
sql = "DROP DATABASE "+databaseForDeletion
    # Execute the DROP DATABASE statement through the cursor instance
dbCursor.execute(sql)
# SQL query string
sqlQuery = "CREATE DATABASE hass"
# Execute the sqlQuery
dbCursor.execute(sqlQuery)
    print('db reset done')
except Exception as e:
print("Exeception occured:{}".format(e))
finally:
connection.close()
| 1,177 |
Volume analysis/volume-stats.py
|
akac0297/PETLAB
| 0 |
2169317
|
from scipy import stats
import pandas as pd
import scikit_posthocs as sp
"""
Kruskal-Wallis test - volume analysis
"""
volume_data=pd.read_csv("/home/alicja/PET-LAB Code/PET-LAB/Volume analysis/tumour_volume_analysis_new.csv")
timepoints=[1,2,3]
def runKW(image_type,timepoints,volume_data):
subset1=volume_data[volume_data["TIMEPOINT"]==timepoints[0]]
subset1=subset1[subset1["IMAGE_TYPE"]==image_type]
tp1=subset1["TUMOUR VOLUME_CM3"].to_list()
subset2=volume_data[volume_data["TIMEPOINT"]==timepoints[1]]
subset2=subset2[subset2["IMAGE_TYPE"]==image_type]
tp2=subset2["TUMOUR VOLUME_CM3"].to_list()
subset3=volume_data[volume_data["TIMEPOINT"]==timepoints[2]]
subset3=subset3[subset3["IMAGE_TYPE"]==image_type]
tp3=subset3["TUMOUR VOLUME_CM3"].to_list()
_, pvalue = stats.kruskal(tp1,tp2,tp3)
if pvalue!=1:
print(image_type, stats.kruskal(tp1,tp2,tp3))
def runDunn(image_type,timepoints,volume_data):
subset1=volume_data[volume_data["TIMEPOINT"]==timepoints[0]]
subset1=subset1[subset1["IMAGE_TYPE"]==image_type]
tp1=subset1["TUMOUR VOLUME_CM3"].to_list()
subset2=volume_data[volume_data["TIMEPOINT"]==timepoints[1]]
subset2=subset2[subset2["IMAGE_TYPE"]==image_type]
tp2=subset2["TUMOUR VOLUME_CM3"].to_list()
subset3=volume_data[volume_data["TIMEPOINT"]==timepoints[2]]
subset3=subset3[subset3["IMAGE_TYPE"]==image_type]
tp3=subset3["TUMOUR VOLUME_CM3"].to_list()
data=[tp1, tp2, tp3]
result=sp.posthoc_dunn(data,p_adjust='bonferroni')
#result.to_csv(f"/home/alicja/PET-LAB Code/PET-LAB/Volume analysis/Dunn dataframes/{image_type}_vol_dataframe_Dunn.csv")
return(result)
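# posthoc_dunn returns a symmetric pandas DataFrame of pairwise p-values
# (3x3 here, one row/column per timepoint), Bonferroni-adjusted as requested above.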
image_types = ["MPE MRI", "T2w MRI", "B50T MRI", "B800T MRI"]
for image_type in image_types:
runKW(image_type,timepoints,volume_data)
result=runDunn(image_type,timepoints,volume_data)
print(image_type)
print(result)
PET_data=volume_data[volume_data["IMAGE_TYPE"]=="PET"]
PET_data=PET_data.drop([195,200,205,210,215,220,225,230,235])
runKW("PET",timepoints,PET_data)
result=runDunn("PET",timepoints,PET_data)
print("PET")
print(result)
| 2,143 |