max_stars_repo_path (string, 4–182 chars) | max_stars_repo_name (string, 6–116 chars) | max_stars_count (int64, 0–191k) | id (string, 7 chars) | content (string, 100–10k chars) | size (int64, 100–10k)
---|---|---|---|---|---|
algorithms_and_data_structures/algorithms/math/fibonacci/nth_fibonacci/nth_fibo_analytic.py
|
JCPedroza/algorithms-and-data-structures-py
| 2 |
2172758
|
from math import sqrt
def fibo(index: int) -> int:
'''Compute the fibonacci number located at the given index, using
an analytic approach.
:param index: Location of the fibonacci number.
:return: Fibonacci number located at the given index.
'''
if index == 0: # Otherwise fibo(0) evaluates to 1
return 0
sqrt5 = sqrt(5)
p = (1 + sqrt5) / 2
q = 1 / p
return int((p ** index + q ** index) / sqrt5 + 0.5)
algorithm = fibo
name = 'analytic'
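# Illustrative check (added, not part of the original module): the rounding in
# fibo() keeps the analytic formula exact for small indices.
assert [fibo(i) for i in range(10)] == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]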
| 493 |
energyuse/apps/logall/models.py
|
evhart/energyuse
| 0 |
2168236
|
from django.contrib.auth.models import User
from django.db import models
from energyuse import settings
class Record(models.Model):
"""
Basic log record describing all user interaction with the UI.
Will be populated by a middleware.
This will be one BIG DB table!
"""
created_at = models.DateTimeField(auto_now_add = True)
sessionId = models.CharField(max_length=256)
requestUser = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
requestPath = models.TextField()
requestQueryString = models.TextField()
requestVars = models.TextField()
requestMethod = models.CharField(max_length=4)
requestSecure = models.BooleanField(default=False)
requestAjax = models.BooleanField(default=False)
#requestMETA = models.TextField(null=True, blank=True)
requestAddress = models.GenericIPAddressField()
viewFunction = models.CharField(max_length=256)
viewDocString = models.TextField(null=True, blank=True)
viewArgs = models.TextField()
responseCode = models.CharField(max_length=3)
def __unicode__(self):
return self.viewFunction
| 1,167 |
services/core-api/app/api/now_applications/models/administrative_amendments/__init__.py
|
bcgov/mds
| 25 |
2171481
|
from .application_reason_code import *
from .application_reason_code_xref import *
from .application_source_type_code import *
| 127 |
src/pacientes/migrations/0001_initial.py
|
mava-ar/sgk
| 0 |
2172879
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-08 03:42
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('core', '0002_auto_20160507_1734'),
('coberturas_medicas', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Antecedente',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creado_el', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')),
('modificado_el', models.DateTimeField(auto_now=True, verbose_name='Fecha de modificación')),
('patologicos', models.TextField(blank=True, verbose_name='patológicos')),
('quirurgicos', models.TextField(blank=True, verbose_name='quirúrgicos')),
('traumaticos', models.TextField(blank=True, verbose_name='traumáticos')),
('alergicos', models.TextField(blank=True, verbose_name='alérgicos')),
('heredo_familiar', models.TextField(blank=True, verbose_name='heredo familiar')),
('habitos_fisiologicos', models.TextField(blank=True, verbose_name='hábitos fisiológicos')),
('habitos_patologicos', models.TextField(blank=True, verbose_name='hábitos patológicos')),
('medicaciones', models.TextField(blank=True, verbose_name='medicaciones')),
('estudios_complementarios', models.TextField(blank=True, verbose_name='estudios complementarios')),
('menarca', models.DateField(null=True, verbose_name='MENARCA')),
('fum', models.DateField(null=True, verbose_name='FUM')),
('tipo_partos', models.TextField(blank=True, verbose_name='tipo de partos')),
('observaciones', models.TextField(blank=True, verbose_name='observaciones')),
],
options={
'verbose_name': 'antecedente',
'verbose_name_plural': 'antecedentes',
},
),
migrations.CreateModel(
name='ComentariosHistoriaClinica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creado_el', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')),
('modificado_el', models.DateTimeField(auto_now=True, verbose_name='Fecha de modificación')),
('comentarios', models.TextField(verbose_name='comentarios')),
],
options={
'verbose_name': 'comentario de historia clinica',
'verbose_name_plural': 'comentarios de historia clinica',
},
),
migrations.CreateModel(
name='Paciente',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creado_el', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')),
('modificado_el', models.DateTimeField(auto_now=True, verbose_name='Fecha de modificación')),
('fecha_ingreso', models.DateField(verbose_name='fecha de ingreso')),
('observaciones', models.TextField(blank=True, verbose_name='observaciones')),
('cobertura_medica', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='coberturas_medicas.Cobertura', verbose_name='cobertura')),
('persona', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='core.Persona', verbose_name='persona')),
],
options={
'verbose_name': 'paciente',
'verbose_name_plural': 'pacientes',
},
),
migrations.CreateModel(
name='RegistroBiometrico',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creado_el', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')),
('modificado_el', models.DateTimeField(auto_now=True, verbose_name='Fecha de modificación')),
('peso', models.DecimalField(decimal_places=2, max_digits=5, null=True, verbose_name='peso (kg)')),
('altura', models.DecimalField(decimal_places=2, max_digits=5, null=True, verbose_name='altura (mts)')),
('paciente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='registros_biometricos', to='pacientes.Paciente')),
('profesional', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Profesional')),
],
options={
'verbose_name': 'registro biométrico',
'verbose_name_plural': 'registros biométricos',
},
),
migrations.CreateModel(
name='ImagenesHistoriaClinica',
fields=[
('comentarioshistoriaclinica_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='pacientes.ComentariosHistoriaClinica')),
('imagen', models.ImageField(upload_to='historia_imagenes', verbose_name='imagen')),
],
options={
'verbose_name': 'imagen de historia clínica',
'verbose_name_plural': 'imágenes de historia clínica',
},
bases=('pacientes.comentarioshistoriaclinica',),
),
migrations.AddField(
model_name='comentarioshistoriaclinica',
name='paciente',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='entradas_historiaclinica', to='pacientes.Paciente'),
),
migrations.AddField(
model_name='comentarioshistoriaclinica',
name='profesional',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Profesional'),
),
migrations.AddField(
model_name='antecedente',
name='paciente',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='pacientes.Paciente'),
),
]
| 6,510 |
1319/slu.py
|
matrixjoeq/timus_solutions
| 0 |
2171952
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
1319. Hotel
Time limit: 1.0 second
Memory limit: 64 MB
[Description]
— You programmers are lucky! You don't have to deal with these terrible people
– designers… This story happened with me not so long ago. We had an order from
a company building a new hotel. One day they brought a sketch to our workshop.
They said that THIS was invented by a very cool designer. They said they had
paid heaps of money for THIS. So, THIS had to be built. In general, THIS was
not a very complex thing. It was just a square set of shelves where a porter
puts guests' mail.
Usual hotels have usual stands with shelves for this purpose. But this cool
designer had turned everything upside down! To be more precise, not exactly
upside down, but upon a corner. Moreover, the cells should be numbered from
the right to the left, from the top to the bottom, looking at THIS, staying on
its corner, of course. Tell me please, how can the master attach the labels
with numbers to THIS? He will look on the shelves, staying normally on its
side, you know. He will get tangled on the fourth label already! I will get
tangled on the seventh, myself… Actually one should make such designers to
label the shelves themselves.
— Oh! You are the cool programmer, I know. Couldn’t you help me? I need just a
printout of the table with an arrangement of the labels in the cells. But not
in such way as THIS will hang on the wall, but as THIS stands on the table of
my workshop. Yes, I understand that you are busy, but you are busy every time!
Preparations to the Ural Championship, tests, solutions… So what? If you can’t
do it yourself – entrust your competitors with this task. They are the best
programmers all over the world, aren’t they? I don’t believe that they couldn’t
print the desired table having the size of the square! I would never believe it!
So… Excellent! I will take the desired printout away after the contest.
[Input]
The input consists of the only one integer N (1 ≤ N ≤ 100), which is the size
of the square.
[Output]
You are to write a program that outputs the table of numbers, as they would be
arranged when THIS would stand in the workshop. The label with number 1 should
be in the upper right corner and other numbers should be arranged along the
diagonals from the top to the bottom. The label with the last number (N*N)
should be in the lower left corner.
'''
import sys
def calc():
n = int(sys.stdin.readline())
m = []
for i in range(n):
l = []
for j in range(n):
l.append(0)
m.append(l)
c = 1
# Fill the square along anti-diagonals (cells with x + y constant), walking
# each diagonal from its top-right cell down to its bottom-left cell.
for sum_xy in range(2 * n - 1):
x_max = min(sum_xy, n - 1)
x_min = sum_xy - x_max
for x in range(x_max, x_min - 1, -1):
y = sum_xy - x
m[y][x] = c
c = c + 1
for l in m:
l.reverse()
s = ''
for i in l:
s = s + str(i) + ' '
s = s[:-1]
print s
if __name__ == '__main__':
calc()
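# Worked example (added for illustration): for N = 3 the program prints
#   4 2 1
#   7 5 3
#   9 8 6
# i.e. label 1 is in the upper-right corner, numbers run down the diagonals,
# and N*N = 9 ends up in the lower-left corner.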
| 3,011 |
tests/unitary/test_get_pool_rates.py
|
DeggetLoveCrypto/curve-pool-registry
| 0 |
2172890
|
import brownie
import pytest
from scripts.utils import pack_values, right_pad
ZERO_ADDRESS = "0x0000000000000000000000000000000000000000"
def test_get_rates_compound(accounts, registry_compound, pool_compound, cDAI):
assert registry_compound.get_pool_rates(pool_compound) == [10**18, 10**18, 0, 0, 0, 0, 0, 0]
cDAI._set_exchange_rate(31337, {'from': accounts[0]})
assert registry_compound.get_pool_rates(pool_compound) == [31337, 10**18, 0, 0, 0, 0, 0, 0]
def test_get_rates_y(accounts, registry_y, pool_y, yDAI):
assert registry_y.get_pool_rates(pool_y) == [10**18, 10**18, 10**18, 10**18, 0, 0, 0, 0]
yDAI._set_exchange_rate(31337, {'from': accounts[0]})
assert registry_y.get_pool_rates(pool_y) == [31337, 10**18, 10**18, 10**18, 0, 0, 0, 0]
def test_pool_without_lending(accounts, registry_susd, pool_susd):
assert registry_susd.get_pool_rates(pool_susd) == [10**18, 10**18, 10**18, 10**18, 0, 0, 0, 0]
def test_unknown_pool(accounts, registry):
assert registry.get_pool_rates(accounts[-1]) == [0, 0, 0, 0, 0, 0, 0, 0]
def test_removed_pool(accounts, registry_y, pool_y, yDAI):
yDAI._set_exchange_rate(31337, {'from': accounts[0]})
assert registry_y.get_pool_rates(pool_y) == [31337, 10**18, 10**18, 10**18, 0, 0, 0, 0]
registry_y.remove_pool(pool_y)
assert registry_y.get_pool_rates(pool_y) == [0, 0, 0, 0, 0, 0, 0, 0]
def test_fix_incorrect_calldata(accounts, registry, pool_compound, lp_compound, cDAI):
registry.add_pool(
pool_compound,
2,
lp_compound,
ZERO_ADDRESS,
right_pad("0xdEAdbEEf"),
pack_values([8, 8]),
pack_values([18, 6]),
True,
{'from': accounts[0]}
)
with brownie.reverts("dev: bad response"):
registry.get_pool_rates(pool_compound)
registry.remove_pool(pool_compound)
registry.add_pool(
pool_compound,
2,
lp_compound,
ZERO_ADDRESS,
right_pad(cDAI.exchangeRateStored.signature),
pack_values([8, 8]),
pack_values([18, 6]),
True,
{'from': accounts[0]}
)
assert registry.get_pool_rates(pool_compound) == [10**18, 10**18, 0, 0, 0, 0, 0, 0]
def test_without_underlying(accounts, registry, pool_compound, cDAI, cUSDC):
registry.add_pool_without_underlying(
pool_compound,
2,
ZERO_ADDRESS,
ZERO_ADDRESS,
right_pad(cDAI.exchangeRateStored.signature),
pack_values([8, 8]),
pack_values([True] + [False] * 7),
True,
{'from': accounts[0]}
)
assert registry.get_pool_rates(pool_compound) == [10**18, 10**18, 0, 0, 0, 0, 0, 0]
cDAI._set_exchange_rate(31337, {'from': accounts[0]})
cUSDC._set_exchange_rate(31337, {'from': accounts[0]})
assert registry.get_pool_rates(pool_compound) == [31337, 10**18, 0, 0, 0, 0, 0, 0]
| 2,876 |
13_force_fields_and_classical_md/4_mm_md/2_atomistic_md/1_SubPc-C60/case1/run_aa_md.py
|
langerest/Tutorials_Libra
| 8 |
2172564
|
#*********************************************************************************
#* Copyright (C) 2016-2021 <NAME>
#*
#* This file is distributed under the terms of the GNU General Public License
#* as published by the Free Software Foundation, either version 3 of
#* the License, or (at your option) any later version.
#* See the file LICENSE in the root directory of this distribution
#* or <http://www.gnu.org/licenses/>.
#*
#*********************************************************************************/
###################################################################
# Starting with the file from the test_hamiltonian_mm/test_mm6a.py
#
###################################################################
import sys
if sys.platform=="cygwin":
from cyglibra_core import *
elif sys.platform=="linux" or sys.platform=="linux2":
from liblibra_core import *
from libra_py import *
def main():
#--------------------- Initialization ----------------------
# Create Universe and populate it
U = Universe(); LoadPT.Load_PT(U, "elements.txt")
# Create force field
uff = ForceField({"bond_functional":"Harmonic",
"angle_functional":"Fourier",
"dihedral_functional":"General0",
"oop_functional":"Fourier",
"mb_functional":"LJ_Coulomb","R_vdw_on":10.0,"R_vdw_off":15.0 })
LoadUFF.Load_UFF(uff,"uff.dat")
# Create molecular system and initialize the properties
syst = System()
LoadMolecule.Load_Molecule(U, syst, "Pc-C60.ent", "pdb")
syst.determine_functional_groups(0) # do not assign rings
syst.init_fragments()
print("Number of atoms in the system = ", syst.Number_of_atoms)
atlst1 = list(range(1,syst.Number_of_atoms+1))
# Creating Hamiltonian and initialize it
ham = Hamiltonian_Atomistic(1, 3*syst.Number_of_atoms)
ham.set_Hamiltonian_type("MM")
ham.set_interactions_for_atoms(syst, atlst1, atlst1, uff, 1, 0) # 0 - verb, 0 - assign_rings
ham.show_interactions_statistics()
# Bind Hamiltonian and the system
ham.set_system(syst); ham.compute(); print("Energy = ", ham.H(0,0), " a.u.")
# Electronic DOFs
el = Electronic(1,0)
# Nuclear DOFs
mol = Nuclear(3*syst.Number_of_atoms)
# Initialize MD variables
nve_md.nve_md_init(syst, mol, el, ham)
#=================== Propagation ====================
integrator = "DLML"
########################## Cooling #################################
f = open("_en_cooling.txt","w")
f.close()
params = { "dt":20.0, "integrator": "DLML"}
for i in range(1):
syst.set_atomic_q(mol.q)
syst.print_xyz("_mol_cooling.xyz",i)
for j in range(1):
ekin, epot, etot = nve_md.nve_md_step(syst, mol, el, ham, params)
syst.cool()
f = open("_en_cooling.txt","a")
f.write("i= %3i ekin= %8.5f epot= %8.5f etot= %8.5f\n" % (i, ekin, epot, etot))
f.close()
########################## Production MD #################################
rnd = Random()
syst.init_atom_velocities(300.0, rnd)
f = open("_en_md.txt","w")
f.close()
params["dt"] = 40.0
for i in range(1000):
syst.set_atomic_q(mol.q)
syst.print_xyz("_mol_md.xyz",i)
for j in range(10):
ekin, epot, etot = nve_md.nve_md_step(syst, mol, el, ham, params)
f = open("_en_md.txt","a")
f.write("i= %3i ekin= %8.5f epot= %8.5f etot= %8.5f\n" % (i, ekin, epot, etot))
f.close()
main()
| 3,663 |
pommerman/research/local_config.template.py
|
cinjon/playground
| 1 |
2172913
|
cluster_directory = '/path/to/directory/on/cluster'
email = '<EMAIL>'
def write_extra_sbatch_commands(f):
f.write("cd ${HOME}/Code/venvs" + "\n")
f.write("source selfplayground/bin/activate" + "\n")
f.write("SRCDIR=${HOME}/Code/selfplayground" + "\n")
f.write("cd ${SRCDIR}/pommerman/research" + "\n")
| 319 |
core/functions.py
|
GoSecure/CredSniper
| 0 |
2171940
|
import random
import string
import uuid
from core import output
def generate_token(size=32):
return ''.join(random.SystemRandom().choice(
string.ascii_lowercase + string.ascii_uppercase + string.digits
) for _ in range(size)
)
def store_creds(
module,
user,
password,
two_factor_token,
two_factor_type,
remote_addr,
city,
region,
zip_code,
time,
cookies
):
try:
with open('.sniped','a') as fh:
cred_id = str(uuid.uuid4())
fh.write('{},{},{},{},{},{},{},{},{},{},{},{}\n'.format(
cred_id,
module,
user,
password,
two_factor_token,
two_factor_type,
remote_addr,
city,
region,
zip_code,
time,
cookies
))
except Exception as ex:
output.exception(ex)
def cache_creds(module, username, password):
try:
with open('.cache','a+') as fh:
if username and password:
fh.write('{},{},{}\n'.format(module, username, password))
except Exception as ex:
output.exception(ex)
def reload_creds(seen):
creds = {'creds': []}
try:
with open('.sniped','r') as fh:
for cred in fh.read().split('\n'):
if len(cred) >= 3:
cl = cred.split(',')
cred_id = cl[0]
module = cl[1]
user = cl[2]
password = cl[3]
two_factor_token = cl[4]
two_factor_type = cl[5]
ip_address = cl[6]
city = cl[7]
region = cl[8]
zip_code = cl[9]
add_cred = {
'cred_id': cred_id,
'module': module,
'username': user,
'password': password,
'two_factor_token': two_factor_token,
'two_factor_type': two_factor_type,
'seen': True if cred_id in seen else False,
'ip_address': ip_address,
'city': city,
'region': region,
'zip_code': zip_code
}
creds['creds'].append(add_cred)
except Exception as ex:
output.exception(ex)
return creds
| 2,555 |
Easy/Minimum_Time_Visiting_All_Points/Minimum_Time_Visiting_All_easy.py
|
nitin3685/LeetCode_Solutions
| 0 |
2172694
|
from typing import List  # needed for the List[List[int]] annotation below

class Solution:
def dist(self, a, b):
# Chebyshev distance: diagonal moves are allowed, so the travel time
# between two points is max(|dx|, |dy|).
dx, dy = a[0] - b[0], a[1] - b[1]
if dx < 0:
dx = -dx
if dy < 0:
dy = -dy
return max(dx, dy)
def minTimeToVisitAllPoints(self, points: List[List[int]]) -> int:
return sum(self.dist(points[i - 1], points[i]) for i in range(1, len(points)))
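# Illustrative check (added): for points = [[1,1],[3,4],[-1,0]] the answer is
# max(2,3) + max(4,4) = 3 + 4 = 7 seconds.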
| 354 |
tests/apache/hive/hooks/test_hive.py
|
astronomer/astronomer-providers
| 27 |
2172170
|
from unittest import mock
from unittest.mock import PropertyMock
import pytest
from airflow import models
from impala.hiveserver2 import HiveServer2Connection, HiveServer2Cursor
from astronomer.providers.apache.hive.hooks.hive import HiveCliHookAsync
TEST_TABLE = "test_table"
TEST_SCHEMA = "test_schema"
TEST_POLLING_INTERVAL = 5
TEST_PARTITION = "state='FL'"
TEST_METASTORE_CONN_ID = "test_conn_id"
@mock.patch("astronomer.providers.apache.hive.hooks.hive.HiveCliHookAsync.get_connection")
@mock.patch("airflow.configuration.AirflowConfigParser.get")
@mock.patch("impala.hiveserver2.connect")
def test_get_hive_client_with_conf(mock_get_connect, mock_get_conf, mock_get_connection):
"""Checks the connection to hive client"""
mock_get_connect.return_value = mock.AsyncMock(HiveServer2Connection)
mock_get_conf.return_value = "kerberos"
mock_get_connection.return_value = models.Connection(
conn_id="metastore_default",
conn_type="metastore",
port=10000,
host="localhost",
)
hook = HiveCliHookAsync(TEST_METASTORE_CONN_ID)
result = hook.get_hive_client()
assert isinstance(result, HiveServer2Connection)
@mock.patch("astronomer.providers.apache.hive.hooks.hive.HiveCliHookAsync.get_connection")
@mock.patch("impala.hiveserver2.connect")
def test_get_hive_client(mock_get_connect, mock_get_connection):
"""Checks the connection to hive client"""
mock_get_connect.return_value = mock.AsyncMock(HiveServer2Connection)
mock_get_connection.return_value = models.Connection(
conn_id="metastore_default",
conn_type="metastore",
port=10000,
host="localhost",
)
hook = HiveCliHookAsync(TEST_METASTORE_CONN_ID)
result = hook.get_hive_client()
assert isinstance(result, HiveServer2Connection)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"result,response",
[
(["123"], "success"),
([], "failure"),
],
)
@mock.patch("astronomer.providers.apache.hive.hooks.hive.HiveCliHookAsync.get_connection")
@mock.patch("astronomer.providers.apache.hive.hooks.hive.HiveCliHookAsync.get_hive_client")
async def test_partition_exists(mock_get_client, mock_get_connection, result, response):
"""
Tests to check if a partition in given table in hive
is found or not
"""
hook = HiveCliHookAsync(metastore_conn_id=TEST_METASTORE_CONN_ID)
hiveserver_connection = mock.AsyncMock(HiveServer2Connection)
mock_get_client.return_value = hiveserver_connection
cursor = mock.AsyncMock(HiveServer2Cursor)
hiveserver_connection.cursor.return_value = cursor
cursor.is_executing = PropertyMock(side_effect=[True, False])
cursor.fetchall.return_value = result
res = await hook.partition_exists(TEST_TABLE, TEST_SCHEMA, TEST_PARTITION, TEST_POLLING_INTERVAL)
assert res == response
@pytest.mark.parametrize(
"partition,expected",
[
("user_profile/city=delhi", ("default", "user_profile", "city=delhi")),
("user.user_profile/city=delhi", ("user", "user_profile", "city=delhi")),
],
)
def test_parse_partition_name_success(partition, expected):
"""Assert that `parse_partition_name` correctly parse partition string"""
actual = HiveCliHookAsync.parse_partition_name(partition)
assert actual == expected
def test_parse_partition_name_exception():
"""Assert that `parse_partition_name` throw exception if partition string not correct"""
with pytest.raises(ValueError):
HiveCliHookAsync.parse_partition_name("user_profile.city=delhi")
@pytest.mark.parametrize(
"result,expected",
[
(["123"], True),
([], False),
],
)
@mock.patch("astronomer.providers.apache.hive.hooks.hive.HiveCliHookAsync.get_connection")
@mock.patch("astronomer.providers.apache.hive.hooks.hive.HiveCliHookAsync.get_hive_client")
def test_check_partition_exists(mock_get_client, mock_get_connection, result, expected):
"""Assert that `check_partition_exists` return True if partition found else return False."""
hook = HiveCliHookAsync(metastore_conn_id=TEST_METASTORE_CONN_ID)
hiveserver_connection = mock.AsyncMock(HiveServer2Connection)
mock_get_client.return_value = hiveserver_connection
cursor = mock.AsyncMock(HiveServer2Cursor)
hiveserver_connection.cursor.return_value = cursor
cursor.is_executing.return_value = False
cursor.fetchall.return_value = result
actual = hook.check_partition_exists(TEST_SCHEMA, TEST_TABLE, TEST_PARTITION)
assert actual == expected
| 4,536 |
algorithms/leetcode/medium/0098_验证二叉搜索树.py
|
bigfoolliu/liu_aistuff
| 1 |
2171460
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
Given the root of a binary tree, determine whether it is a valid binary search tree (BST).
A valid BST is defined as follows:
The left subtree of a node contains only values less than the node's value.
The right subtree of a node contains only values greater than the node's value.
Every left and right subtree must itself also be a binary search tree.
Example 1:
Input: root = [2,1,3]
Output: true
Example 2:
Input: root = [5,1,4,null,null,3,6]
Output: false
Explanation: the root node's value is 5, but its right child's value is 4.
Constraints:
The number of nodes in the tree is in the range [1, 10^4].
-2^31 <= Node.val <= 2^31 - 1
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/validate-binary-search-tree
Copyright belongs to LeetCode; contact LeetCode for commercial licensing and credit the source for non-commercial reuse.
"""
import doctest
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
    def isValidBST(self, root: TreeNode) -> bool:
        # Added sketch (the original left this as `pass`): standard min/max-bounds recursion.
        def valid(node, low, high):
            if node is None:
                return True
            return low < node.val < high and valid(node.left, low, node.val) and valid(node.right, node.val, high)
        return valid(root, float('-inf'), float('inf'))
if __name__ == '__main__':
doctest.testmod()
| 810 |
cogs/bsboxsim.py
|
not-cree-py/Remixbotfork
| 0 |
2170814
|
'''
MIT License
Copyright (c) 2017 Cree-Py
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# Import dependencies
import discord
from discord.ext import commands
import random
# Define class
class Brawl_Stars_Box_Simulator:
# Initialization method
def __init__(self, bot):
self.bot = bot
# Boxsim command
@commands.command(aliases=['open', 'box'])
async def boxsim(self, ctx):
'''Simulate a box opening in Brawl Stars'''
common = ["Shelly", "<NAME>", "Colt", "Nita", "Dynamike"]
rare = ["Bull", "Brock", "Barley", "Jessie"]
superrare = ["Poco", "Ricochet", "Bo"]
epic = ["Pam", "Piper"]
mythic = ["Mortis", "Tara"]
legendary = ["Spike", "Crow"]
num = random.randint(0, 100)
if num < 35:
result = "1 Elixir"
elif num < 40:
result = "2 Elixir"
elif num < 44:
result = "3 Elixir"
elif num < 47:
result = "5 Elixir"
elif num < 49:
result = "7 Elixir"
elif num < 50:
result = "10 Elixir"
elif num < 85:
rand = random.randint(0, 4)
result = common[rand]
elif num < 90:  # the original repeated `num < 85` here, which made the rare branch unreachable; 90 is an assumed threshold
rand = random.randint(0, 3)
result = rare[rand]
elif num < 94:
rand = random.randint(0, 2)
result = superrare[rand]
elif num < 97:
rand = random.randint(0, 1)
result = epic[rand]
elif num < 99:
rand = random.randint(0, 1)
result = mythic[rand]
else:
rand = random.randint(0, 1)
result = legendary[rand]
await ctx.send("**Tap! Tap!**")
await ctx.send(result)
result = result.replace(" ", "-")
if num >= 50:
try:
await ctx.send(file=discord.File(f'./data/img/{result.lower()}.png'))
except Exception as e:
print(e)
# Add cog to bot
def setup(bot):
bot.add_cog(Brawl_Stars_Box_Simulator(bot))
| 3,040 |
config/settings/test.py
|
oddbird/metadeploy
| 0 |
2172591
|
from .base import * # NOQA
CHANNEL_LAYERS = {
"default": {
"BACKEND": "metadeploy.tests.layer_utils.MockedRedisInMemoryChannelLayer"
}
}
HEROKU_TOKEN = "<KEY>"
HEROKU_APP_NAME = "test_heroku_app_name"
| 220 |
tests/module_tests/youtube.py
|
Javex/qllbot
| 0 |
2169099
|
import os
import sys
import unittest
import unittest.mock
import urllib.request
sys.path.append('../../')
import lib.bot
import lib.irc
import modules.youtube
YT_API_DATA_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'youtube_api_data.xml')
class TestYoutubeModule(unittest.TestCase):
def setUp(self):
lib.irc.say = lambda channel, msg: msg
def test_link(self):
meta = 'Trololo [00:04:56] (by testuser)'
msg = lib.irc.Message('https://www.youtube.com/watch?v=QH2-TGUlwu4')
bot = lib.bot.Bot('localhost')
with open(YT_API_DATA_FILE, 'r') as f:
with unittest.mock.patch.object(bot, 'send') as mocked_send:
with unittest.mock.patch.object(urllib.request, 'urlopen',
return_value=f) as _:
modules.youtube.display_youtube_metadata(bot=bot, msg=msg)
mocked_send.assert_called_with(meta)
def test_nospoiler(self):
msg = lib.irc.Message('https://www.youtube.com/watch?v=QH2-TGUlwu4 '
'nospoiler')
bot = lib.bot.Bot('localhost')
with unittest.mock.patch.object(bot, 'send') as mocked:
mocked.side_effect = Exception('Send method should not be called.')
modules.youtube.display_youtube_metadata(bot=bot, msg=msg)
if __name__ == '__main__':
unittest.main()
| 1,455 |
tests/pytests/integration/conftest.py
|
Bacon-Unlimited/salt
| 9,425 |
2172077
|
"""
tests.pytests.integration.conftest
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PyTest fixtures
"""
import logging
import pytest
log = logging.getLogger(__name__)
@pytest.fixture(scope="package")
def salt_master(salt_master_factory):
"""
A running salt-master fixture
"""
with salt_master_factory.started():
yield salt_master_factory
@pytest.fixture(scope="package")
def salt_minion(salt_master, salt_minion_factory):
"""
A running salt-minion fixture
"""
assert salt_master.is_running()
with salt_minion_factory.started():
# Sync All
salt_call_cli = salt_minion_factory.salt_call_cli()
ret = salt_call_cli.run("saltutil.sync_all", _timeout=120)
assert ret.exitcode == 0, ret
yield salt_minion_factory
@pytest.fixture(scope="module")
def salt_sub_minion(salt_master, salt_sub_minion_factory):
"""
A second running salt-minion fixture
"""
assert salt_master.is_running()
with salt_sub_minion_factory.started():
# Sync All
salt_call_cli = salt_sub_minion_factory.salt_call_cli()
ret = salt_call_cli.run("saltutil.sync_all", _timeout=120)
assert ret.exitcode == 0, ret
yield salt_sub_minion_factory
@pytest.fixture(scope="package")
def salt_proxy(salt_master, salt_proxy_factory):
"""
A running salt-proxy fixture
"""
assert salt_master.is_running()
with salt_proxy_factory.started():
yield salt_proxy_factory
@pytest.fixture(scope="package")
def deltaproxy_pillar_tree(base_env_pillar_tree_root_dir, salt_delta_proxy_factory):
"""
Create the pillar files for controlproxy and two dummy proxy minions
"""
top_file = """
base:
'{}':
- controlproxy
dummy_proxy_one:
- dummy_proxy_one
dummy_proxy_two:
- dummy_proxy_two
""".format(
salt_delta_proxy_factory.id
)
controlproxy_pillar_file = """
proxy:
proxytype: deltaproxy
ids:
- dummy_proxy_one
- dummy_proxy_two
"""
dummy_proxy_one_pillar_file = """
proxy:
proxytype: dummy
"""
dummy_proxy_two_pillar_file = """
proxy:
proxytype: dummy
"""
top_tempfile = pytest.helpers.temp_file(
"top.sls", top_file, base_env_pillar_tree_root_dir
)
controlproxy_tempfile = pytest.helpers.temp_file(
"controlproxy.sls", controlproxy_pillar_file, base_env_pillar_tree_root_dir
)
dummy_proxy_one_tempfile = pytest.helpers.temp_file(
"dummy_proxy_one.sls",
dummy_proxy_one_pillar_file,
base_env_pillar_tree_root_dir,
)
dummy_proxy_two_tempfile = pytest.helpers.temp_file(
"dummy_proxy_two.sls",
dummy_proxy_two_pillar_file,
base_env_pillar_tree_root_dir,
)
with top_tempfile, controlproxy_tempfile, dummy_proxy_one_tempfile, dummy_proxy_two_tempfile:
yield
@pytest.fixture(scope="package")
def salt_delta_proxy(salt_master, salt_delta_proxy_factory, deltaproxy_pillar_tree):
"""
A running salt-proxy fixture
"""
assert salt_master.is_running()
with salt_delta_proxy_factory.started():
yield salt_delta_proxy_factory
@pytest.fixture(scope="package")
def salt_cli(salt_master):
"""
The ``salt`` CLI as a fixture against the running master
"""
assert salt_master.is_running()
return salt_master.salt_cli()
@pytest.fixture(scope="package")
def salt_call_cli(salt_minion):
"""
The ``salt-call`` CLI as a fixture against the running minion
"""
assert salt_minion.is_running()
return salt_minion.salt_call_cli()
@pytest.fixture(scope="package")
def salt_cp_cli(salt_master):
"""
The ``salt-cp`` CLI as a fixture against the running master
"""
assert salt_master.is_running()
return salt_master.salt_cp_cli()
@pytest.fixture(scope="package")
def salt_key_cli(salt_master):
"""
The ``salt-key`` CLI as a fixture against the running master
"""
assert salt_master.is_running()
return salt_master.salt_key_cli()
@pytest.fixture(scope="package")
def salt_run_cli(salt_master):
"""
The ``salt-run`` CLI as a fixture against the running master
"""
assert salt_master.is_running()
return salt_master.salt_run_cli()
@pytest.fixture(scope="module")
def salt_ssh_cli(salt_master, salt_ssh_roster_file, sshd_config_dir):
"""
The ``salt-ssh`` CLI as a fixture against the running master
"""
assert salt_master.is_running()
return salt_master.salt_ssh_cli(
timeout=180,
roster_file=salt_ssh_roster_file,
target_host="localhost",
client_key=str(sshd_config_dir / "client_key"),
base_script_args=["--ignore-host-keys"],
)
| 4,795 |
librecron/launcher.py
|
john-charles/recron
| 2 |
2172824
|
import os, pwd, json, uuid, datetime
import logging
import sys
from crontab import CronTab
from .timer import MinuteTimer
from subprocess import Popen, STDOUT
TIME_PATTERN = "%Y-%m-%d-%H-%M"
class Job:
def __init__(self, user_info, command, now, config):
self.now = now
self.config = config
self.command = command
self.user_info = user_info
self.events_log = "events.log"
self.user_logdir = os.path.join("/var/log/recron", user_info.pw_name)
self.job_logfile = "%s-%s" % (now.strftime("%Y-%m-%d-%H-%M"), str(uuid.uuid4()))
def run(self):
if not os.path.exists(self.user_logdir):
os.makedirs(self.user_logdir)
job_args = {
'command': self.command,
'user_id': self.user_info.pw_uid,
'logfile': os.path.join(self.user_logdir, self.job_logfile)
}
job_process = Popen(("recron-launch", json.dumps(job_args)), stderr=STDOUT, stdout=sys.stdout.fileno())
job_args['status'] = job_process.wait()
with open(os.path.join(self.user_logdir, self.events_log), 'ab') as log:
log.write(json.dumps(job_args).encode('utf-8'))
log.write(b'\n')
log.flush()
class Launcher(MinuteTimer):
def __init__(self, config):
MinuteTimer.__init__(self)
self.config = config
def time_matches(self, job, now):
print("checking job", job.strftime(TIME_PATTERN) + " " + now.strftime(TIME_PATTERN))
return job.strftime(TIME_PATTERN) == now.strftime(TIME_PATTERN)
def run_job(self, user_info, job_info, now):
schedule = job_info.schedule()
# The cron iterator needs to go forward and then back to land on the
# current cron job.
schedule.get_next()
schedule.get_prev()
if self.time_matches(schedule.get_current(), now):
job = Job(user_info, job_info.command, now, self.config)
job.run()
def run_usercrontab(self, username, now):
logging.debug("running jobs for: " + username)
user_info = pwd.getpwnam(username)
user_crontab = os.path.join(user_info.pw_dir, ".recron/crontab")
if not os.path.exists(user_crontab):
logging.warning("crontab for: " + username + " not found!")
else:
crontab = CronTab(tabfile=user_crontab)
logging.debug("loaded crontab for: " + username)
for job in crontab.crons:
self.run_job(user_info, job, now)
def task(self):
logging.info("launching jobs")
now = datetime.datetime.now()
for username in self.config.active_user_list:
self.run_usercrontab(username, now)
| 2,909 |
02-analysis/data_creater.py
|
FiveSixSenvenEight/109B_flu_prediction
| 0 |
2171195
|
import os
from ast import literal_eval
import pandas as pd
import numpy as np
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima_model import ARIMA
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import seaborn as sns
from data_creater import *
from sklearn.preprocessing import MinMaxScaler
# Load in dfs for all states
all_states = [] # List of all the states
train_dfs, test_dfs = {}, {}
train_path = '../data/state_flu_google/train/'
test_path = '../data/state_flu_google/test/'
for root,dirs,files in os.walk(train_path):
for file in files:
if file.endswith('csv'):
state = file[:-4]
all_states.append(state)
train_dfs[state] = pd.read_csv(train_path + file)
for root,dirs,files in os.walk(test_path):
for file in files:
if file.endswith('csv'):
state = file[:-4]
test_dfs[state] = pd.read_csv(test_path + file)
# Helper functions-normalize the data
def minmax_transform(X_train, X_test, return_scaler=False):
""" Uses MinMaxScaler to scale X_train and X_test
to 0 and 1, scaled using X_train
"""
col_name = X_train.columns
scaler = MinMaxScaler().fit(X_train)
X_test = scaler.transform(X_test)
X_train = scaler.transform(X_train)
if return_scaler:
return pd.DataFrame(X_train), pd.DataFrame(X_test), scaler
else:
return pd.DataFrame(X_train), pd.DataFrame(X_test)
def get_data(state, future_week, with_gt=False, predictor_state=None):
'''
state: state to predict
future_week: the future week to forecast, chosen from [1, 2, 4, 8]
with_gt: whether to include GT data
predictor_state: list of states as predictors
'''
assert future_week in [1,2,4,8]
train, test = train_dfs[state], test_dfs[state]
X_train, X_test = pd.DataFrame(), pd.DataFrame()
# if not specified, only include the flu data as predictor
flu_train, flu_test, scaler = minmax_transform(pd.DataFrame(train.iloc[:, 1]), pd.DataFrame(test.iloc[:, 1]),
return_scaler=True)
X_train['flu_data'], X_test['flu_data'] = flu_train[0], flu_test[0]
target = 'target_' + str(future_week)
y_train, y_test = pd.DataFrame(scaler.transform(train[[target]])), test[[target]]
if with_gt and predictor_state:
for p in predictor_state:
# flu
state_train, state_test = train_dfs[p], test_dfs[p]
flu_train, flu_test = minmax_transform(
pd.DataFrame(state_train.iloc[:, 1]), pd.DataFrame(state_test.iloc[:, 1]))
X_train[p], X_test[p] = flu_train, flu_test
# google trend
X_train_gt, X_test_gt = state_train.iloc[:, 2:-4], state_test.iloc[:, 2:-4]
gt_train, gt_test = minmax_transform(X_train_gt, X_test_gt)
for i, col in enumerate(X_train_gt.columns):
X_train[p+' '+col], X_test[p+' '+col] = gt_train[i], gt_test[i]
elif with_gt: # include google trend data
X_train_gt, X_test_gt = train.iloc[:, 2:-4], test.iloc[:, 2:-4] # google trend data
gt_train, gt_test = minmax_transform(X_train_gt, X_test_gt)
for i, col in enumerate(X_train_gt.columns):
X_train[col], X_test[col] = gt_train[i], gt_test[i]
elif predictor_state:
for p in predictor_state:
state_train, state_test = minmax_transform(
pd.DataFrame(train_dfs[p].iloc[:, 1]), pd.DataFrame(test_dfs[p].iloc[:, 1]))
X_train[p], X_test[p] = state_train, state_test
X_all, y_all = pd.concat([X_train, X_test], ignore_index=True), pd.concat([y_train.iloc[:, 0], y_test.iloc[:, 0]], ignore_index=True)
return X_train, X_test, X_all, y_train, y_test, y_all, scaler
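# Illustrative usage (added sketch; 'New York' and the 1-week horizon are
# hypothetical choices, any state key present in train_dfs works):
#   X_train, X_test, X_all, y_train, y_test, y_all, scaler = get_data(
#       'New York', future_week=1, with_gt=True)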
| 4,083 |
qmldataset/utilities/check_noise.py
|
rajibchakravorty/QDataSet
| 0 |
2172826
|
"""Checking noise
"""
from typing import Dict, Any
from numpy import average, eye, zeros
from numpy.linalg import norm
from ..system_layers.quantum_ml_simulator import QuantumMLSimulator
def check_noise(simulation_parameters: Dict[str, Any], dimension: int):
"""
This function calculates the coherence measurements to check the noise behaviour,
based on the simulation parameters passed as a dictionary
:param simulation_parameters: Simulation Parameters
:param dimension: Dimension of the system
"""
simulator = QuantumMLSimulator(
simulation_parameters["evolution_time"],
simulation_parameters["num_time_steps"],
simulation_parameters["dynamic_operators"],
simulation_parameters["static_operators"],
simulation_parameters["noise_operators"],
simulation_parameters["measurement_operators"],
simulation_parameters["initial_states"],
simulation_parameters["num_realizations"],
simulation_parameters["pulse_shape"],
simulation_parameters["num_pulses"],
False,
simulation_parameters["noise_profile"])
# 3) Run the simulator and collect the results
print("Running the simulation\n")
simulation_results = simulator.simulate(zeros((1,)), batch_size=1)
expectations = simulation_results[9]
obs_vector = simulation_results[10:]
obs_vector = [average(V, axis=1) for V in obs_vector]
print("Analyzing results\n")
print("Measurement are:")
print(average(expectations, axis=1))
print("The Vo operators are:")
print(obs_vector)
print("The distance measures are:")
print([norm(vector[0, :] - eye(dimension), 2) for vector in obs_vector])
| 1,705 |
lab01-bayesian-linear-regression/coin.py
|
gcollura/advanced-statistical-inference-labs
| 0 |
2171719
|
import numpy as np
class Coin:
def __init__(self):
# np.random.randint(1) always returns 0, which would make every flip land the
# same way; drawing the bias uniformly from [0, 1) is the assumed intent here.
self.__r = np.random.rand()
def __call__(self, N):
return sum([i <= self.r for i in np.random.rand(N, 1)])
@property
def r(self):
return self.__r
| 242 |
mlaas/api/score/endpoints/modelscore.py
|
ayanray-tech/faas
| 2 |
2172867
|
import logging
from flask import request
from flask_restplus import Resource
from api.score.business import fetch_model_score_using_factory
from api.score.business import fetch_model_score_using_strategy
from api.score.serializers import model_score
from api.restplus import api
log = logging.getLogger(__name__)
ns = api.namespace('model/score', description='Scoring by different ML Models')
@ns.route('/usingfactory')
class PostsCollectionFactory(Resource):
@api.expect(model_score)
def post(self):
"""
Calculates score of a Model using Factory Design Pattern.
"""
score = fetch_model_score_using_factory(request.json)
print('SCORE', score)
return score, 200
@ns.route('/usingstrategy')
class PostsCollectionStrategy(Resource):
@api.expect(model_score)
def post(self):
"""
Calculates score of a Model using Strategy Design Pattern.
"""
score = fetch_model_score_using_strategy(request.json)
print('SCORE', score)
return score, 200
| 1,054 |
transformer/gen_transformer.py
|
jindal2309/conv-ai-model
| 5 |
2171932
|
import torch
from torch import nn
import torch.nn.functional as F
from transformer.transformer_block import TransformerBlock
class GenTransformer(nn.Module):
"""
Generate text (character by character)
"""
def __init__(self, emb, heads, num_tokens, seq_len, depth, ):
super().__init__()
self.num_tokens = num_tokens
self.token_emb = nn.Embedding(num_embeddings=num_tokens, embedding_dim=emb)
self.pos_emb = nn.Embedding(num_embeddings=seq_len, embedding_dim=emb)
trans_blocks = []
for i in range(depth):
trans_blocks.append(TransformerBlock(emb, heads))
# The file ends abruptly here in the original; registering the blocks as a
# module (below) is an added, assumed completion so the layers are trainable.
self.tblocks = nn.Sequential(*trans_blocks)
| 635 |
migrations/versions/4346dca42023_.py
|
hreeder/nexus-auth
| 0 |
2172335
|
"""empty message
Revision ID: 4346dca42023
Revises: <KEY>
Create Date: 2014-06-26 11:14:24.075725
"""
# revision identifiers, used by Alembic.
revision = '4346dca42023'
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('group_member',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('member_id', sa.Integer(), nullable=True),
sa.Column('group_id', sa.Integer(), nullable=True),
sa.Column('group_admin', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['group_id'], ['group.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.add_column(u'group', sa.Column('open', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column(u'group', 'open')
op.drop_table('group_member')
### end Alembic commands ###
| 979 |
gooddata-sdk/gooddata_sdk/utils.py
|
hkad98/gooddata-python-sdk
| 7 |
2170160
|
# (C) 2021 GoodData Corporation
from __future__ import annotations
import functools
import os
import re
from pathlib import Path
from typing import Any, Callable, Dict, NamedTuple, Union, cast
import yaml
from gooddata_metadata_client import ApiAttributeError
from gooddata_sdk.compute.model.base import ObjId
# Use typing collection types to support python < py3.9
IdObjType = Union[str, ObjId, Dict[str, Dict[str, str]], Dict[str, str]]
def id_obj_to_key(id_obj: IdObjType) -> str:
"""
Given an object containing an id+type pair, this function will return a string key.
For convenience, this also recognizes the `ref` format used by GoodData.UI SDK. In that format, the id+type
are wrapped in 'identifier'.
:param id_obj: id object
:return: string that can be used as key
"""
if isinstance(id_obj, str):
return id_obj
elif isinstance(id_obj, ObjId):
return str(id_obj)
try:
unwrapped_id = id_obj["identifier"]
if isinstance(unwrapped_id, str):
raise ValueError(
f'Invalid value type "{type(id_obj)}" for key "identifier" in id object. Expected dict[str, str]'
)
# let type checking rule out str type after accessing key "identifier"
unwrapped = unwrapped_id
except KeyError:
# make sure id_obj is of type dict[str, str], eliminate dict[str, dict[str, str]]
value_item = list(id_obj.values())[0]
if isinstance(value_item, dict):
raise ValueError(f'Invalid value type "{type(id_obj)}" of id object. Expected dict[str, str]')
# Use typing collection types to support python < py3.9
unwrapped = cast(Dict[str, str], id_obj)
if "id" not in unwrapped or "type" not in unwrapped:
raise KeyError(
f"Invalid id object used to find side loaded entity: {str(id_obj)}. Need dict with 'id' and 'type' keys"
)
return f"{unwrapped['type']}/{unwrapped['id']}"
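# Illustrative examples (added; 'label'/'region' are hypothetical identifiers):
# all three accepted shapes map to the same string key.
#   id_obj_to_key("label/region")                                    -> "label/region"
#   id_obj_to_key({"id": "region", "type": "label"})                 -> "label/region"
#   id_obj_to_key({"identifier": {"id": "region", "type": "label"}}) -> "label/region"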
class AllPagedEntities(NamedTuple):
data: list[Any]
included: list[Any]
# Use functools.partial instead of Protocol because Protocol is available starting by py3.8
def load_all_entities(get_page_func: functools.partial[Any], page_size: int = 500) -> AllPagedEntities:
"""
Loads all entities from a paged resource. The primary input to this function is a partial function that is setup
with all the fixed parameters. Given this the function will get entities page-by-page and merge them into a single
'pseudo-response' containing data and included attributes.
An example usage:
>>> import functools
>>> import gooddata_metadata_client as metadata_client
>>> import gooddata_metadata_client.apis as metadata_apis
>>> api = metadata_apis.EntitiesApi(metadata_client.ApiClient())
>>> get_func = functools.partial(api.get_all_entities_visualization_objects, 'some-workspace-id',
>>> include=["ALL"], _check_return_type=False)
>>> vis_objects = load_all_entities(get_func)
:param get_page_func: an API controller from the metadata client
:param page_size: optionally specify page length, default is 500
:return:
"""
all_paged_entities = AllPagedEntities(data=[], included=[])
current_page = 0
while True:
result = get_page_func(page=current_page, size=page_size)
all_paged_entities.data.extend(result.data)
try:
all_paged_entities.included.extend(result.included)
except ApiAttributeError:
pass
if len(result.data) < page_size:
break
current_page += 1
return all_paged_entities
def load_all_entities_dict(
get_page_func: functools.partial[Any], page_size: int = 500, camel_case: bool = False
) -> dict[str, Any]:
all_entities = load_all_entities(get_page_func, page_size)
all_entities_dict = {"data": all_entities.data, "included": all_entities.included}
return all_entities_dict if camel_case else change_case(all_entities_dict, camel_to_snake)
class SideLoads:
def __init__(self, objs: list[Any]) -> None:
self._objects = dict([(f"{o['type']}/{o['id']}", o) for o in objs])
def find(self, id_obj: IdObjType) -> Union[Any, None]:
id_obj_key = id_obj_to_key(id_obj)
if id_obj_key not in self._objects:
return None
return self._objects[id_obj_key]
def all_for_type(self, obj_type: str) -> list[Any]:
return [o for o in self._objects.values() if o["type"] == obj_type]
def __str__(self) -> str:
return str(self._objects)
def __repr__(self) -> str:
return f"SideLoads({','.join(self._objects.keys())})"
def __len__(self) -> int:
return len(self._objects)
def get_sorted_yaml_files(folder: Path) -> list[Path]:
return sorted([p for p in folder.glob("*.yaml")], key=lambda x: x.stem)
def create_directory(path: Path) -> None:
if not os.path.exists(path):
os.makedirs(path)
def write_layout_to_file(path: Path, content: Union[dict[str, Any], list[dict]]) -> None:
with open(path, "w", encoding="utf-8") as fp:
yaml.safe_dump(content, fp, indent=2)
def read_layout_from_file(path: Path) -> Any:
if not os.path.isfile(path):
raise ValueError(f"There is no file in the given path {path}")
try:
with open(path, "r", encoding="utf-8") as f:
return yaml.safe_load(f)
except yaml.YAMLError as exc:
raise ValueError(f"File [{path}] has wrong yaml format. Following exception was raised during loading: {exc}")
def camel_to_snake(camel_case_str: str) -> str:
return re.sub(r"([A-Z]+)", r"_\1", camel_case_str).lower()
def snake_to_camel(snake_case_str: str) -> str:
components = snake_case_str.split("_")
return components[0] + "".join(x.title() for x in components[1:])
def change_case_helper(value: Union[list, dict, str], case: Callable[[str], str]) -> Union[list, dict, str]:
if isinstance(value, list):
return [change_case_helper(v, case) for v in value]
elif isinstance(value, dict):
return change_case(value, case)
else:
return value
def change_case(dictionary: dict, case: Callable[[str], str]) -> dict:
temp = dict()
for k, v in dictionary.items():
temp[case(k)] = change_case_helper(v, case)
return temp
| 6,371 |
08_Liner-SORT/countingsort.py
|
NiuXWolf/Introduction-to-Algorithms
| 3 |
2172570
|
def countingsort(A,k):  # values of A are assumed to lie in [0, k)
C=[]
B=[]
for x in xrange(0,k):
C.append(0)
for x in xrange(0,len(A)):
B.append(0)
for x in xrange(0,len(A)):
C[A[x]]=C[A[x]]+1
print_data(C,True)
for x in xrange(1,k):  # accumulate counts over the full range; the original stopped one bucket early
C[x]=C[x]+C[x-1]
print_data(C,True)
for x in xrange(len(A),0,-1):
B[C[A[x-1]]-1]=A[x-1]
C[A[x-1]]=C[A[x-1]]-1
return B
def print_data(data, has_line = False):
if has_line:
print "-----------------------------"
for i in range(len(data)):
print str(data[i]),
if i != len(data) - 1:
print ",",
print ""
if __name__ == "__main__":
#data = [-2, -8, -7, -10, -3,-78, -5, -6, -4, -6]
data = [2, 8, 33, 10, 3,78, 5, 6, 4, 6,9,90]
print_data(data,True)
data2=countingsort(data,98)
print_data(data2,True)
| 744 |
doc/SM/PythonOuput/run.py
|
NuxDD/pyrate
| 7 |
2172387
|
import sys
sys.path.append('/home/lohan/ownCloud/PyR@TE_3/doc/SM/PythonOuput')
from SM import RGEsolver
##############################################
# First, create an instance of the RGEsolver #
##############################################
rge = RGEsolver('rge', tmin=0, tmax=20, initialScale=0)
##########################################################
# We fix the running scheme and initial conditions below #
##########################################################
# Running scheme :
rge.loops = {'GaugeCouplings': 2,
'Yukawas': 2,
'QuarticTerms': 2,
'ScalarMasses': 2,
'Vevs': 2}
# Gauge Couplings
rge.g1.initialValue = 0
rge.g2.initialValue = 0
rge.g3.initialValue = 0
# Yukawa Couplings
rge.yt.initialValue = 0
rge.yb.initialValue = 0
rge.ytau.initialValue = 0
# Quartic Couplings
rge.lambda_.initialValue = 0
# Scalar Mass Couplings
rge.mu.initialValue = 0
# Vacuum-expectation Values
rge.v.initialValue = 0
############################
# Solve the system of RGEs #
############################
rge.solve(step = .05)
# Another way to call rge.solve() :
# rge.solve(Npoints = 500)
####################
# Plot the results #
####################
rge.plot(subPlots=True, printLoopLevel=True)
#############################################
# Possibly save the results for a later use #
#############################################
# Save the results in some file
# rge.save('rgeResults.save')
# Later, load the rge object with :
# rge = RGEsolver.load('rgeResults.save')
| 1,565 |
archive/peckdisk.py
|
Guillermo-Hidalgo-Gadea/RPi4Toolbox
| 0 |
2172552
|
import gpiozero
import time
# Maybe include a popup pinout to mark button connection
GPIOA = 4
GPIOB = 1
peckdiskA = gpiozero.Button(GPIOA)
peckdiskB = gpiozero.Button(GPIOB)
starttime = time.time()
try:
while True:
if peckdiskA.is_pressed:
print('Choice A :)')
time.sleep(0.2)
elif peckdiskB.is_pressed:
print('Choice B :(')
time.sleep(0.2)
looptime = time.time()
if looptime - starttime > 20:
print('session ended')
break
except KeyboardInterrupt:
print('session aborted')
peckdiskA.close()
peckdiskB.close()
| 641 |
pyefun/wxefun/component/FloatSpin.py
|
nuo010/pyefun
| 94 |
2171061
|
import wx.lib.agw.floatspin as floatspin
from .wxControl import *
class 小数微调框(floatspin.FloatSpin, 公用方法):
pass
def 取当前数值(self):
return self.GetDefaultValue().GetValue()
def 取当前数值2(self):
return self.GetValue()
def 取默认数值(self):
return self.GetDefaultValue()
def 取显示的位数(self):
return self.GetDigits()
def 取字符格式(self):
return self.GetFormat()
def 取最大值(self):
return self.GetMax()
def 取最小值(self):
return self.GetMin()
@组件_异常检测
def 置最大值(self,最大值):
return self.SetMax(最大值)
@组件_异常检测
def 置最小值(self,最小值):
return self.SetMin(最小值)
@组件_异常检测
def 置数值范围(self,最小值,最大值):
return self.SetRange(最小值,最大值)
@组件_异常检测
def 置数值范围2(self,最小值,最大值):
return self.SetRangeDontClampValue(最小值,最大值)
def 是否设置数值范围(self):
return self.HasRange()
@组件_异常检测
def 是否在数值范围内容(self,数值):
'Test whether the given value falls within the allowed range'
return self.InRange(数值)
def 是否已设置默认值(self):
return self.IsDefaultValue()
@组件_异常检测
def 置小数位数(self,位数):
return self.SetDigits(位数)
@组件_异常检测
def 置字符格式(self,格式):
return self.SetFormat(格式)
@组件_异常检测
def 置当前数值(self,数值):
return self.SetValue(数值)
| 1,261 |
compas_fofin/src/compas_fofin/equilibrium/__init__.py
|
BlockResearchGroup/WS_cluster
| 0 |
2171872
|
"""
compas_fofin.equilibrium
========================
.. currentmodule:: compas_fofin.equilibrium
Classes
-------
.. autosummary::
:toctree: generated/
:nosignatures:
cablenet_fd_alglib
cablenet_fd_numpy
cablenet_fd_rpc
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .fd import *
from .fd_alglib import *
def cablenet_fd_rpc(data, **kwargs):
"""Convenience wrapper for ``cablenet_fd_numpy`` that can be used with ``RPC``.
Parameters
----------
data : dict
The data dictionary representing a cablenet data structure.
Returns
-------
dict
A data dict with the same structure as the input dict.
Notes
-----
For additional named parameters that can be passed to this function,
see ``cablenet_fd_numpy``.
Examples
--------
.. code-block:: python
proxy = Proxy('compas_fofin.equilibrium')
result = proxy.cablenet_fd_rpc(cablenet.to_data())
cablenet.data = result
"""
from compas_fofin.datastructures import Cablenet
cablenet = Cablenet.from_data(data)
cablenet_fd_numpy(cablenet)
return cablenet.to_data()
__all__ = [name for name in dir() if not name.startswith('_')]
| 1,287 |
train.py
|
michaelschwier/DQNCartPole
| 0 |
2172871
|
import gym
import numpy as np
from agent import DQNAgent
EPISODES = 1000
if __name__ == "__main__":
env = gym.make('CartPole-v1')
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
agent = DQNAgent(state_size, action_size)
game_over = False
batch_size = 32
best_score = 0
for episodeIdx in range(EPISODES):
state = env.reset()
state = np.reshape(state, [1, state_size]).astype(np.float32)
for time in range(500):
action = agent.act(state)
next_state, reward, game_over, _ = env.step(action)
reward = reward if not game_over else -10
next_state = np.reshape(next_state, [1, state_size]).astype(np.float32)
agent.remember(state, action, reward, next_state, game_over)
state = next_state
if game_over:
print("episode: {}/{}, score: {}, e: {:.2}".format(episodeIdx, EPISODES, time, agent.exploration_rate))
break
if (episodeIdx > 100) and (time >= best_score):
print("Saving current model")
agent.save("model.h5")
best_score = time
if len(agent.memory) > batch_size:
agent.replay(batch_size)
| 1,135 |
pybeepbeep/ranging.py
|
LumineerLabs/BeepBeep
| 3 |
2168379
|
import math
from typing import Callable, Dict, List
from librosa.core import time_to_samples, tone
import numpy as np
from scipy.signal import correlate, find_peaks, hilbert
# set fft window size
# this corresponds to a resolution of about 2% of the sampling frequency
_fft_width = 512
def _get_window_size_ms(duration_ms: float):
return 20 * duration_ms
def get_minimum_channel_width(sampling_freq_hz: float):
resolution = sampling_freq_hz / _fft_width
return 10 * resolution
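# Worked example (added for illustration): at a 44.1 kHz sampling rate the FFT
# bin resolution is 44100 / 512 ≈ 86 Hz, so the minimum channel width above is
# roughly 861 Hz.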
def _find_beep_in_window(samples: np.ndarray,
sampling_freq_hz: float,
target_signal_freq_hz: float,
duration_ms: float) -> int:
if target_signal_freq_hz >= sampling_freq_hz / 2:
raise Exception(
"Sampling frequency must be > 2x the target frequency. See https://en.wikipedia.org/wiki/Nyquist_rate"
)
# generate target signal
signal = tone(target_signal_freq_hz, sampling_freq_hz, duration=duration_ms/1000.0)
# find onset, this differs from the description in the paper which uses a sharpness and peak finding algorithm
correlation = correlate(samples, signal, mode='valid', method='fft')
envelope = np.abs(hilbert(correlation))
max_correlation = np.max(correlation)
peaks, _ = find_peaks(envelope)
filtered_peaks = peaks[(peaks < len(samples)).nonzero()]
peaks = peaks[(envelope[filtered_peaks] > .85 * max_correlation).nonzero()]
ratio = (np.max(signal) / np.max(correlation))
correlation *= ratio
envelope *= ratio
if len(peaks) == 0:
# if not found, use None
return None
else:
return peaks[0]
def _calculate_windows_for_schedule(sampling_freq_hz: float,
schedule: [{}]) -> [(int, int)]:
if len(schedule) == 0:
return None
window_duration_s = _get_window_size_ms(schedule[0]["duration_ms"]) / 1000.0
half_window_s = window_duration_s / 2
return [
(
time_to_samples(entry["time_s"] - half_window_s, sampling_freq_hz),
time_to_samples(entry["time_s"] + half_window_s, sampling_freq_hz)
)
for entry in schedule
]
def find_deltas(samples: np.ndarray,
sampling_freq_hz: float,
schedule: [{}],
self_id: str) -> [float]:
windows = _calculate_windows_for_schedule(sampling_freq_hz=sampling_freq_hz,
schedule=schedule)
onsets = np.zeros(len(schedule))
self_n = 0
for i, window in enumerate(windows):
n_onset = _find_beep_in_window(samples=samples[window[0]:window[1]],
sampling_freq_hz=sampling_freq_hz,
target_signal_freq_hz=schedule[i]["target_hz"],
duration_ms=schedule[i]["duration_ms"])
if n_onset is None:
onsets[i] = math.inf
else:
n_onset += window[0]
onsets[i] = float(n_onset)
if schedule[i]["id"] == self_id:
self_n = n_onset
return np.absolute(onsets - self_n)
def single_tone_scheduler(nodes: [str],
target_hz: float,
duration_ms: float):
window = _get_window_size_ms(duration_ms) / 1000.0
return [{"id": node, "target_hz": target_hz, "duration_ms": duration_ms, "time_s": (i * window) + window}
for i, node in enumerate(nodes)]
def band_scheduler(nodes: [str],
channels: [float],
duration_ms: float):
n_windows = math.ceil(len(nodes) / float(len(channels)))
schedule = []
for i in range(len(channels)):
index = i * n_windows
channel_schedule = single_tone_scheduler(nodes=nodes[index:index + n_windows],
target_hz=channels[i],
duration_ms=duration_ms)
schedule.extend(channel_schedule)
return schedule
def generate_schedule(nodes: [str],
schedule_strategy: Callable[[List[str], List[float], float], List[Dict]] = single_tone_scheduler,
scheduler_kwargs: {} = None) -> [{}]:
if scheduler_kwargs is None:
scheduler_kwargs = {"target_hz": 6000, "duration_ms": 50}
return schedule_strategy(nodes, **scheduler_kwargs)
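# Illustrative usage sketch (node ids and channel frequencies are made up, not
# part of the library): schedule three nodes on the default 6 kHz tone, or
# spread them over two channels with the band scheduler.
#
#   nodes = ["node-a", "node-b", "node-c"]
#   schedule = generate_schedule(nodes)
#   schedule = generate_schedule(nodes,
#                                schedule_strategy=band_scheduler,
#                                scheduler_kwargs={"channels": [5000.0, 7000.0],
#                                                  "duration_ms": 50})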
def calculate_distances(deltas: np.ndarray, sampling_freq_hz: float, c: float = 343) -> np.ndarray:
"""
If the caller wants to account for the distance between the speaker and microphone on the node, the k factors
should be converted to a sample count and placed in the diagonal of the deltas matrix (d1,1, d2,2, etc.).
d = [d1,1 d1,2 d1,3 d1,4]
[d2,1 d2,2 d2,3 d2,4]
[d3,1 d3,2 d3,3 d3,4]
[d4,1 d4,2 d4,3 d4,4]
distance = (c / 2fs)(|d1,2 - d2,1| + d1,1 + d2,2)
(c/2fs)|d-dT|+
[0 |d12-d21| |d13-d31| |d14-d41|] [0 d11+d22 d11+d33 d11+d44]
[|d21-d12| 0 |d23-d32| |d24-d42|] [d22+d11 0 d22+d33 d22+d44]
[ ... ... 0 |d34-d43|] [ 0 d33+d44]
[ ... ... ... 0 ] [ 0 ]
[d11 d11 d11 d11] [d11 d22 d33 d44]
[d22 d22 d22 d22] [d11 d22 d33 d44]
[d33 d33 d33 d33] [d11 d22 d33 d44]
[d44 d44 d44 d44] [d11 d22 d33 d44]
"""
conversion_factor = c / (2 * sampling_freq_hz)
deltas_t = deltas.T
k1 = deltas * np.eye(deltas.shape[0]) @ np.ones(deltas.shape)
k2 = k1.T
k = k1 + k2
return conversion_factor * (np.abs(deltas - deltas_t) + k)
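# Worked example (hypothetical numbers): with sampling_freq_hz = 48000 and the
# default c = 343 m/s, a two-node deltas matrix
#   [[   0., 1000.],
#    [ 720.,    0.]]
# has a zero diagonal (so the k terms vanish) and |d12 - d21| = 280 samples,
# giving an off-diagonal distance of 343 / (2 * 48000) * 280 ~= 1.0 m.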
def index_distances(distances: np.ndarray, schedule: List[Dict]) -> Dict[str, Dict]:
indexed_distances = {}
for i in range(len(schedule)):
i_id = schedule[i]["id"]
for j in range(i, len(schedule)):
j_id = schedule[j]["id"]
distance = distances[i][j]
if i_id not in indexed_distances.keys():
indexed_distances[i_id] = {}
if j_id not in indexed_distances.keys():
indexed_distances[j_id] = {}
indexed_distances[i_id][j_id] = distance
indexed_distances[j_id][i_id] = distance
return indexed_distances
| 6,361 |
models/networks/discriminator.py
|
saxenabhishek/swapping-autoencoder-pytorch
| 5 |
2172537
|
from models.networks import BaseNetwork
from models.networks.stylegan2_layers import Discriminator as OriginalStyleGAN2Discriminator
from dataclasses import dataclass
@dataclass
class DiscConfig:
netD_scale_capacity: float = 1.0
class StyleGAN2Discriminator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument("--netD_scale_capacity", default=1.0, type=float)
return parser
def __init__(self, opt):
super().__init__(opt)
self.stylegan2_D = OriginalStyleGAN2Discriminator(
opt.crop_size, 2.0 * opt.netD_scale_capacity, blur_kernel=[1, 3, 3, 1] if self.opt.use_antialias else [1]
)
def forward(self, x):
pred = self.stylegan2_D(x)
return pred
def get_features(self, x):
return self.stylegan2_D.get_features(x)
def get_pred_from_features(self, feat, label):
assert label is None
feat = feat.flatten(1)
out = self.stylegan2_D.final_linear(feat)
return out
| 1,042 |
maestro/core/provider.py
|
estudio89/maestro-python
| 0 |
2171364
|
from typing import TYPE_CHECKING, Optional
from abc import ABC
if TYPE_CHECKING: # pragma: no cover
from maestro.core.store import BaseDataStore
from maestro.core.events import EventsManager
from maestro.core.metadata import VectorClock, ItemChangeBatch
from maestro.core.execution import ChangesExecutor
from maestro.core.query.metadata import Query
class BaseSyncProvider(ABC):
"""Manages the changes that will be synchronized to a data store.
Attributes:
provider_id (str): This provider's unique identifier.
data_store (BaseDataStore): The data store.
events_manager (EventsManager): The class that will handle synchronization events.
changes_executor (ChangesExecutor): The class that will process the changes to be applied to the data store.
max_num (int): The maximum number of changes that will be processed in each batch of changes.
"""
provider_id: "str"
data_store: "BaseDataStore"
events_manager: "EventsManager"
changes_executor: "ChangesExecutor"
max_num: "int"
def __init__(
self,
provider_id: "str",
data_store: "BaseDataStore",
events_manager: "EventsManager",
changes_executor: "ChangesExecutor",
max_num: "int",
):
"""
Args:
provider_id (str): This provider's unique identifier.
data_store (BaseDataStore): The data store.
events_manager (EventsManager): The class that will handle synchronization events.
changes_executor (ChangesExecutor): The class that will process the changes to be applied to the data store.
max_num (int): The maximum number of changes that will be processed in each batch of changes.
"""
self.provider_id = provider_id
self.data_store = data_store
self.events_manager = events_manager
self.changes_executor = changes_executor
self.max_num = max_num
def get_vector_clock(self, query: "Optional[Query]" = None) -> "VectorClock":
"""Returns the current VectorClock for this provider.
Returns:
VectorClock: The current VectorClock for this provider.
"""
return self.data_store.get_local_vector_clock(query=query)
def download_changes(
self, vector_clock: "VectorClock", query: "Optional[Query]" = None
) -> "ItemChangeBatch":
"""Retrieves the changes that occurred in the data store linked to this provider after the timestamps defined by the given VectorClock.
Args:
vector_clock (VectorClock): VectorClock used for selecting changes.
Returns:
ItemChangeBatch: The batch of changes that was selected.
"""
item_change_batch = self.data_store.select_changes(
vector_clock=vector_clock, max_num=self.max_num, query=query
)
item_change_batch.reset_status()
return item_change_batch
def upload_changes(
self, item_change_batch: "ItemChangeBatch", query: "Optional[Query]"
):
"""Applies changes obtained from a remote provider to the data store.
Args:
item_change_batch (ItemChangeBatch): The batch of changes to be applied.
query (Optional[Query]): The query that's being synced.
"""
self.changes_executor.run(
item_changes=item_change_batch.item_changes, query=query
)
def get_deferred_changes(
self, vector_clock: "VectorClock", query: "Optional[Query]" = None
) -> "ItemChangeBatch":
"""Retrieves the changes received previously but that weren't applied in the last session due to an exception having occurred.
Args:
vector_clock (VectorClock): VectorClock used to select the changes.
Returns:
ItemChangeBatch: The batch of changes that was selected.
"""
return self.data_store.select_deferred_changes(
vector_clock=vector_clock, max_num=self.max_num, query=query
)
def __repr__(self): # pragma: no cover
return f"{self.__class__.__name__}(provider_id='{self.provider_id}')"
| 4,174 |
04.100 Python Exercises Evaluate and Improve Your Skills - AS/Exercise 01-25/exercise_20.py
|
ptyadana/python-dojo
| 3 |
2172476
|
# Question: Calculate the sum of all dictionary values.
d = {"a": 1, "b": 2, "c": 3}
# Expected output:
# 6
#Answer:
my_sum=0
for value in d.values():
my_sum+=value
print(my_sum)
#Answer 2:
d = {"a": 1, "b": 2, "c": 3}
print(sum(d.values()))
# Explanation:
# d.values() returns a list-like dict_values object, and sum() adds up its items.
| 394 |
src/sos/__init__.py
|
pgcudahy/sos
| 90 |
2170502
|
#!/usr/bin/env python3
#
# Copyright (c) <NAME> and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import os
import textwrap
from ._version import __version__
from .__main__ import get_run_parser
from .workflow_executor import Base_Executor
from sos.parser import SoS_Script
assert __version__
def execute_workflow(script: str,
workflow=None,
targets=None,
args=[],
options={},
config={}):
'''
Execute a SoS workflow with the following parameters:
script:
A (multi-line) string that defines one or more workflows.
workflow (string, optional):
        Name of workflow to execute. This option can be ignored if the
        script defines a default workflow (with no name or with name
        `default`), defines only a single workflow, or if the workflow
        is triggered by option "targets".
targets (string or list, or sos_targets, optional):
One (string) or more (list) of filenames as targets to be generated
by the workflow, equivalent to option "-t" from command line.
args (list or dict, optional):
        Command line arguments as a list in the format of "['--cutoff', '0.5']"
or a dictionary in the format of {"cutoff": 0.5} for workflow parameters
defined in "parameter" statements. SoS options such as '-c', '-j',
and '-s' should be defined in options.
options (dict, optional):
Dictionary with the following configuration options. Please
refer to output of "sos run -h" for details about each option.
config_file: configuration file ("-c"). The content of the config
file can also be specified with option config.
output_dag: option "-d"
output_report: option "-p"
default_queue: option "-q"
worker_procs: option "-j"
max_running_jobs: option "-J"
sig_mode: option "-s"
run_mode: "run" or "dryrun"
verbosity: option "-v"
trace_existing: option "-T"
config_file: option "-c"
config (dict, optional):
config as if loaded from "options['config_file']"
Note: executing on specified host (option "-r") is not supported by this function.
'''
try:
script = SoS_Script(textwrap.dedent(script))
except Exception as e:
# show script with error
raise ValueError(f'Failed to parse script {script}: {e}')
#
if workflow and targets:
raise ValueError(
"Only one of parameters workflow and targets should be specified.")
wf = script.workflow(workflow, use_default=not targets)
if not isinstance(config, dict):
raise ValueError('Option config should be a dictionary.')
run_options = {
'config_file': None,
'extra_config': config,
'output_dag': None,
'output_report': None,
'default_queue': None,
'worker_procs': None,
'max_running_jobs': None,
'sig_mode': 'default',
'run_mode': 'run',
'verbosity': 2,
'workdir': os.getcwd(),
'script': script,
'workflow': workflow,
'targets': targets,
'workflow_args': args,
'trace_existing': False,
'error_mode': 'default',
'exec_mode': None
}
# a convenience feature
if isinstance(args, dict):
run_options['workflow_vars'] = args
workflow_args = []
elif args:
        # for convenience, also accept arguments in command-line form and parse them
parser = get_run_parser(interactive=True, with_workflow=False)
for arg in args:
if arg.startswith(
'-') and not arg.startswith('--') and not arg in ['-c']:
raise ValueError(
'SoS options should be specified with parameter "option"')
# check args
sos_args, workflow_args = parser.parse_known_args(args)
if sos_args.__config__ and not 'config_file' in options:
options['config_file'] = sos_args.__config__
else:
workflow_args = []
run_options.update(options)
from .utils import env
env.verbosity = run_options['verbosity']
executor = Base_Executor(wf, args=workflow_args, config=run_options)
if isinstance(targets, str):
targets = [targets]
return executor.run(targets=targets)
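# Illustrative usage (this tiny workflow is a made-up example, not taken from
# the SoS documentation):
#
#   execute_workflow('''
#       [hello]
#       print("hello from SoS")
#       ''',
#       options={'verbosity': 1})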
| 4,476 |
custom_components/waveshare_ups_hat/const.py
|
mykhailog/hacs_waveshare_ups_hat
| 7 |
2171625
|
MIN_ONLINE_CURRENT = -100
MIN_CHARGING_CURRENT = 1
MIN_BATTERY_CONNECTED_CURRENT = 0.1
LOW_BATTERY_PERCENTAGE = 20
| 115 |
repo/plugin.video.exodus/resources/lib/sources/genvideo_mv.py
|
Hades01/Addons
| 0 |
2171762
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,random
from resources.lib.modules import cleantitle
from resources.lib.modules import client
class source:
def __init__(self):
self.domains = ['genvideos.org']
self.base_link = 'http://genvideos.org'
self.search_link = '/results?q=%s'
def movie(self, imdb, title, year):
try:
query = self.search_link % (urllib.quote_plus(title))
query = urlparse.urljoin(self.base_link, query)
t = cleantitle.get(title)
r = client.request(query)
r = client.parseDOM(r, 'div', attrs = {'class': 'cell_container'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
referer = urlparse.urljoin(self.base_link, url)
headers = {'X-Requested-With': 'XMLHttpRequest'}
post = urlparse.parse_qs(urlparse.urlparse(referer).query).values()[0][0]
post = urllib.urlencode({'v': post})
url = urlparse.urljoin(self.base_link, '/video_info/iframe')
r = client.request(url, post=post, headers=headers, referer=referer)
r = re.findall('"(\d+)"\s*:\s*"([^"]+)', r)
r = [(urllib.unquote(i[1].split('url=')[-1]), i[0]) for i in r]
links = [(i[0], '1080p') for i in r if int(i[1]) >= 1080]
links += [(i[0], 'HD') for i in r if 720 <= int(i[1]) < 1080]
links += [(i[0], 'SD') for i in r if 480 <= int(i[1]) < 720]
for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Genvideo', 'url': i[0], 'direct': True, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
try:
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
return url
except:
return
| 3,512 |
scripts/pub_subs_batteryStatus.py
|
UbiCALab/battery_status_turtlebot
| 0 |
2172725
|
#!/usr/bin/env python
import roslib
import rospy
from kobuki_msgs.msg import SensorState
from std_msgs.msg import String
def Avg(lst):
return sum(lst) / len(lst)
class BatCommunication(): #here we will have both the publisher and the subscriber
def __init__(self):
self.batgroup=[]; #queue
self.chargingStatus=False
self.batstatus=["high battery", "medium battery", "low battery", "critical battery"]
self.i=0
self.highmed=150 #at some stage we can send this value by parameter.
self.medlow=142
self.lowcritical=138
self.publish_rate = rospy.Rate(1)
#subscriber
rospy.Subscriber("mobile_base/sensors/core", SensorState, self.f_bat_status)
#publisher
self.pub = rospy.Publisher('battery_status', String, queue_size=1)
# publish status to leds. We just publish batstatus[i] and chargingStatus.
# We will need a subscriber that will listen to "talker" on the topic "pub_status_to_led" and change the leds.
def f_bat_status(self, data):
self.batgroup.append(float(data.battery))
if(int(data.charger) > 0):
self.chargingStatus = True
else:
            self.chargingStatus = False  # not charging
if len(self.batgroup)>200:
self.batgroup.pop(0) #extract first element that entered
if Avg(self.batgroup) == self.highmed:
if self.i !=1:
self.i+=1
elif Avg(self.batgroup) == self.medlow:
if self.i!=2:
self.i+=1
elif Avg(self.batgroup) == self.lowcritical:
if self.i!=3:
self.i+=1
def publish_data(self):
if not self.chargingStatus:
self.pub.publish("Status: " + self.batstatus[self.i])
else:
self.pub.publish("Status: Charging")
#self.pub.publish("Status: " + self.batstatus[i] + " and charging is " + str(self.chargingStatus)
if __name__ == '__main__':
try:
rospy.init_node("BatteryManager")
bc = BatCommunication()
while not rospy.is_shutdown():
bc.publish_data()
bc.publish_rate.sleep()
except rospy.ROSInterruptException:
rospy.loginfo("exception")
| 2,297 |
helper/abbrev.py
|
avgupta456/HackMIT-2019
| 3 |
2172524
|
import enchant, difflib
d = enchant.Dict("en_US")
def complete(text):
    out = ""
    for word in text.split(" "):
        out += __complete__(word) + " "
    return out
def __complete__(word):
    try:
        ratios, best = {}, 0
        for suggestion in set(d.suggest(word)):
            ratio = difflib.SequenceMatcher(None, word, suggestion).ratio()
            ratios[ratio] = suggestion
            if ratio > best: best = ratio
        return ratios[best]
    except KeyError:
        return word
| 467 |
python/8kyu/check_the_exam.py
|
Sigmanificient/codewars
| 3 |
2172784
|
"""Kata url: https://www.codewars.com/kata/5a3dd29055519e23ec000074."""
from typing import List
def check_exam(arr1: List[str], arr2: List[str]) -> int:
return max(
0, sum(
(-1 if x != y else 4) if y else 0
for x, y in zip(arr1, arr2)
)
)
| 290 |
obman_render/mesh_generator.py
|
hassony2/obman_render
| 58 |
2171506
|
import os
import sys
if sys.hexversion >= 0x03000000:
def load_model(model_file='SMPLH_female.pkl', ncomps=12):
return _SmplModelClient(model_file, ncomps)
else:
def load_model(model_file='SMPLH_female.pkl', ncomps=12):
return _SmplModelDirect(model_file, ncomps)
class _SmplModelDirect():
""" Use this class direct from python 2.7 """
def __init__(self, model_file, ncomps):
self._model = _load_model(model_file, ncomps)
@property
def pose(self):
return self._model.pose
@property
def betas(self):
return self._model.betas
@property
def trans(self):
return self._model.trans
def generate_mesh(self, pose=None, betas=None, trans=None):
if betas is not None:
self._model.betas[:] = betas
if pose is not None:
self._model.pose[:] = pose
if trans is not None:
self._model.trans[:] = trans
return self._model.r, self._model.f
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class _SmplModelClient():
""" Use this wrapper from python 3.5 """
def __init__(self, model_file, ncomps):
self._model_file = model_file
self._ncomps = ncomps
@property
def pose(self):
return self._pose
@property
def betas(self):
return self._betas
@property
def trans(self):
return self._trans
@property
def J_transformed(self):
return self._J_transformed
def generate_mesh(self, pose=None, betas=None, trans=None,
center_idx=None):
if pose is not None:
self.pose[:] = pose
if betas is not None:
self.betas[:] = betas
if trans is not None:
self.trans[:] = trans
if center_idx is not None:
offset = self.J_transformed[center_idx, :]
self.trans[:] = -offset
self._proc.stdin.write(b'\n')
self._proc.stdin.flush()
self._proc.stdout.readline()
return self._verts, self._faces
def __enter__(self):
args = [
'python2',
os.path.abspath(__file__), '--model_file', self._model_file,
'--ncomps',
str(self._ncomps)
]
from subprocess import Popen, PIPE
self._proc = Popen(args, stdin=PIPE, stdout=PIPE)
assert self._proc.stdout.readline() == b'READY\n'
import SharedArray as sa
self._pose = sa.attach('shm://pose')
self._betas = sa.attach('shm://betas')
self._trans = sa.attach('shm://trans')
self._verts = sa.attach('shm://verts')
self._faces = sa.attach('shm://faces')
self._J_transformed = sa.attach('shm://J_transformed')
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
outs, errs = self._proc.communicate(input=b'DONE\n', timeout=2)
except Exception:
self._proc.kill()
def _load_model(model_file, ncomps):
mano_path = os.environ.get('MANO_LOCATION', '~/mano')
sys.path.append(mano_path)
from webuser.smpl_handpca_wrapper import load_model
if not os.path.isabs(model_file):
model_file = os.path.join(mano_path, 'models', model_file)
return load_model(model_file, ncomps=ncomps, flat_hand_mean=False)
def _run_server(model_file, ncomps):
model = _load_model(model_file, ncomps)
try:
import SharedArray as sa
for name in [
'verts', 'faces', 'pose', 'betas', 'trans', 'J_transformed'
]:
try:
sa.delete(name)
except:
pass
pose = sa.create('shm://pose', model.pose.shape, model.pose.dtype)
betas = sa.create('shm://betas', model.betas.shape, model.betas.dtype)
trans = sa.create('shm://trans', model.trans.shape, model.trans.dtype)
verts = sa.create('shm://verts', model.r.shape, model.r.dtype)
faces = sa.create('shm://faces', model.f.shape, model.f.dtype)
J_transformed = sa.create('shm://J_transformed',
model.J_transformed.shape,
model.J_transformed.dtype)
pose[:] = model.pose
betas[:] = model.betas
trans[:] = [0, 0,
0] # model.J_transformed[25, :] + [0,0,-3] # model.trans
faces[:] = model.f
J_transformed[:] = model.J_transformed
sys.stdout.write('READY\n')
sys.stdout.flush()
while True:
cmd = sys.stdin.readline()
if cmd == b'DONE\n':
break
model.betas[:] = betas
model.pose[:] = pose
model.trans[:] = trans
# TODO see with Igor
# model.J_transformed[:] = J_transformed
verts[:] = model.r
sys.stdout.write('\n')
sys.stdout.flush()
finally:
sa.delete('pose')
sa.delete('betas')
sa.delete('trans')
sa.delete('verts')
sa.delete('faces')
sa.delete('J_transformed')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Server.')
parser.add_argument('--model_file', type=str)
parser.add_argument('--ncomps', type=int)
args = parser.parse_args()
_run_server(args.model_file, args.ncomps)
| 5,441 |
hn_tools/hn_vlcctrl.py
|
Krasnikov05/hn_tools
| 0 |
2172613
|
#!/usr/bin/python3
"""
Start VLC on a remote host and send commands
"""
import re
import os
import sys
import time
import shlex
import base64
import atexit
import shutil
import logging
import tempfile
import argparse
import subprocess
import http.client
try:
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
except:
...
import hn_general
CONFIG_FILE = "~/.config/hn-vlcs.ini"
DEFAULT_DISPLAY = ":0"
def gen_command_template(
util="cvlc", fullscreen=False, http_port=None, http_host="0.0.0.0"
):
"""
Returns args list for VLC
"""
command = [util, "--play-and-exit", "--gain", "2"]
if fullscreen:
command.append("-f")
if http_port is not None:
command += [
"-I",
"http",
"--http-host",
http_host,
"--http-port",
str(http_port),
"--http-password",
"1",
]
return command
def ssh_send(host, command, display=DEFAULT_DISPLAY, port=None, stdin=None):
"""
Send command via SSH
"""
ssh_command = ["ssh", host]
if port is not None:
ssh_command += ["-p", str(port)]
command = shlex.join(command)
if display is not None:
command = f"DISPLAY={display} " + command
ssh_command.append(command)
pipe = subprocess.PIPE
if stdin is None:
stdin = pipe
return subprocess.Popen(ssh_command, stdin=stdin, stdout=pipe, stderr=pipe)
def ftp_tempdir(path, addr):
"""
Create a temporary folder with a link to the file (ftp://{}:{}/file)
Start FTP server in this directory
After the disconnect, the folder is deleted and the server is stopped
"""
logging.basicConfig(level=logging.WARNING)
tdir = tempfile.mkdtemp()
os.link(path, f"{tdir}/file")
authorizer = DummyAuthorizer()
authorizer.add_anonymous(tdir)
handler = FTPHandler
handler.on_disconnect = lambda _: sys.exit()
handler.authorizer = authorizer
atexit.register(lambda: shutil.rmtree(tdir))
try:
server = FTPServer(addr, handler)
server.max_cons = 5
server.max_cons_per_ip = 5
server.serve_forever()
except OSError:
...
class HTTPControl:
"""
Remote VLC control over HTTP
"""
def __init__(self, addr: (str, int), password="1"):
self.addr = addr
auth = "Basic " + base64.b64encode((":" + password).encode()).decode()
self.__headers = {"Authorization": auth}
def __get(self, path):
connection = http.client.HTTPConnection(*self.addr)
connection.request("GET", path, headers=self.__headers)
resp = connection.getresponse()
return resp.read().decode()
def status(self):
"""
Returns state as a string, current time position
and duration in seconds
"""
try:
data = self.__get("/requests/status.xml")
except ConnectionRefusedError:
return ("", 0.0, 0.0)
position_query = re.compile(r"<position>(.+)</position>")
duration_query = re.compile(r"<length>(.+)</length>")
state_query = re.compile(r"<state>(.+)</state>")
position = float(position_query.search(data).group(1))
duration = float(duration_query.search(data).group(1))
state = state_query.search(data).group(1)
return state, position, duration
def toggle_play(self):
"""
Pause if it's playing and play if it's paused
"""
self.__get("/requests/status.xml?command=pl_pause")
def stop(self):
"""
Stop playback completely
"""
self.__get("/requests/status.xml?command=pl_stop")
def seek(self, seconds: int):
"""
Set current time position in seconds
"""
self.__get(f"/requests/status.xml?command=seek&val={seconds}s")
def jump(self, seconds: int):
"""
Move current time position by offset
"""
if seconds == 0:
return
val = str(seconds)
if seconds > 0:
val = "+" + val
self.__get(f"/requests/status.xml?command=seek&val={val}s")
def pause(self):
"""
Pause playback
"""
if self.status()[0] == "playing":
self.toggle_play()
def play(self):
"""
Unpause playback
"""
if self.status()[0] == "paused":
self.toggle_play()
def main():
parser = argparse.ArgumentParser(prog="HOME NETWORK VLCCTRL")
parser.add_argument("-c", dest="conf", help="Name in config")
parser.add_argument("-f", dest="file", help="File to send")
parser.add_argument("-u", dest="url", help="URL to send")
parser.add_argument("-display", dest="display", help="DISPLAY envvar")
parser.add_argument("-http-host", dest="http_host", help="Remote's hostname")
parser.add_argument("-ssh-host", dest="ssh_host", help="Remote's hostname")
parser.add_argument("-http-port", dest="http_port")
parser.add_argument("-ssh-port", dest="ssh_port")
parser.add_argument("--send", action="store_true")
parser.add_argument("--stdin", action="store_true")
parser.add_argument("--play", action="store_true")
parser.add_argument("--pause", action="store_true")
parser.add_argument("--stop", action="store_true")
args = parser.parse_args()
if args.conf:
config = hn_general.load_config(CONFIG_FILE)[args.conf]
args.__dict__.update(config)
display = DEFAULT_DISPLAY
if args.display:
display = args.display
if args.stdin:
command = gen_command_template(http_port=args.http_port) + ["-"]
ssh_send(
args.ssh_host, command, display=display, port=args.ssh_port, stdin=sys.stdin
)
sys.exit()
if args.send:
template = gen_command_template(http_port=args.http_port)
if args.url:
command = template + [args.url]
else:
addr = hn_general.get_self_address()
command = template + [f"ftp://{addr[0]}:{addr[1]}/file"]
if os.fork() == 0:
time.sleep(0.1)
ssh_send(args.ssh_host, command, display=display, port=args.ssh_port)
ftp_tempdir(args.file, addr)
sys.exit()
if any((args.play, args.pause, args.stop)):
control = HTTPControl((args.http_host, args.http_port))
if args.play:
control.play()
elif args.pause:
control.pause()
elif args.stop:
control.stop()
sys.exit()
parser.print_help()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| 6,820 |
mlf_core/__init__.py
|
KevinMenden/mlf-core
| 0 |
2172928
|
"""Top-level package for mlf-core."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '1.10.0'
| 110 |
viewer/__init__.py
|
jzw0025/fem-with-python
| 148 |
2172109
|
import os
from traits.etsconfig.api import ETSConfig
toolkit = os.getenv('ETS_TOOLKIT', 'qt4')
ETSConfig.toolkit = toolkit
os.environ['ETS_TOOLKIT'] = toolkit
from _viewer import launch_viewer
| 193 |
twitter_crawler/got/models/Tweet.py
|
AlexHtZhang/A-billion-dollar-deal
| 0 |
2171627
|
import datetime
class Tweet(object):
'''
Tweet object has all the properties Twitter displays.
param id: id
type id: str
param permalink: link to tweet
type permalink: str
param username: username account that wrote the tweet
type username: str
param text: text written on the tweet
type text: str
param date: date the tweet was published
type date: datetime
param retweets: the number of total retweets
type retweets: int
param favorites: the number of total favorites
type favorites: int
param mentions: mentioned usernames on the tweet
type mentions: a list of strings
param hashtags: hashtags on the tweet
    type hashtags: a list of strings
'''
def __init__(self):
self.id = ''
self.permalink = 'https://twitter.com' + ''
self.username = ''
self.text = ''
self.date = datetime.datetime.fromtimestamp(0)
self.retweets = 0
self.favorites = 0
self.mentions = []
self.hashtags = []
| 924 |
server/www/html/error.py
|
ericmatte/hassio-google-drive-backup
| 1 |
2171855
|
#!/usr/bin/env python3
import cgi
import cgitb
import datetime
from urllib.parse import unquote
cgitb.enable()
args = cgi.FieldStorage()
if 'error' in args and 'version' in args:
with open('/etc/user_error_log/log', "a") as log:
log.write("\n\nWhen: {2}\nVersion: {0}\nError:{1}\n".format(
unquote(args.getvalue('version')),
unquote(args.getvalue('error')),
datetime.datetime.now()))
print("Status: 200 OK")
print("")
print("OK")
| 493 |
classifier/utils/difangzhi.py
|
joker452/Keyword-spotting-system
| 0 |
2172220
|
import os
import cv2
import sys
import json
import pickle
import untangle
import numpy as np
sys.path.append('./')
from util import mkdir, compare
from functools import cmp_to_key
def write_char(f, position, class_id, next_char_class, nnext_char_class):
for pos in position:
f.write(str(pos) + " ")
f.write(str(class_id) + " ")
f.write(str(next_char_class) + " " + str(nnext_char_class) + "\n")
def make_dataset(root_dir):
not_wanted = ('╳', '阝', '═', '︺', '︹', '━', '', '│', '□', '○', '。', '、', '\u3000', '\ue002')
data_dir = [f.name for f in os.scandir(root_dir) if f.is_dir() and f.name != 'out']
page_id = 1
out_dir = os.path.join(root_dir, "out")
train_dir = os.path.join(out_dir, "train")
test_dir = os.path.join(out_dir, "test")
mkdir(out_dir)
mkdir(train_dir)
mkdir(test_dir)
with open('./difangzhi_freq.json', 'r', encoding='utf-8') as f:
d1 = json.load(f)
d2 = d1.copy()
for k in d2.keys():
d2[k] = d2[k] // 2
if k not in not_wanted:
mkdir(os.path.join(test_dir, k))
mkdir(os.path.join(train_dir, k))
with open('./char2index.pkl', 'rb') as f:
index_dict = pickle.load(f)
counter = 0
for data in data_dir:
img_dir = os.path.join(root_dir, data, "jpg")
xml_dir = os.path.join(root_dir, data, "xml")
doc_list = os.listdir(xml_dir)
for i in range(len(doc_list)):
doc = untangle.parse(os.path.join(xml_dir, doc_list[i]))
page_name = doc.page['id'].replace("xml", "jpg")
if hasattr(doc.page, 'text'):
positions = [text['image_position'] for text in doc.page.text]
characters = [text.cdata for text in doc.page.text]
else:
# for images containing multi-text
characters = []
positions = []
if hasattr(doc.page, 'text_line'):
for text_line in doc.page.text_line:
pos1 = pos2 = char1 = char2 = None
if hasattr(text_line, 'text'):
pos1 = [text['image_position'] for text in text_line.text if text['image_position']]
char1 = [text.cdata for text in text_line.text if text['image_position']]
if hasattr(text_line, 'multi_text'):
pos2 = [text['image_position'] for multi_text in text_line.multi_text
for text in multi_text.text if text['image_position']]
char2 = [text.cdata for multi_text in text_line.multi_text
for text in multi_text.text if text['image_position']]
if pos1:
if pos2:
positions += pos1 + pos2
characters += char1 + char2
else:
positions += pos1
characters += char1
elif pos2:
positions += pos2
characters += char2
labels = []
for k in range(len(characters)):
positions[k] = positions[k].split(';')
characters[k] = characters[k].replace('\n', '')
for char_index in range(len(characters[k])):
                    # drop unwanted characters
char = characters[k][char_index]
if char not in not_wanted:
coordinates = list(map(int, positions[k][char_index].split(',')))
labels.append({'text': char, 'coordinates': coordinates})
labels.sort(key=cmp_to_key(compare))
img = cv2.imread(os.path.join(img_dir, page_name), -1)
cv2.imwrite(os.path.join(out_dir, '{}.jpg'.format(page_id)), img)
            img = cv2.morphologyEx(img, 6, cv2.getStructuringElement(1, (33, 33), (16, 16)))  # 6 = cv2.MORPH_BLACKHAT, 1 = cv2.MORPH_CROSS
img = img.mean(2)[:, :, None]
img = np.where(img < 50, (0, 0, 0), (255, 255, 255)).astype(np.uint8)
for k in range(len(labels)):
position = labels[k]['coordinates']
label = labels[k]['text']
char = img[position[1]: position[3], position[0]: position[2]]
file_name = '{}-{}-{}.jpg'.format(data, page_name[: -4], counter)
counter += 1
if d1[label] != 1 and d2[label] > 0:
d2[label] = d2[label] - 1
# put it in train
cv2.imwrite(os.path.join(train_dir, label, file_name), char)
else:
cv2.imwrite(os.path.join(test_dir, label, file_name), char)
with open(os.path.join(out_dir, "{}.txt".format(page_id)), "a", encoding='utf-8') as f:
char_num = len(labels)
for k in range(char_num - 2):
position = labels[k]['coordinates']
class_id = index_dict[labels[k]['text']]
next_class_id = index_dict[labels[k + 1]['text']]
nnext_class_id = index_dict[labels[k + 2]['text']]
write_char(f, position, class_id, next_class_id, nnext_class_id)
                # second-to-last character: only one following character
write_char(f, labels[char_num - 2]['coordinates'],
index_dict[labels[char_num - 2]['text']],
index_dict[labels[char_num - 1]['text']], -1)
                # last character: no following characters
write_char(f, labels[char_num - 1]['coordinates'],
index_dict[labels[char_num - 1]['text']], -1, -1)
page_id += 1
if __name__ == '__main__':
root_dir = "d:/project/lunwen/data/difangzhi"
make_dataset(root_dir)
| 5,876 |
example/stackoverflow.py
|
starrify/scrapy-inline-requests
| 58 |
2172491
|
from scrapy.http import Request
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from inline_requests import inline_requests
class StackoverflowSpider(CrawlSpider):
name = "stackoverflow"
allowed_domains = ["stackoverflow.com"]
start_urls = ['http://stackoverflow.com/users']
user_agent = 'scrapy-inline-requests (+https://github.com/rolando/scrapy-inline-requests)'
rules = [
Rule(LinkExtractor(allow=r'/users/\d+/'), callback='parse_profile'),
# Follow only first pages in pagination links.
Rule(LinkExtractor(allow=r'\?page=\d&')),
]
@inline_requests
def parse_profile(self, response):
# Scrape user info from user page.
user = self.load_user(response)
# Scrapy user's answers.
answers_url = response.urljoin('?tab=answers')
answers_resp = yield Request(answers_url)
user['answers'] = list(
self.iter_links(answers_resp, answers_resp.css('.answer-link'))
)
# Scrape user's questions.
questions_url = response.urljoin('?tab=questions')
questions_resp = yield Request(questions_url)
user['questions'] = list(
self.iter_links(questions_resp, questions_resp.css('.user-questions h3'))
)
# Scrape user's tags.
tags_url = response.urljoin('?tab=tags')
tags_resp = yield Request(tags_url)
user['tags'] = tags_resp.css('.user-tags .post-tag::text').extract()
# Item complete.
yield user
def load_user(self, response):
return {
'name': response.css('h1::text').extract_first(),
'website': response.css('.url[rel=me]::text').extract_first(),
'location': response.css('.label.adr::text').extract_first(),
'url': response.url,
}
def iter_links(self, response, sel, expr='a[href]'):
for link in sel.css(expr):
yield {
'title': link.css('::text').extract_first(),
'url': response.urljoin(link.xpath('@href').extract_first()),
}
| 2,132 |
logie/urls.py
|
anirbanroydas/logie
| 0 |
2171755
|
from tornado import web
from tornado.web import URLSpec as url
# from sockjs.tornado import SockJSRouter
from settings import settings
from utils import include
# from apps.webapp.views import LogWebsocketHandler
# # Register SocjJsRouter Connection
# SockjsWebsocketRouter = SockJSRouter(LogWebsocketHandler, '/log')
urls = [
url(r"/static/(.*)", web.StaticFileHandler,
{"path": settings.get('static_path')}),
]
urls += include(r"/", "apps.webapp.urls")
# urls = urls + SockjsWebsocketRouter.urls
| 515 |
fast/FAST/FASTV7.py
|
yagweb/pyfast
| 0 |
2171216
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 20:04:55 2015
@author: YangWenguang
Issues contact with: <EMAIL>
"""
import _pyCAEV7
def linearize(inpfile, is_end=True):
'''
'''
_pyCAEV7.pymarch(inpfile)
_pyCAEV7.pyfast_init(inpfile)
# _pyCAEV7.pyfastdynamics(ztime_s, qt_s, qdt_s,
# blpitchcom_s, yawposcom_s, yawratecom_s, elecpwr_s, gentrq_s,
# qd2t_s,outdata_s)
if is_end:
_pyCAEV7.pyfast_end()
def test_linearize():
# path = r'..\Data\FASTV7_demo_a\test13.fst'
# path = r'..\Data\FASTV8_demo_a\FASTV7.fst'
path = r'..\Data\FASTV8_demo_hr\FASTV7_linear.fst'
linearize(path)
if __name__ == '__main__':
print('start')
test_linearize()
| 757 |
math2/stat/counting.py
|
AussieSeaweed/math2
| 2 |
2172527
|
from functools import cache
from auxiliary import product
@cache
def factorial(x):
    return x * factorial(x - 1) if x else 1
def cyclic_permutations(n):
return factorial(n - 1)
def permutations(n, r):
return product(range(n, n - r, -1))
def combinations(n, r):
return permutations(n, r) // factorial(r)
def partitions(n, kinds):
return factorial(n) // product(factorial(kind) for kind in kinds)
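# Worked examples (illustrative): choosing 2 of 5 items gives
# permutations(5, 2) == 20 ordered selections and combinations(5, 2) == 10
# unordered ones; partitions(4, (2, 2)) == 6 counts the arrangements of "aabb".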
| 420 |
django/engagementmanager/rest/signup.py
|
onap/vvp-engagementmgr
| 0 |
2171685
|
#
# ============LICENSE_START==========================================
# org.onap.vvp/engagementmgr
# ===================================================================
# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# ===================================================================
#
# Unless otherwise specified, all software contained herein is licensed
# under the Apache License, Version 2.0 (the “License”);
# you may not use this software except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Unless otherwise specified, all documentation contained herein is licensed
# under the Creative Commons License, Attribution 4.0 Intl. (the “License”);
# you may not use this documentation except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by/4.0/
#
# Unless required by applicable law or agreed to in writing, documentation
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============LICENSE_END============================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
import uuid
from django.template.loader import get_template
from django.utils import timezone
from rest_framework import status
from rest_framework.parsers import JSONParser
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from engagementmanager import mail
from engagementmanager.decorator.class_decorator import classDecorator
from engagementmanager.decorator.log_func_entry import logFuncEntry
from engagementmanager.models import Vendor, Engagement, Role, Invitation, \
IceUserProfile, CustomUser
from engagementmanager.rest.vvp_api_view import VvpApiView
from engagementmanager.serializers import\
SuperThinIceUserProfileModelSerializer
from engagementmanager.service.invite_service import markInvitationAsAccepted
from engagementmanager.service.user_service import UserService
from engagementmanager.utils.constants import Constants, Roles
from engagementmanager.utils.validator import Validator, logEncoding
from engagementmanager.views_helper import generateActivationLink, \
createUserTemplate
@classDecorator([logFuncEntry])
class SignUp(VvpApiView):
permission_classes = (AllowAny,)
def post(self, request):
data = request.data
data_dont_save = JSONParser().parse(request)
if ('company' not in data or not data['company'] or
'full_name' not in data or not data['full_name'] or
'email' not in data or not data['email'] or
'password' not in data or not data['password'] or
'phone_number' not in data or not data['phone_number'] or
'regular_email_updates' not in data):
msg = "One of the input parameters is missing"
self.logger.error(msg)
return Response(msg, status=status.HTTP_400_BAD_REQUEST)
Validator.validatePassword(data['password'])
i_full_name = data['full_name']
i_email = data['email']
i_phone_number = data['phone_number']
i_password = data['password']
i_regular_email_updates = data['regular_email_updates']
i_is_service_provider_contact = False
add_without_confirm = False
user_svc = UserService()
if 'add_from_import_excel' in data:
add_without_confirm = True
Validator.validateEmail(i_email)
if data_dont_save['company'] == \
Constants.service_provider_company_name:
i_is_service_provider_contact = True
mailTokens = i_email.split("@")
if mailTokens[1] not in Constants.service_provider_mail_domain and \
i_is_service_provider_contact:
msg = "Email address should be with service provider domain for \
signees that their company =" + \
Constants.service_provider_company_name
self.logger.error(logEncoding(msg))
return Response(msg, status=status.HTTP_400_BAD_REQUEST)
iceuser = IceUserProfile.objects.filter(email=i_email)
if (not iceuser.exists()):
roleObj = Role.objects.get(
name=Roles.standard_user.name) # @UndefinedVariable
activationToken = str(uuid.uuid4().hex)
i_company = Vendor.objects.get(name=data_dont_save['company'])
user_object = CustomUser.objects.create_user(
username=i_email, email=i_email, password=<PASSWORD>,
activation_token=activationToken,
activation_token_create_time=timezone.now(), is_active=False)
info = createUserTemplate(
i_company, i_full_name, roleObj, i_phone_number,
i_is_service_provider_contact,
None, i_regular_email_updates, user_object)
newUserObj, is_profile_created = \
IceUserProfile.objects.update_or_create(
email=user_object.email, defaults=info)
self.logger.debug(
"Creating Non activated User: " + str(newUserObj))
userData = SuperThinIceUserProfileModelSerializer(newUserObj).data
# If we eng_uuid and inviter_uuid is supplied it means that this
# user was invited. We want to add them to the engagement team
# of the inviter
if 'invitation' in data:
invitation = Invitation.objects.get(
invitation_token=data['invitation'])
self.logger.debug(
"Looks like user " + i_full_name +
" has arrived to the sign-up page from an invite email \
initiated by user with uuid=" +
invitation.invited_by_user_uuid + ". Adding them to the \
inviter's engagement_team...")
userData['eng_uuid'] = invitation.engagement_uuid
if data["is_contact_user"] == "true":
engObj = Engagement.objects.get(
uuid=invitation.engagement_uuid)
engObj.contact_user = newUserObj
self.logger.debug(
"Attaching the user (" + newUserObj.full_name +
") to the engagement's (" + engObj.uuid +
") contact_user")
engObj.save()
user_svc.addUserToEngAndFireProvisionVfSig(
newUserObj, invitation)
otherInviteObj = Invitation.objects.filter(
accepted=False, email=i_email).exclude(
uuid=invitation.uuid)
if data['is_contact_user'] == "true" or \
data['is_contact_user'] == "True":
engObj = Engagement.objects.get(
uuid=invitation.engagement_uuid)
engObj.contact_user = newUserObj
self.logger.debug(
"Attaching the user (" + newUserObj.full_name +
") to the engagement's (" +
engObj.uuid + ") contact_user")
engObj.save()
markInvitationAsAccepted(data['invitation'])
for inviteObj in otherInviteObj:
user_svc.addUserToEngAndFireProvisionVfSig(
newUserObj, inviteObj)
markInvitationAsAccepted(inviteObj.invitation_token)
if (add_without_confirm):
newUserObj.is_active = True
newUserObj.save()
else:
data['activation_link'] = generateActivationLink(
activationToken, newUserObj)
self.logger.debug(
"Activation Link: " + data['activation_link'])
body = get_template(
"{activate_template_dir}activate_mail_body.html".format(
activate_template_dir=Constants.activate_template_dir))
subject = get_template(
"{activate_template_dir}activate_mail_subject.html".format(
activate_template_dir=Constants.activate_template_dir))
mail.sendMail(i_email, data, body, subject)
self.logger.debug(
"sign-up has passed successfully for [email=" + i_email + "]")
return Response(userData)
else:
msg = "email " + i_email + \
" already exists, no need to perform signup, try to login"
self.logger.info(logEncoding(msg))
return Response(msg, status=status.HTTP_409_CONFLICT)
| 9,321 |
to_bmp.py
|
MineRobber9000/imagemosh
| 0 |
2172837
|
from PIL import Image
import sys, traceback, os.path, helpers
from config import *
def crop_image_to(im,w,h):
if w is None or h is None: return im
imw, imh = im.size
# get desired aspect ratio
# if imw<imh: # gotta crop height
# desw = int(imh*(w/h))
# wmargin = int((imw/2)-(desw/2))
# im = im.crop((wmargin,0,imw-wmargin,imh))
# else: # gotta crop width
desh = int(imw*(h/w))
hmargin = int((imh/2)-(desh/2))
im = im.crop((0,hmargin,imw,imh-hmargin))
return im.resize((w,h))
@helpers.errors_to_txt
def main(args):
# open image
im = Image.open(args[1])
# quantize to 16 colors (turn off dither pattern insofar as we can)
im = crop_image_to(im,DESIRED_WIDTH,DESIRED_HEIGHT).quantize(colors_used,dither=Image.NONE)
# save as bmp
im.save(os.path.splitext(args[1])[0]+".bmp")
if __name__=="__main__": main(sys.argv)
| 899 |
kryptobot/ta/generic_indicator.py
|
eristoddle/Kryptobot
| 24 |
2172927
|
import importlib
from talib import abstract
from .base_indicator import BaseIndicator
import numpy as np
class GenericIndicator(BaseIndicator):
    # Subclasses can often inherit everything else; they just need an __init__ that calls this one
def __init__(self, market, interval, periods, lib=None, indicator=None, params=None):
super().__init__(market, interval, periods)
if lib is not None:
self.indicator_lib = lib
if lib == 'pyti':
self.indicator_name = indicator
self.indicator = self.dynamic_import(
'pyti.' + indicator,
indicator
)
print(self.indicator)
elif lib == 'talib':
self.indicator = abstract.Function(indicator)
self.params = params
# For pyti period parameter and get_analysis method
# So you don't have to copy and paste an extra parameter on each call
if self.params is None:
self.params = {}
if 'period' not in self.params:
self.params['period'] = periods
# Set this up to be the type you need when inherited
self.value = None
def dynamic_import(self, abs_module_path, class_name):
module_object = importlib.import_module(abs_module_path)
target_class = getattr(module_object, class_name)
return target_class
def talib_data(self, data):
if isinstance(data, dict):
talib_data = {}
            for k, v in data.items():
talib_data[k] = np.asarray(v, dtype=float)
return talib_data
return np.asarray(data, dtype=float)
def get_datawindow(self):
dataset = self.market.candles[self.interval]
if self.periods is None:
print('periods is None in GenericIndicator.get_datawindow')
if len(dataset) >= self.periods:
self.data_window = dataset[-self.periods:]
return self.data_window
return None
def get_timestamp(self):
return list(c[0] for c in self.data_window)
def get_open(self):
return list(c[1] for c in self.data_window)
def get_high(self):
return list(c[2] for c in self.data_window)
def get_low(self):
return list(c[3] for c in self.data_window)
def get_close(self):
return list(c[4] for c in self.data_window)
def get_volume(self):
return list(c[5] for c in self.data_window)
# Inherit from and override this if the data is more complex
def get_analysis(self, data):
return self.indicator(data, **self.params)
# Override this if you just need one column of the candles
def next_calculation(self, candle):
if self.get_datawindow() is not None:
self.value = self.get_analysis(self.data_window)
| 2,834 |
addons14/document_page/__manifest__.py
|
odoochain/addons_oca
| 1 |
2172781
|
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Document Page",
"version": "14.0.1.0.0",
"category": "Knowledge Management",
"author": "OpenERP SA, Odoo Community Association (OCA)",
"images": [
"images/category_list.png",
"images/create_category.png",
"images/page_list.png",
"images/create_page.png",
"images/customer_invoice.jpeg",
"images/page_history.png",
],
"website": "https://github.com/OCA/knowledge",
"license": "AGPL-3",
"depends": ["mail", "knowledge"],
"data": [
"security/document_page_security.xml",
"security/ir.model.access.csv",
"wizard/document_page_create_menu.xml",
"wizard/document_page_show_diff.xml",
"views/document_page.xml",
"views/document_page_category.xml",
"views/document_page_history.xml",
"views/document_page_assets.xml",
"views/report_document_page.xml",
],
"demo": ["demo/document_page.xml"],
}
| 1,086 |
codeprep/bpepkg/merge.py
|
maximzubkov/codeprep
| 33 |
2172971
|
# SPDX-FileCopyrightText: 2020 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: Apache-2.0
import copy
from typing import List, Tuple, Union, Optional, Iterator, Dict
from codeprep.util import is_python_3_6_and_higher, to_literal_str, to_non_literal_str
# TODO this class should be frozen
class Merge(object):
def __init__(self, pair: Tuple[str, str], freq: int = None, priority: int = None):
self.pair = pair
self.freq = freq
self.priority = priority
@classmethod
def parse_file_entry(cls, line: str, priority: int) -> "Merge":
try:
spl = to_non_literal_str(line).split(" ")
if len(spl) == 2:
return cls((spl[0], spl[1]), priority=priority)
else:
return cls((spl[0], spl[1]), freq=int(spl[2]), priority=priority)
except (IndexError, TypeError) as err:
raise ValueError(f"Invalid merge entry format: {line}", err)
def __str__(self):
return self.__repr__()
def __repr__(self):
return f'{self.pair}: ({self.freq}, {self.priority})'
def __eq__(self, other):
return self.__class__ == other.__class__ and self.pair == other.pair and self.priority == other.priority \
and self.freq == other.freq
def __hash__(self):
return hash((self.pair, self.priority, self.freq))
class MergeList(object):
"""
>>> merges = MergeList()
>>> merges = merges.append(Merge(('a', 'b'), 34, 0)).append(Merge(('b', 'c'), 44, 1))
>>> [m for m in merges]
[('a', 'b'): (34, 0), ('b', 'c'): (44, 1)]
>>> len(merges)
2
>>> merges[0]
('a', 'b'): (34, 0)
>>> merges[1]
('b', 'c'): (44, 1)
>>> merges[-1]
('b', 'c'): (44, 1)
>>> merges[0:-1]
[('a', 'b'): (34, 0)]
>>> type(merges[0:-1])
<class 'list'>
>>> merges[2]
Traceback (most recent call last):
...
IndexError: list index out of range
>>> ('a', 'b') in merges
True
>>> ('a', 'x') in merges
False
>>> merge1 = Merge(('a', 'b'), 34, 0)
>>> merge2 = Merge(('a', 'b'), 34, 0)
>>> dct = {merge1: 3}
>>> dct[merge2]
3
>>> merges + MergeList().append(Merge(('d', 'e'), 84, 0))
[('a', 'b'): (34, 0), ('b', 'c'): (44, 1), ('d', 'e'): (84, 2)]
>>> merges + [(('d', 'e'), 84, 1)]
Traceback (most recent call last):
...
TypeError: Cannot add <class 'list'> to a MergeList
>>> merges + merges
Traceback (most recent call last):
...
ValueError: It's only possible to add merges in priority order. The priority of the next merge should be 2 but is 3
>>> merges.append(Merge(('x', 'y'), 34, 0))
Traceback (most recent call last):
...
ValueError: It's only possible to add merges in priority order. The priority of the next merge should be 2 but is 0
>>> merges = merges.append(Merge(('x', 'y'), 34))
>>> merges
[('a', 'b'): (34, 0), ('b', 'c'): (44, 1), ('x', 'y'): (34, 2)]
>>> merges.get_priority(('x', 'y'))
2
"""
def __init__(self):
self.merges: Dict[Tuple[str, str], Merge] = {}
def __contains__(self, item):
return item in self.merges
def __len__(self):
return len(self.merges)
def __iter__(self) -> Iterator[Merge]:
return iter(self._get_sorted_merges())
def _get_sorted_merges(self) -> List[Merge]:
if not is_python_3_6_and_higher():
# we cannot rely on dict order for python versions lower than 3.6
raise NotImplementedError()
return list(self.merges.values())
def __add__(self, other: 'MergeList'):
if self.__class__ != other.__class__:
raise TypeError(f"Cannot add {other.__class__} to a MergeList")
new_merge_list = copy.deepcopy(self)
other_copy = copy.deepcopy(other)
first_list_len = len(new_merge_list)
for merge in other_copy:
merge.priority += first_list_len
new_merge_list.append(merge)
return new_merge_list
def append(self, merge: Merge) -> 'MergeList':
# along with the pair we save its priority and the number of its occurrences
if merge.priority is None:
merge.priority = len(self.merges)
elif merge.priority != len(self.merges):
raise ValueError(f"It's only possible to add merges in priority order. "
f"The priority of the next merge should be {len(self.merges)} but is {merge.priority}")
self.merges[merge.pair] = merge
return self
def get_priority(self, pair: Tuple[str, str]) -> int:
return self.merges[pair].priority
def __getitem__(self, item) -> Union[List[Merge], Merge]:
lst = self._get_sorted_merges()
return lst[item]
def __repr__(self):
return repr(self[:])
def __eq__(self, other):
return self.__class__ == other.__class__ and self[:] == other[:]
def read_merges(file: str, n_merges: Optional[int] = None) -> MergeList:
merges = MergeList()
with open(file, 'r') as f:
for idx, line in enumerate(f):
if n_merges and idx >= n_merges:
break
line = line.rstrip('\n')
merges.append(Merge.parse_file_entry(line, idx))
return merges
def dump_merges(merges: MergeList, file: str):
with open(file, 'w') as f:
for merge in merges:
f.write(f"{to_literal_str(' '.join(merge.pair))} {merge.freq}\n")
| 5,506 |
setup.py
|
pikhovkin/pull-repos
| 0 |
2172141
|
from setuptools import setup
setup(
name='pull-repos',
version='0.1.0',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
description='Pull and update repositories',
url='https://github.com/pikhovkin/pull-repos',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
scripts=[
'pull_repos.py'
],
entry_points={
'console_scripts': [
'pull-repos = pull_repos:execute_from_command_line'
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
]
)
| 841 |
menu/_menu.py
|
felipejunio05/Dino_Google_RNA
| 1 |
2172611
|
# -*- utf-8 -*-
# Author: <NAME>
# Date: 2020/04/13
from game import Game
from os import listdir
from os import name as os_name
from os import system as os_system
__all__ = ['Menu']
class Menu:
aux: str = None
def __init__(self):
self.__game = Game()
def run(self):
option: int = 0
while option != 3:
try:
os_system('cls' if os_name == 'nt' else 'clear')
option = int(input("------------ DINO IA --------------\n\n"
"1 - Criar uma nova população\n"
"2 - Carregar uma População. \n"
"3 - Sair \n\n->:"))
except ValueError:
os_system('cls' if os_name == 'nt' else 'clear')
print("valor digitado não é valido")
input()
except TypeError:
os_system('cls' if os_name == 'nt' else 'clear')
print("valor digitado não é valido")
input()
else:
if (option >= 1) and (option <= 3):
try:
if option == 1:
self.__fit(self.__fitPeg())
elif option == 2:
aux = self.__loadPeg()
if aux != '':
if self.__game.loadDNA(aux):
self.__fit(self.__fitPeg(), True)
else:
os_system('cls' if os_name == 'nt' else 'clear')
print("Falha em carregar os genomas")
input()
else:
os_system('cls' if os_name == 'nt' else 'clear')
print("falha em carregar os Genomas.")
input()
except ValueError as Error:
os_system('cls' if os_name == 'nt' else 'clear')
print("Ops! Algo errado aconteceu -> Erro: " + str(Error))
input()
except TypeError as Error:
os_system('cls' if os_name == 'nt' else 'clear')
print("Ops! Algo errado aconteceu -> Erro: " + str(Error))
input()
else:
os_system('cls' if os_name == 'nt' else 'clear')
print("Opção invalida!")
def __fit(self, ge: int, reload: bool = False):
try:
self.__game.open()
if not reload:
self.__game.createDNA(12)
self.__game.generation(ge, 4, 20)
finally:
self.__game.destroyDNA()
self.__game.close()
# def __play(self, p: int):
# try:
# self.__game.open()
# self.__game.createDNA(1)
# self.__game.dino[0].model.load_weights('models/#/' + listdir('models/#/')[-1])
# self.__game.play(p)
#
# finally:
# self.__game.close()
# self.__game.destroyDNA()
@staticmethod
def __playPeg():
ret: int
while True:
try:
os_system('cls' if os_name == 'nt' else 'clear')
ret = int(input("Numero de Jogadas: "))
os_system('cls' if os_name == 'nt' else 'clear')
except ValueError:
os_system('cls' if os_name == 'nt' else 'clear')
print("Informação digitada está incorreta, só é permitido números inteiros.")
input()
except TypeError:
os_system('cls' if os_name == 'nt' else 'clear')
print("Valor digitado tém que ser um numero inteiro.")
input()
else:
return ret
@staticmethod
def __fitPeg():
ret: int
while True:
try:
os_system('cls' if os_name == 'nt' else 'clear')
ret = int(input("Numero de gerações: "))
os_system('cls' if os_name == 'nt' else 'clear')
except ValueError:
os_system('cls' if os_name == 'nt' else 'clear')
print("Informação digitada está incorreta, só é permitido números inteiros.")
input()
except TypeError:
os_system('cls' if os_name == 'nt' else 'clear')
print("Valor digitado tém que ser um numero inteiro.")
input()
else:
return ret
@staticmethod
def __loadPeg():
base: str = 'models/'
content: list = [listdir(base)]
ans: int
ret: str = ''
if content[0]:
if len(content[0]) == 1:
os_system('cls' if os_name == 'nt' else 'clear')
content.append([listdir(base + content[0][0] + '/')])
ans = int(input("Você possui " + str(len(content[1])) + " população com " + str(len(content[1][0])) + " gerações, escolha uma: "))
for d in content[1][0]:
if int(d[-len(str(ans)):]) == ans:
ret = base + content[0][0] + "/" + d + "/"
break
else:
os_system('cls' if os_name == 'nt' else 'clear')
ans = int(input("Você possui " + str(len(content[0])) + " populações, escolha uma: "))
os_system('cls' if os_name == 'nt' else 'clear')
for d1 in content[0]:
if int(d1) == ans:
content.append([listdir(base + d1 + '/')])
break
ans = int(input("Você possui " + str(len(content[1][0])) + " gerações, escolha uma: "))
for d2 in content[1][0]:
if int(d2[-len(str(ans)):]) == ans:
ret = base + d1 + "/" + d2 + "/"
break
else:
print("Você não possui população para ser carregada.")
input()
return ret
| 6,215 |
processor/payload.py
|
flopezag/f4w-challenges-sww
| 0 |
2171150
|
from config.settings import AT_CONTEXT
__author__ = '<NAME>'
class Payload:
def __init__(self):
pass
def create(self, dma, period, observedAt, measure):
entity_id = "urn:ngsi-ld:WaterConsumption:" + dma
data = {
"@context": AT_CONTEXT,
"id": entity_id,
"type": "WaterConsumption",
"dma": {
"type": "Property",
"value": dma
},
"litres": {
"type": "Property",
"value": measure,
"observedAt": observedAt.astype(str),
"unitCode": "LTR",
"period": {
"type": "Property",
"value": int(period),
"unitCode": "SEC"
}
}
}
return entity_id, data
def patch(self, observedAt, measure, period):
data = {
"@context": AT_CONTEXT,
"litres": {
"type": "Property",
"value": measure,
"observedAt": observedAt.astype(str),
"unitCode": "LTR",
"period": {
"type": "Property",
"value": period,
"unitCode": "SEC"
}
}
}
return data
| 1,353 |
src/sns_monitor/dependencies.py
|
SUNET/python-sns-monitor
| 0 |
2170663
|
# -*- coding: utf-8 -*-
import logging
from fastapi import Header, HTTPException, Request
from sns_monitor.models import MessageType
__author__ = 'lundberg'
logger = logging.getLogger(__name__)
async def verify_topic(
request: Request, x_amz_sns_topic_arn: str = Header(...), x_amz_sns_message_type: str = Header(...)
):
# Only check if the topic is allowed if it is a notification as we probably want to receive subscription and
# unsubscription events
if MessageType(x_amz_sns_message_type) is MessageType.NOTIFICATION and request.app.state.config.topic_allow_list:
if x_amz_sns_topic_arn not in request.app.state.config.topic_allow_list:
logger.info(f'Notification from topic {x_amz_sns_topic_arn} rejected')
raise HTTPException(status_code=400, detail=f"Notifications from topic {x_amz_sns_topic_arn} not allowed")
| 874 |
scraper/tests/test_service.py
|
ulturt/appfollow_test
| 0 |
2172941
|
from unittest import TestCase, mock
from nameko.testing.services import worker_factory
from services import Service
TEST_URL = 'http://example.com'
class ScraperServiceTestCase(TestCase):
@mock.patch('services.add_posts_into_db', return_value=10)
def test_parse_service(self, add_posts_into_db):
service = worker_factory(Service)
result = service.parse()
self.assertTrue(add_posts_into_db.called)
self.assertEqual(result, {'added': 10})
| 484 |
Hashing/4sum.py
|
jainanisha90/codepath_interviewBit
| 0 |
2172024
|
class Solution:
# @param A : list of integers
# @param B : integer
# @return a list of list of integers
def fourSum(self, nums, B):
nums.sort()
res = []
for i in xrange(len(nums) - 3):
if i and nums[i] == nums[i - 1]:
continue
for j in xrange(i + 1, len(nums) - 2):
if j != i + 1 and nums[j] == nums[j - 1]:
continue
sum = B - nums[i] - nums[j]
left, right = j + 1, len(nums) - 1
while left < right:
if nums[left] + nums[right] == sum:
res.append([nums[i], nums[j], nums[left], nums[right]])
right -= 1
left += 1
while left < right and nums[left] == nums[left - 1]:
left += 1
while left < right and nums[right] == nums[right + 1]:
right -= 1
elif nums[left] + nums[right] > sum:
right -= 1
else:
left += 1
return res
print Solution().fourSum([1,0,-1,0,-2,2], 0) # Output [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]
| 1,275 |
test/core/sampler/polychord_test.py
|
deepchatterjeeligo/bilby
| 0 |
2171401
|
import unittest
from unittest.mock import MagicMock
import numpy as np
import bilby
class TestPolyChord(unittest.TestCase):
def setUp(self):
self.likelihood = MagicMock()
self.priors = bilby.core.prior.PriorDict(
dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
)
self.sampler = bilby.core.sampler.PyPolyChord(
self.likelihood,
self.priors,
outdir="outdir",
label="polychord",
use_ratio=False,
plot=False,
skip_import_verification=True,
)
def tearDown(self):
del self.likelihood
del self.priors
del self.sampler
def test_default_kwargs(self):
expected = dict(
use_polychord_defaults=False,
nlive=self.sampler.ndim * 25,
num_repeats=self.sampler.ndim * 5,
nprior=-1,
do_clustering=True,
feedback=1,
precision_criterion=0.001,
logzero=-1e30,
max_ndead=-1,
boost_posterior=0.0,
posteriors=True,
equals=True,
cluster_posteriors=True,
write_resume=True,
write_paramnames=False,
read_resume=True,
write_stats=True,
write_live=True,
write_dead=True,
write_prior=True,
compression_factor=np.exp(-1),
base_dir="outdir",
file_root="polychord",
seed=-1,
grade_dims=list([self.sampler.ndim]),
grade_frac=list([1.0] * len([self.sampler.ndim])),
nlives={},
)
self.sampler._setup_dynamic_defaults()
self.assertDictEqual(expected, self.sampler.kwargs)
def test_translate_kwargs(self):
expected = dict(
use_polychord_defaults=False,
nlive=123,
num_repeats=self.sampler.ndim * 5,
nprior=-1,
do_clustering=True,
feedback=1,
precision_criterion=0.001,
logzero=-1e30,
max_ndead=-1,
boost_posterior=0.0,
posteriors=True,
equals=True,
cluster_posteriors=True,
write_resume=True,
write_paramnames=False,
read_resume=True,
write_stats=True,
write_live=True,
write_dead=True,
write_prior=True,
compression_factor=np.exp(-1),
base_dir="outdir",
file_root="polychord",
seed=-1,
grade_dims=list([self.sampler.ndim]),
grade_frac=list([1.0] * len([self.sampler.ndim])),
nlives={},
)
self.sampler._setup_dynamic_defaults()
for equiv in bilby.core.sampler.base_sampler.NestedSampler.npoints_equiv_kwargs:
new_kwargs = self.sampler.kwargs.copy()
del new_kwargs["nlive"]
new_kwargs[equiv] = 123
self.sampler.kwargs = new_kwargs
self.assertDictEqual(expected, self.sampler.kwargs)
if __name__ == "__main__":
unittest.main()
| 3,173 |
task/w2/practic/5-sort min max.py
|
beregok/pythontask
| 1 |
2172291
|
## Given three numbers, arrange them in non-decreasing order. The program reads three numbers a, b, c, then changes their values so that the condition a ≤ b ≤ c holds, and finally prints the triple a, b, c.
## Input format
## Three numbers are entered.
## Output format
## Print the answer to the problem.
# Method 1
# a = int(input())
# b = int(input())
# c = int(input())
# if a <= b <= c:
#     print(a, b, c)
# elif b <= a <= c:
#     print(b, a, c)
# elif c <= b <= a:
#     print(c, b, a)
# elif c <= a <= b:
#     print(c, a, b)
# elif a <= c <= b:
#     print(a, c, b)
# else:
#     print(b, c, a)
# Method 2
a = int(input())
b = int(input())
c = int(input())
if a > b:
a = a + b
b = a - b
a = a - b
if b > c:
b = b + c
c = b - c
b = b - c
if a > b:
a = a + b
b = a - b
a = a - b
print(a, b, c)
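# Method 3 (editor's sketch, not part of the original task solutions): the same
# non-decreasing ordering can be obtained with the built-in sorted(), avoiding
# the manual arithmetic swaps above.
# a, b, c = sorted((a, b, c))
# print(a, b, c)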
| 849 |
load_factor.py
|
Taku0411/myScripts
| 0 |
2171876
|
import pandas as pd
import sys
from subprocess import call
def plot_load_factor():
args = '''
set terminal qt font "Arial, 27";
set output "sensitivity.svg";
set xlabel "t (s)" font "Arial, 28";
set ylabel "load factor" font "Arial, 30";
set yrange [0.0:1.0];
plot
'load_factor.dat' with lines linewidth 5 notitle;
pause -1;
'''
call([ "gnuplot", "-e", args])
plot_load_factor()
| 542 |
tests/test_auth.py
|
pareschi/verify-v2-quickstart-python
| 10 |
2172631
|
import re
from unittest.mock import patch
sample_user = {
'username': 'tanjiro',
'password': '<PASSWORD>',
'full_phone': '0909090909',
'channel': 'sms'
}
@patch('verify.auth.start_verification')
def test_creates_an_user(app, client):
res = client.post('/auth/register', data=sample_user)
assert res.status_code == 302
assert '/auth/verify' in res.location
@patch('verify.auth.start_verification')
def test_username_exists_raises_failure(app, client):
client.post('/auth/register', data=sample_user)
res = client.post('/auth/register', data=sample_user)
assert res.status_code == 200
assert re.search('User tanjiro is already registered.', res.get_data(as_text=True))
@patch('verify.auth.start_verification')
def test_create_user_logout_and_log_back_in(app, client):
client.post('/auth/register', data=sample_user)
res = client.get('/auth/logout')
assert res.status_code == 302
assert '/auth/login' in res.location
res = client.post('/auth/login', data=dict(username='tanjiro', password='<PASSWORD>'), follow_redirects=True)
assert re.search('Congratulations, you have accessed the secret content!', res.get_data(as_text=True))
| 1,202 |
main.py
|
EwaneGao/EverydayMailTips
| 0 |
2171798
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2022/1/23 15:57
# @Author : 高文俊
# @FileName: main.py
# @Software: PyCharm 2021.3.1
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import requests
import smtplib
import datetime
from email.mime.text import MIMEText
from email.header import Header
import json
import time
import schedule
# "Moments"-style quotes: https://pyq.shadiao.app/api.php
# Flattery ("rainbow fart") quotes: https://chp.shadiao.app/api.php
# Press the green button in the gutter to run the script.
weather_url = "http://t.weather.itboy.net/api/weather/city/"
word_url = "https://pyq.shadiao.app/api.php"
mail_host = "smtp.qq.com" # 设置服务器
mail_user = "**************" # 用户名
mail_pass = "**************" # 口令
sender = '****************'
def GetHoneyWorld():
honeyword_text = requests.get(word_url).text
return honeyword_text
def GetMailText(city, honeyword_text):
now_time = datetime.datetime.now()
weather_url_get = weather_url + city
weather_data_dict = requests.get(weather_url_get).json()
mail_send_str = "每日提醒\n" + now_time.strftime("%Y-%m-%d %H:%M:%S") + "\n" + honeyword_text + "\n今天" + \
weather_data_dict['cityInfo']['city'] + "天气为" + weather_data_dict['data']['forecast'][0]['type'] + \
"\n温度" + weather_data_dict['data']['wendu'] + "℃" + "\n湿度" + weather_data_dict['data']['shidu'] + \
"\n风向" + weather_data_dict['data']['forecast'][0]['fx'] + "风力" + \
weather_data_dict['data']['forecast'][0]['fl'] + "\n空气质量" + weather_data_dict['data']['quality'] + \
"\n" + weather_data_dict['data']['ganmao'] + "\n" + weather_data_dict['data']['forecast'][0][
'notice']
return mail_send_str
def SendMail(mail_send_str, ResName, Resurl):
message = MIMEText(mail_send_str, 'plain', 'utf-8')
message['From'] = Header("老高", 'utf-8')
message['To'] = Header(ResName, 'utf-8')
subject = '每日提醒'
message['Subject'] = Header(subject, 'utf-8')
smtpObj = smtplib.SMTP()
    smtpObj.connect(mail_host, 587)  # SMTP port (587 here; 25 is the default SMTP port)
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(sender, Resurl, message.as_string())
def SendTesk():
mylog = open('./mailtext.log', mode='a', encoding='utf-8')
with open('./receiver.json', 'r', encoding='utf8') as fp:
json_data = json.load(fp)
honeyword_text = GetHoneyWorld()
for res in json_data['receivers']:
mail_send_str = GetMailText(res['city'], honeyword_text)
SendMail(mail_send_str, res['name'], res['mailurl'])
print(res['name'] + ':' + res['mailurl'])
print(mail_send_str)
print("邮件发送成功")
print(res['name'] + ':' + res['mailurl'], file=mylog)
print(mail_send_str, file=mylog)
print("邮件发送成功", file=mylog)
time.sleep(0.5)
mylog.close()
if __name__ == "__main__":
schedule.every().day.at("8:00").do(SendTesk)
schedule.every().day.at("12:00").do(SendTesk)
schedule.every().day.at("18:00").do(SendTesk)
schedule.every().day.at("21:00").do(SendTesk)
while True:
        schedule.run_pending()  # run every scheduled job that is due
time.sleep(1)
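# Editor's note: the layout of receiver.json expected by SendTesk() above, inferred
# from the keys it reads ('receivers', 'name', 'mailurl', 'city'); the values shown
# here are illustrative only, and the city code format is an assumption:
# {
#     "receivers": [
#         {"name": "Alice", "mailurl": "alice@example.com", "city": "101010100"}
#     ]
# }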
| 3,384 |
tests/examples-bad/classmethod1.py
|
JohannesBuchner/pystrict3
| 1 |
2172937
|
class FooMany(object):
def __init__(self, a):
self.a = a
def foo0(self):
pass
def foo1(self, a):
self.foo0(1) ## error: too many arguments
| 151 |
model.py
|
zhy0216-collection/another-one
| 0 |
2172782
|
# -*- coding: utf-8 -*-
from datetime import datetime
from mongoengine import (connect, Document, StringField,
IntField, DateTimeField, ListField)
from dateutil.relativedelta import relativedelta
connect('another-one')
class OneIssue(Document):
issue_number = IntField()
create_time = DateTimeField()
articles = ListField(StringField())
@classmethod
def create(cls, issue_number=None, articles=None):
if not articles:
return
one_issue = cls.get_issue_by_issue_number(issue_number=issue_number) or \
cls(issue_number=issue_number, articles=articles)
one_issue.create_time = one_issue.gen_time()
one_issue.save()
@classmethod
def get_issue_by_issue_number(cls, issue_number=0):
return cls.objects(issue_number=issue_number).first()
def gen_time(self):
return datetime(2012, 6, 11) + relativedelta(days=(self.issue_number-1))
| 993 |
Aula 05 - Agregate/main.py
|
G-ilian/S202-Repositorio
| 0 |
2170476
|
from db.database import Database
from helper.WriteAJson import writeAJson
from dataset.produto_database import dataset
from dataset.pessoa_dataset import dataset as cliente
compras = Database(database="database", collection="produtos", dataset=dataset)
compras.resetDatabase()
cliente = Database(
database="database",
collection="pessoas",
dataset=cliente
)
cliente.resetDatabase()
result1 = compras.collection.aggregate([
{"$project": {
"_id": 0,
"cliente": 1,
"desconto": {
"$cond": {"if": {"$gte": ["$total", 10]}, "then": 0.1, "else": 0.05}
}
}}
])
| 628 |
shipyard/rules/host/gradle/build.py
|
clchiou/garage
| 3 |
2171868
|
from foreman import get_relpath, rule
from garage import scripts
from templates import common
common.define_archive(
uri='https://services.gradle.org/distributions/gradle-4.7-bin.zip',
filename='gradle-4.7-bin.zip',
output='gradle-4.7',
checksum='md5-3e5af867778cd0a8e00e62257f426e09',
)
@rule
@rule.depend('//base:build')
@rule.depend('//host/java:install')
@rule.depend('download')
def install(parameters):
"""Install Gradle build system."""
bin_dir = (parameters['//base:drydock'] / get_relpath() /
parameters['archive_info'].output / 'bin')
scripts.ensure_file(bin_dir / 'gradle') # Sanity check
scripts.insert_path(bin_dir)
| 683 |
app/index.py
|
shantanugoel/dash-example
| 0 |
2171127
|
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
from apps import app1
from components.header import make_header
from components.sidebar import make_sidebar
import config
app.layout = html.Div(
[
dcc.Location(id='url', refresh=False),
make_header(),
html.Div(
dbc.Row(
[
make_sidebar(),
html.Main(
html.Div(id='page-content',
className="d-flex justify-content-between flex-wrap flex-md-nowrap align-items-center pt-3 pb-2 mb-3 border-bottom"),
className="col-md-9 ml-sm-auto col-lg-10 px-4",
)
]
),
className="container-fluid",
),
]
)
@app.callback(Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/apps/app1':
return app1.layout
elif pathname == '/':
return "Hello!\n" * 10000
else:
return '404'
if __name__ == '__main__':
app.run_server(host=config.HOST_IP,
port=config.PORT,
debug=config.DEBUG_FLAG)
| 1,331 |
app/enc/routes.py
|
Jeoffreybauvin/puppenc
| 5 |
2171749
|
from flask import jsonify, request, g
from app.puppenc import app, db, output_yaml, auth, PuppencResource
from app.nodes.models import Node
from app.hostgroups.models import Hostgroup
from app.environments.models import Environment
from app.classes.models import Class
import json
class Enc(PuppencResource):
@auth.login_required
def get(self, page=1, node_name=None):
"""
@api {get} /enc/<node-name> Get node informations (ENC)
@apiName get_enc
@apiGroup ENC
@apiPermission user
@apiVersion 1.0.0
@apiParam {String} node_name (uri parameter) The node's name
@apiParam {String} [output=yaml] (query parameter) Output result. Avaiable methods : yaml / json
@apiSuccess {String} name The hostgroup's name.
@apiSuccess {Datetime} insert_date The hostgroup's inserted date
@apiSuccess {Datetime} update_date The hostgroup's updated date
@apiSuccess {Datetime} delete_date The hostgroup's deleted date
@apiExample {curl} Example usage :
        curl -i http://192.168.3.11:5000/api/v1/enc/:node_name
@apiSuccessExample {yaml} Success-Response:
HTTP/1.0 200 OK
classes:
- role::webserver
environment: stable
parameters:
hostgroup: my_hostgroup
puppenc_node_id: 2740
puppetmaster: ''
"""
output = str(request.args.get('output', 'yaml'))
if not node_name:
return { "success": False, "message": "No node provided" }, 304
else:
node = Node.query.filter_by(name=node_name).first()
if not node:
app.logger.warning('ENC : Cannot find %s, by %s', node_name, g.user)
return { "success": False, "message": "Node not found" }, 404
if node.hostgroup_id is None or node.environment_id is None:
app.logger.warning('ENC : Please, set a hostgroup and an environment')
return { "success": False, "message": "Please, set a hostgroup and an environment" }, 404
else:
# I have an hostgroup or an environment, we can continue
data = Node.query.join(
Hostgroup,
Node.hostgroup_id == Hostgroup.id,
).join(
Class,
Hostgroup.class_id == Class.id
).join(
Environment,
Node.environment_id == Environment.id
).add_columns(
Node.id.label('node_id'),
Class.name.label('class_name'),
Hostgroup.name.label('hostgroup_name'),
Environment.name.label('environment_name')
).filter(
Node.name == node_name
).first()
if not data:
# I have all except a class in my hostgroup
app.logger.warning('No class for the node %s', node_name)
class_name = ''
# Let's make a different request to handle a missing class
data = Node.query.join(
Hostgroup,
Node.hostgroup_id == Hostgroup.id,
).join(
Environment,
Node.environment_id == Environment.id
).add_columns(
Node.id.label('node_id'),
Hostgroup.name.label('hostgroup_name'),
Environment.name.label('environment_name')
).filter(
Node.name == node_name
).first()
else:
class_name = data.class_name
hostgroup_name = data.hostgroup_name
environment_name = data.environment_name
node_id = data.node_id
# Parameters now
params = {}
params['parameters'] = {}
# legacy
params['parameters']['puppetmaster'] = ''
params['parameters']['hostgroup'] = hostgroup_name
params['parameters']['puppenc_node_id'] = node_id
for p in node.nodes_var:
if p.content == 'true':
content = True
elif p.content == 'false':
content = False
else:
if p.content[0] == '{':
try:
content = json.loads(p.content)
except:
app.logger.info('unable to format json')
content = p.content
else:
content = p.content
params['parameters'][p.name] = content
app.logger.info('Get ENC on %s, by %s', node_name, g.user)
# We need to display it on "ENC" format
base = {
'environment': environment_name
}
if(class_name != ''):
classes_result = {
'classes': [
class_name,
],
}
else:
classes_result = {
'classes': [],
}
base.update(classes_result)
res = base.copy()
res.update(params)
app.logger.info(res)
Node.query.filter_by(id=node_id).update({ "last_used": db.func.current_timestamp() }, synchronize_session=False)
db.session.commit()
if output == 'json':
return jsonify(res, 200)
else:
return output_yaml(res, 200)
| 6,075 |
tests/base_tests.py
|
andela-jmuli/wishlist
| 2 |
2171526
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from faker import Factory
from rest_framework.test import APITestCase, APIClient
from rest_framework import status
from factories import BucketlistFactory, BucketlistItemFactory, UserFactory
class BaseTest(APITestCase):
""" Base class for test cases """
def setUp(self):
"""
method that prepares tests with required data
"""
self.fake = Factory.create()
self.client = APIClient()
self.test_user3 = {
'username': self.fake.user_name(),
'first_name': self.fake.first_name(),
'last_name': self.fake.last_name(),
'password': self.fake.password(),
'email': self.fake.email()
}
self.test_user2 = {
'username': self.fake.user_name(),
'first_name': self.fake.first_name(),
'last_name': self.fake.last_name(),
'password': self.fake.password(),
'email': self.fake.email()
}
self.test_user1 = UserFactory(username='muli', password='<PASSWORD>')
self.test_user4 = UserFactory(username='mike')
self.single_bucketlist_url = '/bucketlists/'
self.all_bucketlists__url = '/bucketlists/'
self.create_bucketlist_item__url = '/bucketlists/1/items/'
self.single_bucketlist_item__url = '/bucketlists/1/items/'
def user_creation(self, username='jojo', first_name='joseph', last_name='muli', email='<EMAIL>', password='<PASSWORD>'):
""" method that tests object instance in model test """
return User.objects.create(username=username, first_name=first_name, last_name=last_name, email=email, password=password)
def create_users(self, user):
""" method to register a new user """
response = self.client.post(path='/auth/register/', data=user, format='json')
return response
def login_user1(self):
"This method logs in user 1 and returns the token"
self.client.post('/auth/register/',{'username': 'test', 'password': '<PASSWORD>', 'email': '<EMAIL>'}, format='json')
response = self.client.post('/auth/login/',data={'username': 'test', 'password': '<PASSWORD>'}, format='json')
self.token = response.data['auth_token']
return self.token
def get_token(self):
""" Set Header token """
self.login_user1()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
| 2,502 |
run_emu_all.py
|
fjt7tdmi/rafi-emu-python
| 1 |
2171596
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import rafi
import sys
CONFIG_PATH = "./riscv_tests.json"
BINARY_DIR_PATH = "./rafi-prebuilt-binary/riscv-tests/isa"
MAX_CYCLE = 10000
configs = None
with open(CONFIG_PATH, "r") as f:
configs = json.load(f)
failure_count = 0
for config in configs:
path = os.path.join(BINARY_DIR_PATH, f"{config}.bin")
try:
print(f"{path}")
rafi.run_emulation(path, MAX_CYCLE)
except Exception as e:
print(e)
failure_count += 1
sys.exit(failure_count)
| 1,127 |
workshops/admin.py
|
WebCampZg/conference-web
| 4 |
2172513
|
from django.contrib import admin
from easy_select2 import select2_modelform
from .models import Workshop
class WorkshopAdmin(admin.ModelAdmin):
form = select2_modelform(Workshop)
list_display = ('title', 'people', 'published', 'event')
list_filter = ('event', 'published')
def formfield_for_manytomany(self, db_field, request, **kwargs):
field = super().formfield_for_manytomany(db_field, request, **kwargs)
if db_field.name == "applicants":
field.queryset = field.queryset.prefetch_related('user').order_by('user__first_name', 'user__last_name')
return field
def people(self, obj):
return ", ".join(obj.applicant_names())
admin.site.register(Workshop, WorkshopAdmin)
| 742 |
basic program/calculator.py
|
ZephyrAveryl777/Python-Programs
| 6 |
2172333
|
while True:
print("-"*25)
try :
num1= float(input("Enter the first number: "))
num2 = float(input("Enter the second number: "))
except:
print("Invalid Input")
continue
print("\npress 1 for addition \npres 2 for subtraction \npress 3 for multiplication \npress 4 for division \npress 5 for modulus")
option =int(input("Enter your option: "))
print("your option",option)
def Addition():
result = num1 + num2
return "Addition of {0} and {1} gives: {2:.5f}".format(num1, num2, result )
def Subtraction():
result = num1 - num2
return "Subtraction of {0} and {1} gives: {2:.5f}".format(num1, num2, result )
def Multiplication():
result = num1 * num2
return "Multiplication of {0} and {1} gives: {2:.5f}".format(num1, num2, result )
def Division():
result = num1 / num2
return "Division of {0} and {1} gives {2:.5f}".format(num1, num2, result )
def Modulus():
result = num1 % num2
return "Modulus of {0} and {1} gives: {2:.5f}".format(num1, num2, result )
def default():
return "Incorrect option, check again"
switcher = {
1: Addition,
2: Subtraction,
3: Multiplication,
4: Division,
5: Modulus
}
def switch(option):
return switcher.get(option, default)()
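    # Editor's note: the switcher dict above emulates a switch/case statement:
    # switcher.get(option, default) looks up the handler for the chosen option
    # (falling back to default) and the trailing () immediately calls it.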
print(switch(option))
continue
pass
| 1,516 |
scripts/download_tiles.py
|
ipepe/rails-real-tram-map
| 0 |
2172973
|
#!/usr/local/bin/python
from sys import argv
import os
import math
import urllib2
import time
import random
def deg2num(lat_deg, lon_deg, zoom):
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n)
return (xtile, ytile)
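# Editor's note: deg2num implements the standard "slippy map" tile formula, mapping
# the world at zoom z onto a 2**z x 2**z grid of tiles; for example,
# deg2num(0.0, 0.0, 1) returns (1, 1).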
def download_url(zoom, xtile, ytile, download_path):
    # Pick one of the four tile-server subdomains at random
subdomain = random.randint(1, 4)
url = "http://%d.basemaps.cartocdn.com/dark_all/%d/%d/%d.png" % (subdomain, zoom, xtile, ytile)
dir_path = "%s/tiles/%d/%d/" % (download_path, zoom, xtile)
download_path = "%s/tiles/%d/%d/%d.png" % (download_path, zoom, xtile, ytile)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
if os.path.exists(download_path):
print "already downloaded %r" % url
else:
print "downloading %r" % url
time.sleep(1)
source = urllib2.urlopen(url)
content = source.read()
source.close()
destination = open(download_path,'wb')
destination.write(content)
destination.close()
def main(argv):
try:
south = 52.0
west = 20.7
north = 52.4
east = 21.3
min_zoom = 15
max_zoom = 16
download_path = "./"
except:
exit(2)
for zoom in range(int(min_zoom), int(max_zoom) + 1, 1):
xtile, ytile = deg2num(float(south), float(west), float(zoom))
final_xtile, final_ytile = deg2num(float(north), float(east), float(zoom))
for x in range(xtile, final_xtile + 1, 1):
for y in range(ytile, final_ytile - 1, -1):
download_url(int(zoom), x, y, download_path)
return 0
main(argv)
| 1,595 |
01_script/data_gathering/add_noise.py
|
iwatake2222/loud_talking_detector
| 16 |
2170257
|
#-- coding: utf-8 --
import os
import glob
import shutil
import time
import argparse
import random
import librosa
import soundfile
def clear_last_sep(dir):
dir.replace(os.sep,'/')
if dir[-1] == "/":
dir = dir[:-1]
return dir
def add_noise(target_dir, noise_dir, output_dir, signature_text="", sampling_rate=16000, process_ratio=0.3, original_volume=1.0, noise_volume=1.0):
if signature_text == "":
signature_text = os.path.splitext(os.path.basename(clear_last_sep(noise_dir)))[0]
''' Make sure the shuffling is deterministic for reproduce '''
random.seed(1234)
''' Read noise data as array '''
noise_list = []
noise_wav_path_list = glob.glob(noise_dir + "/*.wav")
for noise_wav_path in noise_wav_path_list:
data, sr = librosa.core.load(noise_wav_path, sr=sampling_rate, mono=True)
noise_list.append(data)
''' Process for selected input files '''
wav_path_list = glob.glob(target_dir + "/*.wav")
random.shuffle(wav_path_list)
wav_path_list = wav_path_list[:int(len(wav_path_list) * process_ratio)]
for wav_path in wav_path_list:
basename, ext = os.path.splitext(os.path.basename(wav_path))
output_path = output_dir + "/" + basename + "_" + signature_text + ".wav"
data, sr = librosa.core.load(wav_path, sr=sampling_rate, mono=True)
duration_sample = len(data)
noise = random.choice(noise_list)
start_sample = int(random.uniform(0, len(noise) - duration_sample - 1))
data = data * original_volume + noise[start_sample:start_sample + duration_sample] * noise_volume
soundfile.write(output_path, data, samplerate=sampling_rate, subtype="PCM_16")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Download AudioSet")
parser.add_argument("-t", "--target_dir", type=str, required=False, default="./talking")
parser.add_argument("-n", "--noise_dir", type=str, required=False, default="./noise")
parser.add_argument("-o", "--output_dir", type=str, required=False, default="./")
parser.add_argument("-s", "--signature_text", type=str, required=False, default="")
parser.add_argument("-r", "--sampling_rate", type=int, required=False, default="16000")
parser.add_argument("-p", "--process_ratio", type=float, required=False, default="0.5")
parser.add_argument("-u", "--original_volume", type=float, required=False, default="1.0")
parser.add_argument("-v", "--noise_volume", type=float, required=False, default="1.0")
args = parser.parse_args()
target_dir = args.target_dir
noise_dir = args.noise_dir
output_dir = args.output_dir
signature_text = args.signature_text
sampling_rate = args.sampling_rate
process_ratio = args.process_ratio
original_volume = args.original_volume
noise_volume = args.noise_volume
os.makedirs(output_dir, exist_ok=True)
add_noise(target_dir, noise_dir, output_dir, signature_text, sampling_rate, process_ratio, original_volume, noise_volume)
| 3,032 |
src/edrn.rdf/edrn/rdf/simpledmccrdfgenerator.py
|
EDRN/DMCCBackend
| 0 |
2172245
|
# encoding: utf-8
# Copyright 2012 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
'''Simple DMCC RDF Generator. This generator is used to describe some of the simpler
sets of data available at the DMCC by accessing their crummy web service.
'''
from edrn.rdf import _
from .exceptions import MissingParameterError
from .interfaces import IAsserter
from .rdfgenerator import IRDFGenerator
from .utils import get_suds_client
from .utils import parseTokens, splitDMCCRows
from .utils import validateAccessibleURL
from Acquisition import aq_inner
from rdflib.term import URIRef
from zope import schema
import rdflib, logging
_logger = logging.getLogger(__name__)
DEFAULT_VERIFICATION_NUM = '0' * 40960 # Why, why, why? DMCC, this makes no sense unless you copied a sample
# SOAP app from a book or a website that did credit card number verification and decided to make that the basis
# for all your SOAP apps and I bet that's exactly what you did.
class ISimpleDMCCRDFGenerator(IRDFGenerator):
'''Simple DMCC RDF Generator.'''
webServiceURL = schema.TextLine(
title=_('Web Service URL'),
description=_('The Uniform Resource Locator to the DMCC SOAP web service.'),
required=True,
constraint=validateAccessibleURL,
)
operationName = schema.TextLine(
title=_('Operation Name'),
description=_('Name of the SOAP operation to invoke in order to retrieve data.'),
required=True,
)
verificationNum = schema.TextLine(
title=_('Verification Number String'),
description=_('Utterly pointless and needless parameter to pass to the operation. A default will be used if unset.'),
required=False,
)
uriPrefix = schema.TextLine(
title=_('URI Prefix'),
description=_('The Uniform Resource Identifier prepended to all subjects described by this generator.'),
required=True,
)
identifyingKey = schema.TextLine(
title=_('Identifying Key'),
        description=_('Key in the DMCC output that serves as the discriminant for objects described by this generator.'),
required=True,
)
typeURI = schema.TextLine(
title=_('Type URI'),
description=_('Uniform Resource Identifier naming the type of objects described by this generator.'),
required=True,
)
class SimpleDMCCGraphGenerator(object):
'''A statement graph generator that produces statements based on the DMCC's crummy web service.'''
def __init__(self, context):
self.context = context
def generateGraph(self):
context = aq_inner(self.context)
if not context.webServiceURL: raise MissingParameterError(context, 'webServiceURL')
if not context.operationName: raise MissingParameterError(context, 'operationName')
if not context.identifyingKey: raise MissingParameterError(context, 'identifyingKey')
if not context.uriPrefix: raise MissingParameterError(context, 'uriPrefix')
if not context.typeURI: raise MissingParameterError(context, 'typeURI')
verificationNum = context.verificationNum if context.verificationNum else DEFAULT_VERIFICATION_NUM
predicates = {}
unusedSlots = set()
usedSlots = set()
for objID, item in context.contentItems():
predicates[item.title] = IAsserter(item)
client = get_suds_client(context.webServiceURL, context)
function = getattr(client.service, context.operationName)
horribleString = function(verificationNum)
graph = rdflib.Graph()
for row in splitDMCCRows(horribleString):
subjectURI, statements, statementsMade = None, [], False
for key, value in parseTokens(row):
usedSlots.add(key)
if key == context.identifyingKey and not subjectURI:
subjectURI = URIRef(context.uriPrefix + value)
elif key in predicates and len(value) > 0:
statements.extend(predicates[key].characterize(value))
statementsMade = True
elif key not in predicates:
unusedSlots.add(key)
# DMCC is giving out empty rows: they have an Identifier number, but no values in any of the columns.
            # While we may wish to generate RDF for those (essentially just saying "Disease #31 exists", for example),
            # that would mean updating the EDRN Portal code to handle them, which we can't do right now.
# So just drop these. TODO: Add them back, but update the EDRN Portal.
if statementsMade:
graph.add((subjectURI, rdflib.RDF.type, URIRef(context.typeURI)))
for predicate, obj in statements:
graph.add((subjectURI, predicate, obj))
if unusedSlots:
_logger.warning('For %s the following slots were unused: %s', '/'.join(context.getPhysicalPath()),
', '.join(unusedSlots))
_logger.info('And the used slots are %s', ', '.join(usedSlots))
return graph
| 5,141 |
text_transform.py
|
SE-translation/old-everything-together
| 2 |
2172180
|
import os
os.chdir("C:/Users/DMA/PycharmProjects/se-csai-translation/data/nl-en") # Select working directory
counter = 0
with open("europarl-v7.nl-en.nl", "r", encoding="utf-8") as f: # must do with both .en and .nl files
with open("europarl-v7.nl-en-fixed.nl", "w+", encoding="utf-8") as fixed: # make sure you save to different files
while True:
line = f.readline()
if line == "":
break
line = line.replace('.\n', ' .\n').replace("'", " '").replace('"', ' "').replace(',', ' ,')
fixed.write(line)
counter += 1
if counter % 100000 == 0:
print("I just did 100k lines")
| 700 |
toExcelLineChart.py
|
nelliesnoodles/StatsClass
| 0 |
2170388
|
import xlsxwriter
import re
import os
# from : https://xlsxwriter.readthedocs.io/example_chart_line.html #
#chart1 = workbook.add_chart({'type': 'line'})
#chart1.add_series({
#'name': '=Sheet1!$B$1',
#'categories': '=Sheet1!$A$2:$A$7',
#'values': '=Sheet1!$B$2:$B$7',
#})
class ToExcelLineChart(object):
def __init__(self):
self.data = [[11, 22, 33], [1, 2, 3], [110, 110, 110]] #an array of data that will be plot points
self.data_heading = ['time','order', 'frequency %'] # a list of headings
self.name = "fake frequency chart"
self.filename = None
self.Alpha = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "L", "M"]
def write(self, test_filename=False, data_only=False):
if self.filename == None:
print("All non alphanumerics will be stripped from file name.")
prompt = input("please name your file: ")
filename = re.sub('[^0-9a-zA-Z]+', '', prompt)
filename = filename + ".xlsx"
self.filename = filename
if test_filename==True:
print(filename)
#add info to excel file:
workbook = xlsxwriter.Workbook(self.filename)
worksheet = workbook.add_worksheet()
if data_only:
            self.add_chart_write(workbook, worksheet)
else:
# write headings to top row, A1
col = 1
row = 0
i = 0
for item in self.data_heading:
item = self.data_heading[i]
worksheet.write(row, col, item)
i += 1
col += 1
row = 1
col = 1
start = 2
for item in self.data:
place = self.Alpha[col - 1] + str(start)
worksheet.write_column(place, item)
col += 1
valuelocation = '=Sheet1!$A$1:$A$'
start = int(valuelocation[11])
end = start + len(self.data[0]) - 1
newlocation = valuelocation + str(end)
chart = workbook.add_chart({'type': 'line'})
chart.add_series({'values': newlocation})
worksheet.insert_chart('A7', chart)
print(f"file created under file name: {self.filename}")
Current_path = os.getcwd()
print("file is currently saved in this directory: ", Current_path)
workbook.close()
    def add_chart_write(self, workbook, worksheet):
        # Write only the data columns and headings (no chart) to the given worksheet.
row = 1
col = 1
for item in self.data:
worksheet.write_column(row, col, item)
col += 1
# write headings to top row, A1
col = 1
row = 0
i = 0
for item in self.data_heading:
item = self.data_heading[i]
worksheet.write(row, col, item)
i += 1
col += 1
print(f"file created under file name: {self.filename}")
Current_path = os.getcwd()
print("file is currently saved in this directory: ", Current_path)
workbook.close()
def test1():
chart = ToExcelLineChart()
chart.write(test_filename=True)
def test2():
#use default data to write to file
chart = ToExcelLineChart()
chart.write()
test2()
| 3,590 |
kgae/optimize.py
|
fzbio/GILoop
| 1 |
2171287
|
import tensorflow as tf
def normalized_bce(norm, pos_weight):
"""
"""
def loss_function(y_true, y_pred):
y_true_list = tf.unstack(y_true)
y_pred_list = tf.unstack(y_pred)
cost_list = []
for i in range(len(y_true_list)):
cost_list.append(norm * tf.reduce_mean(
tf.nn.weighted_cross_entropy_with_logits(labels=y_true_list[i], logits=y_pred_list[i], pos_weight=pos_weight)
))
# cost = norm * tf.reduce_mean(
# tf.nn.weighted_cross_entropy_with_logits(labels=y_true, logits=y_pred, pos_weight=pos_weight)
# )
assert len(cost_list) == 1
return tf.stack(cost_list)
return loss_function
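# Example usage (editor's sketch; the constants and the Keras-style compile call are
# illustrative, not taken from the original project):
# loss_fn = normalized_bce(norm=1.0, pos_weight=10.0)
# model.compile(optimizer='adam', loss=loss_fn)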
def gvae_loss():
pass
| 741 |
mpitest/test_newton_backtracking.py
|
colinxs/OpenMDAO
| 17 |
2172556
|
""" Test out newton solve with the array alpha on the backtracking linesearch."""
from __future__ import print_function
import unittest
import numpy as np
from openmdao.api import Problem, Group, IndepVarComp, Component, ParallelGroup, \
Newton
from openmdao.test.mpi_util import MPITestCase
try:
from mpi4py import MPI
from openmdao.core.petsc_impl import PetscImpl as impl
from openmdao.solvers.petsc_ksp import PetscKSP
except ImportError:
impl = None
class SimpleImplicitComp1(Component):
""" A Simple Implicit Component with an additional output equation.
f(x,z) = xz + z - 4
y = x + 2z
Sol: when x = 0.5, z = 2.666
Sol: when x = 2.0, z = 1.333
Coupled derivs:
y = x + 8/(x+1)
dy_dx = 1 - 8/(x+1)**2 = -2.5555555555555554
z = 4/(x+1)
dz_dx = -4/(x+1)**2 = -1.7777777777777777
"""
def __init__(self):
super(SimpleImplicitComp1, self).__init__()
# Params
self.add_param('x', np.zeros((3, 1)))
# Unknowns
self.add_output('y', np.zeros((3, 1)))
# States
self.add_state('z', 2.0*np.ones((3, 1)), lower=1.5, upper=np.array([[2.6, 2.5, 2.65]]).T)
self.maxiter = 10
self.atol = 1.0e-12
def solve_nonlinear(self, params, unknowns, resids):
pass
def apply_nonlinear(self, params, unknowns, resids):
""" Don't solve; just calculate the residual."""
x = params['x']
z = unknowns['z']
resids['z'] = x*z + z - 4.0
# Output equations need to evaluate a residual just like an explicit comp.
resids['y'] = x + 2.0*z - unknowns['y']
def linearize(self, params, unknowns, resids):
"""Analytical derivatives."""
J = {}
# Output equation
J[('y', 'x')] = np.diag(np.array([1.0, 1.0, 1.0]))
J[('y', 'z')] = np.diag(np.array([2.0, 2.0, 2.0]))
# State equation
J[('z', 'z')] = (params['x'] + 1.0) * np.eye(3)
J[('z', 'x')] = unknowns['z'] * np.eye(3)
return J
class SimpleImplicitComp2(Component):
""" A Simple Implicit Component with an additional output equation.
f(x,z) = xz + z - 4
y = x + 2z
Sol: when x = 0.5, z = 2.666
Sol: when x = 2.0, z = 1.333
Coupled derivs:
y = x + 8/(x+1)
dy_dx = 1 - 8/(x+1)**2 = -2.5555555555555554
z = 4/(x+1)
dz_dx = -4/(x+1)**2 = -1.7777777777777777
"""
def __init__(self):
super(SimpleImplicitComp2, self).__init__()
# Params
self.add_param('x', np.zeros((3, 1)))
# Unknowns
self.add_output('y', np.zeros((3, 1)))
# States
self.add_state('z', -2.0*np.ones((3, 1)), upper=-1.5, lower=np.array([[-2.6, -2.5, -2.65]]).T)
self.maxiter = 10
self.atol = 1.0e-12
def solve_nonlinear(self, params, unknowns, resids):
pass
def apply_nonlinear(self, params, unknowns, resids):
""" Don't solve; just calculate the residual."""
x = params['x']
z = unknowns['z']
resids['z'] = -x*z - z - 4.0
# Output equations need to evaluate a residual just like an explicit comp.
resids['y'] = x - 2.0*z - unknowns['y']
def linearize(self, params, unknowns, resids):
"""Analytical derivatives."""
J = {}
# Output equation
J[('y', 'x')] = np.diag(np.array([1.0, 1.0, 1.0]))
J[('y', 'z')] = -np.diag(np.array([2.0, 2.0, 2.0]))
# State equation
J[('z', 'z')] = -(params['x'] + 1.0) * np.eye(3)
J[('z', 'x')] = -unknowns['z'] * np.eye(3)
return J
class TestNewtonBacktrackingMPI(MPITestCase):
N_PROCS = 2
def setUp(self):
if impl is None:
raise unittest.SkipTest("Can't run this test (even in serial) without mpi4py and petsc4py")
def test_newton_backtrack_MPI(self):
#------------------------------------------------------
# Test that Newton doesn't drive it past lower bounds
#------------------------------------------------------
top = Problem(impl=impl)
top.root = Group()
par = top.root.add('par', ParallelGroup())
par.add('comp1', SimpleImplicitComp1())
par.add('comp2', SimpleImplicitComp2())
top.root.ln_solver = PetscKSP()
top.root.nl_solver = Newton()
top.root.nl_solver.options['maxiter'] = 5
top.root.add('px', IndepVarComp('x', np.ones((3, 1))))
top.root.connect('px.x', 'par.comp1.x')
top.root.connect('px.x', 'par.comp2.x')
top.setup(check=False)
top['px.x'] = np.array([2.0, 2.0, 2.0])
top.run()
if not MPI or self.comm.rank == 0:
self.assertEqual(top['par.comp1.z'][0], 1.5)
self.assertEqual(top['par.comp1.z'][1], 1.5)
self.assertEqual(top['par.comp1.z'][2], 1.5)
if not MPI or self.comm.rank == 1:
self.assertEqual(top['par.comp2.z'][0], -1.5)
self.assertEqual(top['par.comp2.z'][1], -1.5)
self.assertEqual(top['par.comp2.z'][2], -1.5)
#------------------------------------------------------
# Test that Newton doesn't drive it past upper bounds
#------------------------------------------------------
top = Problem(impl=impl)
top.root = Group()
par = top.root.add('par', ParallelGroup())
par.add('comp1', SimpleImplicitComp1())
par.add('comp2', SimpleImplicitComp2())
top.root.ln_solver = PetscKSP()
top.root.nl_solver = Newton()
top.root.nl_solver.options['maxiter'] = 5
top.root.add('px', IndepVarComp('x', np.ones((3, 1))))
top.root.connect('px.x', 'par.comp1.x')
top.root.connect('px.x', 'par.comp2.x')
top.setup(check=False)
top['px.x'] = 0.5*np.ones((3, 1))
top.run()
# Each bound is observed
if top.root.par.comp1.is_active():
self.assertEqual(top['par.comp1.z'][0], 2.6)
self.assertEqual(top['par.comp1.z'][1], 2.5)
self.assertEqual(top['par.comp1.z'][2], 2.65)
if top.root.par.comp2.is_active():
self.assertEqual(top['par.comp2.z'][0], -2.6)
self.assertEqual(top['par.comp2.z'][1], -2.5)
self.assertEqual(top['par.comp2.z'][2], -2.65)
if __name__ == '__main__':
from openmdao.test.mpi_util import mpirun_tests
mpirun_tests()
| 6,461 |
app_layout.py
|
trentwatt/web-flyover
| 2 |
2170901
|
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
starting_vertex = "cdc.gov"
with open("data/gov_to_gov/nodes.txt", "r") as f:
nodes = [line.strip() for line in f.readlines()]
graph = dcc.Graph(id="graph", config={"displayModeBar": False})
controls = dbc.Row(
dbc.Col(
dbc.Row(
[
dbc.Col(html.Button("Back", id="back-button"), width=2),
dbc.Col(
dcc.Slider(
id="sensitivity-slider", min=0, max=1, step=0.025, value=0.75
),
),
dbc.Col(html.Div(id="sensitivity-display"), width=3,),
],
),
width={"offset": 3, "size": 6},
),
align="start",
)
center_panel = dbc.Col(graph, width=6)
left_panel = dbc.Col(
[
html.Div(dcc.Markdown(id="left-info-panel"), className="text-center",),
dcc.Graph(id="left-legend-panel", config={"displayModeBar": False}),
],
width=3,
)
right_panel = dbc.Col(
[
html.Div(dcc.Markdown(id="right-info-panel"), className="text-center",),
dcc.Graph(id="right-legend-panel", config={"displayModeBar": False}),
],
width=3,
)
search_nodes = dbc.Col(
html.Div(
dcc.Dropdown(
id="node-search",
options=[{"label": node, "value": node} for node in nodes],
value=starting_vertex,
),
style={"verticalAlign": "middle"},
),
width={"offset": 1, "size": 2},
align="end",
)
title = dbc.Col(
html.Div(dcc.Markdown(id="title-text"), className="text-center"), width=6
)
title_bar = dbc.Row([search_nodes, title])
body = dbc.Row([left_panel, center_panel, right_panel], no_gutters=True)
debug = dbc.Row(
[
html.Div(id="history", style={"display": "none"}),
html.Div(id="cache", style={"display": "none"}),
html.Pre(id="trigger-data", style={"display": "none"}),
html.Pre(id="click-data", style={"display": "none"}),
html.Pre(id="input-data", style={"display": "none"}),
]
)
spacer = dbc.Row(dbc.Col(html.Div(style={"height": "75px"})))
documentation = dbc.Row(
dbc.Col(
dcc.Markdown(
"""
            This app provides a graph interface to the network of all the hyperlinks between websites ending in ".gov", as scraped by
[Common Crawl](https://commoncrawl.org/2019/11/host-and-domain-level-web-graphs-aug-sep-oct-2019/).
The center node is the current website, whose title is at the top. On the left hand side are
the sites most representative of those that link to it, and on the right are those most representative
of those it links to. The sensitivity, which you can adjust with the slider, is what calibrates this "representativeness:" the higher the sensitivity, the more
idiosyncratic representatives you will get. The lower the sensitivity, the more the sites will be representative
of the network as a whole.
When you click on a node for a site in the graph, it will take you to that site's graph. You can in this way explore a path of relatedness,
as well as find your way back with the back button. If you are curious about what any of the sites are, the legends on the right and
left are hyperlinked. If you have a specific site in mind you would like to start from, you can search for it in the dropdown.
The edge thicknesses indicate the strength of the relatedness between two nodes.
Enjoy. If you would like to share any ideas or experiences, please find me at trent wat son 1 at gmail.
"""
),
width={"offset": 2, "size": 8},
),
align="end",
)
layout = html.Div(
children=[
dbc.Container([title_bar, body, controls], fluid=True),
dbc.Container([spacer, documentation]),
debug,
]
)
| 3,983 |
open511_server/views/jurisdictions.py
|
Open511/open511-server
| 1 |
2171169
|
from django.shortcuts import get_object_or_404
from lxml.builder import E
from open511_server.models import Jurisdiction, JurisdictionGeography
from open511_server.utils.views import ModelListAPIView, APIView, Resource
class JurisdictionListView(ModelListAPIView):
allow_jsonp = True
model = Jurisdiction
def get_qs(self, request):
return Jurisdiction.objects.all()
def object_to_xml(self, request, obj):
return obj.to_full_xml_element(accept_language=request.accept_language)
class JurisdictionView(APIView):
model = Jurisdiction
up_url = '../../'
def get(self, request, id):
jur = get_object_or_404(Jurisdiction, id=id)
return Resource(E.jurisdictions(jur.to_full_xml_element(accept_language=request.accept_language)))
class JurisdictionGeographyView(APIView):
model = JurisdictionGeography
def get(self, request, id):
jur_geo = get_object_or_404(JurisdictionGeography, jurisdiction__id=id)
return Resource(E.geographies(jur_geo.to_full_xml_element()))
| 1,054 |
decisionProject/question/views.py
|
wldusdhso/likelion_ideaton2
| 0 |
2170910
|
from django.shortcuts import render
from .models import Question, Answer
from django.shortcuts import render,get_object_or_404,redirect
from django.utils import timezone
# Create your views here.
def list(request):
questions = Question.objects.all()
return render(request,'question/list.html',{'questions':questions})
def new(request):
return render(request,'question/new.html')
def create(request):
new_question = Question()
new_question.title = request.POST['title']
new_question.writer = request.user
new_question.content = request.POST['content']
new_question.pub_date = timezone.datetime.now()
new_question.save()
return redirect('/question/'+ str(new_question.id))
def detail(request,question_id):
question = get_object_or_404(Question,pk = question_id)
answer = Answer.objects.filter(question_id=question_id)
print(answer)
return render(request, 'question/detail.html',{'question':question, 'answer':answer,'edit_answer':edit_answer})
def edit(request,question_id):
question = get_object_or_404(Question,pk = question_id)
return render(request,'question/edit.html',{'question':question})
def update(request,question_id):
edit_question = get_object_or_404(Question,pk = question_id)
edit_question.title = request.POST['title']
edit_question.writer = request.user
edit_question.content = request.POST['content']
edit_question.pub_date = timezone.datetime.now()
edit_question.save()
return redirect('/question/'+ str(edit_question.id))
def delete(request,question_id):
delete_question = get_object_or_404(Question,pk = question_id)
delete_question.delete()
return redirect ('question:list')
def create_answer(request, question_id):
new_answer = Answer()
new_answer.writer = request.user
new_answer.content = request.POST['content']
new_answer.pub_date = timezone.datetime.now()
new_answer.question_id = question_id
new_answer.save()
print('create test')
return redirect('question:detail',question_id)
def edit_answer(request,answer_id):
answer = get_object_or_404(Answer,pk = answer_id)
return render(request, 'question/update_answer.html')
def update_answer(request,answer_id):
edit_answer = get_object_or_404(Answer,pk = answer_id)
edit_answer.writer = request.user
edit_answer.content = request.POST['content']
edit_answer.pub_date = timezone.datetime.now()
question_id = edit_answer.question_id
edit_answer.save()
return redirect('question:detail',question_id)
| 2,566 |
redditnfl/reddittools/flairtool.py
|
redditnfl/reddittools
| 0 |
2172357
|
#!/usr/bin/env python
import argparse
import sys
import traceback
from pathlib import Path
from pprint import pprint
from typing import TextIO, List
import csv
import yaml
from praw import Reddit
from praw.exceptions import ClientException
from praw.models import Subreddit
from redditnfl.reddittools.reddittoken import ensure_scopes
FLAIR_FIELD_NAMES = ['user', 'flair_text', 'flair_css_class']
def yaml_file_type(filename):
with open(filename) as fp:
return yaml.load(fp, Loader=yaml.SafeLoader)
def dir_path_type(must_exist=True, create=False, mode=0o777):
def f(path):
p = Path(path)
if not p.is_dir():
if must_exist:
raise argparse.ArgumentError("Path %s does not exist" % path)
elif create:
try:
p.mkdir(parents=True, mode=mode)
except Exception as e:
raise argparse.ArgumentError("Could not create path %s" % path) from e
return p
return f
class Command:
def setup_argparse(self, sp: argparse.ArgumentParser):
raise NotImplementedError("Not Implemented")
def run(self, sr: Subreddit, args):
raise NotImplementedError("Not implemented")
class Dump(Command):
def setup_argparse(self, sp: argparse.ArgumentParser):
sp.add_argument('outfile', nargs='?', type=argparse.FileType('w', encoding="UTF-8"), default=sys.stdout)
def run(self, sr: Subreddit, args):
self.dump_flair(sr, args.outfile)
@staticmethod
def dump_flair(sr: Subreddit, outfile: TextIO):
writer = csv.DictWriter(outfile, FLAIR_FIELD_NAMES, extrasaction='ignore')
writer.writeheader()
for user in sr.flair():
user['username'] = user['user'].name
writer.writerow(user)
class SetTemplates(Command):
def setup_argparse(self, sp: argparse.ArgumentParser):
sp.add_argument('flairconfig', help='Config file detailing flair', type=yaml_file_type)
sp.add_argument('emojidir', type=dir_path_type(), help="Directory containing emoji images")
def run(self, sr: Subreddit, args):
self.clear_templates(sr, args.dry_run)
self.create_templates(sr, args.flairconfig, args.emojidir, args.dry_run)
if args.verbose:
self.dump_templates(sr)
@staticmethod
def dump_templates(sr: Subreddit):
from pprint import pprint
pprint(list(sr.flair.templates))
def create_templates(self, sr: Subreddit, flairconfig: List, emoji_dir: Path, dry_run=False):
for flair in flairconfig:
emoji_file = emoji_dir / (flair['emoji'] + ".png")
if not emoji_file.exists():
raise Exception("File does not exist: %s" % emoji_file)
print("Upload %s as :%s:" % (emoji_file, flair['emoji']))
if not dry_run:
sr.emoji.add(flair['emoji'], str(emoji_file))
print("Create template class=<{class}>, text=<{text}>, emojis=<{emoji}>".format(**flair))
if not dry_run:
self.create_template(sr, flair['text'], flair['class'], [flair['emoji']], flair.get('mod_only', False))
@staticmethod
def create_template(sr: Subreddit, text: str, css_class: str, emojis: List[str],
mod_only: bool = True) -> str:
"""
        Create a flair template that fulfills the following criteria:
* Only shows text on old reddit (no emoji)
* Shows emoji + Text on new reddit
:param sr: The subreddit to work on
:param text: (Plain) Text for the flair
:param css_class: CSS class
:param emojis: Emojis to add in front
:param mod_only: Whether to make the template mod_only (Default: True)
:return: The created template
"""
templates_before = [template['id'] for template in sr.flair.templates]
# First we create an old-style template
sr.flair.templates.add(text=text, css_class=css_class)
# Find the id of the template we created
new_template = list(filter(lambda t: t['id'] not in templates_before, sr.flair.templates))[0]
# Then we update the text with the emojis in it on the new flair endpoint. This seems to be preferable.
if emojis:
# Prepend any emojis
text = ":" + ("::".join(emojis)) + ": " + text
sr.flair.templates.update(new_template['id'], mod_only=mod_only, text_editable=False,
background_color='transparent', text=text, css_class=css_class)
new_template = list(filter(lambda t: t['id'] not in templates_before, sr.flair.templates))[0]
return new_template
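    # Example (editor's sketch; `sr` is assumed to be a praw Subreddit the bot moderates,
    # and the template text, CSS class, and emoji names below are illustrative only):
    #   tpl = SetTemplates.create_template(sr, text='Packers', css_class='packers',
    #                                      emojis=['packers'], mod_only=False)
    # This first creates the plain-text template, then rewrites it as ':packers: Packers'
    # for new reddit via the template update call above.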
@staticmethod
def create_template2(sr: Subreddit, text: str, css_class: str, emojis: List[str],
mod_only: bool = True) -> str:
"""
        Create a flair template that fulfills the following criteria:
* Only shows text on old reddit (no emoji)
* Shows emoji + Text on new reddit
:param sr: The subreddit to work on
:param text: (Plain) Text for the flair
:param css_class: CSS class
:param emojis: Emojis to add in front
:param mod_only: Whether to make the template mod_only (Default: True)
:return: The created template
"""
templates_before = [template['id'] for template in sr.flair.templates]
if emojis:
# Prepend any emojis
text = ":" + ("::".join(emojis)) + ": " + text
sr.flair.templates.add(mod_only=mod_only, text_editable=False,
background_color='transparent', text=text, css_class=css_class)
# Find the id of the template we created
new_template = list(filter(lambda t: t['id'] not in templates_before, sr.flair.templates))[0]
return new_template
@staticmethod
def clear_templates(sr: Subreddit, dry_run=False):
print("Deleting all templates")
if not dry_run:
sr.flair.templates.clear()
for emoji in sr.emoji:
print("Delete emoji %s" % emoji)
if not dry_run:
emoji.delete()
class Clear(Command):
def setup_argparse(self, sp: argparse.ArgumentParser):
pass
def run(self, sr: Subreddit, args):
want_input = "Yes I am sure!"
answer = input("This will delete ALL assigned flair in /r/{}!\n\nPlease type '{}' to confirm: ".format(sr.display_name, want_input))
if answer == want_input:
self.clear(sr, dry_run=args.dry_run)
else:
print("Aborting")
@staticmethod
def clear(sr: Subreddit, dry_run=False):
if dry_run:
print("Would delete all flair in /r/{}".format(sr.display_name))
else:
print("Deleting all flair in /r/{}".format(sr.display_name))
sr.flair.delete_all()
class Upload(Command):
def setup_argparse(self, sp: argparse.ArgumentParser):
sp.add_argument('infile', nargs='?', type=argparse.FileType('r', encoding="UTF-8"), default=sys.stdin)
def run(self, sr: Subreddit, args):
self.upload(args.infile, sr, dry_run=args.dry_run)
@staticmethod
def upload(infile, sr: Subreddit, has_header=True, dry_run=False):
if not has_header:
fieldnames = FLAIR_FIELD_NAMES
else:
fieldnames = None
reader = csv.DictReader(infile, fieldnames=fieldnames, restval='')
if dry_run:
for row in reader:
pprint(row)
else:
sr.flair.update(reader)
COMMANDS = [Dump, Upload, Clear, SetTemplates]
class FlairTool:
def __init__(self):
parser = argparse.ArgumentParser(description="Flair swiss army knife")
parser.add_argument('-s', '--site', dest='site', help="Reddit 'site' (praw.ini section) to use")
        parser.add_argument('-v', '--verbose', action="store_true", help="Log network requests")
parser.add_argument('-n', '--dry-run', action="store_true", help="Prevent any action on reddit being taken "
"(other side-effects are not prevented!)")
parser.add_argument('sr_name', help="Name of subreddit to run on")
sp = parser.add_subparsers(help="Flair command to run (%s)" % ", ".join([c.__name__ for c in COMMANDS]),
dest='cmd', required=True, metavar='cmd')
self.parser = parser
self.commands = {}
for CmdClass in COMMANDS:
name = CmdClass.__name__
cmd_parser = sp.add_parser(name)
cmd = CmdClass()
cmd.setup_argparse(cmd_parser)
self.commands[name] = cmd
def run(self):
args = self.parser.parse_args()
if args.verbose:
import logging
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
for logger_name in ("praw", "prawcore"):
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
try:
r = Reddit(args.site)
ensure_scopes(r, scopes=['read', 'flair', 'modflair', 'structuredstyles'])
except ClientException:
traceback.print_exc()
sys.stderr.write("\n\nOh dear, something broke. Most likely you need to pass the --site "
"parameter, set the praw_site environment variable or configure a "
"DEFAULT site in your praw.ini\n\n")
self.parser.print_help()
sys.exit(1)
sr = r.subreddit(args.sr_name)
self.commands[args.cmd].run(sr, args)
def main():
ft = FlairTool()
ft.run()
if __name__ == '__main__':
main()
| 9,875 |
main_app/tests/utils.py
|
wszoltysek/give_things
| 0 |
2172634
|
from faker import Faker
from main_app.models import *
from random import randint
faker = Faker()
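# Factory helpers that create throwaway model instances for the test suite.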
def fake_user():
user = User.objects.create(username=faker.first_name_male())
return user
def fake_category():
new_category = Category.objects.create(name=faker.word())
return new_category
def fake_institution():
new_institution = Institution.objects.create(
name=faker.word(),
description=faker.sentence(),
type=randint(0, 2)
)
new_institution.categories.add(fake_category())
return new_institution
def fake_donation():
new_donation = Donation.objects.create(
quantity=randint(1, 5),
address=faker.address(),
phone_number=randint(1000, 2000),
city=faker.word(),
zip_code="44-100",
pick_up_date=faker.date(),
pick_up_time=faker.time(),
pick_up_comment=faker.sentence(),
collected=True,
user=fake_user()
)
new_donation.categories.add(fake_category())
new_donation.institution.add(fake_institution())
return new_donation
| 1,081 |
python/colorTerm.py
|
Ayushd70/RetartedCodes
| 0 |
2172798
|
# Python Program to Print Colored Text to the Terminal
# Method 1 using termcolor
from termcolor import colored
print(colored('RetardedCodes', 'blue'))
# Method 2 using ANSI escape sequences
# print('\x1b[38;2;5;86;243m' + 'RetardedCodes' + '\x1b[0m')
| 255 |
bot1/playground/planned_auto_drive.py
|
dpm76/Bot1
| 0 |
2172863
|
'''
Created on 27 oct. 2018
@author: david
'''
import logging
import time
from engine.driver import Driver
class AutoPilot(object):
def __init__(self, driver):
self._driver = driver
def drive(self, throttle, timespan):
self._driver.setNeutral()
self._driver.setMode(Driver.MODE_NORMAL)
self._driver.setDirection(0.0)
self._driver.setThrottle(throttle)
time.sleep(timespan)
self._driver.setNeutral()
def rotate(self, direction, timespan):
self._driver.setNeutral()
self._driver.setMode(Driver.MODE_ROTATE)
        self._driver.setThrottle(0.0)
self._driver.setDirection(direction)
time.sleep(timespan)
self._driver.setNeutral()
logging.basicConfig(level=logging.DEBUG)
driver = Driver.createForTesting()
#driver = Driver.createForRobot()
pilot = AutoPilot(driver)
driver.start()
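# Note: judging by the log labels below, a negative throttle drives this rig forwards.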
logging.info("**** Forwards ****")
pilot.drive(-40.0, 2)
logging.info("**** Backwards ****")
pilot.drive(40.0, 2)
logging.info("**** Rotate ****")
pilot.rotate(30.0, 2)
logging.info("**** Forwards ****")
pilot.drive(-40.0, 2)
logging.info("**** Backwards ****")
pilot.drive(40.0, 2)
logging.info("**** Rotate ****")
pilot.rotate(-30.0, 2)
logging.info("**** Forwards ****")
pilot.drive(-40.0, 2)
logging.info("**** Backwards ****")
pilot.drive(40.0, 2)
driver.stop()
| 1,422 |
LabPrograms/EM&K-Means.py
|
pranavraikote/MachineLearning-LabPrograms
| 0 |
2171340
|
# Import modules
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
#Reading of Dataset
data = pd.read_csv('EMK.csv')
print("Input Data and Shape")
print(data.shape)
data.head()
#Extraction of Data
f1 = data['V1'].values
f2 = data['V2'].values
X = np.array(list(zip(f1, f2)))
#Graph Plot of the Dataset
print("X ", X)
print('Graph for whole dataset')
plt.scatter(f1, f2, c='blue', s=7)
plt.show()
#KMeans Clustering Algorithm
kmeans = KMeans(3, random_state=0)
#Kmeans labels
labels = kmeans.fit(X).predict(X)
print("labels ",labels)
#KMeans Cluster Centroids
centroids = kmeans.cluster_centers_
print("centroids ",centroids)
#KMeans Graph
plt.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis')
print('Graph using Kmeans Algorithm')
plt.scatter(centroids[:, 0], centroids[:, 1], marker='*', s=200, c='#050505')
plt.show()
#EM Algorithm
gmm = GaussianMixture(n_components=3).fit(X)
labels = gmm.predict(X)
probs = gmm.predict_proba(X)
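# Marker size encodes how confidently the GMM assigns each point to its cluster
# (maximum posterior probability, cubed to exaggerate the differences).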
size = 10 * probs.max(1) ** 3
#EM Graph
print('Graph using EM Algorithm')
plt.scatter(X[:, 0], X[:, 1], c=labels, s=size, cmap='viridis')
plt.show()
| 1,267 |
algorith/leetc/arrays/search_rotated_sorted_ii.py
|
ryu577/algorithms
| 0 |
2172825
|
from typing import List
class Solution:
def search(self, nums: List[int], target: int) -> bool:
ix = binary_srch(nums,target,0,len(nums)-1)
if ix==-1:
return False
else:
return True
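# Binary-search for the index of the smallest element in a rotated sorted array;
# returns -1 when the array is not rotated. With duplicates at the boundaries the
# comparison against a[hi] can be ambiguous, which the __main__ block below probes.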
def find_pivot(a,lo,hi):
if (a[hi]>a[lo]):
return -1
else:
while lo<hi:
mid=(lo+hi)//2
if (a[mid]<a[hi]):
hi=mid
else:
lo=mid+1
return lo
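# Right-biased twin of find_pivot: mid rounds up and hi steps past elements equal to
# a[hi], so it lands on the index of the largest element (the end of the first
# ascending run) when the values allow it.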
def find_pivot2(a,lo,hi):
if (a[hi]>a[lo]):
return -1
else:
while lo<hi:
mid=(lo+hi)//2+(lo+hi)%2
if (a[mid]<=a[hi]):
hi=mid-1
else:
lo=mid
return hi
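# View the rotated array as if it were sorted: index x is read starting just after
# position pv (the index of the largest element); pv == -1 leaves indices unchanged.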
def map_sorted_to_rotated(x,pv,a):
n=len(a)
return a[(x+pv+1)%n]
def binary_srch(a,t,lo,hi):
    pv=find_pivot(a,lo,hi)
    # find_pivot returns the index of the smallest element, while map_sorted_to_rotated
    # expects the index of the largest one (one position earlier), so shift it here.
    if pv >= 0:
        pv -= 1
while lo!=hi:
mid=(lo+hi)//2
val=map_sorted_to_rotated(mid,pv,a)
if val<t:
lo=mid+1
else:
hi=mid
if map_sorted_to_rotated(lo,pv,a)==t:
return (lo+pv+1)%len(a)
else: return -1
if __name__=="__main__":
nums = [2,5,6,0,0,1,2]; target = 0
pv = find_pivot(nums,0,len(nums)-1)
print(pv)
nums = [2,2,2,0,0,1,2]; target = 0
pv = find_pivot(nums,0,len(nums)-1)
print(pv)
nums=[2,5,6,0,0,1,2]
ix = binary_srch(nums,0,0,len(nums)-1)
print(ix)
nums=[2,2,2,0,2,2]
pv1=find_pivot(nums,0,len(nums)-1)
pv2=find_pivot2(nums,0,len(nums)-1)
print(str(pv1)+","+str(pv2))
nums=[4,5,6,7,0,1,2]
pv1=find_pivot(nums,0,len(nums)-1)
pv2=find_pivot2(nums,0,len(nums)-1)
print(str(pv1)+","+str(pv2))
ix = binary_srch(nums,0,0,len(nums)-1)
print(ix)
| 1,721 |
ocfweb/api/urls.py
|
TonyLianLong/ocfweb
| 0 |
2170897
|
from django.urls import path
from ocfweb.api import hours
from ocfweb.api import lab
from ocfweb.api import session_tracking
urlpatterns = [
path('hours', hours.get_hours_all, name='hours_all'),
path('hours/today', hours.get_hours_today, name='hours_today'),
path('lab/desktops', lab.desktop_usage, name='desktop_usage'),
path('session/log', session_tracking.log_session, name='log_session'),
]
| 413 |
sloth/gui/jupyx/__init__.py
|
maurov/xraysloth
| 4 |
2171616
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
JupyX: Jupyter UI for X-ray data analysis
-----------------------------------------
"""
#################
# IPython utils #
#################
def ipythonAutoreload():
"""Force ipython to autoreload imported modules"""
from IPython import get_ipython
mgc = get_ipython().magic
mgc(u'%load_ext autoreload')
mgc(u'%autoreload 2')
def run_from_ipython():
"""Check if inside ipython -> see :func:`is_in_notebook`"""
try:
__IPYTHON__
return True
except NameError:
return False
def is_in_notebook():
"""check if code is run from IPython notebook
    .. note:: code from `this StackOverflow answer <https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook/24937408#24937408>`_
"""
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
| 1,235 |
peaks_analysis.py
|
ryuzakyl/peaks-recognition
| 1 |
2172085
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) CENATAV, DATYS - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by <NAME> <<EMAIL>>, March 2017
import os
import sys
from ctypes import (
cdll,
# c/c++ types declarations
POINTER,
c_int,
c_double,
# c/c++ functions declarations
CFUNCTYPE,
# for passing values as reference
byref,
# for mapping C structures in the wrapper
Structure,
)
# common path for shared library
__shared_lib_path = "{}/peaks-analysis/cmake-build-debug/".format(os.path.split(__file__)[0])
lib_name = 'lib_peaks_analysis'
win32_lib_ext = '.dll'
posix_lib_ext = '.so'
# linux os support
if os.name == 'posix' and sys.platform.startswith('linux'):
try:
__peaks_analysis_lib_path = "{}{}{}".format(__shared_lib_path, lib_name, posix_lib_ext)
__peaks_analysis_lib = cdll.LoadLibrary(__peaks_analysis_lib_path)
except:
        raise ImportError('Error while loading the peaks analysis shared library.')
# windows os support
elif os.name == 'nt':
try:
__peaks_analysis_lib_path = "{}{}{}".format(__shared_lib_path, lib_name, win32_lib_ext)
__peaks_analysis_lib = cdll.LoadLibrary(__peaks_analysis_lib_path)
except:
        raise ImportError('Error while loading the peaks analysis shared library.')
# os not supported
else:
    raise NotImplementedError('Unsupported operating system')
# ---------------------------------------------------------------
# creates a c_array of type 'c_type' and size 'n' (if l is provided, then it copies the values of l)
def c_array(c_type, n, l=None):
return (c_type * n)(*l) if l else (c_type * n)()
# allowing c/c++ function prototypes declarations a bit easier
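# Each argument spec is a tuple (name, ctype, direction flag[, default]); ctypes
# paramflags treat flag 1 as an input parameter and flag 2 as an output parameter.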
def c_func(lib_func_name, lib_handle, ret_type, *args):
# types and flags declarations
a_types = []
a_flags = []
# for each argument
for arg in args:
a_types.append(arg[1])
a_flags.append((arg[2], arg[0]) + arg[3:])
return CFUNCTYPE(ret_type, *a_types)((lib_func_name, lib_handle), tuple(a_flags))
# subclassing the ctypes.Structure class to add new features
class _Structure(Structure):
def __repr__(self):
"""
Print the fields
"""
res = []
for field in self._fields_:
res.append('%s=%s' % (field[0], repr(getattr(self, field[0]))))
return self.__class__.__name__ + '(' + ','.join(res) + ')'
@classmethod
def from_param(cls, obj):
"""
Magically construct from a tuple
"""
if isinstance(obj, cls):
return obj
if isinstance(obj, tuple):
return cls(*obj)
raise TypeError
# maps the C 'PeakInfo' struct returned by the shared library
class PeakInfo(_Structure):
_fields_ = [
('lower_bound', c_int),
('upper_bound', c_int),
('height_index', c_int),
('peak_height', c_double),
('peak_area', c_double),
]
# additional type declarations
c_int_p = POINTER(c_int) # c/c++ int* data type
c_double_p = POINTER(c_double) # c/c++ double* data type
c_peak_info_p = POINTER(PeakInfo) # c/c++ PeakInfo* data type
# ---------------------------------------------------------------
# handle to 'find_in_histogram' function in the shared library
__c_find_in_histogram = c_func(
'find_in_histogram', __peaks_analysis_lib, c_peak_info_p,
# histogram/signal/spectrum to perform detection on
('histogram', c_double_p, 1), # double* histogram
# histogram/signal/spectrum length
('h_length', c_int, 1), # int h_length
# interval size (dx) (size of interval of analysis I)
('dx', c_int, 1), # int dx
# indicates the "smoothness" of the curve
('smoothness', c_int, 1), # int smoothness
# growth angle of certain interval (indicates if the function is really growing)
('growth_angle', c_double, 1), # double growth_angle
# abate angle of certain interval (indicates if the function is really abating)
('abate_angle', c_double, 1), # double abate_angle
# threshold for filtering peaks that do not match required height
('height_thres', c_double, 1), # double height_thres
# out int for amount of peaks found
('peaks_count', c_int_p, 1), # int* peaks_count
)
# handle to 'compute_peak_statistics' function in the shared library
__c_compute_peak_statistics = c_func(
'compute_peak_statistics', __peaks_analysis_lib, PeakInfo,
# histogram/signal/spectrum to perform detection on
('histogram', c_double_p, 1), # double* histogram
# indicates peak start
('lb', c_int, 1), # int lb
# indicates peak end
('ub', c_int, 1), # int ub
)
# deletes an already allocated (PeakInfo*)
__c_delete_peak_info_ptr = c_func(
'delete_peak_info_ptr', __peaks_analysis_lib, None,
('ptr', c_peak_info_p, 1) # PeakInfo* int_ptr
)
# --------------------------------------------------
def delete_peak_info_ptr(ptr):
# calling the c++ function
__c_delete_peak_info_ptr(ptr)
# noinspection PyTypeChecker
def find_in_histogram(h, dx, smoothness, growth_angle, abate_angle, height_thres):
# getting the length of the histogram
h_length = len(h)
# getting the ctypes array
h_arr = c_array(c_double, h_length, h)
# calling the c++ method
peaks_count = c_int(0) # peaks data size (2 * amount of peaks)
peaks_data = __c_find_in_histogram(h_arr, h_length, dx, smoothness, growth_angle, abate_angle, height_thres, byref(peaks_count))
# copying the peaks data
p_count = peaks_count.value
peaks_info = [
(
peaks_data[i].lower_bound,
peaks_data[i].upper_bound,
peaks_data[i].height_index,
peaks_data[i].peak_height,
peaks_data[i].peak_area,
)
for i in range(p_count)
]
# releasing the memory of 'peaks_data'
delete_peak_info_ptr(peaks_data)
# returning the peaks and valleys
return peaks_info
def compute_peak_statistics(h, lb, ub):
# getting the length of the histogram
h_length = len(h)
# getting the ctypes array
h_arr = c_array(c_double, h_length, h)
# calling the c++ method
peak_stats = __c_compute_peak_statistics(h_arr, lb, ub)
# returning peak stats as a tuple
return (
peak_stats.lower_bound,
peak_stats.upper_bound,
peak_stats.height_index,
peak_stats.peak_height,
peak_stats.peak_area,
)
| 6,500 |
env/lib/python3.7/site-packages/indicoio/text/summarization.py
|
Novandev/gn_api
| 4 |
2172553
|
from ..utils.api import api_handler
from ..utils.decorators import detect_batch_decorator
@detect_batch_decorator
def summarization(text, cloud=None, batch=False, api_key=None, version=1, **kwargs):
"""
Given input text, returns a `top_n` length sentence summary.
Example usage:
.. code-block:: python
>>> from indicoio import summarization
>>> summary = summarization("https://en.wikipedia.org/wiki/Yahoo!_data_breach")
>>> summary
["This information was disclosed two years later on September 22, 2016.", "[1] The data breach is one of the largest in the history of the Internet.", "Specific details of material taken include names, email addresses, telephone numbers, dates of birth, and encrypted passwords.", "[2]\\n\\nEvents [ edit ]\\n\\nYahoo alleged in its statement that the breach was carried out by \\"state-sponsored\\" hackers,[3] but the organization did not name any country.", "We had our own use for it and other buyers did as well."]
:param text: The text to be analyzed.
:type text: str or unicode
    :rtype: List of summary sentences
"""
url_params = {"batch": batch, "api_key": api_key, "version": version}
return api_handler(text, cloud=cloud, api="summarization", url_params=url_params, **kwargs)
| 1,328 |
tests/benchmarks/test_benchmarks.py
|
shishaboy/epymetheus
| 0 |
2172415
|
import pytest
import pandas as pd
from epymetheus import Universe, Trade
from epymetheus.benchmarks import BuyAndHold
class TestBuyAndHold:
def test(self):
universe = Universe(
pd.DataFrame({"A": [1, 2, 3], "B": [2, 3, 4], "C": [3, 4, 5]})
)
strategy = BuyAndHold({"A": 0.5, "B": 0.5}).run(universe)
assert len(strategy.trades) == 1
assert (strategy.trades[0].asset == ["A", "B"]).all()
assert (strategy.trades[0].lot == [0.5 / 1, 0.5 / 2]).all()
assert strategy.trades[0].open_bar == 0
assert strategy.trades[0].close_bar == 2
| 613 |
scripts/baseball-stats-db/create-database/create_database_schema.py
|
rippinrobr/sports-stats-utilities
| 1 |
2172031
|
from sql_schema import *
import argparse
import sys
def define_parameters(parser):
parser.add_argument("--dbtype", choices=SUPPORTED_DBS, help="the database type you'd like to generate the schema for", type=str, required=True)
parser.add_argument("--dbhost", help="host of the database server", default="localhost", type=str)
parser.add_argument("--dbname", help="Name of the database where the tables are to be added. REQUIRED if not sqlite", type=str)
parser.add_argument("--dbpath", help="SQLITE ONLY - the path for the newly created database", type=str)
parser.add_argument("--dbpass", help="The password for the user given in the --dbuser option, ignored for SQLite", type=str)
parser.add_argument("--dbport", help="The port the database server is listening on, ignored for SQLite, defaults to appropriate value for server type if not provided", type=int)
parser.add_argument("--dbuser", help="username to use when creating the database, ignored for SQLite databases, REQUIRED for others.", type=str)
def create_db_connection(args):
if args.dbtype == SQLITE:
sqlite_db_name = "baseball_databank.sqlite3"
if args.dbpath:
sqlite_db_name = args.dbpath
return SqliteDatabase(sqlite_db_name)
else:
        if args.dbuser == None:
            print "ERROR: --dbuser is required when --dbtype is not sqlite"
            sys.exit(1)
        if args.dbtype == POSTGRES:
            port = 5432
            if args.dbport > 0:
                port = args.dbport
            return PostgresqlDatabase(args.dbname, host=args.dbhost, user=args.dbuser, port=port, password=args.dbpass)
        if args.dbtype == MYSQL:
port = 3306
if args.dbport > 0:
port = args.dbport
return MySQLDatabase(args.dbname, host=args.dbhost, port=port, user=args.dbuser, password=args.dbpass)
def main():
param_parser = argparse.ArgumentParser(description="Generates a DB schema based on the Baseball Databank csv files.")
define_parameters(param_parser)
args = param_parser.parse_args()
if args.dbname == None and args.dbtype.lower() != SQLITE:
print "ERROR: If --dbtype is not sqlite then --dbname is required"
param_parser.print_help()
sys.exit(1)
try:
bdb_db = create_db_connection(args)
initialize_db_and_connect(bdb_db)
except ProgrammingError as e:
print "ERROR: An issue occurred while trying to create the database schema:", e
except InternalError as i:
print "ERROR: An issue occurred while trying to create the datbase schema:", i
except OperationalError as o:
print "ERROR: An error occurred while attempting to create the database schema:", o
if __name__ == "__main__":
main()
| 2,799 |
tests/test_builder.py
|
Luoyufu/doclink
| 4 |
2172965
|
# -*- coding: utf-8 -*-
import pytest
from doclink.consumer import Consumer
from doclink.builder import Api, ApiBuilder
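# Minimal test doubles: the fake client returns a canned 200 response so Api objects
# can be exercised without real HTTP traffic.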
class MockClient(object):
def request(self, request_meta):
return MockResp(200)
class MockResp(object):
def __init__(self, status_code):
self.status_code = status_code
def mock_func(resp):
pass
consumer = Consumer('base_uri', client=MockClient())
class TestApi(object):
def test_init(self):
api = Api('test', consumer, 'get', 'uri', mock_func)
assert api.name == 'test'
assert api.request_meta == {'base_uri': 'base_uri',
'uri': 'uri',
'method': 'get'}
def test_update_request_meta(self):
api = Api('test', consumer, 'get', 'uri', mock_func)
api.update_request_meta(base_uri='new_base_uri')
        assert api.request_meta == {'base_uri': 'new_base_uri',
                                    'uri': 'uri',
                                    'method': 'get'}
def test_add_arg_group(self):
arg_group = object()
api = Api('test', consumer, 'get', 'uri', mock_func)
api.add_arg_group(arg_group)
assert arg_group in api.arg_groups
def test_partial(self):
api = Api('test', consumer, 'get', 'uri', mock_func)
p = api.partial(arg='value')
assert p.args == ()
assert p.keywords == {'arg': 'value'}
assert p.func is api
def test_resp_hook(self):
def mock_hook(resp):
return resp.status_code
api = Api('test', consumer, 'get', 'uri', mock_func)
api.add_resp_hook(mock_hook)
result = api()
assert result == 200
def test_resp_hook_not_callable(self):
api = Api('test', consumer, 'get', 'uri', mock_func)
with pytest.raises(ValueError):
api.add_resp_hook(None)
def test_on_request(self):
api = Api('test', consumer, 'get', 'uri', mock_func)
class OnRequest(object):
def __init__(self):
self.called = False
def __call__(self, request_meta):
self.called = True
on_request = OnRequest()
api.on_request(on_request)
result = api()
assert on_request.called
assert result.status_code == 200
class TestApiBuilder(object):
def test_build_path_arg_group(self):
api_builder = ApiBuilder(consumer, 'get', '/uri/{query1=query1}', mock_func, parser=None)
path_arg_group = api_builder._api.arg_groups[0]
assert path_arg_group.group_name == 'path'
assert path_arg_group.arg_map['query1'].default == 'query1'
| 2,803 |