max_stars_repo_path (string, 4-182 chars) | max_stars_repo_name (string, 6-116 chars) | max_stars_count (int64, 0-191k) | id (string, 7 chars) | content (string, 100-10k chars) | size (int64, 100-10k) |
---|---|---|---|---|---|
tests/python/test_basic.py
|
hcho3/treelite
| 1 |
2024839
|
# -*- coding: utf-8 -*-
"""Suite of basic tests"""
from __future__ import print_function
import unittest
import os
import subprocess
from zipfile import ZipFile
import numpy as np
import treelite
import treelite.runtime
from util import load_txt, os_compatible_toolchains, os_platform, libname, \
run_pipeline_test, make_annotation
dpath = os.path.abspath(os.path.join(os.getcwd(), 'tests/examples/'))
class TestBasic(unittest.TestCase):
def test_basic(self):
"""
Test a basic workflow: load a model, compile and export as shared lib,
and make predictions
"""
for model_path, dtrain_path, dtest_path, libname_fmt, \
expected_prob_path, expected_margin_path, multiclass in \
[('mushroom/mushroom.model', 'mushroom/agaricus.train',
'mushroom/agaricus.test', './agaricus{}',
'mushroom/agaricus.test.prob',
'mushroom/agaricus.test.margin', False),
('dermatology/dermatology.model', 'dermatology/dermatology.train',
'dermatology/dermatology.test', './dermatology{}',
'dermatology/dermatology.test.prob',
'dermatology/dermatology.test.margin', True),
('letor/mq2008.model', 'letor/mq2008.train',
'letor/mq2008.test', './mq2008{}',
None, 'letor/mq2008.test.pred', False)]:
model_path = os.path.join(dpath, model_path)
model = treelite.Model.load(model_path, model_format='xgboost')
make_annotation(model=model, dtrain_path=dtrain_path,
annotation_path='./annotation.json')
for use_annotation in ['./annotation.json', None]:
for use_quantize in [True, False]:
run_pipeline_test(model=model, dtest_path=dtest_path,
libname_fmt=libname_fmt,
expected_prob_path=expected_prob_path,
expected_margin_path=expected_margin_path,
multiclass=multiclass, use_annotation=use_annotation,
use_quantize=use_quantize)
def test_srcpkg(self):
"""Test feature to export a source tarball"""
model_path = os.path.join(dpath, 'mushroom/mushroom.model')
dmat_path = os.path.join(dpath, 'mushroom/agaricus.test')
libpath = libname('./mushroom/mushroom{}')
model = treelite.Model.load(model_path, model_format='xgboost')
toolchain = os_compatible_toolchains()[0]
model.export_srcpkg(platform=os_platform(), toolchain=toolchain,
pkgpath='./srcpkg.zip', libname=libpath,
params={}, verbose=True)
with ZipFile('./srcpkg.zip', 'r') as zip_ref:
zip_ref.extractall('.')
subprocess.call(['make', '-C', 'mushroom'])
predictor = treelite.runtime.Predictor(libpath='./mushroom', verbose=True)
dmat = treelite.DMatrix(dmat_path)
batch = treelite.runtime.Batch.from_csr(dmat)
expected_prob_path = os.path.join(dpath, 'mushroom/agaricus.test.prob')
expected_prob = load_txt(expected_prob_path)
out_prob = predictor.predict(batch)
assert np.allclose(out_prob, expected_prob, atol=1e-11, rtol=1e-8)
| 3,135 |
persistentmemo/tests.py
|
e-c-d/persistentmemo
| 0 |
2025849
|
import unittest
from . import *
class PersistentMemoTest(unittest.TestCase):
def test_all(self):
m = PersistentMemo()
m.store = {}
called = 0
x = []
@fdeps(x, set_readonly=False)
def f(x,y,**kw):
nonlocal called
called += 1
return (x+y,list(kw))
mf = m.memoize()(f)
mf(3,5)
mf(3,5)
self.assertEqual(called, 1)
mf(3,4)
self.assertEqual(called, 2)
mf([4],['x',4.4])
mf([4],['x',4.4])
mf([4],['x',4.40001])
self.assertEqual(called, 4)
self.assertEqual(mf(3,4,w=[7]), mf(3,4,w=[7]))
self.assertEqual(called, 5)
called = 0
x.append(0)
mf(3,5)
self.assertEqual(called, 1)
m.set_readonly(f)
mf(3,5)
called = 0
mf(3,5)
self.assertEqual(called, 0)
x.append(0)
self.assertEqual(called, 0)
| 952 |
cryptotrader/tests/trader_tests.py
|
mkuenzie/cryptotrader
| 0 |
2025929
|
from cryptotrader.cryptotrader import Cryptotrader
from cryptotrader.strategy import BbandsStrategy, BbandsDIStrategy
from datetime import timedelta, datetime
def write_markdown(trades, initial_wallet, usd_wallet, crypto_wallet):
mdfile = trader.generate_markdown(trades, initial_wallet, usd_wallet, crypto_wallet)
f = open('readme.md', 'w')
f.write(mdfile)
f.close()
proximity = 0.25
trader = Cryptotrader(market='BTC-USD', strategy=BbandsStrategy(proximity=proximity, stddevs=2), fee=0.0035, interval=timedelta(hours=1))
trader.refresh()
trades = trader.test2(start_at=datetime(2021, 1, 25, 12, 0, 0))
total = len(trades)
wallet = 100
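# Illustrative arithmetic (not part of the original script): a buy at 100 and a
# sell at 110 is a 10% gain, so a 100-unit wallet compounds to 110; the loop
# below applies the same gain/loss compounding to each buy/sell pair.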
for i in range(0, total-2, 2):
buy = trades[i]
sell = trades[i+1]
buy_price = buy['price']
sell_price = sell['price']
if buy_price <= sell_price:
gain = ((sell_price / buy_price) - 1) * 100
wallet = wallet + (wallet * (gain/100))
print(trades[i])
print(trades[i+1])
print('+ %.2f %%' % gain)
else:
loss = (1 - (sell_price / buy_price)) * 100
wallet = wallet - (wallet * (loss/100))
print(trades[i])
print(trades[i + 1])
print('- %.2f %%' % loss)
print(wallet)
| 1,220 |
9_Input_and_Output_example_sols/create_panda.py
|
Maruja/Maruja-ILAS-Python
| 0 |
2025663
|
import numpy as np
import pandas as pd
# generate random data
data = np.random.randint(0, high=10, size=(4, 4))
print(data)
# first column is the row number 0-3
data[:,0]=np.arange(4)
headers = ["score1", "score2", "score3"]
print(data)
data_frame = pd.DataFrame(data=data[1:,1:], # values
index=data[1:,0], # 1st column as index
columns = headers, # strings in the list called headers as the column names
#columns=data[0,1:], # 1st row as the column names
)
print(data_frame)
| 538 |
jsonschema/tests/compat.py
|
NeodomoEwe/AWS-lambda-alexa-smart-home
| 2 |
2022935
|
import sys
if sys.version_info[:2] < (2, 7): # pragma: no cover
import unittest2 as unittest
else:
import unittest
try:
from unittest import mock
except ImportError:
import mock
# flake8: noqa
| 230 |
webserver/model.py
|
ArseniKavalchuk/ffmpeg-hls-view
| 1 |
2024490
|
from dataclasses import dataclass
from enum import Enum
class Status(Enum):
OFF = 0
ON = 1
@dataclass
class ChannelItem:
channel_id: int
description: str
rtsp_link: str
status: Status
| 210 |
lab15/account/forms.py
|
alejo8591/angular-labs
| 0 |
2025202
|
from django import forms
from django.contrib.auth.models import User
from account.models import UserProfile
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username', 'email', 'password',)
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('user_profile_website', 'user_profile_picture', 'user_profile_identification',)
exclude = ('user_profile',)
| 474 |
test/test_blueprint2CSAR.py
|
SODALITE-EU/iac-blueprint-builder
| 0 |
2025681
|
import pytest
from pathlib import Path
import shutil
import yaml
import src.blueprint2CSAR as blueprint2CSAR
class TestValidate:
def test_not_meta_multiple_yaml(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-no-meta-multiple-yaml'
assert not blueprint2CSAR.validate_csar(csar_path)
with pytest.raises(blueprint2CSAR.MultipleDefinitionsFoundException):
blueprint2CSAR.validate_csar(csar_path, raise_exceptions=True)
def test_no_meta_no_entry_definitions(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-no-meta-no-entry-def'
assert not blueprint2CSAR.validate_csar(csar_path)
with pytest.raises(blueprint2CSAR.NoEntryDefinitionsFoundException):
blueprint2CSAR.validate_csar(csar_path, raise_exceptions=True)
def test_no_meta_no_meta_section(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-no-meta-no-meta-section'
assert not blueprint2CSAR.validate_csar(csar_path)
with pytest.raises(blueprint2CSAR.NoMetadataExcepion):
blueprint2CSAR.validate_csar(csar_path, raise_exceptions=True)
def test_no_meta_success(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-no-meta-ok'
assert blueprint2CSAR.validate_csar(csar_path)
# it should not fail
blueprint2CSAR.validate_csar(csar_path, raise_exceptions=True)
def test_meta_section_missing_key(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-no-meta-missing-key'
assert not blueprint2CSAR.validate_csar(csar_path)
with pytest.raises(blueprint2CSAR.BrokenMetadataException):
blueprint2CSAR.validate_csar(csar_path, raise_exceptions=True)
def test_broken_metadata_file(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-broken-meta'
assert not blueprint2CSAR.validate_csar(csar_path)
with pytest.raises(blueprint2CSAR.BrokenMetadataException):
blueprint2CSAR.validate_csar(csar_path, raise_exceptions=True)
def test_no_entry_definitions(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-no-entry-def'
assert not blueprint2CSAR.validate_csar(csar_path)
with pytest.raises(blueprint2CSAR.NoEntryDefinitionsFoundException):
blueprint2CSAR.validate_csar(csar_path, raise_exceptions=True)
def test_no_other_definitions(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-no-other-def'
assert not blueprint2CSAR.validate_csar(csar_path)
with pytest.raises(blueprint2CSAR.NoOtherDefinitionsFoundException):
blueprint2CSAR.validate_csar(csar_path, raise_exceptions=True)
def test_success(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-ok'
assert blueprint2CSAR.validate_csar(csar_path)
# it should not fail
blueprint2CSAR.validate_csar(csar_path, raise_exceptions=True)
class TestEntryDefinitions:
def test_no_meta_no_yaml(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-no-meta-no-entry-def'
assert blueprint2CSAR.entry_definitions(csar_path) is None
def test_no_meta_success(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-no-meta-ok'
assert blueprint2CSAR.entry_definitions(csar_path) == 'service.yaml'
def test_meta_no_entry_definitions(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-broken-meta'
assert blueprint2CSAR.entry_definitions(csar_path) is None
def test_meta_success(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-ok'
assert blueprint2CSAR.entry_definitions(csar_path) == 'service.yaml'
class TestToCsarSimple:
def test_CSAR_not_valid(self, CSAR_unpacked):
csar_path = CSAR_unpacked / 'CSAR-broken-meta'
dst_path = CSAR_unpacked / 'CSAR-dummy'
assert not blueprint2CSAR.to_CSAR_simple(csar_path, dst_path, raise_exceptions=False)
with pytest.raises(Exception):
blueprint2CSAR.to_CSAR_simple(csar_path, dst_path, raise_exceptions=True)
def test_success(self, CSAR_unpacked: Path):
csar_path = CSAR_unpacked / 'CSAR-ok'
dst_path = CSAR_unpacked / 'CSAR-dummy'
dst_path_with_zip = Path(str(dst_path) + '.zip')
assert blueprint2CSAR.to_CSAR_simple(csar_path, dst_path, raise_exceptions=False)
assert dst_path_with_zip.exists()
dst_path_with_zip.unlink()
# should not fail
blueprint2CSAR.to_CSAR_simple(csar_path, dst_path, raise_exceptions=True)
assert dst_path_with_zip.exists()
dst_path_with_zip.unlink()
class TestToCsar:
def test_no_meta_multiple_yaml(self, get_workdir_path, CSAR_unpacked):
blueprint_path = CSAR_unpacked / 'CSAR-no-meta-multiple-yaml'
with pytest.raises(blueprint2CSAR.MultipleDefinitionsFoundException):
blueprint2CSAR.to_CSAR(blueprint_name='some_blueprint',
blueprint_dir=blueprint_path,
no_meta=True,
workdir=get_workdir_path)
def test_no_meta_success(self, get_workdir_path, CSAR_unpacked):
blueprint_path = CSAR_unpacked / 'CSAR-no-meta-ok'
workdir = get_workdir_path
name = 'some_blueprint'
output = workdir / f'CSAR-{name}'
blueprint2CSAR.to_CSAR(blueprint_name=name,
blueprint_dir=blueprint_path,
no_meta=True,
workdir=workdir,
output=output)
def test_meta_no_entry_definitions(self, get_workdir_path, CSAR_unpacked):
blueprint_path = CSAR_unpacked / 'CSAR-no-entry-def'
with pytest.raises(FileNotFoundError):
blueprint2CSAR.to_CSAR(blueprint_name='some_blueprint',
blueprint_dir=blueprint_path,
entry_definitions=Path('service.yaml'),
workdir=get_workdir_path)
def test_wrong_tosca_version(self, get_workdir_path, CSAR_unpacked):
blueprint_path = CSAR_unpacked / 'CSAR-wrong-tosca-version'
with pytest.raises(TypeError):
blueprint2CSAR.to_CSAR(blueprint_name='some_blueprint',
blueprint_dir=blueprint_path,
entry_definitions=Path('service.yaml'),
workdir=get_workdir_path)
def test_no_other_definition(self, get_workdir_path, CSAR_unpacked):
blueprint_path = CSAR_unpacked / 'CSAR-wrong-other-def'
with pytest.raises(TypeError):
blueprint2CSAR.to_CSAR(blueprint_name='some_blueprint',
blueprint_dir=blueprint_path,
entry_definitions=Path('service.yaml'),
other_definitions=[Path('other_def.yaml')],
workdir=get_workdir_path)
def test_success(self, get_workdir_path: Path, CSAR_unpacked):
workdir = get_workdir_path
name = 'some_blueprint'
output = workdir / f'CSAR-{name}'
output_with_zip = Path(f'{output}.zip')
unpacked = workdir / 'my_csar_unpacked'
blueprint_path = CSAR_unpacked / 'CSAR-ok'
blueprint2CSAR.to_CSAR(blueprint_name=name,
blueprint_dir=blueprint_path,
entry_definitions=Path('service.yaml'),
workdir=workdir,
output=output)
assert output_with_zip.exists()
shutil.unpack_archive(str(output_with_zip.absolute()), extract_dir=str(unpacked.absolute()))
assert (unpacked / 'TOSCA-Metadata').is_dir()
metadata_path = unpacked / 'TOSCA-Metadata' / 'TOSCA.meta'
assert metadata_path.exists()
metadata = yaml.load(metadata_path.open('r'), Loader=yaml.SafeLoader)
assert isinstance(metadata, dict)
assert all(key in metadata.keys() for key in ['TOSCA-Meta-File-Version', 'CSAR-Version',
'Created-By', 'Entry-Definitions',
'CSAR-name', 'CSAR-timestamp'])
| 8,354 |
src/twisted/trial/test/novars.py
|
giadram/twisted
| 4,612 |
2025660
|
# fodder for test_script, which parses files for emacs local variable
# declarations. This one is supposed to have none.
# The class declaration is irrelevant
class Bar:
pass
| 182 |
eden/integration/hg/status_deadlock_test.py
|
jmswen/eden
| 0 |
2025884
|
#!/usr/bin/env python3
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import logging
import os
from typing import Dict, List
from eden.integration.lib.hgrepo import HgRepository
from eden.integration.lib.util import gen_tree
from .lib.hg_extension_test_base import EdenHgTestCase, hg_test
@hg_test
class StatusDeadlockTest(EdenHgTestCase):
"""
Test running an "hg status" command that needs to import many directories
and .gitignore files.
This attempts to exercise a deadlock issue we had in the past where all of
the EdenServer thread pool threads would be blocked waiting on operations
that needed a thread from this pool to complete. Eden wouldn't be able to
make forward progress from this state.
"""
commit1: str
expected_status: Dict[str, str] = {}
def edenfs_logging_settings(self) -> Dict[str, str]:
levels = {"eden": "DBG2"}
if logging.getLogger().getEffectiveLevel() <= logging.DEBUG:
levels["eden.fs.store.hg"] = "DBG9"
return levels
def populate_backing_repo(self, repo: HgRepository) -> None:
logging.debug("== populate_backing_repo")
# By default repo.write_file() also calls "hg add" on the path.
# Unfortunately "hg add" is really slow. Disable calling it one at a
# time on these files as we write them. We'll make a single "hg add"
# call at the end with all paths in a single command.
new_files = []
fanouts = [4, 4, 4, 4]
def populate_dir(path: str) -> None:
logging.debug("populate %s", path)
test_path = os.path.join(path, "test.txt")
repo.write_file(test_path, f"test\n{path}\n", add=False)
new_files.append(test_path)
gitignore_path = os.path.join(path, ".gitignore")
gitignore_contents = f"*.log\n/{path}/foo.txt\n"
repo.write_file(gitignore_path, gitignore_contents, add=False)
new_files.append(gitignore_path)
gen_tree("src", fanouts, populate_dir, populate_dir)
self._hg_add_many(repo, new_files)
self.commit1 = repo.commit("Initial commit.")
logging.debug("== created initial commit")
new_files = []
def create_new_file(path: str) -> None:
logging.debug("add new file in %s", path)
new_path = os.path.join(path, "new.txt")
repo.write_file(new_path, "new\n", add=False)
new_files.append(new_path)
self.expected_status[new_path] = "?"
gen_tree("src", fanouts, create_new_file)
self._hg_add_many(repo, new_files)
self.commit2 = repo.commit("Second commit.")
logging.debug("== created second commit")
def _hg_add_many(self, repo: HgRepository, paths: List[str]) -> None:
# Call "hg add" with at most chunk_size files at a time
chunk_size = 250
for n in range(0, len(paths), chunk_size):
logging.debug("= add %d/%d", n, len(paths))
repo.add_files(paths[n : n + chunk_size])
def test(self) -> None:
# Reset our working directory parent from commit2 to commit1.
# This forces us to have an unclean status, but eden won't need
# to load the source control state yet. It won't load the affected
# trees until we run "hg status" below.
self.hg("reset", "--keep", self.commit1)
# Now run "hg status".
# This will cause eden to import all of the trees and .gitignore
# files as it performs the status operation.
logging.debug("== checking status")
self.assert_status(self.expected_status)
| 3,905 |
rrt_exploration_turtlebot/scripts/rrtFrontier/functions.py
|
kimactor/automatic_explore
| 0 |
2024725
|
#--------Include modules---------------
from copy import copy
import rospy
from visualization_msgs.msg import Marker
from std_msgs.msg import String
from geometry_msgs.msg import Point
from os import system
from random import random
from numpy import array
from numpy import floor,ceil
from numpy import delete
from numpy import concatenate
from numpy import vstack
from numpy import linalg as LA
from math import copysign
from numpy import where,inf
from numpy import logical_and as AND
from numpy import all as All
from scipy.optimize import minimize
# Nearest function-------------------------------------
def Nearest(V,x):
n=1000000
i=0
for i in range(0,V.shape[0]):
n1=LA.norm(V[i,:]-x)
if (n1<n):
n=n1
result=i
return result
# Steer function-------------------------------------
def myfun(x,x0,x1,eta):
X=array([x[0],x[1]])
return LA.norm(X-x1)
def Steer(x0,x1,eta):
def consFun(x):
X=array([x[0],x[1]])
x0=p[0]
eta=p[2]
return -LA.norm(X-x0)+eta
cons = ({'type': 'ineq',
'fun' : consFun })
p=(x0,x1,eta)
res = minimize(myfun,[x0[0],x0[1]],args=p,constraints=cons, method='COBYLA',options={'disp': False})
xnew=array([res.x[0],res.x[1]])
return xnew
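# Rough usage sketch (hypothetical values, not from the original code):
# Steer(array([0., 0.]), array([3., 4.]), 1.0) returns the point closest to x1
# within distance eta of x0, i.e. approximately [0.6, 0.8].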
# gridValue function-------------------------------------
def gridValue(mapData,Xp):
resolution=mapData.info.resolution
Xstartx=mapData.info.origin.position.x
Xstarty=mapData.info.origin.position.y
width=mapData.info.width
Data=mapData.data
# returns grid value at "Xp" location
#map data: 100 occupied -1 unknown 0 free
index=( floor((Xp[1]-Xstarty)/resolution)*width)+( floor((Xp[0]-Xstartx)/resolution) )
if int(index) < len(Data):
return Data[int(index)]
else:
return 100
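# Worked example (hypothetical map, not an original comment): with resolution=0.05,
# origin=(0, 0) and width=100, the point Xp=(1.0, 2.0) maps to index
# floor(2.0/0.05)*100 + floor(1.0/0.05) = 40*100 + 20 = 4020.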
# gridCheck function-------------------------------------
def gridCheck(mapData,Xp):
resolution=mapData.info.resolution
Xstartx=mapData.info.origin.position.x
Xstarty=mapData.info.origin.position.y
width=mapData.info.width
Data=mapData.data
# check if points are in freespace or not
# c=1 means grid cell occupied
# c=0 means grid cell is free
index=( floor((Xp[1]-Xstarty)/resolution)*width)+( floor((Xp[0]-Xstartx)/resolution) )
c=1
if int(index) < len(Data):
if Data[int(index)]==0:
c=0
else:
c=1
return c
# ObstacleFree function-------------------------------------
def ObstacleFree(xnear,xnew,mapsub):
out=1
rez=mapsub.info.resolution*0.5
stepz=int(ceil(LA.norm(xnew-xnear))/rez)
xi=xnear
for c in range(0,stepz):
xi=Steer(xi,xnew,rez)
if (gridCheck(mapsub,xi) !=0):
out=0
if (gridCheck(mapsub,xnew) !=0):
out=0
return out
# Find function-------------------------------------
def Find(E,x):
if not All(array([E.shape]).shape==array([1,1])):
yy=E==x[1]
xx=E==x[0]
m=AND(yy[:,3], xx[:,2])
m=where(m==True)
if len(m[0])>0:
return m[0][0]
else:
return 0
# Near function-------------------------------------
def Near(V,xnew,r):
xnear=array([0,0])
i=0
for i in range(0,V.shape[0]):
n=LA.norm(V[i,:]-xnew)
if (n<=r):
p=V[i,:]
xnear=vstack((xnear,p))
xnear=delete(xnear, (0), axis=0)
return xnear
# Cost function-------------------------------------
def Cost(E,xn):
x=xn
if All(array([E.shape]).shape==array([1,1])):
c=0
else:
xinit=E[0,0:2]
c=0
while not All(x==xinit):
xp=E[Find(E,x),0:2]
c+=LA.norm(x-xp)
x=xp
return c
# prepEdges function
def prepEdges(E):
p=Point()
pl=[]
if not All(array([E.shape]).shape==array([1,1])):
Ex=delete(E, (1), axis=1)
Ex=delete(Ex, (2), axis=1)
Ey=delete(E, (0), axis=1)
Ey=delete(Ey, (1), axis=1)
pxs=Ex.flatten()
pys=Ey.flatten()
j=0
for j in range(0,pys.shape[0]):
p.x=pxs[j]
p.y=pys[j]
pl.append(copy(p))
return pl
# Assigner 3 robots------------------------------------------------------------------------------------------------------------------------
def assigner3(goal,x_new,client1,client2,client3,listener):
clientstate1=client1.get_state()
clientstate2=client2.get_state()
clientstate3=client3.get_state()
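# The numeric states below appear to be actionlib GoalStatus codes
# (2 PREEMPTED, 3 SUCCEEDED, 4 ABORTED, 5 REJECTED, 9 LOST): any of these
# terminal states means the previous goal has finished, so that robot is available.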
if clientstate1==2 or clientstate1==3 or clientstate1==4 or clientstate1==5 or clientstate1==9:
aval1=1
else:
aval1=10000000
if clientstate2==2 or clientstate2==3 or clientstate2==4 or clientstate2==5 or clientstate2==9:
aval2=1
else:
aval2=10000000
if clientstate3==2 or clientstate3==3 or clientstate3==4 or clientstate3==5 or clientstate3==9:
aval3=1
else:
aval3=10000000
(trans1,rot) = listener.lookupTransform('/robot_1/map', '/robot_1/base_link', rospy.Time(0))
(trans2,rot) = listener.lookupTransform('/robot_1/map', '/robot_2/base_link', rospy.Time(0))
(trans3,rot) = listener.lookupTransform('/robot_1/map', '/robot_3/base_link', rospy.Time(0))
dist1=LA.norm(array([ trans1[0],trans1[1] ])-x_new)*aval1
dist2=LA.norm(array([ trans2[0],trans2[1] ])-x_new)*aval2
dist3=LA.norm(array([ trans3[0],trans3[1] ])-x_new)*aval3
alldist=[dist1,dist2,dist3]
# if no robot is available wait
while aval1==aval2==aval3==10000000:
clientstate1=client1.get_state()
clientstate2=client2.get_state()
clientstate3=client3.get_state()
if clientstate1==2 or clientstate1==3 or clientstate1==4 or clientstate1==5 or clientstate1==9:
aval1=1
else:
aval1=10000000
if clientstate2==2 or clientstate2==3 or clientstate2==4 or clientstate2==5 or clientstate2==9:
aval2=1
else:
aval2=10000000
if clientstate3==2 or clientstate3==3 or clientstate3==4 or clientstate3==5 or clientstate3==9:
aval3=1
else:
aval3=10000000
goal.target_pose.pose.position.x=x_new[0]
goal.target_pose.pose.position.y=x_new[1]
goal.target_pose.pose.orientation.w = 1.0
#send command to the lowest cost available robot
if min(alldist)==dist1 and aval1==1:
client1.send_goal(goal)
#client1.wait_for_result()
#client1.get_result()
elif min(alldist)==dist2 and aval2==1:
client2.send_goal(goal)
#client2.wait_for_result()
#client2.get_result()
elif min(alldist)==dist3 and aval3==1:
client3.send_goal(goal)
#client3.wait_for_result()
#client3.get_result()
return 0
# Assigner 1 robots------------------------------------------------------------------------------------------------------------------------
def assigner1(goal,x_new,client1,listener):
#client1.send_goal(goal)
#client1.wait_for_result()
#client1.get_result()
clientstate1=client1.get_state()
if clientstate1==2 or clientstate1==3 or clientstate1==4 or clientstate1==5 or clientstate1==9:
client1.send_goal(goal)
return 0
# Assigner 1 robots opecv detector------------------------------------------------------------------------------------------------------------------------
def assigner1new(goal,x_new,client1,listener):
goal.target_pose.pose.position.x=x_new[0]
goal.target_pose.pose.position.y=x_new[1]
goal.target_pose.pose.orientation.w = 1.0
clientstate1=client1.get_state()
if clientstate1==2 or clientstate1==3 or clientstate1==4 or clientstate1==5 or clientstate1==9:
client1.send_goal(goal)
return 0
#-------------RRT frontier
# ObstacleFree2 function-------------------------------------
def ObstacleFree2(xnear,xnew,mapsub):
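# Return semantics (summarising the logic below, not an original comment):
# 1 = path free, 0 = path crosses an occupied cell, -1 = path crosses unknown space.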
rez=mapsub.info.resolution*0.5
stepz=int(ceil(LA.norm(xnew-xnear))/rez)
xi=xnear
obs=0
unk=0
for c in range(0,stepz):
xi=Steer(xi,xnew,rez)
if (gridValue(mapsub,xi) ==100):
obs=1
if (gridValue(mapsub,xi) ==-1):
unk=1
if (gridValue(mapsub,xnew) ==100):
obs=1
if (gridValue(mapsub,xi) ==-1):
unk=1
if unk==1:
out=-1
if obs==1:
out=0
if obs!=1 and unk!=1:
out=1
#print "obs= ",obs," unk= ",unk," out= ",out
#raw_input(" ")
return out
# assigner1rrtfront(goal,frontiers,client1,listener) ----------------------------------------------------
def Nearest2(V,x):
n=inf
result=0
for i in range(0,len(V)):
n1=LA.norm(V[i]-x)
if (n1<n):
n=n1
result=i
return result
def assigner1rrtfront(goal,frontiers,client1,xp):
clientstate1=client1.get_state()
if clientstate1==2 or clientstate1==3 or clientstate1==4 or clientstate1==5 or clientstate1==9:
if len(frontiers)>0:
row=Nearest2(frontiers,xp)
nextfrontier=frontiers[row]
frontiers=delete(frontiers, (row), axis=0)
goal.target_pose.pose.position.x=nextfrontier[0]
goal.target_pose.pose.position.y=nextfrontier[1]
goal.target_pose.pose.orientation.w = 1.0
print("exploration goal sent")
client1.send_goal(goal)
return frontiers
| 8,778 |
app/apizen/method.py
|
b1ackmatrix/py_func2webapi
| 2 |
2023926
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/5/19 上午9:33
# @Author : Matrix
# @Site :
# @File : controller.py
# @Software: PyCharm
from functools import wraps
from .schema import convert
from .version import allversion
from inspect import signature, Parameter
from .exceptions import ApiSysExceptions
__author__ = 'blackmatrix'
"""
-------------------------------
ApiZen: exception checking and execution of API handler methods
-------------------------------
Applies to: Flask, Tornado
"""
def apiconfig(raw_resp=False):
"""
API configuration decorator.
:param raw_resp: whether to keep the raw response format; by default it is not kept.
:return:
"""
def _apiconfig(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.__rawresp__ = raw_resp
return wrapper
return _apiconfig
# Fetch the API handler function and run the related exception checks
def get_method(version, api_method, http_method):
"""
Fetch the API handler function and run the related exception checks.
:param version: API version
:param api_method: method name
:param http_method: HTTP request method
:return:
"""
# Check the version number
if version not in allversion:
raise ApiSysExceptions.unsupported_version
# Check whether this version has been disabled
elif not allversion[version].get('enable', True):
raise ApiSysExceptions.version_stop
methods = getattr(allversion[version]['methods'], 'api_methods')
# Check whether the method name exists
if api_method not in methods:
raise ApiSysExceptions.invalid_method
# Check whether the method has been disabled
elif not methods[api_method].get('enable', True):
raise ApiSysExceptions.api_stop
# Check whether the method may be called with this HTTP request method
elif http_method.lower() not in methods[api_method].get('methods', ['get', 'post']):
raise ApiSysExceptions.not_allowed_request
# Check whether the function is callable
elif not callable(methods[api_method].get('func')):
raise ApiSysExceptions.error_api_config
_func = methods[api_method].get('func')
if not hasattr(_func, '__rawresp__'):
_func.__rawresp__ = False
return _func
# Run the API handler method, with exception handling
def run_method(api_method, request_params):
# All of the arguments that will ultimately be passed to the handler method
func_args = {}
# Get the parameters of the handler function's signature
api_method_params = signature(api_method).parameters
for k, v in api_method_params.items():
if str(v.kind) == 'VAR_POSITIONAL':
raise ApiSysExceptions.error_api_config
elif str(v.kind) in ('POSITIONAL_OR_KEYWORD', 'KEYWORD_ONLY'):
if k not in request_params:
if v.default is Parameter.empty:
missing_arguments = ApiSysExceptions.missing_arguments
missing_arguments.err_msg = '{0}:{1}'.format(missing_arguments.err_msg, k)
raise missing_arguments
func_args[k] = convert(k, v.default, v.default, v.annotation)
else:
func_args[k] = convert(k, request_params.get(k), v.default, v.annotation)
elif str(v.kind) == 'VAR_KEYWORD':
func_args.update({k: v for k, v in request_params.items()
if k not in api_method_params.keys()})
return api_method(**func_args)
| 3,011 |
articles/views.py
|
amehdinp/first
| 0 |
2024646
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from . import models,forms
# Create your views here.
def articles_list(request):
articles1 = models.Article.objects.all()
arg={'articles':articles1}
return render(request, 'articles/articles_list.html',arg)
def creat_article(request):
if request.method=='POST':
form=forms.CreatArticle(request.POST,request.FILES)
if form.is_valid():
instance = form.save(commit = False)
instance.author = request.user
instance.save()
return redirect('articles:list')
else:
form=forms.CreatArticle()
return render(request,'articles/creat_article.html',{'form':form})
# return render(request,'articles/creat_article.html')
def details(request,slug):
article = models.Article.objects.get(slug=slug)
return render(request,'articles/article_detail.html',{'article':article})
| 963 |
tests/api/v1/test_permission.py
|
geru-br/keyloop
| 1 |
2024265
|
import pytest
@pytest.fixture
def permission_payload():
return {
"data": {
"type": "permission",
"attributes": {
"name": "permission_a",
"description": "Permission for resource A"
}
}
}
def test_create_permission(pyramid_app, permission_payload):
res = pyramid_app.post_json("/api/v1/realms/REALM/permissions", permission_payload,
content_type="application/vnd.api+json",
status=200)
assert res.content_type == "application/vnd.api+json"
assert res.json == {
"data": {
"id": res.json['data']['id'],
"type": "permission",
"attributes": {
"name": "permission_a",
"description": "Permission for resource A"
}
},
}
def test_create_permission_with_existent_name(pyramid_app, permission_payload, fake_permission_class):
pyramid_app.post_json("/api/v1/realms/REALM/permissions", permission_payload,
content_type="application/vnd.api+json",
status=200)
res = pyramid_app.post_json("/api/v1/realms/REALM/permissions", permission_payload,
content_type="application/vnd.api+json",
status=409)
assert res.content_type == "application/vnd.api+json"
assert res.json == {
"status": "error",
"errors": [
{
"location": "body",
"name": "name",
"description": "Existent permission with name: permission_a"
}
]
}
def test_create_permission_with_invalid_data(pyramid_app, permission_payload):
permission_payload['data']['attributes']['description'] = ''
res = pyramid_app.post_json("/api/v1/realms/REALM/permissions", permission_payload,
content_type="application/vnd.api+json",
status=400)
assert res.content_type == "application/vnd.api+json"
assert res.json == {
"status": "error",
"errors": [
{
"location": "body",
"name": "errors",
"description": [
{
"detail": "Shorter than minimum length 1.",
"source": {"pointer": "/data/attributes/description"}
}
]
}
]
}
def test_create_permission_with_invalid_realm(pyramid_app, permission_payload):
res = pyramid_app.post_json("/api/v1/realms/INVALID-REALM/permissions", permission_payload,
content_type="application/vnd.api+json",
status=404)
assert res.content_type == "application/vnd.api+json"
assert res.json == {
"status": "error",
"errors": [
{
"location": "path",
"name": "realm_slug",
"description": "Invalid realm"
}
]
}
def test_get_permissions_empty_list(pyramid_app, permission_payload, fake_permission_class):
res = pyramid_app.get("/api/v1/realms/REALM/permissions", params={'page[number]': 2, 'page[size]': 30})
assert res.content_type == "application/vnd.api+json"
assert res.json == {
"data": [],
"links": {
"self": "http://localhost/api/v1/realms/REALM/permissions?page%5Bnumber%5D=2&page%5Bsize%5D=30",
"first": "http://localhost/api/v1/realms/REALM/permissions?page[number]=1&page[size]=30",
"prev": "http://localhost/api/v1/realms/REALM/permissions?page[number]=1&page[size]=30",
"next": None,
"last": "http://localhost/api/v1/realms/REALM/permissions?page[number]=0&page[size]=30"
},
"meta": {
"count": 0,
"total_pages": 0
}
}
def test_get_permissions(pyramid_app, permission_payload, fake_permission_class):
pyramid_app.post_json("/api/v1/realms/REALM/permissions", permission_payload,
content_type="application/vnd.api+json",
status=200)
res = pyramid_app.get("/api/v1/realms/REALM/permissions", params={'page[number]': 1, 'page[size]': 30})
permission_id = str(next(iter(fake_permission_class.PERMISSIONS.keys())))
assert res.content_type == "application/vnd.api+json"
assert res.json == {
"data": [
{
"type": "permission",
"attributes": {
"name": "permission_a",
"description": "Permission for resource A"
},
"id": permission_id
}
],
"links": {
"self": "http://localhost/api/v1/realms/REALM/permissions?page%5Bnumber%5D=1&page%5Bsize%5D=30",
"first": "http://localhost/api/v1/realms/REALM/permissions?page[number]=1&page[size]=30",
"prev": None,
"next": "http://localhost/api/v1/realms/REALM/permissions?page[number]=1&page[size]=30",
"last": "http://localhost/api/v1/realms/REALM/permissions?page[number]=1&page[size]=30"
},
"meta": {
"count": 1,
"total_pages": 1
}
}
def test_get_permissions_negative_page(pyramid_app, permission_payload):
res = pyramid_app.get("/api/v1/realms/REALM/permissions", params={'page[number]': -1, 'page[size]': 30}, status=400)
assert res.content_type == "application/vnd.api+json"
assert res.json == {
"status": "error",
"errors": [
{
"location": "querystring",
"name": "page[number]",
"description": [
"Invalid value."
]
}
]
}
def test_get_permissions_negative_limit(pyramid_app, permission_payload):
res = pyramid_app.get("/api/v1/realms/REALM/permissions", params={'page[number]': 1, 'page[size]': -30}, status=400)
assert res.content_type == "application/vnd.api+json"
assert res.json == {
"status": "error",
"errors": [
{
"location": "querystring",
"name": "page[size]",
"description": [
"Invalid value."
]
}
]
}
| 6,461 |
AvgTrueRangeGit.py
|
adamrvfisher/TechnicalAnalysisLibrary
| 3 |
2025711
|
# -*- coding: utf-8 -*-
"""
@author: <NAME> - https://www.linkedin.com/in/adamrvfisher/
"""
#This is a technical analysis tool
#Import modules
from YahooGrabber import YahooGrabber
import numpy as np
#Input ticker
ticker = 'SPY'
#Data request
s = YahooGrabber(ticker)
#Variable assignment
window = 14
#ATR calculation
s['Method1'] = s['High'] - s['Low']
s['Method2'] = abs((s['High'] - s['Close'].shift(1)))
s['Method3'] = abs((s['Low'] - s['Close'].shift(1)))
s['Method1'] = s['Method1'].fillna(0)
s['Method2'] = s['Method2'].fillna(0)
s['Method3'] = s['Method3'].fillna(0)
s['TrueRange'] = s[['Method1','Method2','Method3']].max(axis = 1)
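# Worked example (illustrative numbers, not part of the original file): with
# High=102, Low=98 and a prior Close of 97, Method1=4, Method2=5, Method3=1,
# so TrueRange = max of the three = 5.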
s['AverageTrueRange'] = s['TrueRange'].rolling(window = window,
center=False).mean()
#Trim out the window period for graph
trim = (window * 2 - 1)
s = s[trim:]
#Graphical display
s[['AverageTrueRange']].plot(grid=True, figsize=(8,3))
| 913 |
squid/models/commands.py
|
Friskytool/command-handler
| 0 |
2025436
|
import inspect
from typing import TYPE_CHECKING, List, Optional
from squid.models.enums import ApplicationCommandType
from squid.models.enums import ApplicationCommandOptionType
from squid.models.interaction import ApplicationCommandOption
if TYPE_CHECKING:
from squid.bot.command import SquidCommand
class CreateApplicationCommand(object):
def __init__(
self,
name: str,
description: str,
options: List["ApplicationCommandOption"],
default_permissions: Optional[bool] = None,
type: Optional[ApplicationCommandType] = ApplicationCommandType.chat_input,
):
self.name = name
self.description = description
self.options = options
self.default_permissions = default_permissions
self.type = type
def __repr__(self):
return f"<CreateApplicationCommand name={self.name!r} description={self.description!r} options={self.options!r} default_permissions={self.default_permissions!r} type={self.type!r}>"
def get_annotated_type(
typ: object = str, name: str = None
) -> ApplicationCommandOptionType:
if typ == int:
return ApplicationCommandOptionType.number
elif typ == bool:
return ApplicationCommandOptionType.boolean
elif typ == str:
return ApplicationCommandOptionType.string
if name:
try:
return getattr(ApplicationCommandOptionType, name.lower())
except AttributeError:
pass
raise ValueError(f"Unknown type ({typ}): {name}")
@classmethod
def from_command(cls, cmd: "SquidCommand", type=ApplicationCommandType.chat_input):
options = []
for c in cmd.commands:
if c.commands: # is a sub command group
ty = ApplicationCommandOptionType.sub_command_group
else:
ty = ApplicationCommandOptionType.sub_command
options.append(cls.from_command(c, type=ty))
# adding arguments for base command
argspec = inspect.getfullargspec(cmd.callback)
if cmd.cog:
argspec.args.remove("self")
if "ctx" not in argspec.args:
raise ValueError("Callback must have a ctx argument")
else:
argspec.args.remove("ctx")
for arg in argspec.args:
options.append(
ApplicationCommandOption(
state=None,
data=dict(
name=arg,
description="Enter the value for the argument",
type=cls.get_annotated_type(
argspec.annotations.get(arg, str), arg
),
required=arg in argspec.args[:len(argspec.args) - len(argspec.defaults or [])],
options=[],
),
)
)
return cls(
name=cmd.name,
description=cmd.help,
options=options,
default_permissions=cmd.enabled,
type=type,
)
def serialize(self):
return {
"name": self.name,
"description": self.description,
"options": [option.serialize() for option in self.options],
"default_permissions": self.default_permissions,
"type": self.type.value,
}
| 3,478 |
101-150/131.py
|
yshshadow/Leetcode
| 0 |
2025566
|
# Given a string s, partition s such that every substring of the partition is a palindrome.
#
# Return all possible palindrome partitioning of s.
#
# Example:
#
# Input: "aab"
# Output:
# [
# ["aa","b"],
# ["a","a","b"]
# ]
class Solution(object):
def partition(self, s):
"""
:type s: str
:rtype: List[List[str]]
"""
res = []
self.dfs(s, [], res)
return res
def dfs(self, s, path, res):
if not s:
res.append(path)
return
for i in range(1, len(s) + 1):
if self.isPal(s[:i]):
self.dfs(s[i:], path + [s[:i]], res)
def isPal(self, s):
return s == s[::-1]
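# Quick usage sketch (illustrative, not part of the original file):
# Solution().partition("aab") returns [['a', 'a', 'b'], ['aa', 'b']]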
| 702 |
Project-2/Sarang Gupta/JobComparator.py
|
Mercury1508/IEEE-LEAD-2.0
| 1 |
2024364
|
import requests
from bs4 import BeautifulSoup
import tkinter as tk
from tkinter import *
root = tk.Tk()
root.grid_columnconfigure((0,1), weight=1)
root.configure(background="#10D5E2")
root.geometry("800x600")
root.title("JOB COMPARATOR ")
root.configure(borderwidth="10",relief="sunken",background="#A58FAA",cursor="arrow")
root.resizable(False,False)
Label1 = tk.Label(root)
_img1 = PhotoImage(file="webscrapper.png")
Label1.configure(image=_img1)
Label1.configure(text='''Label''')
Label1.grid(row=1,column=0,pady=2)
label_pro=tk.Label(root,text="Job Name ",width=20,bg="#72147E",fg="white",padx=10,font=("Times",18),relief="raised",pady=10)
label_pro.grid(row=7,column=0)
label_pro=tk.Label(root,text=" Location ",width=20,bg="#72147E",fg="white",padx=10,font=("Times",18),relief="raised",pady=10)
label_pro.grid(row=8,column=0)
def myclass():
show(job.get(),location.get())
job= tk.Entry(root,width=40,bg="#687980",fg="black",font=("Times",18),relief="sunken")
job.grid(row=7,column=2,columnspan=1)
location= tk.Entry(root,width=40,bg="#687980",fg="black",font=("Times",18),relief="sunken")
location.grid(row=8,column=2,columnspan=1)
button_find=tk.Button(root,text=" Compare ",bg="#464F41",fg="white",width=8,pady=5,font=("lob ",18),command=myclass,relief="raised",padx=50)
button_find.grid(row=9,column=2,columnspan=1)
def show(job,location):
job= job.replace(" ", "+")
url_indeed = "https://in.indeed.com/jobs?q="+job+"&l="+location
r = requests.get(url_indeed)
htmltext = r.content
soup = BeautifulSoup(htmltext, 'html.parser')
card = soup.find('div', class_='jobsearch-SerpJobCard')
job_title = card.h2.a
job_location = card.find('span', class_='location accessible-contrast-color-location')
job_company = card.find('span', class_='company')
link=card.h2.a.get('href')
job_link = 'https://www.indeed.com' + link
job_summary = card.find('div', class_='summary')
post_date = card.find('span', class_='date date-a11y')
try:
job_salary = card.find('span', class_='salaryText').get_text()
except AttributeError:
job_salary = "NA"
url_timesjob = (f"https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords={job}&txtLocation={location}")
r = requests.get(url_timesjob)
htmlContent = r.content
soup = BeautifulSoup(htmlContent, 'html.parser')
divs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')
div = divs[0]
title = div.a
link_job=div.h2.a.get('href')
job_linktimesjob='https://www.timesjobs.com/'+link_job
comp = div.find('h3', class_="joblist-comp-name")
descrip = div.find('ul', class_='list-job-dtl clearfix')
location_job = div.find('ul', class_='top-jd-dtl clearfix')
str1 = location_job.get_text()
str2 = str(str1)
str3 = str2.replace("card_travel", "Experience Required: ")
tempList = [job_link,job_title.get_text().strip(),job_company.get_text().strip(),job_summary.get_text().strip(),job_salary,job_location.get_text().strip(),post_date.get_text()]
tempList1=[job_linktimesjob,title.get_text().strip(),comp.get_text().strip(),descrip.get_text().strip(),str3.strip()]
indeed_list=["Job Link: ","Job Title: ","Company Name: ","Job Summary: ","Job Salary: ","Job Location: ","post date: "]
listBox.insert(END,"INDEED")
listBox.insert(END, "\n")
listBox.insert(END, "\n")
for i in range(0,7):
listBox.insert(END,indeed_list[i])
listBox.insert(END,tempList[i])
listBox.insert(END, "\n")
listBox.insert(END, "\n")
listBox.insert(END, "TIMES JOB")
listBox.insert(END, "\n")
listBox.insert(END, "\n")
listBox.insert(END, "Job Link: ")
listBox.insert(END, tempList1[0])
listBox.insert(END, "\n")
listBox.insert(END, "Job Title:")
listBox.insert(END, "\n")
listBox.insert(END, tempList1[1])
listBox.insert(END, "\n")
listBox.insert(END,"Company Name: ")
listBox.insert(END, "\n")
for i in range(2,5):
listBox.insert(END, tempList1[i])
listBox.insert(END, "\n")
listBox = Text(root,width=80,bg="#E1E5EA",relief="sunken",font=("Helvetica",10,"bold"))
listBox.grid(row=1, column=2,)
listBox.insert(END,"JOB COMPARATOR ( INDEED VS TIMESJOB )")
listBox.insert(END,"\n")
root.mainloop()
| 4,333 |
didadata/exceptions.py
|
stephrdev/didadata
| 1 |
2025790
|
class DidadataException(Exception):
pass
class MetricNotFoundException(DidadataException):
pass
class InvalidValueException(DidadataException):
pass
| 165 |
Modules/Utils/ImportData.py
|
PierreOreistein/MVA-Kernels
| 0 |
2022715
|
import pandas as pd
def ImportData(file_path_x, file_path_y, suffix="", header=None, sep=" "):
"""Import the data from file_path_x and file_path_y and return them as a dict of pandas DataFrames."""
# Dictionary containing all the required datasets
df_dict = {}
for k in range(3):
# Extraction of the training set , the testing set and the labels of
# the training set
X_train_df = pd.read_csv(file_path_x + "Xtr" + str(k) + suffix +
".csv", header=header, sep=sep)
X_test_df = pd.read_csv(file_path_x + "Xte" + str(k) + suffix + ".csv",
header=header,
sep=sep)
Y_train_df = pd.read_csv(file_path_y + "Ytr" + str(k) + ".csv")
# Adding of these datasets to df_dict
df_dict[k] = [X_train_df, X_test_df, Y_train_df]
return df_dict
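# Hypothetical usage (placeholder paths, not from the original module):
# datasets = ImportData("data/", "data/")
# X_train_df, X_test_df, Y_train_df = datasets[0]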
| 876 |
4.repetition/aula8.exercicio.1.parte2.py
|
mtanji/intro-algorithm-python
| 0 |
2025691
|
# [Part 2] Write a program, derived from the previous one, that receives the number of users for whom it should report whether they are of legal age, then asks for an age and says whether that person is an adult or a minor that many times. For example, if the user types 3, it should ask for the first person's age and say whether they are an adult, then ask for the second person's age and say whether they are an adult, and so on, until the question has been answered the number of times the user indicated.
quantidade = input('querer saber a situação de quantos usuários? ')
quantidade = int(quantidade)
for rep in range(quantidade):
idade = input('digite sua idade ')
idade = int(idade)
if idade >= 18:
print('maior de idade')
else:
print('menor de idade')
| 804 |
tests/plugins/test_streamboat.py
|
fuglede/streamlink
| 0 |
2025502
|
import unittest
from streamlink.plugins.streamboat import StreamBoat
class TestPluginStreamBoat(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
'https://streamboat.tv/@example',
'https://streamboat.tv/@test',
]
for url in should_match:
self.assertTrue(StreamBoat.can_handle_url(url))
def test_can_handle_url_negative(self):
should_not_match = [
'https://example.com/index.html',
]
for url in should_not_match:
self.assertFalse(StreamBoat.can_handle_url(url))
| 599 |
challenges/day4.py
|
Jeffreyo3/AdventOfCode2020
| 0 |
2024441
|
"""
--- Day 4: Passport Processing ---
You arrive at the airport only to realize that you grabbed your North Pole Credentials instead of your passport. While these documents are extremely similar, North Pole Credentials aren't issued by a country and therefore aren't actually valid documentation for travel in most of the world.
It seems like you're not the only one having problems, though; a very long line has formed for the automatic passport scanners, and the delay could upset your travel itinerary.
Due to some questionable network security, you realize you might be able to solve both of these problems at the same time.
The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows:
byr (Birth Year)
iyr (Issue Year)
eyr (Expiration Year)
hgt (Height)
hcl (Hair Color)
ecl (Eye Color)
pid (Passport ID)
cid (Country ID)
Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines.
Here is an example batch file containing four passports:
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
The first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt (the Height field).
The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid.
The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid.
According to the above rules, your improved system would report 2 valid passports.
Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid?
"""
import re
f = open(r"challenges\data\day4data.txt", "r")
def processPassportData(file):
data = []
currPassport = {}
for x in file:
if re.match(r"^\s*$", x):
# print("blank = ", x)
# print(currPassport)
data.append(currPassport)
currPassport = {}
else:
currLine = x.split()
for l in currLine:
keyVal = l.split(":")
currPassport[keyVal[0]] = keyVal[1]
if currPassport:
data.append(currPassport)
return data
def validatePassportFields(passport):
requiredFields = ["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"]
for rf in requiredFields:
if rf not in passport.keys():
return False
return True
def countValidPassports(passportList, validationMethod):
count = 0
for p in passportList:
if validationMethod(p):
count += 1
return count
data = processPassportData(f)
validCount = countValidPassports(data, validatePassportFields)
print("Part 1: ", validCount)
"""
--- Part Two ---
The line is moving more quickly now, but you overhear airport security talking about how passports with invalid data are getting through. Better add some data validation, quick!
You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation:
byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.
Your job is to count the passports where all required fields are both present and valid according to the above rules. Here are some example values:
byr valid: 2002
byr invalid: 2003
hgt valid: 60in
hgt valid: 190cm
hgt invalid: 190in
hgt invalid: 190
hcl valid: #123abc
hcl invalid: #123abz
hcl invalid: 123abc
ecl valid: brn
ecl invalid: wat
pid valid: 000000001
pid invalid: 0123456789
Here are some invalid passports:
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007
Here are some valid passports:
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
Count the number of valid passports - those that have all required fields and valid values. Continue to treat cid as optional. In your batch file, how many passports are valid?
"""
class PassportValidator:
validHexCharacters = set("0123456789ABCDEFabcdef")
def __init__(self, requiredFields = ["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"], validEyeColors = set(["amb", "blu", "brn", "gry", "grn", "hzl", "oth"])):
self.requiredFields = requiredFields
self.validEyeColors = validEyeColors
def validateAll(self, passport):
if not self.validatePassportFields(passport):
return False
if not self.validateBirthYear(passport["byr"]):
return False
if not self.validateIssueYear(passport["iyr"]):
return False
if not self.validateExpirationYear(passport["eyr"]):
return False
if not self.validateHeight(passport["hgt"]):
return False
if not self.vaildateHairColor(passport["hcl"]):
return False
if not self.validateEyeColor(passport["ecl"]):
return False
if not self.validatePassportId(passport["pid"]):
return False
return True
def validatePassportFields(self, passport):
for rf in self.requiredFields:
if rf not in passport.keys():
return False
return True
def validateYear(self, yearStr, reqMin, reqMax):
if len(yearStr) != 4:
return False
yearInt = int(yearStr)
if yearInt < reqMin or yearInt > reqMax:
return False
return True
def validateBirthYear(self, yearStr):
return self.validateYear(yearStr, 1920, 2002)
def validateIssueYear(self, yearStr):
return self.validateYear(yearStr, 2010, 2020)
def validateExpirationYear(self, yearStr):
return self.validateYear(yearStr, 2020, 2030)
def validateMsmntRange(self, num, n_min, n_max):
return num <= n_max and num >= n_min
def validateHeight(self, heightStr):
# separate number from measurement type
number = heightStr[:-2]
msmntType = heightStr[-2:]
# validate that it's a number
try:
number = int(number)
except ValueError:
return False
# validate that it's one of the correct measurement types
if msmntType == "cm":
return self.validateMsmntRange(number, 150, 193)
elif msmntType == "in":
return self.validateMsmntRange(number, 59, 76)
else:
return False
def vaildateHairColor(self, hairStr):
if len(hairStr) != 7:
return False
hash_ = hairStr[:1]
hexColor = hairStr[1:]
if hash_ != "#":
return False
for c in hexColor:
if c not in self.validHexCharacters:
return False
return True
def validateEyeColor(self, eyeStr):
return eyeStr in self.validEyeColors
def validatePassportId(self, idString):
if len(idString) != 9:
return False
try:
int(idString)
except ValueError:
return False
return True
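# Spot-check sketch against the example values quoted in the puzzle text above
# (illustrative, not part of the original solution):
# checker = PassportValidator()
# checker.validateBirthYear("2002") -> True; checker.validateBirthYear("2003") -> False
# checker.validateHeight("60in") -> True; checker.validateHeight("190") -> False
# checker.vaildateHairColor("#123abc") -> True; checker.vaildateHairColor("123abc") -> False
# checker.validatePassportId("000000001") -> True; checker.validatePassportId("0123456789") -> False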
v = PassportValidator()
res = countValidPassports(data, v.validateAll)
print("Part 2: ", res)
| 8,697 |
tests/test_shadowrun_formatter.py
|
ephreal/rollbot
| 2 |
2025547
|
# -*- coding: utf-8 -*-
"""
This software is licensed under the License (MIT) located at
https://github.com/ephreal/rollbot/Licence
Please see the license for any restrictions or rights granted to you by the
License.
"""
import asyncio
import unittest
from classes.dice_rolling import shadowrun_rolling as sr
from classes.formatters import shadowrun_formatter as sf
class TestShadowrun3Formatter(unittest.TestCase):
def setUp(self):
self.roller = sr.Shadowrun3Roller()
self.formatter = sf.Shadowrun3Formatter()
def test_format_roll(self):
"""
Verifies that the rolls are formatted correctly
"""
# Check the formatting on a successful roll
rolls = [1, 3, 3, 4, 6, 12]
checked = self.roller.check_successes(5, rolls)
checked = run(checked)
non_verbose = self.formatter.format_roll(rolls, checked)
non_verbose = run(non_verbose)
verbose = self.formatter.format_roll(rolls, checked, verbose=True)
verbose = run(verbose)
expected_format = f"Test succeeded\n"\
f"You rolled {len(rolls)} dice\n"\
f"You had {checked['successes']} successes.\n\n"\
f"successes: {checked['successes']}\n"\
f"failures: 1"
self.assertEqual(expected_format, non_verbose)
self.assertTrue(len(expected_format) < len(verbose))
# Check the formatting on a failed test
rolls = [1, 1, 1, 1, 1, 1, 1, 1, 1]
checked = self.roller.check_successes(5, rolls)
checked = run(checked)
non_verbose = self.formatter.format_roll(rolls, checked)
non_verbose = run(non_verbose)
verbose = self.formatter.format_roll(rolls, checked, verbose=True)
verbose = run(verbose)
expected_format = f"TEST FAILED\n"\
f"You rolled {len(rolls)} dice\n"\
f"You had {checked['successes']} successes.\n\n"\
f"successes: {checked['successes']}\n"\
f"failures: 9"
self.assertEqual(expected_format, non_verbose)
self.assertTrue(len(expected_format) < len(verbose))
def test_format_initiative(self):
"""
Verifies the initiative formatting is correct
"""
rolls = [5, 5, 5, 5]
initiative = self.formatter.format_initiative(rolls, 26)
initiative = run(initiative)
expected_format = f"Your initiative score is 26"
self.assertEqual(expected_format, initiative)
def test_format_unchecked_roll(self):
"""
Verifies that formatting an unchecked roll is possible.
"""
roll = [1, 2, 3, 4, 5, 6]
expected_format = f"You rolled {len(roll)} dice\nRoll: {roll}"
roll = run(self.formatter.format_unchecked_roll(roll))
self.assertEqual(roll, expected_format)
class TestShadowrun5Formatter(unittest.TestCase):
def setUp(self):
self.formatter = sf.Shadowrun5Formatter()
self.roller = sr.Shadowrun5Roller()
def test_buy_hits(self):
"""
Verifies that buy_hits builds the formatted string correctly.
"""
dice_pool = 3
bought = self.formatter.format_buy_hits(dice_pool)
bought = run(bought)
expected_format = f"You bought {dice_pool} hits."
self.assertEqual(expected_format, bought)
def test_format_roll(self):
"""
Verifies that the shadowrun 5E hits formatter is formatting rolls
properly.
"""
rolls = [1, 2, 3, 4, 5, 6]
counted = {"hits": 2, "misses": 3, "ones": 1}
glitch = run(self.roller.is_glitch(rolls, counted['hits']))
formatted_hits = self.formatter.format_roll(rolls, counted,
glitch=glitch)
formatted_hits = run(formatted_hits)
expected_format = "You rolled 6 dice.\n"\
"hits : 2\n"\
"misses : 3\n"\
"ones : 1"
self.assertEqual(expected_format, formatted_hits)
# Verify that rolls and hit information from shadowrun rollers are
# correctly formatted for the formatter.
rolls = self.roller.roll(6)
rolls = run(rolls)
counted = self.roller.count_hits(rolls)
counted = run(counted)
glitch = run(self.roller.is_glitch(rolls, counted['hits']))
formatted_hits = self.formatter.format_roll(rolls, counted,
)
formatted_hits = run(formatted_hits)
expected_format = f"You rolled {len(rolls)} dice.\n"\
f"hits : {counted['hits']}\n"\
f"misses : {counted['misses']}\n"\
f"ones : {counted['ones']}"
self.assertEqual(expected_format, formatted_hits)
def test_format_extended_test(self):
"""
Verifies the extended test is formatted properly.
"""
extended = self.roller.extended_test(8, 8)
extended = run(extended)
formatted = self.formatter.format_extended_test(extended)
formatted = run(formatted)
self.assertTrue(isinstance(formatted, str))
def test_format_initiative(self):
"""
Verifies the initiative formatting is correct
"""
roll = [5, 5, 5, 5]
initiative = self.formatter.format_initiative(roll, 26)
initiative = run(initiative)
expected_format = f"Your initiative score is 26"
self.assertEqual(expected_format, initiative)
def run(coroutine):
"""
Runs and returns the data from the coroutine passed in. This is to
only be used in unit testing.
coroutine : asyncio coroutine
-> coroutine return
"""
return asyncio.get_event_loop().run_until_complete(coroutine)
| 5,985 |
Chapter07-design-patterns/builder.py
|
showa-yojyo/Software-Architecture-with-Python
| 0 |
2024633
|
#!/usr/bin/env python
# Code Listing #8
"""
Builder design pattern implemented as a house builder class example with
a few sub-classes demonstrating the power of the builder pattern.
"""
class Room:
""" A class representing a Room in a house """
def __init__(self, nwindows=2, direction='S'):
self.nwindows = nwindows
self.direction = direction
def __str__(self):
return "Room <facing:%s, windows=#%d>" % (self.direction,
self.nwindows)
class Porch:
""" A class representing a Porch in a house """
def __init__(self, ndoors=2, direction='W'):
self.ndoors = ndoors
self.direction = direction
def __str__(self):
return "Porch <facing:%s, doors=#%d>" % (self.direction,
self.ndoors)
class LegoHouse:
""" A lego house class """
def __init__(self, nrooms=0, nwindows=0, nporches=0):
# windows per room
self.nwindows = nwindows
self.nporches = nporches
self.nrooms = nrooms
self.rooms = []
self.porches = []
def __str__(self):
msg = "LegoHouse<rooms=#%d, porches=#%d>\n" % (self.nrooms,
self.nporches)
for i in self.rooms:
msg += str(i) + '\n'
for i in self.porches:
msg += str(i) + '\n'
return msg
def add_room(self, room):
""" Add a room to the house """
self.rooms.append(room)
def add_porch(self, porch):
""" Add a porch to the house """
self.porches.append(porch)
class LegoHouseBuilder:
""" Lego house builder class """
def __init__(self, *args, **kwargs):
self.house = LegoHouse(*args, **kwargs)
def build(self):
""" Build a lego house instance and return it """
self.build_rooms()
self.build_porches()
return self.house
def build_rooms(self):
""" Method to build rooms """
for i in range(self.house.nrooms):
room = Room(self.house.nwindows)
self.house.add_room(room)
def build_porches(self):
""" Method to build porches """
for i in range(self.house.nporches):
porch = Porch(1)
self.house.add_porch(porch)
# A modest (budget) Lego house
class BudgetLegoHouseBuilder(LegoHouseBuilder):
""" Builder building budget lego house with 1 room and no porch and rooms having 1 window """
def __init__(self):
self.house = LegoHouse(nrooms=1, nporches=0, nwindows=1)
class SmallLegoHouseBuilder(LegoHouseBuilder):
""" Builder building small lego house with 1 room and 1 porch and rooms having 2 windows """
def __init__(self):
self.house = LegoHouse(nrooms=2, nporches=1, nwindows=2)
class NorthFacingHouseBuilder(LegoHouseBuilder):
""" Builder building all rooms and porches facing North """
def build_rooms(self):
for i in range(self.house.nrooms):
room = Room(self.house.nwindows, direction='N')
self.house.add_room(room)
def build_porches(self):
for i in range(self.house.nporches):
porch = Porch(1, direction='N')
self.house.add_porch(porch)
# Note: only this one needs care (it combines two builders via multiple inheritance)
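# Thanks to Python's MRO, __init__ resolves to SmallLegoHouseBuilder while
# build_rooms/build_porches resolve to NorthFacingHouseBuilder, so the result
# is a small house whose rooms and porch all face North.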
class NorthFacingSmallHouseBuilder(NorthFacingHouseBuilder, SmallLegoHouseBuilder):
pass
if __name__ == "__main__":
bbuilder = BudgetLegoHouseBuilder()
print(bbuilder.build())
sbuilder = SmallLegoHouseBuilder()
print(sbuilder.build())
nbuilder = NorthFacingHouseBuilder(nrooms=2, nporches=1, nwindows=1)
print(nbuilder.build())
print(NorthFacingSmallHouseBuilder().build())
| 3,712 |
services/dscore-master/scorelib/argparse.py
|
ishine/self_supervised_AHC
| 10 |
2025669
|
"""Custom argument parser and action classes."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import sys
__all__ = ['ArgumentParser']
class ArgumentParser(argparse.ArgumentParser):
"""Sub-class of ``ArgumentParser`` that write errors to STDERR."""
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
| 487 |
example/demod_lmmse/estimate_stats.py
|
RFChallenge/rfchallenge_starter
| 6 |
2025189
|
import os, sys
# to run this file from within example/demod_lmmse folder
os.chdir(os.getcwd())
print(os.path.abspath(os.curdir))
sys.path.append(os.curdir)
import numpy as np
import pickle
import rfcutils
import random
random.seed(0)
np.random.seed(0)
stats_folder = os.path.join('example', 'demod_lmmse', 'stats')
block_len = 4000
window_len=40960
def load_train_frames(interference_type, num_train_instances):
interference_sig_dataset = []
for ii in range(num_train_instances):
data,meta = rfcutils.load_dataset_sample(ii, 'train_frame', interference_type)
interference_sig_dataset.append(data)
interference_sig_dataset = np.array(interference_sig_dataset)
return interference_sig_dataset
def generate_samples(interference_sig_dataset, block_len=block_len, window_len=window_len):
block_dataset = []
for ii in range(10000):
idx = np.random.randint(interference_sig_dataset.shape[0])
start_idx = np.random.randint(interference_sig_dataset.shape[1]-window_len)
train_data = interference_sig_dataset[idx, start_idx:start_idx+window_len]
train_data = train_data/np.sqrt(np.mean(np.abs(train_data)**2))
train_data = train_data[:(window_len//block_len)*block_len]
block_data = train_data.reshape(-1,block_len)
block_dataset.append(block_data)
block_dataset = np.array(block_dataset)
block_dataset = block_dataset.reshape(-1,block_len)
return block_dataset
def generate_aligned_samples(sig_dataset, block_len=block_len, window_len=window_len):
template_start = None
block_dataset = []
for ii in range(10000):
idx = np.random.randint(sig_dataset.shape[0])
start_idx = np.random.randint(sig_dataset.shape[1]-window_len)
train_data = sig_dataset[idx, start_idx:start_idx+window_len]
train_data = train_data/np.sqrt(np.mean(np.abs(train_data)**2))
# alignment
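        # The first window is kept as a template; every later window is circularly
        # shifted so that its cross-correlation peak with the template lines up.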
if ii == 0:
template_start = train_data[:4000]
else:
pk_idx = np.argmax(np.abs(np.correlate(train_data,template_start,mode='full')))
pk_idx -= len(template_start)
train_data = np.roll(train_data, -pk_idx)
train_data = train_data[:(window_len//block_len)*block_len]
block_data = train_data.reshape(-1,block_len)
block_dataset.append(block_data)
block_dataset = np.array(block_dataset)
block_dataset = block_dataset.reshape(-1,block_len)
return block_dataset, template_start
interference_type = 'EMISignal1'
interference_sig_dataset = load_train_frames(interference_type, 530)
block_dataset = generate_samples(interference_sig_dataset)
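# mu: sample mean of the blocks; cov1 is the (Hermitian) covariance
# E[(x-mu)(x-mu)^H] and cov2 the pseudo-covariance E[(x-mu)(x-mu)^T] of the
# complex-valued blocks. The same statistics are computed for each signal below.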
mu_emi1 = np.mean(block_dataset, axis=0)
cov1_emi1 = 1/block_dataset.shape[0]*np.matmul(np.transpose(block_dataset-mu_emi1), np.conj(block_dataset-mu_emi1))
cov2_emi1 = 1/block_dataset.shape[0]*np.matmul(np.transpose(block_dataset-mu_emi1), block_dataset-mu_emi1)
pickle.dump((mu_emi1,cov1_emi1,cov2_emi1),open(os.path.join(stats_folder,f'{interference_type}_stats.pickle'),'wb'))
interference_type = 'CommSignal2'
interference_sig_dataset = load_train_frames(interference_type, 100)
block_dataset = generate_samples(interference_sig_dataset)
mu_comm2 = np.mean(block_dataset, axis=0)
cov1_comm2 = 1/block_dataset.shape[0]*np.matmul(np.transpose(block_dataset-mu_comm2), np.conj(block_dataset-mu_comm2))
cov2_comm2 = 1/block_dataset.shape[0]*np.matmul(np.transpose(block_dataset-mu_comm2), block_dataset-mu_comm2)
pickle.dump((mu_comm2,cov1_comm2,cov2_comm2),open(os.path.join(stats_folder,f'{interference_type}_stats.pickle'),'wb'))
interference_type = 'CommSignal3'
interference_sig_dataset = load_train_frames(interference_type, 139)
block_dataset = generate_samples(interference_sig_dataset)
mu_comm3 = np.mean(block_dataset, axis=0)
cov1_comm3 = 1/block_dataset.shape[0]*np.matmul(np.transpose(block_dataset-mu_comm3), np.conj(block_dataset-mu_comm3))
cov2_comm3 = 1/block_dataset.shape[0]*np.matmul(np.transpose(block_dataset-mu_comm3), block_dataset-mu_comm3)
pickle.dump((mu_comm3,cov1_comm3,cov2_comm3),open(os.path.join(stats_folder,f'{interference_type}_stats.pickle'),'wb'))
sig_type = 'QPSK'
qpsk_block_dataset = []
for ii in range(10000):
qpsk_sig, _, _, _ = rfcutils.generate_qpsk_signal()
qpsk_sig = qpsk_sig[:(len(qpsk_sig)//block_len)*block_len]
block_data = qpsk_sig.reshape(-1,block_len)
qpsk_block_dataset.append(block_data)
qpsk_block_dataset = np.array(qpsk_block_dataset)
qpsk_block_dataset = qpsk_block_dataset.reshape(-1,block_len)
mu_qpsk = np.mean(qpsk_block_dataset, axis=0)
cov1_qpsk = 1/qpsk_block_dataset.shape[0]*np.matmul(np.transpose(qpsk_block_dataset-mu_qpsk), np.conj(qpsk_block_dataset-mu_qpsk))
cov2_qpsk = 1/qpsk_block_dataset.shape[0]*np.matmul(np.transpose(qpsk_block_dataset-mu_qpsk), qpsk_block_dataset-mu_qpsk)
pickle.dump((mu_qpsk,cov1_qpsk,cov2_qpsk),open(os.path.join(stats_folder,f'{sig_type}_demod_stats.pickle'),'wb'))
##########
sig_type = 'CommSignal2'
sig_sig_dataset = load_train_frames(sig_type, 100)
block_dataset, template_start = generate_aligned_samples(sig_sig_dataset)
mu_comm2 = np.mean(block_dataset, axis=0)
cov1_comm2 = 1/block_dataset.shape[0]*np.matmul(np.transpose(block_dataset-mu_comm2), np.conj(block_dataset-mu_comm2))
cov2_comm2 = 1/block_dataset.shape[0]*np.matmul(np.transpose(block_dataset-mu_comm2), block_dataset-mu_comm2)
pickle.dump((mu_comm2,cov1_comm2,cov2_comm2,template_start),open(os.path.join(stats_folder,f'{sig_type}_aligned_stats.pickle'),'wb'))
| 5,537 |
core/admin.py
|
MarcosBB/E-commerce
| 2 |
2023343
|
from django.contrib import admin
from .models import Produto, Compra
@admin.register(Produto)
class CargoAdmin(admin.ModelAdmin):
list_display = ('nome','preco' ,'modificado', 'ativo')
@admin.register(Compra)
class CompraAdmin(admin.ModelAdmin):
list_display = ('produtoId', 'quantidade', '_autor')
    exclude = ['autor']  # Excludes the user field from the purchase form on the admin page
    # Displays the user's full name
def _autor(self, instance):
return f'{instance.autor.apelido}'
    # Shows only the purchases of the logged-in user
def get_queryset(self, request):
qs = super(CompraAdmin, self).get_queryset(request)
return qs.filter(autor=request.user)
    # Ensures that only the logged-in user can make purchases in their own name
    def save_model(self, request, obj, form, change):
obj.autor = request.user
super().save_model(request, obj, form, change)
| 925 |
netmikoTask.py
|
chaseheim/StoutCNIT381_Final
| 0 |
2025839
|
from netmiko import Netmiko
#Netmiko skill function that gets called by the Webex bot's skill 'show run'
def showRun(host, username, password):
#Open a connection to the CSR that is passed into the function
    connection = Netmiko(host=host, port='22', username=username, password=password, device_type='cisco_ios')
#Send the 'show run' command to the CSR and save in output, then return output
output = connection.send_command('show run')
connection.disconnect()
return output
| 499 |
sniffer/sniffer.py
|
JustNao/DofusHelper
| 4 |
2025765
|
from . import network, protocol
def packetRead(msg):
try:
print(protocol.read(protocol.msg_from_id[msg.id]['name'], msg.data))
except KeyError:
print("KeyError : ", msg.id)
network.launch_in_thread(packetRead)
| 233 |
Web/views/develop_dyups_view.py
|
zhmsg/dms
| 0 |
2024330
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from flask import request, jsonify, g
from Tools.RenderTemplate import RenderTemplate
from Web import dyups_url_prefix as url_prefix, create_blue
from Web import control
sys.path.append('..')
__author__ = 'Zhouheng'
rt = RenderTemplate("Dyups", url_prefix=url_prefix)
develop_dyups_view = create_blue('develop_dyups_view', url_prefix=url_prefix)
@develop_dyups_view.route("/", methods=["GET"])
def index_page():
webcluster_role = control.role_value["dyups_web"]
apicluster_role = control.role_value["dyups_api"]
op_server_url = url_prefix + "/server/"
op_upstream_url = url_prefix + "/upstream/"
return rt.render("Index.html", webcluster_role=webcluster_role, apicluster_role=apicluster_role,
op_server_url=op_server_url, op_upstream_url=op_upstream_url)
@develop_dyups_view.route("/<upstream_name>/", methods=["GET"])
def web_upstream(upstream_name):
exec_r, data = control.get_server_list(g.user_name, g.user_role, upstream_name)
r_data = {"name": upstream_name, "data": data}
return jsonify({"status": exec_r, "data": r_data})
@develop_dyups_view.route("/upstream/", methods=["POST", "DELETE"])
def remove_upstream():
request_data = request.json
server_ip = request_data["server_ip"]
server_port = int(request_data["server_port"])
upstream_name = request_data["upstream_name"]
if request.method == "DELETE":
exec_r, data = control.remove_upstream(g.user_name, g.user_role, upstream_name, server_ip, server_port)
else:
exec_r, data = control.add_upstream(g.user_name, g.user_role, upstream_name, server_ip, server_port)
return jsonify({"status": exec_r, "data": data})
@develop_dyups_view.route("/server/", methods=["POST", "DELETE"])
def op_server_nodes():
request_data = request.json
server_ip = request_data["server_ip"]
server_port = int(request_data["server_port"])
upstream_name = request_data["upstream_name"]
if request.method == "POST":
exec_r, data = control.add_server_node(g.user_name, g.user_role, upstream_name, server_ip, server_port)
else:
exec_r, data = control.delete_server_node(g.user_name, g.user_role, upstream_name, server_ip, server_port)
return jsonify({"status": exec_r, "data": data})
| 2,326 |
junit2htmlreport/textutils.py
|
johng42/junit2html
| 112 |
2024036
|
"""
Stringify to unicode
"""
import sys
__py3__ = sys.version_info > (3, 0)
def unicode_str(text):
"""
Convert text to unicode
:param text:
:return:
"""
if __py3__:
if isinstance(text, bytes):
return text.decode("utf-8", "strict")
return str(text)
return unicode(text)
| 327 |
sa/profiles/Qtech/QSW8200/get_portchannel.py
|
xUndero/noc
| 1 |
2024071
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Qtech.QSW8200.get_portchannel
# ---------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetportchannel import IGetPortchannel
class Script(BaseScript):
name = "Qtech.QSW8200.get_portchannel"
interface = IGetPortchannel
cache = True
rx_item = re.compile(
r"^Group (?P<portgroup>\d+) information:\s*\n"
r"^Mode\s*:\s*(?P<mode>\S+).+\n"
r"^MinLinks\s*:.+\n"
r"^UpLinks\s* :.+\n"
r"^Member Port:(?P<members>.+)\n",
re.MULTILINE,
)
def execute(self):
r = []
cmd = self.cli("show port-channel", cached=True)
for match in self.rx_item.finditer(cmd):
members = match.group("members").split()
r += [
{
"interface": "port-channel%s" % match.group("portgroup"),
"members": members,
"type": "L" if match.group("mode") == "Lacp" else "S",
}
]
return r
| 1,338 |
tasks/task6_JupiterNB.py
|
shmalex/galvanize
| 1 |
2022666
|
"""
https://gpython-anon-ljgawq.notebooks.azure.com/nb/notebooks/python_problems.ipynb
"""
"https://www.python-course.eu/python3_object_oriented_programming.php
http://python-textbok.readthedocs.io/en/1.0/Classes.html
https://python.swaroopch.com/oop.html "
| 263 |
return_random_sumpod.py
|
jamesmorad/pLUX
| 0 |
2025100
|
#!/usr/bin/env ipython
import EvtReader_PyMod
import RQReader_PyMod
import GetPodOverlaps_PyMod
import numpy as np
import random
import glob
import os
import fnmatch
import math
import matplotlib.pyplot as plt
def rand_pulse_area_phe(pulse_area):
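    # Rejection test: draw a pulse-area bin with weights A:4 and B-E:24 each (100 total),
    # then accept the pulse only if its area actually falls in the drawn bin.
    # Bin 'F' (>= 50000 phe) is never drawn, so such pulses are always rejected.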
my_list = ['A'] * 4 + ['B'] * 24 + ['C'] * 24 + ['D']*24 + ['E']*24
pa_bin = random.choice(my_list)
if pulse_area < 2:
bin_better_be = 'A'
elif pulse_area >=2 and pulse_area < 50:
bin_better_be = 'B'
elif pulse_area >=50 and pulse_area < 500:
bin_better_be = 'C'
elif pulse_area >=500 and pulse_area < 2000:
bin_better_be = 'D'
elif pulse_area >= 2000 and pulse_area < 50000:
bin_better_be = 'E'
else:
bin_better_be = 'F'
if bin_better_be == pa_bin:
we_good = True
else:
we_good = False
return we_good
def get_rand_sumpod(evt_dir_path,rq_dir_path):
#Check if we have a 1-to-1 match up of evts and rqs otherwise this may be trouble
if len(glob.glob(evt_dir_path+"/*.evt")) != len(glob.glob(rq_dir_path+"/*.rq")):
return None
#find a random evt file in the directory and choose that to grab our event from
file_list = glob.glob(evt_dir_path+"/*.evt")
rand_file = random.choice(file_list)
evt_filename = os.path.basename(rand_file)
filename_prefix = evt_filename.split("_")[0]+"_"+evt_filename.split("_")[1]
evt_filenumber = evt_filename.split("_")[2]
#open evt file
ereader = EvtReader_PyMod.EvtReader(rand_file, filematch = '*.evt', max_channel = 121, debug=True,pmt_gain_file="/data/3/DatasetsForPLUX/pmt_gains_9355.xml")
#choose random event number and read that dictionary in
evt_num = random.randrange(0,ereader.evt_number)
edict1 = ereader[evt_num]
rq_files = glob.glob(rq_dir_path+"/*.rq")
pattern = "*"+evt_filenumber+"*"
rq_file = fnmatch.filter(rq_files,pattern)[0]
rqreader = RQReader_PyMod.ReadRQFile(rq_file)
random_pulse = random.randrange(0,10)
pulse_start_samples = rqreader[0]['pulse_start_samples'][evt_num][random_pulse]
pulse_end_samples = rqreader[0]['pulse_end_samples'][evt_num][random_pulse]
luxstamp_samples = rqreader[0]['luxstamp_samples'][evt_num]
pulse_area_phe = rqreader[0]['pulse_area_phe'][evt_num][random_pulse]
aft_width = rqreader[0]['aft_t2_samples'][evt_num][random_pulse] - rqreader[0]['aft_t0_samples'][evt_num][random_pulse]
peak_area_phe = rqreader[0]['peak_area_phe'][evt_num][random_pulse]
while pulse_start_samples == 999999 or not rand_pulse_area_phe(pulse_area_phe):
random_pulse = random.randrange(0,10)
pulse_start_samples = rqreader[0]['pulse_start_samples'][evt_num][random_pulse]
pulse_end_samples = rqreader[0]['pulse_end_samples'][evt_num][random_pulse]
luxstamp_samples = rqreader[0]['luxstamp_samples'][evt_num]
pulse_area_phe = rqreader[0]['pulse_area_phe'][evt_num][random_pulse]
aft_width = rqreader[0]['aft_t2_samples'][evt_num][random_pulse] - rqreader[0]['aft_t0_samples'][evt_num][random_pulse]
peak_area_phe = rqreader[0]['peak_area_phe'][evt_num][random_pulse]
aft_width = "{0:.2f}".format(10*aft_width)
pulse_area_phe = "{0:.2f}".format(pulse_area_phe)
peak_area_phe = peak_area_phe.clip(0)
peak_area_phe = [x/peak_area_phe.max() for x in peak_area_phe]
peak_area_phe = list(peak_area_phe)
    # Get the indices
pulsestarts, pulseends, pulsechannels, pulseindecies = GetPodOverlaps_PyMod.GetPodOverlaps(edict1['pulse_starts'][0], edict1['pulse_lengths'][0], channels = edict1['active_channels'][0], total_tpc_pmts=122)
sumpods = []
pod_starts = []
pulse_num = pulsestarts.size
pods_array = []
for i in range(pulse_num): # step over pulses
sumpod = np.zeros(pulseends[i] - pulsestarts[i], dtype=np.float64)
indi_pods = np.zeros(pulseends[i] - pulsestarts[i], dtype=np.float64)
currentind = 0
for j in range(pulseindecies[i].size):
# step over the PODs making up a pulse
inii = edict1['pulse_starts'][0][pulsechannels[i][j]][pulseindecies[i][j]]-pulsestarts[i]
inilens = edict1['pulse_lengths'][0][pulsechannels[i][j]][pulseindecies[i][j]]
if pulseindecies[i][j] == 0:
startind = 0
else:
startind = edict1['pulse_lengths'][0][pulsechannels[i][j]][:pulseindecies[i][j]].sum()
finalind = edict1['pulse_lengths'][0][pulsechannels[i][j]][:pulseindecies[i][j]+1].sum()
sumpod[inii:inii+inilens] += edict1['pulse_data'][0][pulsechannels[i][j]][startind:finalind]
indi_pods = edict1['pulse_data'][0][pulsechannels[i][j]]
pods_array.append(indi_pods)
sumpods.append(sumpod)
del sumpod
del indi_pods
pulse_width = pulse_end_samples-pulse_start_samples
for i in range(edict1['active_channels'][0].size): # step over each PMT channel
plt.plot(edict1['pulse_time_arr'][0][i], edict1['pulse_data'][0][i])
plt.savefig("/var/www/FlaskApp/FlaskApp/static/james_is_lazy.png")
times = []
for pulseE,pulseS in zip(pulseends,pulsestarts):
times.append(np.arange(pulseE-pulseS)+pulseS)
summed_pod = []
indiv_pods = []
time_axis = []
for time,pod in zip(np.hstack(times),np.hstack(sumpods)):
if time >= pulse_start_samples-math.floor(0.1*pulse_width) and time <= pulse_end_samples+math.floor(0.1*pulse_width):
time_axis.append(10*(time-pulse_start_samples))
summed_pod.append(pod)
data_dict = dict()
data_dict["times"] = time_axis
data_dict["pulse"] = summed_pod
data_dict["filename_prefix"] = filename_prefix
data_dict["filenumber"] = evt_filenumber
data_dict["pulse_start_samples"] = pulse_start_samples
data_dict["pulse_end_samples"] = pulse_end_samples
data_dict["event_number"] = evt_num
data_dict["pulse_area_phe"] = pulse_area_phe
data_dict["peak_area_phe"] = peak_area_phe
data_dict["aft_width"] = aft_width
data_dict["luxstamp_samples"] = luxstamp_samples
return data_dict
| 5,845 |
service/country_iso_scraper.py
|
arielbello/dota-ladder
| 2 |
2024316
|
import constants as Const
import utils
import pandas as pd
import time
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
def _get_data():
"""
Scrapes "https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2" for country
codes and respective names
@return: country codes and full names in a Panda.DataFrame
"""
# headless usage
driver_options = Options()
driver_options.add_argument("--headless")
driver_options.add_argument("--disable-extensions")
with webdriver.Firefox(options=driver_options) as driver:
# Countries 2 letter iso wikipedia page
driver.get(Const.Urls.COUNTRY_ISO)
try:
WebDriverWait(driver, 10).until(lambda d: d.find_element(By.CSS_SELECTOR, "table.wikitable.sortable"))
except TimeoutException:
print("Time exceeded, ", TimeoutException)
# unpredictable results with WebDriverWait, so we force wait
time.sleep(3)
table = driver.find_element(By.CSS_SELECTOR, "table.wikitable.sortable")
table_body = table.find_element(By.TAG_NAME, "tbody")
rows = table_body.find_elements(By.TAG_NAME, "tr")
country_list = []
for row in rows:
data = row.find_elements(By.TAG_NAME, "td")
entry = [data[0].text, data[1].text]
country_list.append(entry)
df = pd.DataFrame(country_list)
df.columns = ['code', 'name']
return df
def _save_dataframe_to_csv(df, path):
utils.create_folder_if_needed(Const.Files.GENERATED)
df.to_csv(path, index=False, encoding="utf-8")
print("wrote to", path)
def scrape():
df = _get_data()
path = Const.Files.GENERATED + "/" + Const.Files.COUNTRY_ISO
_save_dataframe_to_csv(df, path)
if __name__ == "__main__":
scrape()
| 1,972 |
app/__init__.py
|
InfernapeXavier/knowyourparkinson
| 0 |
2024854
|
from flask import Flask, request, render_template
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn="https://[email protected]/3394112",
integrations=[FlaskIntegration()])
app = Flask(__name__)
from app import routes, errors
| 309 |
app.py
|
DishaShrivastava/iotpocserver
| 1 |
2024246
|
from flask import Flask, render_template,request,redirect,url_for # For flask implementation
from bson import ObjectId # For ObjectId to work
from pymongo import MongoClient
import os
import json
app = Flask(__name__)
title = "Azure IoT POC Server"
heading = "AzureIoT Hub backend"
##Un-Comment when running against the Cosmos DB Emulator
# client = MongoClient("mongodb://rpi:FFwCOdm<EMAIL>XI<EMAIL>KDQX<EMAIL>==<EMAIL>:10255/?ssl=true&replicaSet=globaldb") #host uri
# db = client.iot #Select the database
# db.authenticate(name="rpi",password='FF<KEY>')
## Comment out when running locally
client = MongoClient(os.getenv("mongodb://rpi:<EMAIL>==<EMAIL>:10255/?ssl=true&replicaSet=globaldb"))
db = client.iot #Select the database
db.authenticate(name=os.getenv("rpi"), password=os.getenv("<KEY>"))
msg = db.messages #Select the collection
def redirect_url():
return request.args.get('next') or \
request.referrer or \
url_for('index')
@app.route("/getTemperatureHumidity")
def dht11():
    # Return the most recent temperature/humidity reading
li = []
for doc in msg.find():
li.append(doc)
resDict = li[len(li)-1]
resDictFinal = {'temperature' : resDict['temperature'], 'humidity' : resDict['humidity']}
response = app.response_class(
response= json.dumps(resDictFinal),
status=200,
mimetype='application/json'
)
return response
@app.route("/getPushBtnCount")
def pushBtn():
    # Return the most recent push-button count
li = []
for doc in msg.find():
li.append(doc)
print(type(doc))
resDict = li[len(li)-1]
resDictFinal = {'count' : resDict['count']}
response = app.response_class(
response= json.dumps(resDictFinal),
status=200,
mimetype='application/json'
)
return response
# define for IIS module registration.
wsgi_app = app.wsgi_app
if __name__ == "__main__":
app.debug = True
app.run()
| 1,859 |
register/admin.py
|
SmartElect/SmartElect
| 23 |
2025969
|
from django.contrib import admin
from django.utils.translation import ugettext as _
from libya_elections.admin_models import LibyaAdminModel
from libya_elections.admin_site import admin_site
from text_messages.models import MessageText
from .models import Person, RegistrationCenter, Registration, SMS, Blacklist, Whitelist,\
Office, Constituency, SubConstituency
def national_id(reg):
return reg.citizen.national_id
class BlacklistAdmin(LibyaAdminModel):
list_display = ['phone_number', 'creation_date', 'modification_date']
search_fields = ["phone_number"]
readonly_fields = ['creation_date', 'modification_date']
class PersonAdmin(LibyaAdminModel):
list_display = ['citizen']
raw_id_fields = ['citizen']
class OfficeAdmin(LibyaAdminModel):
list_display = ['id', 'name_english', 'name_arabic', 'region']
search_fields = ['id', 'name_english', 'name_arabic']
class ConstituencyAdmin(LibyaAdminModel):
list_display = ['id', 'name_english', 'name_arabic']
search_fields = ['id', 'name_english', 'name_arabic']
class SubConstituencyAdmin(LibyaAdminModel):
list_display = ['id', 'name_english', 'name_arabic']
search_fields = ['id', 'name_english', 'name_arabic']
def delete_selected_except_copied_centers(modeladmin, request, queryset):
"""Custom admin action which checks to make sure user is not trying to delete a copied center.
If a copied center is selected, user gets a warning message and no centers are deleted.
"""
copied_ids = queryset.filter(copied_by__isnull=False).values_list('center_id', flat=True)
if copied_ids:
msg = _('The following centers are copied by other centers and cannot be deleted: {}. '
'No centers were deleted.')
modeladmin.message_user(request, msg.format(copied_ids))
else:
return admin.actions.delete_selected(modeladmin, request, queryset)
class RegistrationCenterAdmin(LibyaAdminModel):
list_display = ['center_id', 'name', 'reg_open', 'office',
'constituency', 'subconstituency']
list_filter = ['reg_open', 'center_type', 'office', 'constituency', 'subconstituency']
search_fields = ["center_id", "name"]
readonly_fields = ['copied_by_these_centers']
date_hierarchy = 'creation_date'
def copied_by_these_centers(self, instance):
centers = ', '.join([str(center.center_id) for center in instance.copied_by.all()])
return centers or _("No copies")
def get_actions(self, request):
actions = super(RegistrationCenterAdmin, self).get_actions(request)
if 'delete_selected' in actions:
# Replace it with our version
actions['delete_selected'] = (
delete_selected_except_copied_centers,
'delete_selected',
_('Permanently delete selected %(verbose_name_plural)s.')
)
return actions
def get_readonly_fields(self, request, obj=None):
"""
Don't allow changes to copy centers.
"""
# Make sure we make a modifiable copy of the readonly fields to work with
readonly_fields = list(super(RegistrationCenterAdmin, self).get_readonly_fields(
request, obj))
if obj:
if obj.copy_of:
# Copy centers are not editable, so mark all fields (except 'deleted') read-only
return [field.name for field in obj._meta.local_fields
if field.editable and not field.name == 'deleted']
if obj.has_copy:
# Copied centers can't be deleted, so mark 'deleted' read-only
if 'deleted' not in readonly_fields:
readonly_fields.append('deleted')
# 'copy_of' can only be set initially, not while editing
if 'copy_of' not in readonly_fields:
readonly_fields.append('copy_of')
return readonly_fields
def has_delete_permission(self, request, obj=None):
"""Overridden to prevent deletion of RegistrationCenters that have copies."""
delete_permission = super(RegistrationCenterAdmin, self).has_delete_permission(request, obj)
if obj and isinstance(obj, RegistrationCenter):
return not obj.has_copy
else:
return delete_permission
# See
# docs.djangoproject.com/en/dev/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_filter
# for doc on this class
class ArchivedListFilter(admin.SimpleListFilter):
title = _('archived')
parameter_name = 'arc'
def lookups(self, request, model_admin):
return (
('1', _('Yes')),
('0', _('No')),
)
def queryset(self, request, queryset):
if self.value() == '0':
return queryset.filter(archive_time=None)
if self.value() == '1':
return queryset.exclude(archive_time=None)
class RegistrationAdmin(LibyaAdminModel):
list_display = ['citizen', national_id, 'registration_center', 'archive_time']
list_display_links = [national_id]
list_filter = [ArchivedListFilter]
raw_id_fields = ['citizen', 'registration_center', 'sms']
search_fields = ["registration_center__center_id", "registration_center__name"]
class SMSAdmin(LibyaAdminModel):
list_display = ['creation_date', 'from_number', 'direction', 'to_number',
'citizen', 'carrier', 'msg_type', 'message_code', 'message']
raw_id_fields = ['citizen', 'in_response_to']
search_fields = ['from_number', 'to_number', 'carrier__name', 'msg_type', 'message']
def get_list_display(self, *args, **kwargs):
# Initialize the choices on the message_code field
# We don't do it in the model def because the values are only
# defined in the database, and we don't do it unless/until we need
# to admin the SMS model because otherwise Django migrations think
        # the SMS message codes keep changing every time someone with
# different data in their database runs it. We wait until the
# admin calls get_list_display() to be sure someone is in the admin,
# since it's only in the admin that it matters at all whether these
# choices are defined.
if not SMS._meta.get_field('message_code').choices:
message_code_choices = [
(msg.number, msg.label) for msg in MessageText.objects.all()
]
SMS._meta.get_field('message_code').choices = message_code_choices
return super(SMSAdmin, self).get_list_display(*args, **kwargs)
class WhiteListAdmin(LibyaAdminModel):
list_display = ['phone_number', 'creation_date', 'modification_date']
search_fields = ["phone_number"]
readonly_fields = ['creation_date', 'modification_date']
admin_site.register(Blacklist, BlacklistAdmin)
admin_site.register(Person, PersonAdmin)
admin_site.register(Office, OfficeAdmin)
admin_site.register(Constituency, ConstituencyAdmin)
admin_site.register(SubConstituency, SubConstituencyAdmin)
admin_site.register(RegistrationCenter, RegistrationCenterAdmin)
admin_site.register(Registration, RegistrationAdmin)
admin_site.register(SMS, SMSAdmin)
admin_site.register(Whitelist, WhiteListAdmin)
| 7,242 |
OnesAndZeros.py
|
ReenExeCubeTime/CodewarsPython
| 1 |
2025770
|
class OnesAndZeros(object):
def array_to_number(self, arr):
sum = 0
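        # Shift the accumulator left and OR in each bit, e.g. [1, 0, 1] -> 0b101 == 5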
for bit in arr:
sum = (sum << 1) | bit
return sum
| 157 |
silver/fixtures/pytest_fixtures.py
|
truehostcloud/silver
| 0 |
2025807
|
import factory
import pytest
from rest_framework.test import APIClient
from django.conf import settings as django_settings
from django.contrib.auth import get_user_model
from django.test import Client
from silver.models import Invoice
from silver.fixtures.factories import CustomerFactory, ProviderFactory, InvoiceFactory
from silver.tests.api.utils.client import JSONApiClient
User = get_user_model()
@pytest.fixture()
def settings():
return django_settings
@pytest.fixture()
def user(db):
return User.objects.create(username="user")
@pytest.fixture()
def anonymous_api_client():
return APIClient()
@pytest.fixture()
def authenticated_api_client(user):
client = JSONApiClient()
client.force_authenticate(user=user)
return client
@pytest.fixture()
def authenticated_client(user):
user.set_password("password")
user.save()
client = Client()
client.login(username=user.username, password="password")
return client
@pytest.fixture()
def customer(db):
return CustomerFactory.create()
@pytest.fixture()
def provider(db):
return ProviderFactory.create()
@pytest.fixture()
def invoice(db):
return InvoiceFactory.create()
@pytest.fixture()
def issued_invoice(db):
return InvoiceFactory.create(state=Invoice.STATES.ISSUED)
@pytest.fixture()
def two_pages_of_invoices(db, settings):
allowed_states = [
Invoice.STATES.ISSUED,
Invoice.STATES.PAID,
Invoice.STATES.CANCELED,
]
return InvoiceFactory.create_batch(
settings.API_PAGE_SIZE * 2,
state=factory.Sequence(lambda n: allowed_states[n % len(allowed_states)]),
)
| 1,646 |
__init__.py
|
xamgore/camera-pipeline-scripts
| 1 |
2024694
|
# This file is used to assure Pycharm that __init__.py files
# are indeed important, so the "unused imports" check won't be active.
# To achieve this, we import all useful imports, that are used across
# all subprojects. BUT these imports must be in all __init__ files.
import sys, os, json
from os.path import join, dirname
| 328 |
14/euler14.py
|
adamkkarl/ProjectEuler
| 2 |
2025112
|
#!/bin/python3
__author__ = "<NAME>"
"""The Collatz sequence is defined by:
if n is even, divide it by 2
if n is odd, triple it and add 1
given N, which number less than or equal to N has the longest chain before hitting 1?"""
#https://projecteuler.net/problem=14
#April 2018
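# Example: 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 takes 9 steps to reach 1.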
MAXIMUM = 0
steps = []
answers = []
def update(n):
"""Update the array so that now has at least an answer for key=n"""
"""likely will also fill in additional step lengths"""
if n == 1: #base case 1
return 0
s = 0
    if n < MAXIMUM: # should have an answer in this spot
if steps[n] != None: #already know answer for this spot
return steps[n]
if n % 2 == 0:
s = 1 + update(n>>1)
else:
s = 1 + update(3*n + 1)
steps[n] = s #fill in an answer
else: #calculate on the fly
if n % 2 == 0:
s = 1 + update(n>>1)
else:
s = 1 + update(3*n + 1)
return s
def populateCollatz():
"""populates collatz steps array up to n"""
global steps
global answers
steps = [None]*(MAXIMUM)
answers = [0]*(MAXIMUM)
steps[0] = 1
steps[1] = 0
for i in range(1, MAXIMUM):
if steps[i] == None:
update(i)
def populateAnswers():
"""Using the array of number of steps for N, produce an array of the value that produces
    the maximum # of steps less than or equal to N (in case of a tie use the larger number).
Using this method we only have to check an array for the maximum 1 time rather than for every
test case N"""
max_steps = 0
max_index = 0
for i in range(MAXIMUM):
if max_steps <= steps[i]:
max_steps = steps[i]
max_index = i
answers[i] = max_index
def main():
global MAXIMUM
print("Which number under __ produces the longest collatz sequence? ", end="")
MAXIMUM = int(input()) + 1
populateCollatz()
populateAnswers()
print("%d produces the longest sequence" % answers[MAXIMUM-1])
if __name__ == "__main__":
main()
| 2,097 |
test.py
|
edgeless634/bilibili_spider
| 0 |
2024904
|
import logging
from api.biliApi import BiliApi
logging.basicConfig(level=logging.INFO)
def setting_test():
from setting.settingReader import setting
logging.info(f"Get setting: {setting}")
def proxyApi_test():
from api.proxyApi import ProxyApi
proxy_api = ProxyApi()
print(proxy_api.get_proxy())
def biliApi_test():
from api.biliApi import BiliApi
bili_api = BiliApi()
print(bili_api.get_cid_by_bid("BV1Nq4y1A7Vk"))
l = bili_api.get_up_video_by_mid(347235)
print(l[:4])
cid = bili_api.get_cid_by_aid(l[0]["aid"])
print(cid)
danmaku = bili_api.get_danmaku_list_by_cid(cid)
print(danmaku[:8])
followings = bili_api.get_following_by_mid(385842994)
print(followings[:10])
relations = bili_api.get_relationship_info_by_mid(385842994)
print(relations)
def DanmakuFetcher_test():
from helper.fetch import DanmakuFetcher
t = DanmakuFetcher()
t.start()
t.join()
def UserFollowingFetcher_test():
from helper.fetch import UserFollowingFetcher
t = UserFollowingFetcher()
t.start()
t.join()
if __name__ == '__main__':
DanmakuFetcher_test()
| 1,142 |
tests/python/sd.py
|
BenSouchet/sorbus
| 0 |
2022616
|
# Rouge Highlighter token test - Literal String Doc
def test():
"""Test of a docstring"""
return True
| 110 |
covid/smooth/aligator/aligator.py
|
Gandor26/cacovid
| 0 |
2024926
|
from typing import Optional
import numpy as np
import torch as pt
from torch import Tensor, BoolTensor, nn
from torch.nn import init, functional as F
from .covid_lin import run_aligator
class Aligator(nn.Module):
def __init__(self,
cond_size: int,
pred_size: int,
*lr: float,
) -> None:
super(Aligator, self).__init__()
self.cond_size = cond_size
self.pred_size = pred_size
if len(lr) > 0:
self.lr = lr
else:
self.lr = [
1e-2, 8e-2, 1e-1, 1.5e-1,
2e-1, 2.5e-1, 1e0,
]
def forward(self, data: Tensor):
steps = data.size(1)
        sm = []
        pr = []
        for s in range(data.size(0)):
            series = data[s].cpu().numpy()
            delta = np.max(series) - np.min(series)
            # Reset the best-fit trackers for each series so results from a
            # previous series cannot leak into this one.
            min_err = float('inf')
            best_lr = None
            smoothed = None
for sigma in self.lr:
filtered = run_aligator(
steps, series, np.arange(steps),
sigma, delta, 0, -1,
)
residue = series - filtered
err = np.mean(residue**2)
if err < min_err:
min_err = err
best_lr = sigma
smoothed = filtered
sm.append(smoothed)
preds = run_aligator(
steps, series, np.arange(steps),
best_lr, delta, 0, self.pred_size
)
pr.append(preds)
sm = np.stack(sm, axis=0)
sm = data.new_tensor(sm)
pr = np.stack(pr, axis=0)
pr = data.new_tensor(pr)
sm = data - sm
return sm, pr
| 1,740 |
Commands/m_ban-unban-kick.py
|
Hrishikesh-2712/ExcellExpressBotDC
| 1 |
2025333
|
import nextcord
from nextcord.ext import commands
import datetime
class BanUnbanKick(commands.Cog):
def __init__(self, Bot):
self.Bot = Bot
@commands.command(name="ban",help="Bans the user.")
@commands.has_role("Staff")
async def ban(self, ctx, member: nextcord.Member=None, *, reason=None):
        if member is None:
            await ctx.reply("Who am I going to ban?")
            return
embed = nextcord.Embed(title=f"{member} has been banned!",colour=nextcord.Colour.red())
await ctx.channel.send(embed=embed)
await member.send(f"You have been banned from **Excell Express**.\nReason: {reason}\nModerator: {ctx.message.author}")
await member.ban(reason=reason)
await ctx.message.delete()
logEmbed = nextcord.Embed(title="User Banned",colour=nextcord.Colour.red(),timestamp = datetime.datetime.now())
logEmbed.add_field(name="User",value=f"{member.mention}({member.id})")
logEmbed.add_field(name="Moderator",value=f"{ctx.message.author.mention}({ctx.message.author.id})")
logEmbed.add_field(name="Reason",value=reason,inline=False)
logchannel = self.Bot.get_channel(920817546503008316)
await logchannel.send(embed=logEmbed)
@commands.command(name="unban", help = "Unbans the user.")
@commands.has_role("Staff")
async def unban(self,ctx, *, member_id: int = None):
        if member_id is None:
            await ctx.reply("Who am I going to unban?")
            return
await ctx.guild.unban(nextcord.Object(id=member_id))
embed = nextcord.Embed(title=f"<@{member_id}> has been unbanned!",colour=nextcord.Colour.blue())
await ctx.send(embed=embed)
logEmbed = nextcord.Embed(title="User Unbanned",colour=nextcord.Colour.blue(),timestamp = datetime.datetime.now())
logEmbed.add_field(name="User",value=f"<@{member_id}>({member_id})")
logEmbed.add_field(name="Moderator",value=f"{ctx.message.author.mention}({ctx.message.author.id})")
logchannel = self.Bot.get_channel(920817546503008316)
await logchannel.send(embed=logEmbed)
await ctx.message.delete()
@commands.command(name="kick", help="Kicks the user.")
@commands.has_role("Staff")
async def kick(self,ctx, member:nextcord.Member=None, *, reason=None):
await member.send(f"You have been kicked from **Excell Express**.\nReason: {reason}\nModerator: {ctx.message.author}")
embed = nextcord.Embed(title=f"{member} has been kicked!",colour=nextcord.Color.orange())
await member.kick()
await ctx.send(embed=embed)
await ctx.message.delete()
logEmbed = nextcord.Embed(title="User Kicked",colour=nextcord.Colour.orange(),timestamp = datetime.datetime.now())
logEmbed.add_field(name="User",value=f"{member}({member.id})")
logEmbed.add_field(name="Moderator",value=f"{ctx.message.author.mention}({ctx.message.author.id})")
logEmbed.add_field(name="Reason",value=reason,inline=False)
logchannel = self.Bot.get_channel(920817546503008316)
await logchannel.send(embed = logEmbed)
def setup(Bot):
Bot.add_cog(BanUnbanKick(Bot))
| 3,142 |
tests/base.py
|
ozeranskiy/tilda_wrapper_api
| 3 |
2023519
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of tilda_wrapper_api.
# https://github.com/ozeranskiy/tilda_wrapper_api
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2019, <NAME> <<EMAIL>>
# Standard libraries
from unittest import TestCase as PythonTestCase
class TestCase(PythonTestCase):
pass
| 375 |
attendance_system.py
|
shellyalmo/Attendance_system_project
| 0 |
2025058
|
import pandas as pd
import numpy as np
from datetime import datetime
class AttendanceSystem:
def __init__(self, path_to_internal_employees_file, path_to_attendance_log):
self.clock = Clock(path_to_internal_employees_file, path_to_attendance_log)
# add method - read csv, write csv
self.report = Report(path_to_internal_employees_file, path_to_attendance_log)
# class Helper
# add method - read csv, write csv
#
def start(self, user_id):
# CHECK if user_id is None
# start menu, being called after validation
proceed = True
while proceed:
option = int(
input("Enter the number of action you wish to do:\n1) Add employee manually\n2) Delete employee "
"manually\n3) Add employees from file\n4) Delete employees from file\n5) Mark attendance\n6) Generate "
"attnedance report for an employee\n7) Print monthly report for all employees\n8) Print report for "
"all late employees\nYour choice: "))
# method that takes a dict (keys:options,val:methods)
# 25-27 : method
if option == 1:
self.report.add_employee()
answer = input("Would you like to go back to the menu?\nyes/no\nYour answer: ")
if answer != 'yes':
proceed = False
if option == 2:
self.report.delete_employee()
answer = input("Would you like to go back to the menu?\nyes/no\nYour answer: ")
if answer != 'yes':
proceed = False
if option == 3:
self.report.add_employees_from_external_file()
answer = input("Would you like to go back to the menu?\nyes/no\nYour answer: ")
if answer != 'yes':
proceed = False
if option == 4:
self.report.delete_employees_from_external_file()
answer = input("Would you like to go back to the menu?\nyes/no\nYour answer: ")
if answer != 'yes':
proceed = False
if option == 5:
clock_option = int(
input("Enter the number of action you wish to do:\n1) Clock in\n2) Clock out\nYour answer: "))
if clock_option == 1:
datetime = self.clock.get_date()
attendance_log_df = pd.read_csv(self.clock.path_to_attendance_log, dtype=str)
data = [{'employee_id': user_id, 'date_time_in': datetime}]
dict_df = pd.DataFrame.from_dict(data)
updated_attendance_log_df = attendance_log_df.append(dict_df, sort=False)
print(updated_attendance_log_df)
updated_attendance_log_df.to_csv(self.clock.path_to_attendance_log, mode='w', index=False)
answer = input("Would you like to go back to the menu?\nyes/no\nYour answer: ")
if answer != 'yes':
proceed = False
if clock_option == 2:
# column- datetime, column= in\out
datetime = self.clock.get_date()
attendance_log_df = pd.read_csv(self.clock.path_to_attendance_log, dtype=str)
data = [{'date_time_out': datetime}]
dict_df = pd.DataFrame.from_dict(data)
updated_attendance_log_df = attendance_log_df.append(dict_df, sort=False)
print(updated_attendance_log_df)
updated_attendance_log_df.to_csv(self.clock.path_to_attendance_log, mode='w', index=False)
answer = input("Would you like to go back to the menu?\nyes/no\nYour answer: ")
if answer != 'yes':
proceed = False
def is_valid(self):
restart = True
user_id = None
# validating that the user is one of the employees of the company
while restart:
internal_employees_file_df = pd.read_csv(self.report.path_to_internal_employees_file, dtype=str)
print(internal_employees_file_df)
print("Welcome to Employee Attendance Management System.")
user_id = str(input("Please enter your ID: "))
user_name = str(input("Please enter your full name: "))
# TODO: check validity
input_df = pd.DataFrame(data=[[user_id, user_name]], columns=["user_id", "user_name"])
is_id_valid = internal_employees_file_df['employee_id'].isin(input_df['user_id'])
is_name_valid = internal_employees_file_df['employee_name'].isin(input_df['user_name'])
if is_id_valid.any() and is_name_valid.any():
restart = False
print("you are valid")
else:
print("try again")
if not restart:
self.start(user_id)
class Clock:
def __init__(self, path_to_internal_employees_file, path_to_attendance_log):
self.path_to_internal_employees_file = path_to_internal_employees_file
self.path_to_attendance_log = path_to_attendance_log
def get_date(self):
return datetime.now().isoformat(' ', 'seconds')
class Report:
def __init__(self, path_to_internal_employees_file, path_to_attendance_log):
self.path_to_internal_employees_file = path_to_internal_employees_file
self.path_to_attendance_log = path_to_attendance_log
def add_employee(self):
# put in a dict
new_employee_id = str(input("Please enter employee's ID: "))
new_employee_name = str(input("Please enter employee's full name: "))
new_employee_phone = str(input("Please enter employee's phone: "))
new_employee_age = str(input("Please enter employee's age: "))
input_df = pd.DataFrame(data=[[new_employee_id, new_employee_name, new_employee_phone, new_employee_age]],
columns=["employee_id", "employee_name", "employee_phone", "employee_age"])
input_df.to_csv(self.path_to_internal_employees_file, mode='a', header=False, index=False)
def delete_employee(self):
deleted_employee_id = str(input("Please enter employee's ID: "))
internal_employees_file_df = pd.read_csv(self.path_to_internal_employees_file, dtype=str)
internal_employees_file_df.drop(
internal_employees_file_df.loc[internal_employees_file_df['employee_id'] == deleted_employee_id].index,
inplace=True)
print("The new employees file after deleting:\n")
print(internal_employees_file_df)
internal_employees_file_df.to_csv(self.path_to_internal_employees_file, mode='w', index=False)
def add_employees_from_external_file(self):
external_file_path = str(input("Please enter file path: "))
external_employees_file = pd.read_csv(external_file_path, dtype=str)
external_employees_file.to_csv(self.path_to_internal_employees_file, mode='a', header=False, index=False)
def delete_employees_from_external_file(self):
external_file_path = str(input("Please enter file path: "))
external_employees_to_delete_df = pd.read_csv(external_file_path, dtype=str)
print(external_employees_to_delete_df)
internal_employees_file_df = pd.read_csv(self.path_to_internal_employees_file, dtype=str)
condition = internal_employees_file_df['employee_id'].isin(external_employees_to_delete_df['employee_id'])
internal_employees_file_df.drop(internal_employees_file_df[condition].index, inplace=True)
print("The new employees file after deleting:\n")
print(internal_employees_file_df)
internal_employees_file_df.to_csv(self.path_to_internal_employees_file, mode='w', index=False)
def create_report_X(self):
pass
# sort by id,name - depends on the type of report
| 7,981 |
pyigm/fN/constraints.py
|
pyigm/pyigm
| 16 |
2025570
|
""" Class for f(N) constraints
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import imp
import pdb
from pkg_resources import resource_filename
from astropy.io import fits
from astropy import cosmology
class FNConstraint(object):
"""A Class for fN constraints
Parameters
----------
fN_dtype : str
zeval : float
Redshift where the constraint is evaluated
ref : str
Reference
flavor : str
Specific type of constraint
Attributes
----------
fN_dtype : str
Constraint type for the fN
'fN' -- Standard f(N) evaluation
'MFP' -- MFP
'LLS' -- LLS incidence
'DLA' -- DLA incidence
'teff' -- tau effective
'beta' -- slope constraint
flavor : str
Specific type of constraint
comment : str
ref : str
Reference
cosm : astropy.cosmology, optional
Cosmology
zeval : float
Redshift where the constraint is evaluated
data : dict
Dictionary containing the constraints
'MFP' -- MFP, SIG_MFP (pMpc)
'LLS' -- LX, SIG_LX
'DLA' -- LX, SIG_LX
"""
@classmethod
def from_row(cls, ftype, row):
""" Read from binary FITS table
Parameters
----------
row : astropy.table.row
"""
slf = cls(ftype)
# Common items
common = ['REF','COSM','TYPE','COMMENT']
slf.ref = row['REF']
if row['COSM'] in ['VANILLA', '', 'h=0.7, Om=0.3, OL=0.7']:
cosmo = cosmology.core.FlatLambdaCDM(70., 0.3, Ob0=0.0455)
#sigma_8 = 0.80
else:
pdb.set_trace()
raise ValueError('Not ready for this type of cosmology')
slf.cosmo = cosmo
slf.flavor = row['TYPE']
slf.comment = row['COMMENT']
# zeval
if 'ZEVAL' in row.array.names:
slf.zeval = row['ZEVAL']
elif 'Z_LLS' in row.array.names:
slf.zeval = row['Z_LLS']
elif 'Z_MFP' in row.array.names:
slf.zeval = row['Z_MFP']
elif 'Z_TEFF' in row.array.names:
slf.zeval = row['Z_TEFF']
else:
raise ValueError('fN.data: No redshift info!')
# zip the rest
slf.data = dict(zip(row.array.names,row))
for item in common:
slf.data.pop(item) # No need to duplicate
# Return
return slf
@classmethod
def from_fitsfile(cls, fits_file):
""" Build up a list of fN constraints from a multi-extension FITS file
Parameters
----------
fits_file : str or list
Name of FITS file
Returns
-------
fN_list : list
List of FNConstraint objects
"""
# List of constraints
fN_cs = []
# Read
if isinstance(fits_file, list):
for ifile in fits_file:
tmp_cs = cls.from_fitsfile(ifile)
for cs in tmp_cs:
fN_cs.append(cs)
else:
hdus = fits.open(fits_file)
if len(hdus) == 1:
raise ValueError('Expecting a multi-extension fits file -- {:s}'.format(
fits_file))
# Loop through hdu
for hdu in hdus[1:]:
data = hdu.data
# Get ftype
if 'FN' in data.dtype.names:
ftype = 'fN' # Standard f(N) data
elif 'TAU_LIM' in data.dtype.names:
ftype = 'LLS' # LLS survey
elif 'MFP' in data.dtype.names:
ftype = 'MFP' # MFP measurement
elif 'TEFF' in data.dtype.names:
ftype = 'teff' # tau effective (Lya)
else:
raise ValueError('Cannot figure out ftype')
# Loop on the Table
for row in data:
fNc = cls.from_row(ftype, row)
fN_cs.append(fNc)
# Return
return fN_cs
@classmethod
def load_defaults(cls):
""" Load default constraints on f(N)
Returns
-------
all_fN_cs : list
list of FNConstraint objects
"""
fn_file = resource_filename('pyigm', '/data/fN/fN_constraints_z2.5_vanilla.fits')
k13r13_file = resource_filename('pyigm', '/data/fN/fN_constraints_K13R13_vanilla.fits')
n12_file = resource_filename('pyigm', '/data/fN/fN_constraints_N12_vanilla.fits')
# Load
all_fN_cs = cls.from_fitsfile([fn_file,k13r13_file, n12_file])
# Return
return all_fN_cs
# Initialize with type
def __init__(self, fN_dtype, zeval=0., ref='', flavor='', cosmo=None,
data=None):
"""
Parameters
----------
fN_dtype : str
zeval : float, optional
ref : str, optional
flavor : str, optional
cosmo : astropy.cosmology
data : dict, optional
"""
if fN_dtype not in ['fN', 'MFP', 'LLS', 'DLA', 'teff', 'beta']:
raise IOError('Bad f(N) constraint')
self.fN_dtype = fN_dtype
self.zeval = zeval
self.ref = ref
self.flavor = flavor
self.cosmo = cosmo
if data is None:
self.data = {}
else:
self.data = data
def __repr__(self):
return ('<{:s}: {:s}_{:s} zeval={:g}, ref={:s}>'.format(
self.__class__.__name__, self.fN_dtype, self.flavor,
self.zeval, self.ref))
def fN_data_from_ascii_file(infile):
"""
Parameters
----------
infile
Returns
-------
"""
assert False # Bad code
#makes new fN constraint with data type fN
fNc = FNConstraint('fN')
ftype = fNc.fN_dtype.encode('ascii')
fNc.fN_dtype = ftype
fNc.ref=infile.encode('ascii')
# Open file
f = open(infile, 'r')
# Read and ignore header lines
firstline = f.readline()
# get rid of newline /n symbol
firstline =firstline.strip()
#get zeval and DX from first line
values = firstline.split()
fNc.zeval = float(values[0])
ZEVAL = float(values[0])
DX = float(values[1])
#declaration of variables
BINS1 =[]
BINS2 = []
fn = []
SIG_FN1 = []
SIG_FN2 = []
count = 0
numlines=0
# Loop over lines and extract info
for line in f:
line = line.strip()
columns = line.split()
BINS1.append(float(columns[0]))
BINS2.append(float(columns[1]))
fn.append(float(columns[2]))
SIG_FN1.append(float(columns[3]))
SIG_FN2.append(float(columns[3]))
numlines +=1
if (float(columns[0])!=0) or (float(columns[1])!=0) or (float(columns[2])!=0) or (float(columns[3])!=0):
count +=1
f.close()
NPT = int(count)
bins = []
bins.append(BINS1)
bins.append(BINS2)
sig_fn = []
sig_fn.append(SIG_FN1)
sig_fn.append(SIG_FN2)
BINS = np.ndarray(shape=(2, numlines), dtype=float, buffer=np.array(bins))
SIG_FN = np.ndarray(shape=(2, numlines), dtype=float, buffer=np.array(sig_fn))
FN = np.ndarray(shape=(numlines,), dtype=float, buffer=np.array(fn))
#makes array with names in ASCII not unicode
arrayofnames = ['BINS','FN','SIG_FN','DX','NPT','ZEVAL']
names = []
for name in arrayofnames:
newname = name.encode('ascii')
names.append(newname)
values = [BINS,FN,SIG_FN,DX,NPT,ZEVAL]
fNc.data = dict(zip(names, values))
return fNc
| 7,651 |
script/train.py
|
stdiff/emo-classifier
| 0 |
2025196
|
import logging
from emo_classifier import setup_logger
from training import LocalPaths
from training.train_tfidf import start_training_tfidf_model
from training.utils_for_sagemaker import InstanceType, generate_tag_list, start_sagemaker_training_job
logging.getLogger().setLevel(logging.INFO)
logger = setup_logger(__name__)
local_paths = LocalPaths()
def train_tfidf_model_on_sagemaker():
entry_point = local_paths.project_root / "training/train_tfidf.py"
tags = generate_tag_list(Project="emo-classifier", Owner="hironori.sakai", Env="DEV")
start_sagemaker_training_job(
base_job_name="emo-classifier", entry_point=entry_point, tags=tags, instance_type=InstanceType.local
)
def train_embedding_bag_model_on_local():
from training.train_embedding_bag import start_train_embedding_bag_model
logger.info(f"Start Training an embedding bag model on local machine")
start_train_embedding_bag_model(embedding_dim=32, max_epoch=20, patience=3)
def train_embedding_bag_model_on_sagemaker():
logger.info(f"Start Training an embedding bag model on sagemaker")
entry_point = local_paths.project_root / "training/train_embedding_bag.py"
tags = generate_tag_list(Project="emo-classifier", Owner="hironori.sakai", Env="DEV")
start_sagemaker_training_job(
base_job_name="emo-classifier", entry_point=entry_point, tags=tags, instance_type=InstanceType.ml_m5_xlarge
)
def start():
"""
This function can be executed by command "poetry run train"
"""
# start_training_tfidf_model()
# train_tfidf_model_on_sagemaker()
train_embedding_bag_model_on_local()
# train_embedding_bag_model_on_sagemaker()
if __name__ == "__main__":
start()
| 1,723 |
allenact_plugins/manipulathor_plugin/armpointnav_constants.py
|
brandontrabucco/allenact
| 187 |
2025480
|
import json
import os
from constants import ABS_PATH_OF_TOP_LEVEL_DIR
TRAIN_OBJECTS = ["Apple", "Bread", "Tomato", "Lettuce", "Pot", "Mug"]
TEST_OBJECTS = ["Potato", "SoapBottle", "Pan", "Egg", "Spatula", "Cup"]
MOVE_ARM_CONSTANT = 0.05
MOVE_ARM_HEIGHT_CONSTANT = MOVE_ARM_CONSTANT
UNWANTED_MOVE_THR = 0.01
DISTANCE_EPS = 1e-9
DISTANCE_MAX = 10.0
dataset_json_file = os.path.join(
ABS_PATH_OF_TOP_LEVEL_DIR, "datasets", "apnd-dataset", "starting_pose.json"
)
try:
with open(dataset_json_file) as f:
ARM_START_POSITIONS = json.load(f)
except Exception:
raise Exception("Dataset not found in {}".format(dataset_json_file))
| 644 |
apis/ibapi.py
|
rickdynasty/BluecollarChatbot
| 0 |
2025625
|
from utils.FeignClient import get,post,delete,put
baseurl = 'http://ibrain/'
def query_by_id(json_data):
return post(baseurl+'askbob/ask',json=json_data)
def query_by_problem_name(str:str):
# data = {'inputText':str,'precise':True}
# return post(baseurl+'askbob/ask',json=data)
return str
| 310 |
scripts/generate_workloads.py
|
jatinarora2409/scout-scripts
| 0 |
2024892
|
hibench_workloads = ['bayes.spark', 'dfsioe.hadoop', 'wordcount.spark', 'lr.spark', 'als.spark', 'aggregation.spark', 'aggregation.hadoop', 'pagerank.spark', 'join.spark', 'join.hadoop', 'pagerank.hadoop', 'scan.spark', 'scan.hadoop', 'sort.hadoop', 'sort.spark', 'terasort.hadoop', 'terasort.spark']
sparkperf_workloads = ['regression', 'als', 'fp-growth', 'block-matrix-mult', 'classification', 'chi-sq-feature', 'pic', 'gradient-boosted-tree', 'gmm', 'decision-tree', 'random-forest', 'kmeans', 'lda', 'chi-sq-gof', 'chi-sq-mat', 'naive-bayes', 'pca', 'summary-statistics', 'svd', 'pearson', 'word2vec', 'spearman']
workload_list = []
datasize = 'small'
for w in hibench_workloads:
app, framework = w.split('.')
workload_list.append('hibench {} {} {} 1'.format(framework, app, datasize))
for app in sparkperf_workloads:
workload_list.append('sparkperf spark1.5 {} {} 1'.format(app, datasize))
workload_strs = ""
for w in workload_list:
print(w)
workload_strs += ' -w "' + w + '"'
print("myaws run {} --instance-type c4.large --instance-num 3 --ami ami-2196095e --keyname scout --cluster-mode n+1 --s3-bucket scout-dataset-test --no-terminate --no-dry-run".format(workload_strs))
| 1,208 |
setup.py
|
JeremyAlain/lottery_ticket_pruner
| 8 |
2025056
|
import setuptools
def get_version():
""" Do this so we don't have to import lottery_ticket_pruner which requires keras which cannot be counted on
to be installed when this package gets installed.
"""
with open('lottery_ticket_pruner/__init__.py', 'r') as f:
for line in f.readlines():
if line.startswith('__version__'):
version = line.split('=')[1].strip().replace('"', '').replace('\'', '')
return version
return ''
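# For example (illustrative value only), if lottery_ticket_pruner/__init__.py
# contains a line like:
#     __version__ = '1.0.0'
# then get_version() returns '1.0.0' without importing the package, and so
# without requiring keras/tensorflow at install time.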
def get_long_description():
with open('README.md', 'r') as fh:
return fh.read()
setuptools.setup(
name='lottery-ticket-pruner',
version=get_version(),
author='<NAME>',
author_email='<EMAIL>',
description='Enables pruning of Keras DNNs using "lottery ticket" pruning',
long_description=get_long_description(),
long_description_content_type='text/markdown',
url='https://github.com/jim-meyer/lottery_ticket_pruner',
packages=setuptools.find_packages(),
# Don't specify version of tensorflow so that this package can easily be used in AWS deep learning AMIs
# where the "tensorflow" package is actually named "tensorflow-gpu"
# install_requires=['keras>=2.1.0', 'tensorflow>=1.12', 'numpy>=1.18.3'],
install_requires=['keras>=2.1.0', 'numpy>=1.18.3'],
classifiers=[
'Programming Language :: Python :: 3',
"Development Status :: 4 - Beta",
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Image Recognition",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Libraries",
],
    python_requires='>=3.6'
)
| 1,944 |
DataBase/CinemaBase/url.py
|
John15321/CinemaManagementSystem
| 3 |
2025946
|
from django.urls import path
from . import views
urlpatterns = [
path('login/',views.loginPage, name='login'),
path('logout/',views.logoutUser, name='logout'),
path('register/',views.registerPage, name='register'),
path('permission/', views.permission, name='permission'),
path('', views.home, name='home'),
path('shows/', views.shows, name='shows'),
path('show/<str:pk>/', views.show, name='show'),
path('projectors/', views.projectors, name='projectors'),
path('cinemahall/<str:pk>', views.cinemahall, name='cinemahall'),
path('cinemahalls/', views.cinemahalls, name='cinemahalls'),
path('soundsystems/', views.soundsystems, name='soundsystems'),
path('creatshow/', views.creatShow, name='creatshow'),
path('updateshow/<str:pk>/', views.updateShow, name='updateshow'),
path('deleteshow/<str:pk>/', views.deleteShow, name='deleteshow'),
path('addprojector/', views.addprojector, name='addprojector'),
path('deleteprojector/<str:pk>/', views.deleteprojector, name='deleteprojector'),
path('addsoundsystem/', views.addsoundsystem, name='addsoundsystem'),
path('deletesoundsystem/<str:pk>/', views.deletesoundsystem, name='deletesoundsystem'),
path('addcinemahall/', views.addcinemahall, name='addcinemahall'),
path('deletecinemahall/<str:pk>/', views.deletecinemahall, name='deletecinemahall'),
path('updatecinemahall/<str:pk>/', views.updatecinemahall, name='updatecinemahll'),
path('spls/', views.spls, name='spls'),
path('spl/<str:pk>/', views.spl, name='spl'),
path('createspl/', views.createspl, name='createspl'),
path('deletespl/<str:pk>/', views.deletespl, name='deletespl'),
path('updatespl/<str:pk>/', views.updatespl, name='updatespl'),
path('cpls/', views.cpls, name='cpls'),
path('createcpl/', views.createcpl, name='createcpl'),
path('deletecpl/<str:pk>/', views.deletecpl, name='deletecpl'),
path('updatecpl/<str:pk>/', views.updatecpl, name='updatecpl'),
path('kdms/', views.kdms, name='kdms'),
path('createkdm/', views.createkdm, name='createkdm'),
path('deletekdm/<str:pk>/', views.deletekdm, name='deletekdm'),
path('effects/', views.effects, name='effects'),
path('createeffect/', views.createeffect, name='createeffect'),
path('deleteeffect/<str:pk>/', views.deleteeffect, name='deleteeffect'),
]
| 2,365 |
mcs_search/find_mcs.py
|
iwatobipen/rdkit_cpp
| 5 |
2025874
|
import sys
from rdkit import Chem
from rdkit.Chem import rdFMCS
def get_mcs(mols):
mcs = rdFMCS.FindMCS(mols)
return mcs
if __name__=='__main__':
mols = Chem.SDMolSupplier(sys.argv[1])
mcs = get_mcs(mols)
print(mcs.smartsString)
| 251 |
os_xml_automation/shared_res.py
|
osfunapps/os-xml-automation-py
| 1 |
2025714
|
import os_file_handler.file_handler as fh
from os_xml_handler import xml_handler as xh
from os_tools import tools as tools
ACTION = 'action'
PATH_TYPE = 'path_type'
PATH_TYPE_SEARCH = 'search'
PATH_TYPE_AS_SRC = 'as_src'
# root children
NODE_FILE_SRC = 'file_src'
NODE_FILE_DST = 'file_dst'
NODE_ORIGINAL_TEXT = 'original_text'
NODE_NEW_TEXT = 'new_text'
# file node children
NODE_PATH = 'path'
NODE_SEARCH_PATH = 'search_path'
NODE_FULL_NAME = 'full_name'
NODE_PREFIX = 'name_prefix'
NODE_PREFIX_2 = 'prefix'  # added because users tend to get this wrong
NODE_SUFFIX = 'name_suffix'
NODE_SUFFIX_2 = 'suffix'  # added because users tend to get this wrong
NODE_EXTENSION = 'extension'
# file node types
NODE_DIR_SRC = 'dir_src'
NODE_DIR_DST = 'dir_dst'
NODE_STEP = 'step'
NODE_ROOT_ATT_EXTENSION_MAPPER_PATH = 'extension_mapper_path'
| 835 |
test/test_playlist_service_api.py
|
stanionascu/python-embyapi
| 0 |
2024308
|
# coding: utf-8
"""
Emby Server API
Explore the Emby Server API # noqa: E501
OpenAPI spec version: 4.1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import embyapi
from embyapi.api.playlist_service_api import PlaylistServiceApi # noqa: E501
from embyapi.rest import ApiException
class TestPlaylistServiceApi(unittest.TestCase):
"""PlaylistServiceApi unit test stubs"""
def setUp(self):
self.api = PlaylistServiceApi() # noqa: E501
def tearDown(self):
pass
def test_delete_playlists_by_id_items(self):
"""Test case for delete_playlists_by_id_items
Removes items from a playlist # noqa: E501
"""
pass
def test_get_playlists_by_id_items(self):
"""Test case for get_playlists_by_id_items
Gets the original items of a playlist # noqa: E501
"""
pass
def test_post_playlists(self):
"""Test case for post_playlists
Creates a new playlist # noqa: E501
"""
pass
def test_post_playlists_by_id_items(self):
"""Test case for post_playlists_by_id_items
Adds items to a playlist # noqa: E501
"""
pass
def test_post_playlists_by_id_items_by_itemid_move_by_newindex(self):
"""Test case for post_playlists_by_id_items_by_itemid_move_by_newindex
Moves a playlist item # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 1,559 |
run_kpt_conv.py
|
felixmusil/run_qe
| 0 |
2025731
|
import cPickle as pck
import pandas as pd
from make_input.qe_input import makeQEInput
from make_input.qe_run import run_qe_hpc
from tqdm import tqdm
from make_input.raw_info import bravaisLattice2ibrav,SG2BravaisLattice
from make_input.SSSP_acc_PBE_info import wfccutoffs,rhocutoffs
sgs = range(1,230+1)
sg2ibrav = {}
ibrav2sg = {ibrav:[] for ibrav in bravaisLattice2ibrav.values()}
for sg in sgs:
bl = SG2BravaisLattice[sg]
ibrav = bravaisLattice2ibrav[bl]
ibrav2sg[ibrav].append(sg)
sg2ibrav[sg] = ibrav
calculation_type = '"scf"'
zatom = 14
kpt = [2,2,2]
Nkpts = [1000,2000,3000,5000]
# rhocutoff ,wfccutoff = None,None
rhocutoff ,wfccutoff = rhocutoffs[zatom],wfccutoffs[zatom]
smearing = 1e-2
etot_conv_thr = 1e-4
forc_conv_thr = 1e-4
nstep = 100
scf_conv_thr = 1e-6
hpc = 'deneb'
node = 1
tasks = 16
cpus_per_tasks = 1
mem = 63000
time = '10:00:00'
debug = False
dataPath = '/scratch/musil/qmat/data/'
ppPath='"/scratch/musil/qmat/run_qe/pseudo/SSSP_acc_PBE/"'
fileNames = {}
infoPath = './info/'
structurePath = './structures/'
#fileNames['crystals'] = structurePath + 'input_crystals_sg1-230-18-10-17.pck'
fileNames['crystals'] = '/scratch/musil/qmat/data/run_relax_Si/low_energy_structures.pck'
fileNames['wyck'] = infoPath+'SpaceGroup-multiplicity-wickoff-info.pck'
fileNames['general info'] = infoPath+'SpaceGroup-general-info.pck'
fileNames['elements info'] = infoPath+'General-Info-Elements-fast.pck'
with open(fileNames['crystals'],'rb') as f:
crystals = pck.load(f)
with open(fileNames['wyck'],'rb') as f:
WyckTable = pck.load(f)
SGTable = pd.read_pickle(fileNames['general info'])
ElemTable = pd.read_pickle(fileNames['elements info'])
dirNames = {(sg,it,Nkpt):dataPath + 'kpt_convergence_2/sg_{}-f_{}-kpt_{}'.format(sg,it,Nkpt)
for (sg,it) in crystals.keys() for Nkpt in Nkpts }
# crystal = crystals[sg][it]
# dirName = dataPath + 'test_run/sg_{}-f_{}'.format(sg,it)
# print 'Calc in folder:'
# print dirName
print 'sending the calcs'
pbar = tqdm(total=len(dirNames),ascii=True)
for (sg,it,Nkpt),dirName in dirNames.iteritems():
crystal = crystals[(sg,it)]
input_str = makeQEInput(crystal,sg,WyckTable,SGTable,ElemTable,
zatom = zatom,rhocutoff = rhocutoff,wfccutoff = wfccutoff,
calculation_type=calculation_type,smearing=smearing,collect_wf =False,
pressure=0,press_conv_thr=0.5,cell_factor=5,force_ibrav0=True,
etot_conv_thr=etot_conv_thr,forc_conv_thr=forc_conv_thr,nstep=nstep,
scf_conv_thr=scf_conv_thr,print_forces=True,
kpt = kpt,Nkpt=Nkpt ,kpt_offset = [0,0,0],
ppPath=ppPath)
exitstatus = run_qe_hpc(input_str,dirName,verbose=False,hpc=hpc, node=node,
tasks_per_node=tasks,name='{}_{}_{}'.format(sg,it,Nkpt),
cpus_per_tasks=cpus_per_tasks, mem=mem, time=time, debug=debug)
pbar.update()
pbar.close()
| 2,982 |
sparrow_django_common/utils/validation_data.py
|
littletiger7/sparrow_django_common
| 2 |
2025052
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
class VerificationConfiguration(object):
"""验证中间件需要配置"""
def __valid_permission_svc(self):
"""验证settings中的配置, 如有新增的配置,请在此处加上验证"""
try:
self.PERMISSION_SERVICE = settings.PERMISSION_MIDDLEWARE['PERMISSION_SERVICE']
self.FILTER_PATH = settings.PERMISSION_MIDDLEWARE['FILTER_PATH']
self.PERMISSION_SERVICE['name']
self.PERMISSION_SERVICE['host']
self.PERMISSION_SERVICE['address']
except KeyError as ex:
raise NotImplementedError("没有配置这个参数%s"% ex)
def __verify_middleware_location(self, request):
"""验证中间件位置"""
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"PermissionMiddleware应该放置在'sparrow_django_common.middleware.JWT_middleware.JWTMiddleware'后面")
def valid_permission_svc(self):
"""settings配置数据校验"""
self.__valid_permission_svc()
def verify_middleware_location(self, request):
"""校验中间件位置"""
self.__verify_middleware_location(request)
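# Illustrative example of the settings this class validates (all values are
# placeholders; real projects define their own in settings.py):
#
# PERMISSION_MIDDLEWARE = {
#     'PERMISSION_SERVICE': {
#         'name': 'permission-service',
#         'host': 'permission.example.com',
#         'address': 'http://permission.example.com/api/permissions/',
#     },
#     'FILTER_PATH': ['/healthcheck/'],
# }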
| 1,137 |
dpaste/settings/base.py
|
jcroot/dpaste
| 278 |
2025971
|
# ==============================================================================
# Import global settings to make it easier to extend settings.
# ==============================================================================
import os
import sys
import dj_database_url
import dpaste
env = os.environ.get
BASE_DIR, PROJECT_MODULE_NAME = os.path.split(
os.path.dirname(os.path.realpath(dpaste.__file__))
)
# ==============================================================================
# Settings
# ==============================================================================
DEBUG = env("DEBUG") == "True"
SITE_ID = 1
# Make this unique, and don't share it with anybody.
SECRET_KEY = env("SECRET_KEY", "secret-key")
ALLOWED_HOSTS = env("ALLOWED_HOSTS", "*").split(",")
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = False
LANGUAGE_CODE = "en"
LANGUAGES = (("en", "English"),)
# LOCALE_PATHS = (
# os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'locale')),
# )
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
STATIC_ROOT = env("STATIC_ROOT", ".static")
MEDIA_ROOT = env("MEDIA_ROOT", ".media")
STATIC_URL = "/static/"
ROOT_URLCONF = "dpaste.urls"
WSGI_APPLICATION = "dpaste.wsgi.application"
MIDDLEWARE = [
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.security.SecurityMiddleware",
"csp.middleware.CSPMiddleware",
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.template.context_processors.i18n",
]
},
}
]
INSTALLED_APPS = [
"django.contrib.staticfiles",
"django.contrib.sessions",
"staticinline.apps.StaticInlineAppConfig",
"dpaste.apps.dpasteAppConfig",
]
sys.stdout.write(f"\n🐘 Database URL is: {env('DATABASE_URL')}\n")
DATABASES = {"default": dj_database_url.config(default="sqlite:///dpaste.sqlite")}
# ==============================================================================
# App specific settings
# ==============================================================================
# If this project installation was built with production settings,
# add that webserver right away.
try:
import django_webserver # noqa
INSTALLED_APPS.append("django_webserver")
sys.stdout.write(
f'\n🚀 Production webserver installed. Will run on port {env("PORT")}\n'
)
except ImportError:
pass
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
CSP_DEFAULT_SRC = ("'none'",)
CSP_SCRIPT_SRC = ("'self'", "'unsafe-inline'")
CSP_STYLE_SRC = ("'self'", "'unsafe-inline'")
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
}
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
}
},
}
| 3,880 |
testBeta.py
|
takao42/genetic-linkage
| 0 |
2025734
|
#!/usr/bin/python
import numpy as np
from timer import Timer
from GenericLinkage import *
def testBasic0():
L1 = np.array([[0,0,0,0,0],[1,1,1,1,1]])
L2 = np.array([[1,1,1,1,1],[0,0,0,0,0]])
L1 = np.array(L1, dtype = np.bool)
L2 = np.array(L2, dtype = np.bool)
RF = np.array([0.05,0.05,0.05,0.05])
with Timer('generating 3 progenies'):
Y = cross2(L1,L2,RF,3)
print(Y)
def testBasic1():
# number of columns in a gene
n = 1400000
# number of progenies to produce
k = 30
# generate two random 2 by n matrices
# filled with random 0 and 1
L1 = np.random.randint(2, size=(2,n))
L1 = np.array(L1, dtype = np.bool)
L2 = np.random.randint(2, size=(2,n))
L2 = np.array(L2, dtype = np.bool)
# array of recombination frequencies of size (n-1)
RF = 0.1*np.random.random(n-1)
# generate progenies
timerName = 'generating ' + str(k) + ' progenies'
with Timer(timerName):
Y = cross2(L1,L2,RF,k)
def testRotation0():
# up in column-based corresponds to left in row-based
# down in column-based corresponds to right in row-based
# conversion from column-based to row-based
# will be done by rotating the matrices
# by 270 degrees
L1 = np.array([[0,1],[0,1],[0,1],[0,1],[0,1]])
L2 = np.array([[1,0],[1,0],[1,0],[1,0],[1,0]])
RF = np.array([[0.05],[0.05],[0.05],[0.05]])
# convert
L1 = np.rot90(L1, 3)
L2 = np.rot90(L2, 3)
RF = RF[:,0]
with Timer('generating 3 progenies'):
Y = cross2(L1,L2,RF,3)
def testRotation1():
# up in column-based corresponds to left in row-based
# down in column-based corresponds to right in row-based
# conversion from column-based to row-based
# will be done by rotating the matrices
# by 270 degrees
# number of rows in a gene
n = 1400000
#n = 6480
# number of progenies to produce
k = 30
#k = 3240
# generate two random n by 2 matrices
# filled with 0 and 1
L1 = np.random.randint(2, size=(n,2))
L2 = np.random.randint(2, size=(n,2))
# generate (n-1)*1 matrix of random recombination frequencies
RF = 0.1*np.random.random((n-1, 1))
# generate progenies
# convert
L1 = np.rot90(L1, 3)
L2 = np.rot90(L2, 3)
RF = RF[:,0]
with Timer('generating 30 progenies'):
Y = cross2(L1,L2,RF,k)
def testMP0():
L1 = np.array([[0,0,0,0,0],[1,1,1,1,1]])
L2 = np.array([[1,1,1,1,1],[0,0,0,0,0]])
RF = np.array([0.05,0.05,0.05,0.05])
k = 35
timerName = 'generating ' + str(k) + ' progenies'
with Timer(timerName):
Y = cross2mp(L1,L2,RF,k)
def testMP1():
# number of rows in a gene
n = 1400000
# number of progenies to produce
k = 489
# generate two random 2 by n matrices
# filled with random 0 and 1
L1 = np.random.randint(2, size=(2,n))
L2 = np.random.randint(2, size=(2,n))
# array of recombination frequencies of size (n-1)
RF = 0.1*np.random.random(n-1)
# generate progenies
timerName = 'generating ' + str(k) + ' progenies'
with Timer(timerName):
Y = cross2mp(L1,L2,RF,k)
def testSP0():
L1 = np.array([[0,0,0,0,0],[1,1,1,1,1]])
L2 = np.array([[1,1,1,1,1],[0,0,0,0,0]])
RF = np.array([0.05,0.05,0.05,0.05])
k = 35
timerName = 'generating ' + str(k) + ' progenies'
with Timer(timerName):
Y = cross2sp(L1,L2,RF,k)
def testSP1():
# number of rows in a gene
n = 1400000
# number of progenies to produce
k = 489
# generate two random 2 by n matrices
# filled with random 0 and 1
L1 = np.random.randint(2, size=(2,n))
L2 = np.random.randint(2, size=(2,n))
# array of recombination frequencies of size (n-1)
RF = 0.1*np.random.random(n-1)
# generate progenies
timerName = 'generating ' + str(k) + ' progenies'
with Timer(timerName):
Y = cross2sp(L1,L2,RF,k)
if __name__ == '__main__':
#testBasic0()
testBasic1()
#testRotation0()
#testRotation1()
#testMP0()
#testMP1()
#testSP0()
#testSP1()
| 3,724 |
pzc/locations.py
|
miquelramirez/CMPzC
| 0 |
2023095
|
import sys
class VictoryLocation :
def __init__( self ) :
self.X = None
self.Y = None
self.nationality = None
self.value = None
def load( self, tokens ) :
self.X = int(tokens[1])
self.Y = int(tokens[2])
self.value = int(tokens[3])
self.nationality = tokens[4]
def write( self ) :
tokens = [ "6" ]
tokens += [ str(self.X) ]
tokens += [ str(self.Y) ]
tokens += [ str(self.value) ]
tokens += [ str(self.nationality) ]
return " ".join( tokens )
fort_types = { 1:'Improved',
2:'Trench',
4:'Pillboxes',
8:'Bunker',
16:'Minefield',
516:'Pillboxes & Improved',
514:'Trench (reduced)',
520:'Bunker & Improved'
}
class FortifiedLocation :
def __init__( self ) :
self.X = None
self.Y = None
self.type = None
def load( self, tokens ) :
self.X = int( tokens[1] )
self.Y = int( tokens[2] )
try :
self.type = fort_types[ int(tokens[3]) ]
except KeyError :
if int(tokens[3]) == 32768 :
print >> sys.stdout, "Impassable hex at: (%d,%d)"%( self.X, self.Y )
return False
return True
| 1,081 |
main.py
|
dantesup/SE
| 0 |
2025328
|
# coding=utf-8
from flask import Flask, render_template, url_for, request, session, redirect
# from flask_pymongo import PyMongo
import bcrypt # to hash the password
app = Flask(__name__)
# app.config['MONGO_DBNAME'] = 'web_mongodb'
# app.config['MONGO_URI'] = 'mongodb://hugo:<EMAIL>:40877/web_mongodb'
# mongo = PyMongo(app)
@app.route('/')
def index():
if 'email' in session:
return render_template('index.html', blank=session['email'])
# return 'You are logged in as ' + session['email']
return render_template('index.html', blank='登录')
# return render_template('sign_login.html')
@app.route('/sign_login', methods=['GET', 'POST'])
def sign_login():
# if request.method == 'GET':
return render_template('sign_login.html')
# users = mongo.db.users
# if request.form.get('email_login') is not None:
# login_user = users.find_one({'email': request.form.get('email_login')})
# if login_user and bcrypt.hashpw(request.form['password_login'].encode('utf-8'),
# login_user['password']) == login_user['password']:
# session['email'] = request.form['email_login']
# return redirect(url_for('index'))
# else:
# existing_user = users.find_one({'email': request.form.get('email_signup')})
# if existing_user is None:
# hashpass = bcrypt.hashpw(request.form['password_signup'].encode('utf-8'), bcrypt.gensalt())
# users.insert({'email': request.form['email_signup'], 'password': <PASSWORD>,
# 'username': request.form.get('username_signup'), 'intro': request.form.get('intro_signup')})
# session['email'] = request.form['email_signup']
# return redirect(url_for('index'))
# return 'That email already exists!'
# return render_template('sign_login.html')
@app.route('/user_info')
def user_info():
users = mongo.db.users
login_user = users.find_one({'email': session['email']})
return render_template('user_info.html', username=login_user['username'], email=login_user['email'],
intro=login_user['intro'])
@app.route('/spider', methods=['POST', 'GET'])
def spider():
return redirect('https://www.baidu.com/s?wd=%ssite:tieba.baidu.com' % request.form.get('search'))
# return render_template('spider.html')
@app.route('/contact')
def contact():
return render_template('contact.html')
if __name__ == '__main__':
app.secret_key = 'mysecret'
app.run(debug=True)
| 2,556 |
telemetry/third_party/web-page-replay/rules_parser.py
|
ravitejavalluri/catapult
| 226 |
2023085
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Rules parser.
The input syntax is:
[{"comment": ignored_value},
{"rule_class_name1": {"arg1": value, "arg2": value, ...}},
{"rule_class_name2": {"arg1": value, "arg2": value, ...}},
...]
E.g.:
[{"comment": "this text is ignored"},
{"SendStatus": {"url": "example\\.com/ss.*", "status": 204}},
{"ModifyUrl": {"url": "(example\\.com)(/.*)", "new_url": "{1}"}}
]
"""
import json
import re
class Error(Exception):
pass
class Rules(object):
"""A parsed sequence of Rule objects."""
def __init__(self, file_obj=None, allowed_imports=None):
"""Initializes from the given file object.
Args:
file_obj: A file object.
allowed_imports: A set of strings, defaults to {'rules'}.
Use {'*'} to allow any import path.
"""
if allowed_imports is None:
allowed_imports = {'rules'}
self._rules = [] if file_obj is None else _Load(file_obj, allowed_imports)
def Contains(self, rule_type_name):
"""Returns true if any rule matches the given type name.
Args:
rule_type_name: a string.
Returns:
True if any rule matches, else False.
"""
return any(rule for rule in self._rules if rule.IsType(rule_type_name))
def Find(self, rule_type_name):
"""Returns a _Rule object containing all rules with the given type name.
Args:
rule_type_name: a string.
Returns:
A callable object that expects two arguments:
request: the httparchive ArchivedHttpRequest
response: the httparchive ArchivedHttpResponse
and returns the rule return_value of the first rule that returns
should_stop == True, or the last rule's return_value if all rules returns
should_stop == False.
"""
matches = [rule for rule in self._rules if rule.IsType(rule_type_name)]
return _Rule(matches)
def __str__(self):
return _ToString(self._rules)
def __repr__(self):
return str(self)
class _Rule(object):
"""Calls a sequence of Rule objects until one returns should_stop."""
def __init__(self, rules):
self._rules = rules
def __call__(self, request, response):
"""Calls the rules until one returns should_stop.
Args:
request: the httparchive ArchivedHttpRequest.
response: the httparchive ArchivedHttpResponse, which may be None.
Returns:
The rule return_value of the first rule that returns should_stop == True,
or the last rule's return_value if all rules return should_stop == False.
"""
return_value = None
for rule in self._rules:
should_stop, return_value = rule.ApplyRule(
return_value, request, response)
if should_stop:
break
return return_value
def __str__(self):
return _ToString(self._rules)
def __repr__(self):
return str(self)
def _ToString(rules):
"""Formats a sequence of Rule objects into a string."""
return '[\n%s\n]' % '\n'.join('%s' % rule for rule in rules)
def _Load(file_obj, allowed_imports):
"""Parses and evaluates all rules in the given file.
Args:
file_obj: a file object.
allowed_imports: a sequence of strings, e.g.: {'rules'}.
Returns:
a list of rules.
"""
rules = []
entries = json.load(file_obj)
if not isinstance(entries, list):
raise Error('Expecting a list, not %s', type(entries))
for i, entry in enumerate(entries):
if not isinstance(entry, dict):
raise Error('%s: Expecting a dict, not %s', i, type(entry))
if len(entry) != 1:
raise Error('%s: Expecting 1 item, not %d', i, len(entry))
name, args = next(entry.iteritems())
if not isinstance(name, basestring):
raise Error('%s: Expecting a string TYPE, not %s', i, type(name))
if not re.match(r'(\w+\.)*\w+$', name):
raise Error('%s: Expecting a classname TYPE, not %s', i, name)
if name == 'comment':
continue
if not isinstance(args, dict):
raise Error('%s: Expecting a dict ARGS, not %s', i, type(args))
fullname = str(name)
if '.' not in fullname:
fullname = 'rules.%s' % fullname
modulename, classname = fullname.rsplit('.', 1)
if '*' not in allowed_imports and modulename not in allowed_imports:
raise Error('%s: Package %r is not in allowed_imports', i, modulename)
module = __import__(modulename, fromlist=[classname])
clazz = getattr(module, classname)
missing = {s for s in ('IsType', 'ApplyRule') if not hasattr(clazz, s)}
if missing:
raise Error('%s: %s lacks %s', i, clazz.__name__, ' and '.join(missing))
rule = clazz(**args)
rules.append(rule)
return rules
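# Illustrative usage sketch (the file name is hypothetical; 'SendStatus' is
# the rule type from the module docstring example):
#
#   with open('dr.rules') as rules_file:
#       rules = Rules(rules_file)
#   if rules.Contains('SendStatus'):
#       send_status = rules.Find('SendStatus')
#       return_value = send_status(request, response)  # httparchive objects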
| 5,179 |
NATO_Alphabet/main.py
|
dlouima/python_project
| 0 |
2023619
|
import pandas
data = pandas.read_csv('nato_phonetic_alphabet.csv')
alphabet = {letter.letter: letter.code for (
index, letter) in data.iterrows()}
# check user input word against the dictionary
user_word = input('Enter your name here: ').upper()
# only look up purely alphabetic input; other characters are not in the table
if user_word.isalpha():
    nato_alphabet = []
    for letter in user_word:
        nato_alphabet.append(alphabet[letter])
    # print data back to the user
    print(nato_alphabet)
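# For example, entering "AB" prints something like ['Alfa', 'Bravo'],
# with the exact spellings taken from nato_phonetic_alphabet.csv.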
| 428 |
tmp/analyze_review_sentiment.py
|
kcarnold/sentiment-slant-gi18
| 0 |
2025116
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 13 16:18:57 2017
@author: kcarnold
"""
import pandas as pd
import json
import numpy as np
#%%
PARTICIPANT_LEVEL_CSV = 'data/by_participant/participant_level_sent3_2_2017-06-21T08:52:31.507194.csv'
ANNOTATIONS_JSON = 'sent3_2-sentiment-results.json'
#%%
data = pd.read_csv(PARTICIPANT_LEVEL_CSV).query('kind == "final"')
results = json.load(open(ANNOTATIONS_JSON))['allStates']
#%%
pos_neg = pd.DataFrame([
dict(text=ent['text'],
pos=sum(x['range'][1] - x['range'][0] for x in ent['annotations'] if x['tool'] == 'pos') or 0.,
neg=sum(x['range'][1] - x['range'][0] for x in ent['annotations'] if x['tool'] == 'neg') or 0.)
for ent in results])
#%%
with_pos_neg = pd.merge(data, pos_neg, left_on='finalText', right_on='text')
#%%
pos = with_pos_neg['pos']
neg = with_pos_neg['neg']
with_pos_neg['diversity'] = 1 - (np.abs(pos - neg) / (pos + neg))
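# diversity is 1.0 when the positive and negative spans are equally long
# (e.g. pos=10, neg=10) and 0.0 when only one polarity was annotated
# (e.g. pos=10, neg=0)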
#%%
with_pos_neg.to_csv('annotated_sent3_2-sentiment-results.csv')
#%%
data['pos'] = pos_neg.pos
data['neg'] = pos_neg.neg
#pd.concat([data, pos_neg], axis=1)
| 1,094 |
ABC/abc151-abc200/abc159/a.py
|
KATO-Hiro/AtCoder
| 2 |
2025700
|
# -*- coding: utf-8 -*-
def main():
import sys
input = sys.stdin.readline
n, m = map(int, input().split())
ans = 0
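    # pairs must come from the same group, so count n-choose-2 within each:
    # n * (n - 1) // 2 and m * (m - 1) // 2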
ans += max(0, n * (n - 1) // 2)
ans += max(0, m * (m - 1) // 2)
print(ans)
if __name__ == '__main__':
main()
| 279 |
cost_volume_analysis/model.py
|
mkirby42/flask_app_project
| 0 |
2024496
|
from flask_sqlalchemy import SQLAlchemy
DB = SQLAlchemy()
class Widget(DB.Model):
name = DB.Column(DB.String(100), primary_key = True)
fixed_costs = DB.Column(DB.Integer)
variable_costs = DB.Column(DB.Integer)
price_point = DB.Column(DB.Integer)
def __repr__(self):
return "{}".format(self.name)
products = [
# Name, fixed_costs, variable_costs, price_point
('Book', 5000.00, 4.50, 25.00),
('Truck', 10000000.00, 25000.00, 60000.00),
('Cup', 200.00, 0.50, 2.50),
('Burger', 25000.00, 3.50, 5.50),
]
def upload_widgets(widget_list):
for _ in range(len(widget_list)):
product = Widget(
name = widget_list[_][0],
fixed_costs = widget_list[_][1],
variable_costs = widget_list[_][2],
price_point = widget_list[_][3])
DB.session.add(product)
DB.session.commit()
def add_widget(name, fixed_costs, variable_costs, price_point):
product = Widget(
name = name,
fixed_costs = fixed_costs,
variable_costs = variable_costs,
price_point = price_point)
DB.session.add(product)
DB.session.commit()
| 1,157 |
tcslackbot/utils/slacktoken.py
|
warnerpr/slack-teamcity-bot
| 1 |
2026002
|
""" some methods to handle storing slack tokens in a local file """
import os
def get_token():
""" read token from file and return """
with open(os.path.expanduser('~/.slackbottoken')) as token_file:
token = token_file.read().strip()
return token
| 270 |
harmony_transaction_generator/analysis.py
|
metronotes-testing/transaction-generator
| 1 |
2025721
|
import json
import datetime
import os
import time
from threading import Lock
from multiprocessing.pool import ThreadPool
from collections import defaultdict
import requests
from pyhmy.util import (
json_load,
datetime_format
)
from .common import (
Loggers,
get_config,
)
def live_info(accounts, interval, duration):
# TODO: Function to get live (usable) feedback of current status of the tx-gen, during tx-gen.
pass
def _get_transaction_by_hash(endpoint, txn_hash):
"""
Internal get transaction by has to speed up analysis.
Note that this functionality will eventually be migrated to the `pyhmy`
"""
url = endpoint
payload = "{\"jsonrpc\": \"2.0\", \"method\": \"hmy_getTransactionByHash\"," \
"\"params\": [\"" + txn_hash + "\"],\"id\": 1}"
headers = {
'Content-Type': 'application/json'
}
response = requests.request('POST', url, headers=headers, data=payload, allow_redirects=False, timeout=30)
return json_load(response.content)
def verify_transactions(transaction_log_dir, start_time, end_time):
"""
This will verify all transactions logged in `transaction_log_dir` from `start_time`
(as a datetime obj in UTC) to `end_time` (as a datetime obj in UTC).
It will return a report of the following structure:
```
{
"sent-transaction-report" : {
"sent-transactions": { # key = source shard
"0": [
<transaction log>
],
"1": [
<transaction log>
]
},
"sent-transactions-total": <count>,
"sent-transactions-total-per-shard": {
"(<src_shard>, <dst_shard>)" : <count>
},
"failed-sent-transactions": { # key = source shard
"0": [
<transaction log>
],
"1": [
<transaction log>
]
},
"failed-sent-transactions-total": <count>,
"failed-sent-transactions-total-per-shard": {
"(<src_shard>, <dst_shard>)" : <count>
}
},
"received-transaction-report" : {
"successful-transactions": { # key = source shard
"0": [
<transaction log>
],
"1": [
<transaction log>
]
},
"successful-transactions-total": <count>,
"successful-transactions-total-per-shard": {
"(<src_shard>, <dst_shard>)" : <count>
},
"failed-transactions": { # key = source shard
"0": [
<transaction log>
],
"1": [
<transaction log>
]
},
"failed-transactions-total": <count>,
"failed-transactions-total-per-shard": {
"(<src_shard>, <dst_shard>)" : <count>
}
}
}
```
"""
config = get_config()
transaction_log_dir = os.path.abspath(transaction_log_dir)
assert os.path.isfile(transaction_log_dir)
assert transaction_log_dir.endswith(".log")
assert isinstance(start_time, datetime.datetime)
assert isinstance(end_time, datetime.datetime)
Loggers.report.info(f"{'='*6} Verifying transactions {'='*6}")
with open(transaction_log_dir) as f:
tokens = f.read().split("\n")
transaction_logs = []
for tok in tokens:
if not tok:
continue
tok = tok.split(" : ")
assert len(tok) == 2, f"Line format for `{transaction_log_dir}` is unknown,"
txn_log = json.loads(tok[1].strip())
date = datetime.datetime.strptime(txn_log["send-time-utc"], datetime_format)
if date >= end_time:
break
if date >= start_time:
transaction_logs.append(txn_log)
sent_txn_hashes = set()
sent_txn_per_shard = defaultdict(list)
sent_shard_txn_total = defaultdict(int)
failed_sent_txn_count = 0
failed_sent_txn_per_shard = defaultdict(list)
failed_sent_shard_txn_total = defaultdict(int)
for txn_log in transaction_logs:
txn_hash = txn_log["hash"]
src, dst = str(txn_log["from-shard"]), str(txn_log["to-shard"])
if txn_hash is None:
failed_sent_txn_count += 1
failed_sent_shard_txn_total[f"({src}, {dst})"] += 1
failed_sent_txn_per_shard[src].append(txn_log)
elif txn_hash not in sent_txn_hashes:
sent_txn_hashes.add(txn_hash)
sent_shard_txn_total[f"({src}, {dst})"] += 1
sent_txn_per_shard[src].append(txn_log)
sent_transaction_report = {
"sent-transactions-total": len(sent_txn_hashes),
"sent-transactions": sent_txn_per_shard,
"sent-transactions-total-per-shard": sent_shard_txn_total,
"failed-sent-transactions-total": failed_sent_txn_count,
"failed-sent-transactions": failed_sent_txn_per_shard,
"failed-sent-transactions-total-per-shard": failed_sent_shard_txn_total,
}
Loggers.report.info(json.dumps(sent_transaction_report, indent=4))
successful_txn_count = 0
successful_txn_shard_count = defaultdict(int)
successful_txn_per_shard = defaultdict(list)
failed_txn_count = 0
failed_txn_shard_count = defaultdict(int)
failed_txn_per_shard = defaultdict(list)
lock = Lock()
def check_hash(src_shard, dst_shard, src_endpoint, log):
nonlocal successful_txn_count, failed_txn_count
response = _get_transaction_by_hash(src_endpoint, log['hash'])
lock.acquire()
if response['result'] is not None:
successful_txn_count += 1
successful_txn_shard_count[f"({src_shard}, {dst_shard})"] += 1
            successful_txn_per_shard[src_shard].append(log)
else:
failed_txn_count += 1
failed_txn_shard_count[f"({src_shard}, {dst_shard})"] += 1
            failed_txn_per_shard[src_shard].append(log)
lock.release()
pool = ThreadPool()
threads = []
for shard, txn_log_list in sent_txn_per_shard.items():
endpoint = config["ENDPOINTS"][int(shard)]
for txn_log in txn_log_list:
src, dst = str(txn_log["from-shard"]), str(txn_log["to-shard"])
threads.append(pool.apply_async(check_hash, (src, dst, endpoint, txn_log)))
for t in threads:
t.get()
pool.close()
received_transaction_report = {
"successful-transactions-total": successful_txn_count,
"successful-transactions": successful_txn_per_shard,
"successful-transactions-total-per-shard": successful_txn_shard_count,
"failed-transactions-total": failed_txn_count,
"failed-transactions": failed_txn_per_shard,
"failed-transactions-total-per-shard": failed_txn_shard_count,
}
Loggers.report.info(json.dumps(received_transaction_report, indent=4))
Loggers.report.write()
report = {
"sent-transaction-report": sent_transaction_report,
"received-transaction-report": received_transaction_report
}
return report
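# Illustrative usage sketch (log path and time window are hypothetical):
#
#   start = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
#   end = datetime.datetime.utcnow()
#   report = verify_transactions("logs/transactions.log", start, end)
#   sent_total = report["sent-transaction-report"]["sent-transactions-total"]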
| 7,470 |
18_gematria/gematria.py
|
herjazz/tiny_python_projects
| 0 |
2025829
|
#!/usr/bin/env python3
"""
Title : gematria.py
Author : wrjt <<EMAIL>>
Date : 2021-09-09
Purpose: numerically encode a word using values for characters
(using ASCII values)
"""
import argparse
import os
import re
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Gematria',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('text',
metavar='text',
help='Input text or file')
args = parser.parse_args()
# Handle if text is a filename
if os.path.isfile(args.text):
with open(args.text, "rt", encoding="utf-8") as f:
args.text = f.read().rstrip()
return args
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
# Book version
# for line in args.text.splitlines():
# print(' '.join(map(word2num, line.split())))
for line in args.text.splitlines():
print(' '.join([word2num(word) for word in line.split()]))
def word2num(word: str) -> str:
""" Convert a word to the sum of its ascii values """
# # Book version:
# return str(sum(map(ord, re.sub(r'[^A-Za-z0-9]', '', word))))
# Remove non-letters and numbers
word = re.sub(re.compile(r'[^A-Za-z0-9]'), '', word)
return str(sum(ord(c) for c in word))
def test_word2num():
""" Test word2num() """
assert word2num("a") == "97"
assert word2num("abc") == "294"
assert word2num("ab'c") == "294"
assert word2num("4a-b'c,") == "346"
# --------------------------------------------------
if __name__ == '__main__':
main()
| 1,770 |
examples/removeLineBreaks.py
|
evhub/pyparsing
| 0 |
2024546
|
# removeLineBreaks.py
#
# Demonstration of the pyparsing module, converting text files
# with hard line-breaks to text files with line breaks only
# between paragraphs. (Helps when converting downloads from Project
# Gutenberg - https://www.gutenberg.org/ - to import to word processing apps
# that can reformat paragraphs once hard line-breaks are removed.)
#
# Uses parse actions and transformString to remove unwanted line breaks,
# and to double up line breaks between paragraphs.
#
# Copyright 2006, by <NAME>
#
from pyparsing import *
# define an expression for the body of a line of text - use a parse action to reject any
# empty lines
def mustBeNonBlank(s,l,t):
if not t[0]:
raise ParseException(s,l,"line body can't be empty")
lineBody = SkipTo(lineEnd).setParseAction(mustBeNonBlank)
# now define a line with a trailing lineEnd, to be replaced with a space character
textLine = lineBody + Suppress(lineEnd).setParseAction(replaceWith(" "))
# define a paragraph, with a separating lineEnd, to be replaced with a double newline
para = OneOrMore(textLine) + Suppress(lineEnd).setParseAction(replaceWith("\n\n"))
# run a test
test = """
Now is the
time for
all
good men
to come to
the aid of their
country.
"""
print(para.transformString(test))
# process an entire file
z = para.transformString(open("Successful Methods of Public Speaking.txt").read())
open("Successful Methods of Public Speaking(2).txt", "w").write(z)
| 1,519 |
modules/datasets/seg/BaseDataset.py
|
dontLoveBugs/MyTorch
| 1 |
2024856
|
# -*- coding: utf-8 -*-
"""
@Time : 2019/8/6 11:38
@Author : <NAME>
@Email : <EMAIL>
"""
import os
import time
import cv2
import torch
import numpy as np
import torch.utils.data as data
class BaseDataset(data.Dataset):
def __init__(self, root, split='test', mode=None, preprocess=None):
super(BaseDataset, self).__init__()
self.split = split
self.root = root
self.mode = mode if mode is not None else split
self.images_path, self.gts_path = self._get_pairs()
self.preprocess = preprocess
def __len__(self):
return len(self.images_path)
def __getitem__(self, index):
img_path, gt_path = self.images_path[index], self.gts_path[index]
item_name = img_path.split("/")[-1].split(".")[0]
img, gt = self._fetch_data(self.images_path[index], self.gts_path[index])
img = img[:, :, ::-1]
if self.preprocess is not None:
img, gt, extra_dict = self.preprocess(img, gt)
# print('img:', img.shape, ' gt:', gt.shape)
        if self.split == 'train':
img = torch.from_numpy(np.ascontiguousarray(img)).float()
gt = torch.from_numpy(np.ascontiguousarray(gt)).long()
if self.preprocess is not None and extra_dict is not None:
for k, v in extra_dict.items():
extra_dict[k] = torch.from_numpy(np.ascontiguousarray(v))
if 'label' in k:
extra_dict[k] = extra_dict[k].long()
if 'img' in k:
extra_dict[k] = extra_dict[k].float()
output_dict = dict(data=img, label=gt, fn=str(item_name),
n=len(self.images_path))
if self.preprocess is not None and extra_dict is not None:
output_dict.update(**extra_dict)
return output_dict
def _fetch_data(self, img_path, gt_path, dtype=None):
img = self._open_image(img_path)
gt = self._open_image(gt_path, cv2.IMREAD_GRAYSCALE, dtype=dtype)
return img, gt
def _get_pairs(self):
raise NotImplementedError
def get_length(self):
return self.__len__()
@staticmethod
def _open_image(filepath, mode=cv2.IMREAD_COLOR, dtype=None):
# cv2: B G R
# h w c
if mode == cv2.IMREAD_COLOR:
img_bgr = cv2.imread(filepath, mode)
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
img = np.array(img_rgb, dtype=dtype)
else:
img_gray = cv2.imread(filepath, mode)
img = np.array(img_gray, dtype=dtype)
return img
# @classmethod
def get_class_colors(self, *args):
raise NotImplementedError
# @classmethod
def get_class_names(self, *args):
raise NotImplementedError
if __name__ == "__main__":
data_setting = {'img_root': '',
'gt_root': '',
'train_source': '',
'eval_source': ''}
bd = BaseDataset(data_setting, 'train', None)
print(bd.get_class_names())
| 3,087 |
src/rnn_class/batch_units.py
|
JouniVatanen/NLP-and-Deep-Learning
| 1 |
2024495
|
# https://deeplearningcourses.com/c/deep-learning-recurrent-neural-networks-in-python
# https://udemy.com/deep-learning-recurrent-neural-networks-in-python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import theano
import theano.tensor as T
def init_weight(Mi, Mo):
return np.random.randn(Mi, Mo) * np.sqrt(2.0 / Mi)
class SimpleRecurrentLayer:
def __init__(self, Mi, Mo, activation):
self.Mi = Mi
self.Mo = Mo
self.f = activation
# numpy init
Wxh = init_weight(Mi, Mo)
Whh = init_weight(Mo, Mo)
b = np.zeros(Mo)
h0 = np.zeros(Mo)
# theano vars
self.Wxh = theano.shared(Wxh)
self.Whh = theano.shared(Whh)
self.b = theano.shared(b)
self.h0 = theano.shared(h0)
self.params = [self.Wxh, self.Whh, self.b, self.h0]
def get_ht(self, xWxh_t, h_t1):
return self.f(xWxh_t + h_t1.dot(self.Whh) + self.b)
def recurrence(self, xWxh_t, is_start, h_t1, h0):
h_t = T.switch(
T.eq(is_start, 1),
self.get_ht(xWxh_t, h0),
self.get_ht(xWxh_t, h_t1)
)
return h_t
def output(self, Xflat, startPoints):
# Xflat should be (NT, D)
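        # startPoints should be (NT,) with 1 at the first timestep of each
        # sequence and 0 elsewhere; the hidden state is reset to h0 at those
        # positions, so several sequences can share one flattened batch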
# calculate X after multiplying input weights
XWxh = Xflat.dot(self.Wxh)
h, _ = theano.scan(
fn=self.recurrence,
sequences=[XWxh, startPoints],
outputs_info=[self.h0],
non_sequences=[self.h0],
n_steps=Xflat.shape[0],
)
return h
class GRU:
def __init__(self, Mi, Mo, activation):
self.Mi = Mi
self.Mo = Mo
self.f = activation
# numpy init
Wxr = init_weight(Mi, Mo)
Whr = init_weight(Mo, Mo)
br = np.zeros(Mo)
Wxz = init_weight(Mi, Mo)
Whz = init_weight(Mo, Mo)
bz = np.zeros(Mo)
Wxh = init_weight(Mi, Mo)
Whh = init_weight(Mo, Mo)
bh = np.zeros(Mo)
h0 = np.zeros(Mo)
# theano vars
self.Wxr = theano.shared(Wxr)
self.Whr = theano.shared(Whr)
self.br = theano.shared(br)
self.Wxz = theano.shared(Wxz)
self.Whz = theano.shared(Whz)
self.bz = theano.shared(bz)
self.Wxh = theano.shared(Wxh)
self.Whh = theano.shared(Whh)
self.bh = theano.shared(bh)
self.h0 = theano.shared(h0)
self.params = [self.Wxr, self.Whr, self.br, self.Wxz, self.Whz, self.bz, self.Wxh, self.Whh, self.bh, self.h0]
def get_ht(self, xWxr_t, xWxz_t, xWxh_t, h_t1):
r = T.nnet.sigmoid(xWxr_t + h_t1.dot(self.Whr) + self.br)
z = T.nnet.sigmoid(xWxz_t + h_t1.dot(self.Whz) + self.bz)
hhat = self.f(xWxh_t + (r * h_t1).dot(self.Whh) + self.bh)
h = (1 - z) * h_t1 + z * hhat
return h
def recurrence(self, xWxr_t, xWxz_t, xWxh_t, is_start, h_t1, h0):
h_t = T.switch(
T.eq(is_start, 1),
self.get_ht(xWxr_t, xWxz_t, xWxh_t, h0),
self.get_ht(xWxr_t, xWxz_t, xWxh_t, h_t1)
)
return h_t
def output(self, Xflat, startPoints):
# Xflat should be (NT, D)
# calculate X after multiplying input weights
XWxr = Xflat.dot(self.Wxr)
XWxz = Xflat.dot(self.Wxz)
XWxh = Xflat.dot(self.Wxh)
h, _ = theano.scan(
fn=self.recurrence,
sequences=[XWxr, XWxz, XWxh, startPoints],
outputs_info=[self.h0],
non_sequences=[self.h0],
n_steps=Xflat.shape[0],
)
return h
class LSTM:
def __init__(self, Mi, Mo, activation):
self.Mi = Mi
self.Mo = Mo
self.f = activation
# numpy init
Wxi = init_weight(Mi, Mo)
Whi = init_weight(Mo, Mo)
Wci = init_weight(Mo, Mo)
bi = np.zeros(Mo)
Wxf = init_weight(Mi, Mo)
Whf = init_weight(Mo, Mo)
Wcf = init_weight(Mo, Mo)
bf = np.zeros(Mo)
Wxc = init_weight(Mi, Mo)
Whc = init_weight(Mo, Mo)
bc = np.zeros(Mo)
Wxo = init_weight(Mi, Mo)
Who = init_weight(Mo, Mo)
Wco = init_weight(Mo, Mo)
bo = np.zeros(Mo)
c0 = np.zeros(Mo)
h0 = np.zeros(Mo)
# theano vars
self.Wxi = theano.shared(Wxi)
self.Whi = theano.shared(Whi)
self.Wci = theano.shared(Wci)
self.bi = theano.shared(bi)
self.Wxf = theano.shared(Wxf)
self.Whf = theano.shared(Whf)
self.Wcf = theano.shared(Wcf)
self.bf = theano.shared(bf)
self.Wxc = theano.shared(Wxc)
self.Whc = theano.shared(Whc)
self.bc = theano.shared(bc)
self.Wxo = theano.shared(Wxo)
self.Who = theano.shared(Who)
self.Wco = theano.shared(Wco)
self.bo = theano.shared(bo)
self.c0 = theano.shared(c0)
self.h0 = theano.shared(h0)
self.params = [
self.Wxi,
self.Whi,
self.Wci,
self.bi,
self.Wxf,
self.Whf,
self.Wcf,
self.bf,
self.Wxc,
self.Whc,
self.bc,
self.Wxo,
self.Who,
self.Wco,
self.bo,
self.c0,
self.h0,
]
def get_ht_ct(self, xWxi_t, xWxf_t, xWxc_t, xWxo_t, h_t1, c_t1):
i_t = T.nnet.sigmoid(xWxi_t + h_t1.dot(self.Whi) + c_t1.dot(self.Wci) + self.bi)
f_t = T.nnet.sigmoid(xWxf_t + h_t1.dot(self.Whf) + c_t1.dot(self.Wcf) + self.bf)
c_t = f_t * c_t1 + i_t * T.tanh(xWxc_t + h_t1.dot(self.Whc) + self.bc)
o_t = T.nnet.sigmoid(xWxo_t + h_t1.dot(self.Who) + c_t.dot(self.Wco) + self.bo)
h_t = o_t * T.tanh(c_t)
return h_t, c_t
def recurrence(self, xWxi_t, xWxf_t, xWxc_t, xWxo_t, is_start, h_t1, c_t1, h0, c0):
h_t_c_t = T.switch(
T.eq(is_start, 1),
self.get_ht_ct(xWxi_t, xWxf_t, xWxc_t, xWxo_t, h0, c0),
self.get_ht_ct(xWxi_t, xWxf_t, xWxc_t, xWxo_t, h_t1, c_t1)
)
return h_t_c_t[0], h_t_c_t[1]
def output(self, Xflat, startPoints):
# Xflat should be (NT, D)
# calculate X after multiplying input weights
XWxi = Xflat.dot(self.Wxi)
XWxf = Xflat.dot(self.Wxf)
XWxc = Xflat.dot(self.Wxc)
XWxo = Xflat.dot(self.Wxo)
[h, c], _ = theano.scan(
fn=self.recurrence,
sequences=[XWxi, XWxf, XWxc, XWxo, startPoints],
outputs_info=[self.h0, self.c0],
non_sequences=[self.h0, self.c0],
n_steps=Xflat.shape[0],
)
return h
| 6,828 |
pinax/announcements/signals.py
|
craigds/pinax-announcements
| 0 |
2025490
|
import django.dispatch
# provides args: announcement, request
announcement_created = django.dispatch.Signal()
# provides args: announcement, request
announcement_updated = django.dispatch.Signal()
# provides args: announcement, request
announcement_deleted = django.dispatch.Signal()
| 285 |
mayaviTest.py
|
Anny-Moon/PlotterPyPCA
| 0 |
2024085
|
import numpy as np
from mayavi import mlab
n_mer, n_long = 6, 11
pi = np.pi
dphi = pi/1000.0
phi = np.arange(0.0, 2*pi + 0.5*dphi, dphi)
mu = phi*n_mer
x = np.cos(mu)*(1+np.cos(n_long*mu/n_mer)*0.5)
y = np.sin(mu)*(1+np.cos(n_long*mu/n_mer)*0.5)
z = np.sin(n_long*mu/n_mer)*0.5
l = mlab.plot3d(x, y, z, np.sin(mu),tube_radius=0.025, colormap='Spectral')
mlab.view(distance=4.75);
mlab.pitch(-2.0)
mlab.show()
| 411 |
test/test_queue.py
|
phiysng/python-algs
| 0 |
2025329
|
# -*- coding: utf-8 -*-
from src.Queue import Queue
def test_empty_queue():
queue = Queue()
assert len(queue) == 0
assert queue.size() == 0
def test_nonempty_queue():
queue = Queue()
queue.enqueue(1)
queue.enqueue(2)
queue.enqueue(3)
assert len(queue) == 3
assert queue.dequeue() == 1
assert queue.front() == 2
assert len(queue) == 2
assert queue.dequeue() == 2
assert len(queue) == 1
assert queue.dequeue() == 3
assert len(queue) == 0
| 509 |
src/utils/gen_coeffs_queue.py
|
gayashiva/air_model
| 3 |
2024627
|
import multiprocessing
from time import sleep
import os, sys, time
import logging
import coloredlogs
import xarray as xr
import numpy as np
import math
# Locals
dirname = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(dirname)
from src.models.icestupaClass import Icestupa
from src.utils.settings import config
from src.automate.autoDischarge import TempFreeze, SunMelt
# define worker function
def calculate(process_name, tasks, results, results_list, da):
print("[%s] evaluation routine starts" % process_name)
while True:
new_value = tasks.get()
if new_value == "None":
print("[%s] evaluation routine quits" % process_name)
# Indicate finished
results.put(-1)
break
else:
# Compute result and mimic a long-running task
compute = TempFreeze(new_value)
# Output which process received the value
# print("[%s] received value: %s" % (process_name, new_value))
# print("[%s] calculated thickness rate: %.1f" % (process_name, compute))
# Add result to the queue
results.put(compute)
results_list.append([new_value, compute])
return
if __name__ == "__main__":
# Main logger
logger = logging.getLogger(__name__)
logger.setLevel("ERROR")
# Define IPC manager
manager = multiprocessing.Manager()
# Define a list (queue) for tasks and computation results
tasks = manager.Queue()
results = manager.Queue()
results_list = manager.list()
# Create process pool with four processes
num_processes = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=num_processes)
processes = []
# Define xarray
temp = list(range(-20, 5))
rh = list(range(0, 100, 10))
wind = list(range(0, 10))
alt = list(np.arange(0, 6.1, 0.5))
# cld = list(np.arange(0, 1.1, 0.5))
obj = ["WUE", "ICV"]
spray_r = list(np.arange(3, 11, 1))
da = xr.DataArray(
data=np.zeros(len(temp) * len(rh) * len(wind)* len(alt) * len(obj) * len(spray_r)).reshape(
len(temp), len(rh), len(wind), len(alt), len(obj), len(spray_r)
),
dims=["temp", "rh", "wind", "alt", "obj", "spray_r"],
coords=dict(
temp=temp,
rh=rh,
wind=wind,
alt=alt,
obj=obj,
spray_r=spray_r,
),
attrs=dict(
long_name="Freezing rate",
description="Mean freezing rate",
units="$l\\, min^{-1}$",
),
)
da.temp.attrs["units"] = "$\\degree C$"
da.temp.attrs["description"] = "Air Temperature"
da.temp.attrs["long_name"] = "Air Temperature"
da.rh.attrs["units"] = "%"
da.rh.attrs["long_name"] = "Relative Humidity"
da.wind.attrs["units"] = "$m\\, s^{-1}$"
da.wind.attrs["long_name"] = "Wind Speed"
da.alt.attrs["units"] = "$km$"
da.alt.attrs["long_name"] = "Altitude"
da.obj.attrs["units"] = " "
da.obj.attrs["long_name"] = "Objective"
da.spray_r.attrs["units"] = "$m$"
da.spray_r.attrs["long_name"] = "Spray radius"
# Initiate the worker processes
for i in range(num_processes):
# Set process name
process_name = "P%i" % i
# Create the process, and connect it to the worker function
new_process = multiprocessing.Process(
target=calculate, args=(process_name, tasks, results, results_list, da)
)
# Add new process to the list of processes
processes.append(new_process)
# Start the process
new_process.start()
# Fill task queue
task_list = []
for temp in da.temp.values:
for rh in da.rh.values:
for wind in da.wind.values:
for alt in da.alt.values:
for obj in da.obj.values:
task_list.append({'temp':temp, 'rh':rh, 'wind':wind, 'alt':alt, 'obj':obj})
for single_task in task_list:
tasks.put(single_task)
# Wait while the workers process
sleep(2)
    # Quit the worker processes by sending them the "None" sentinel
for i in range(num_processes):
tasks.put("None")
# Read calculation results
num_finished_processes = 0
while True:
# Read result
new_result = results.get()
# Have a look at the results
if new_result == -1:
# Process has finished
num_finished_processes += 1
if num_finished_processes == num_processes:
for item in results_list:
input = item[0]
output = item[1]
for spray_r in da.spray_r.values:
input['spray_r'] = spray_r
da.sel(input).data += output
if input['obj'] == "WUE":
da.sel(input).data *= math.pi * spray_r * spray_r
elif input['obj'] == "ICV":
da.sel(input).data *= math.sqrt(2) * math.pi * spray_r * spray_r
print(da.data.mean())
da.to_netcdf("data/common/alt_obj_sims.nc")
break
| 5,261 |
src/extract_browser_data/browsers/chromium.py
|
sandorex/extract-browser-data.py
| 0 |
2022774
|
# (https://github.com/sandorex/extract-browser-data.py)
# extract-browser-data
#
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import ClassVar, Dict, List, Type
from ..chromium import ChromiumProfile
from ..profile import Profile
from ..util import Platform
from .browser import Browser
class ChromiumBrowser(Browser):
'''Browser class for Chromium-based browsers'''
PROFILE_TYPE: ClassVar[Type[Profile]] = ChromiumProfile
@classmethod
def get_default_user_path(cls) -> Dict[Platform, str]:
return {
Platform.WIN32: '$LOCALAPPDATA/Chromium/User Data',
Platform.LINUX: '$HOME/.config/chromium',
Platform.MACOS: '$HOME/Library/Application Support/Chromium'
}
@classmethod
def get_browser_name(cls) -> str:
return 'Chromium'
def get_profiles(self) -> List[Profile]:
if not os.path.isdir(self.data_path):
return []
profiles = []
for file in os.listdir(self.data_path):
# skip system profile
if file == 'System Profile':
continue
path = self.data_path.joinpath(file, 'Preferences')
if path.is_file():
with path.open() as f:
preferences = json.load(f)
# pylint: disable=not-callable
profiles.append(
self.PROFILE_TYPE(preferences['profile']['name'], path.parent))
return profiles
| 1,958 |
TP2/funcionesAuxiliares.py
|
brunograssano/TP-Organizacion-de-datos
| 0 |
2025998
|
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import confusion_matrix
def obtenerDatasetDeUrl(url):
direccion = 'https://drive.google.com/uc?export=download&id=' + url.split('/')[-2]
return pd.read_csv(direccion)
def obtenerDatasets():
y = obtenerDatasetDeUrl('https://drive.google.com/file/d/1km-AEIMnWVGqMtK-W28n59hqS5Kufhd0/view?usp=sharing')
X = obtenerDatasetDeUrl('https://drive.google.com/file/d/1i-KJ2lSvM7OQH0Yd59bX01VoZcq8Sglq/view?usp=sharing')
return X, y
def obtenerHoldout():
return obtenerDatasetDeUrl('https://drive.google.com/file/d/1I980-_K9iOucJO26SG5_M8RELOQ5VB6A/view?usp=sharing')
def mostrarROCCurve(modelo,nombreModelo,X_test, X_train, y_test, y_train):
fpr_test, tpr_test, thresholds_test = roc_curve(y_test, modelo.predict_proba(X_test)[:, 1])
fpr_train, tpr_train, thresholds_train = roc_curve(y_train, modelo.predict_proba(X_train)[:, 1])
zero_test = np.argmin(np.abs(thresholds_test))
zero_train = np.argmin(np.abs(thresholds_train))
plt.plot(fpr_test, tpr_test, label="ROC Curve "+nombreModelo+" Test")
plt.plot(fpr_train, tpr_train, label="ROC Curve " + nombreModelo + " Train")
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.plot(fpr_test[zero_test], tpr_test[zero_test], 'o', markersize=10, label="threshold zero test",
fillstyle="none", c="k", mew=2)
plt.plot(fpr_train[zero_train], tpr_train[zero_train], 'x', markersize=10, label="threshold zero train",
fillstyle="none", c="k", mew=2)
plt.legend(loc=4)
plt.show()
def mostrarMatrizDeConfusion(y_pred,y_test):
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
fig, ax = plt.subplots(dpi=100)
sns.heatmap(confusion_matrix(y_test, y_pred), annot=True, vmin=0, yticklabels=["No volveria", "Volveria"],
xticklabels=["No Volveria", "Volveria"], ax=ax)
ax.set_title("Matriz de confusion")
ax.set_xlabel("Predicho")
ax.set_ylabel("Real")
def mostrarAUCScore(modelo,nombreModelo,X_test,y_test):
auc_score = roc_auc_score(y_test, modelo.predict_proba(X_test)[:, 1])
print("AUC para "+nombreModelo+": {:.3f}".format(auc_score))
def escribirPrediccionesAArchivo(predicciones: np.array, nombreModelo, ids_usuarios):
    with open("PrediccionesHoldout/" + nombreModelo + ".csv", "w") as archivo:
        archivo.write("id_usuario,volveria\n")
        for id_usuario, prediccion in zip(ids_usuarios, predicciones):
            archivo.write(str(id_usuario) + "," + str(prediccion) + "\n")
| 2,663 |
TodoApp/admin.py
|
ysyesilyurt/TodoApp
| 6 |
2023155
|
from django.contrib import admin
from . import models
admin.site.register(models.TodoList)
admin.site.register(models.TodoItem)
| 129 |
logs.py
|
wiesnerroyal/stocks-screen
| 277 |
2025680
|
import logging
import sys
from config.config import config
__all__ = ('logger', )
def get_logger():
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
_logger = logging.getLogger()
if config.console_logs:
screen_handler = logging.StreamHandler(stream=sys.stdout)
screen_handler.setFormatter(formatter)
_logger.addHandler(screen_handler)
_logger.setLevel(logging.NOTSET)
if config.logs_file is not None:
handler = logging.FileHandler(config.logs_file, mode='w')
handler.setFormatter(formatter)
_logger.addHandler(handler)
_logger.setLevel(logging.NOTSET)
return _logger
logger = get_logger()
| 753 |
04-Dive2DL/regression/regression.py
|
Jamiesona/deep_learning_explore
| 0 |
2025501
|
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.model = nn.Sequential(
nn.Linear(1, 16),
nn.ReLU(),
nn.Linear(16, 12),
nn.ReLU(),
nn.Linear(12, 6),
nn.Sigmoid(),
nn.Linear(6, 1)
)
def forward(self, x):
y = self.model(x)
return y
def train_net(model, data_iter, trainner, loss, epochs):
model.train()
losses = torch.zeros(epochs)
n = len(data_iter)
for epoch in range(epochs):
for x, y in data_iter:
y_hat = model(x)
loss_val = loss(y_hat, y)
loss_val.backward()
            trainer.step()
            trainer.zero_grad()
losses[epoch] += loss_val.detach().item()
plt.plot(range(epochs), losses, 'k--o')
plt.xlabel('epochs')
plt.ylabel('Losses')
return losses
if __name__ == '__main__':
    # original data
x = torch.linspace(0, 2*np.pi, 200).unsqueeze(1)
y = 0.2 * x**2 + 0.1
data_iter = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(x, y), batch_size=20, shuffle=True)
    # build the model
net = Net()
loss = nn.MSELoss()
trainer = torch.optim.Adam(net.parameters(), lr=0.05)
    # train the model
losses = train_net(net, data_iter, trainer, loss, 30)
    # predict with the trained model
net.eval()
y_hat = net(x).detach()
plt.figure()
plt.plot(x, y, 'k--', x, y_hat, 'r--')
plt.show()
print(losses[-1])
| 1,568 |
tests/commands/test_delete_case_cmd.py
|
mhkc/scout
| 111 |
2025941
|
# from scout.commands import cli
# from click.testing import CliRunner
# def test_delete_case(setup_loaded_database, database_setup):
# #Check that a case have been added
# case = setup_loaded_database.case(
# institute_id='cust000',
# case_id='337334'
# )
# assert case
# case_id = case.case_id
# nr_of_variants = 0
# variants = setup_loaded_database.variants(
# case_id=case_id,
# query=None,
# variant_ids=None,
# nr_of_variants=10000,
# skip=0
# )
# #Check that the variants are added
# for variant in variants:
# nr_of_variants += 1
# assert nr_of_variants == 207
#
# #Test to delete the case
# runner = CliRunner()
# args = open(database_setup, 'r')
# result = runner.invoke(cli, [
# '-c', database_setup,
# 'delete_case',
# '--owner', 'cust000',
# '--case_id', '337334',
# ])
# assert result.exit_code == 0
# case = setup_loaded_database.case(
# institute_id='cust000',
# case_id='337334'
# )
# assert case is None
#
# variants = setup_loaded_database.variants(
# case_id=case_id,
# query=None,
# variant_ids=None,
# nr_of_variants=10,
# skip=0
# )
# #Check that the variants are deleted
# nr_of_variants = 0
# for variant in variants:
# nr_of_variants += 1
# assert nr_of_variants == 0
#
| 1,481 |
inventory/ovirt.py
|
dmalicia/ansible
| 0 |
2025715
|
#!/usr/bin/env python
# Copyright 2015 IIX Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ovirt external inventory script
=================================
Generates inventory that Ansible can understand by making API requests to
oVirt via the ovirt-engine-sdk-python library.
When run against a specific host, this script returns the following variables
based on the data obtained from the ovirt_sdk Node object:
- ovirt_uuid
- ovirt_id
- ovirt_image
- ovirt_machine_type
- ovirt_ips
- ovirt_name
- ovirt_description
- ovirt_status
- ovirt_zone
- ovirt_tags
- ovirt_stats
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- running status:
group name prefixed with 'status_' (e.g. status_up, status_down,..)
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a"
Use the ovirt inventory script to print out instance specific information
$ contrib/inventory/ovirt.py --host my_instance
Author: <NAME> <<EMAIL>> based on the gce.py by <NAME> <<EMAIL>>
Version: 0.0.1
"""
USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin"
USER_AGENT_VERSION = "v1"
import sys
import os
import argparse
import ConfigParser
from collections import defaultdict
try:
import json
except ImportError:
# noinspection PyUnresolvedReferences,PyPackageRequirements
import simplejson as json
try:
# noinspection PyUnresolvedReferences
from ovirtsdk.api import API
# noinspection PyUnresolvedReferences
from ovirtsdk.xml import params
except ImportError:
print("ovirt inventory script requires ovirt-engine-sdk-python")
sys.exit(1)
class OVirtInventory(object):
def __init__(self):
# Read settings and parse CLI arguments
self.args = self.parse_cli_args()
self.driver = self.get_ovirt_driver()
# Just display data for specific host
if self.args.host:
print(self.json_format_dict(
self.node_to_dict(self.get_instance(self.args.host)),
pretty=self.args.pretty
))
sys.exit(0)
# Otherwise, assume user wants all instances grouped
print(
self.json_format_dict(
data=self.group_instances(),
pretty=self.args.pretty
)
)
sys.exit(0)
@staticmethod
def get_ovirt_driver():
"""
        Determine the ovirt authorization settings and return an ovirt_sdk driver.
:rtype : ovirtsdk.api.API
"""
kwargs = {}
ovirt_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "ovirt.ini")
ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path)
# Create a ConfigParser.
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
# to work.
config = ConfigParser.SafeConfigParser(defaults={
'ovirt_url': '',
'ovirt_username': '',
'ovirt_password': '',
'ovirt_api_secrets': '',
})
if 'ovirt' not in config.sections():
config.add_section('ovirt')
config.read(ovirt_ini_path)
# Attempt to get ovirt params from a configuration file, if one
# exists.
secrets_path = config.get('ovirt', 'ovirt_api_secrets')
secrets_found = False
try:
# noinspection PyUnresolvedReferences,PyPackageRequirements
import secrets
kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
secrets_found = True
except ImportError:
pass
if not secrets_found and secrets_path:
if not secrets_path.endswith('secrets.py'):
err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py"
print(err)
sys.exit(1)
sys.path.append(os.path.dirname(secrets_path))
try:
# noinspection PyUnresolvedReferences,PyPackageRequirements
import secrets
kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
except ImportError:
pass
if not secrets_found:
kwargs = {
'url': config.get('ovirt', 'ovirt_url'),
'username': config.get('ovirt', 'ovirt_username'),
'password': config.get('ovirt', 'ovirt_password'),
}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
kwargs['url'] = os.environ.get('OVIRT_URL', kwargs['url'])
kwargs['username'] = next(val for val in [os.environ.get('OVIRT_EMAIL'), os.environ.get('OVIRT_USERNAME'), kwargs['username']] if val is not None)
kwargs['password'] = next(val for val in [os.environ.get('OVIRT_PASS'), os.environ.get('OVIRT_PASSWORD'), kwargs['password']] if val is not None)
# Retrieve and return the ovirt driver.
return API(insecure=True, **kwargs)
@staticmethod
def parse_cli_args():
"""
Command line argument processing
:rtype : argparse.Namespace
"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)')
return parser.parse_args()
def node_to_dict(self, inst):
"""
:type inst: params.VM
"""
if inst is None:
return {}
inst.get_custom_properties()
ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \
if inst.get_guest_info() is not None else []
stats = {}
for stat in inst.get_statistics().list():
stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum()
return {
'ovirt_uuid': inst.get_id(),
'ovirt_id': inst.get_id(),
'ovirt_image': inst.get_os().get_type(),
'ovirt_machine_type': self.get_machine_type(inst),
'ovirt_ips': ips,
'ovirt_name': inst.get_name(),
'ovirt_description': inst.get_description(),
'ovirt_status': inst.get_status().get_state(),
'ovirt_zone': inst.get_cluster().get_id(),
'ovirt_tags': self.get_tags(inst),
'ovirt_stats': stats,
# Hosts don't have a public name, so we add an IP
'ansible_ssh_host': ips[0] if len(ips) > 0 else None
}
@staticmethod
def get_tags(inst):
"""
:type inst: params.VM
"""
return [x.get_name() for x in inst.get_tags().list()]
def get_machine_type(self,inst):
inst_type = inst.get_instance_type()
if inst_type:
return self.driver.instancetypes.get(id=inst_type.id).name
# noinspection PyBroadException,PyUnusedLocal
def get_instance(self, instance_name):
"""Gets details about a specific instance """
try:
return self.driver.vms.get(name=instance_name)
except Exception as e:
return None
def group_instances(self):
"""Group all instances"""
groups = defaultdict(list)
meta = {"hostvars": {}}
for node in self.driver.vms.list():
assert isinstance(node, params.VM)
name = node.get_name()
meta["hostvars"][name] = self.node_to_dict(node)
zone = node.get_cluster().get_name()
groups[zone].append(name)
tags = self.get_tags(node)
for t in tags:
tag = 'tag_%s' % t
groups[tag].append(name)
nets = [x.get_name() for x in node.get_nics().list()]
for net in nets:
net = 'network_%s' % net
groups[net].append(name)
status = node.get_status().get_state()
stat = 'status_%s' % status.lower()
if stat in groups:
groups[stat].append(name)
else:
groups[stat] = [name]
groups["_meta"] = meta
return groups
@staticmethod
def json_format_dict(data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted
string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
OVirtInventory()
| 9,876 |
5. WEB/app/properties/migrations/0001_initial.py
|
doyaguillo1997/Data2Gether
| 1 |
2023222
|
# Generated by Django 3.2 on 2021-04-08 17:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('accounts', '0001_initial'),
('cadastres', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Property',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.TextField()),
('account', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.account')),
('cadastre', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cadastres.cadastre')),
],
options={
'verbose_name_plural': 'Properties',
'unique_together': {('account', 'external_id')},
},
),
]
| 1,027 |
util/plot_tools.py
|
zyhazwraith/FigureQA-baseline
| 30 |
2024843
|
# coding: utf-8
import numpy as np
import tensorflow as tf
def get_conv1_filter_grid_img(conv1_w, pad=1):
"""Creates an grid of convnet filters
Args:
conv1_w (tf.Tensor): The conv net kernel tensor.
pad (int): how much padding around grid cells
Returns:
tf.Tensor: A grid of convnet filters
"""
h, w, nchannels, b = conv1_w.get_shape().as_list()
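    # b filters of size h x w, each with nchannels channels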
grid_w = np.int32(np.ceil(np.sqrt(np.float32(b))))
grid_h = grid_w
v_min = tf.reduce_min(conv1_w)
v_max = tf.reduce_max(conv1_w)
conv1_w = (conv1_w - v_min) / (v_max - v_min)
conv1_w = tf.pad(
conv1_w, tf.constant([[pad, pad], [pad, pad], [0, 0], [0, 0]]),
mode='CONSTANT'
)
H = h + 2 * pad
W = w + 2 * pad
conv1_w = tf.transpose(conv1_w, (3, 0, 1, 2))
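    # filter axis first: shape is now (b, H, W, nchannels), ready to be tiled into a grid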
# pad to get a square number of grid cells
conv1_w = tf.pad(
conv1_w, tf.constant([[0, grid_w*grid_h - b], [0, 0], [0, 0], [0, 0]]),
mode='CONSTANT'
)
conv1_w = tf.reshape(
conv1_w,
tf.stack([grid_w, H * grid_h, W, nchannels])
)
conv1_w = tf.transpose(conv1_w, (0, 2, 1, 3))
conv1_w = tf.reshape(
conv1_w,
tf.stack([1, W * grid_w, H * grid_h, nchannels])
)
conv1_w = tf.transpose(conv1_w, (2, 1, 3, 0))
conv1_w = tf.transpose(conv1_w, (3, 0, 1, 2))
return conv1_w
if __name__ == '__main__':
import matplotlib.pyplot as plt
with tf.Session() as sess:
w = tf.get_variable('w', shape=(16, 16, 3, 23), dtype=np.float32)
sess.run(tf.global_variables_initializer())
w_grid_img = get_conv1_filter_grid_img(w, pad=1)
w_grid_img_np = sess.run(w_grid_img)
plt.imshow(w_grid_img_np[0])
#for i in range(w_grid_img_np.shape[0]):
# n = int(np.ceil(np.sqrt(w_grid_img_np.shape[0])))
# plt.subplot(n, n, i+1)
# plt.imshow(w_grid_img_np[i])
plt.show()
| 1,967 |
2016/day/5/solution.py
|
iangregson/advent-of-code
| 0 |
2022646
|
#!/usr/bin/env python3
import os
import hashlib
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
file = open(dir_path + "/input.txt", "r")
input_txt = file.read().strip()
print(input_txt)
door_id = input_txt
# door_id = 'abc'
def H(s, i):
result = hashlib.md5(bytes(str(s) + str(i), 'utf8'))
return result.hexdigest()
def starts_w_zeros(s, N):
    return str(s)[0:N] == '0'*N
# N_zeros = 5
# I = 0
# chars = []
# for i in range(8):
# c = None
# while True:
# h = H(door_id, I)
# if starts_w_zeros(h, N_zeros):
# c = h[N_zeros]
# chars.append(c)
# I += 1
# break
# else:
# I += 1
# result = "".join(chars)
# print("Part 1 answer:", result)
N_zeros = 5
I = 0
chars = ['_']*8
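# Part 2: in each qualifying hash (five leading zeros) the 6th hex digit is the
# position in the password and the 7th is the character; only the first hit per
# position counts.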
for i in range(8):
c = None
while True:
h = H(door_id, I)
sys.stdout.write("\r" + str(I) + "\t\t" + "".join(chars))
if starts_w_zeros(h, N_zeros):
pos = h[N_zeros]
if pos.isnumeric() and int(pos) < len(chars) and chars[int(pos)] == '_':
c = h[N_zeros+1]
chars[int(pos)] = c
I += 1
break
else:
I += 1
else:
I += 1
result = "".join(chars)
print("\nPart 2 answer:", result)
| 1,345 |
src/plugin.py
|
headout/dagen-airflow
| 2 |
2025004
|
import logging
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.log.logging_mixin import LoggingMixin
from dagen.migrations.utils import initdb
from dagen.utils import get_template_loader
from dagen.www.api_views import dagen_rest_bp
from dagen.www.views import DagenFABView
from flask import Blueprint
ab_dagen_view = DagenFABView()
ab_dagen_package = {
'name': 'List Dagen DAGs',
'category': 'Dagen',
'view': ab_dagen_view
}
ab_dagen_create_mitem = {
'name': 'Create Dagen DAG',
'category': 'Dagen',
'category_icon': 'fa-th',
'href': '/dagen/dags/create'
}
dagen_bp = Blueprint(
"dagen_bp",
__name__,
template_folder='www/templates',
static_folder='www/static',
static_url_path='/static/dagen'
)
class DagenPlugin(AirflowPlugin, LoggingMixin):
name = 'dagen'
appbuilder_views = (ab_dagen_package,)
appbuilder_menu_items = (ab_dagen_create_mitem,)
flask_blueprints = (dagen_bp, dagen_rest_bp)
log = logging.root.getChild(f'{__name__}.{"DagenPlugin"}')
@classmethod
def validate(cls):
# HACK: since on_load is only called for entrypoint plugins
super().validate()
# Load templates per each airflow process
loader = get_template_loader()
if not loader.template_classes:
loader.load_templates()
| 1,349 |
excepthook_example.py
|
customprogrammingsolutions/excepthook_logging_example
| 1 |
2025735
|
import sys
import logging
import threading
from threaded_exception import RunsInAThread
logger = logging.getLogger(__name__)
logging.basicConfig(filename='example.log', filemode='w', level=logging.DEBUG)
def handle_unhandled_exception(exc_type, exc_value, exc_traceback, thread_identifier=''):
"""Handler for unhandled exceptions that will write to the logs"""
if issubclass(exc_type, KeyboardInterrupt):
# call the default excepthook saved at __excepthook__
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
if not thread_identifier:
logger.critical("Unhandled exception", exc_info=(exc_type, exc_value, exc_traceback))
else:
logger.critical("Unhandled exception (on thread %s)", thread_identifier, exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_unhandled_exception
def patch_threading_excepthook():
"""Installs our exception handler into the threading modules Thread object
Inspired by https://bugs.python.org/issue1230540
"""
old_init = threading.Thread.__init__
def new_init(self, *args, **kwargs):
old_init(self, *args, **kwargs)
old_run = self.run
def run_with_our_excepthook(*args, **kwargs):
try:
old_run(*args, **kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except:
sys.excepthook(*sys.exc_info(), thread_identifier=threading.get_ident())
self.run = run_with_our_excepthook
threading.Thread.__init__ = new_init
patch_threading_excepthook()
try:
raise ValueError("we catch this one")
except ValueError:
logger.info("Caught the exception")
# Test that the logger gets the exception raised from a thread
foo = RunsInAThread("Runs on thread")
thread = threading.Thread(target=foo.run, args=())
thread.daemon = True
thread.start()
thread.join()
| 1,911 |
github_app_user_auth/auth.py
|
fperez/github-app-user-auth
| 0 |
2025810
|
import argparse
import requests
import sys
import time
import os
def do_authenticate_device_flow(client_id, in_jupyter=False):
"""
Authenticate user with given GitHub app using GitHub OAuth Device flow
https://docs.github.com/en/developers/apps/building-oauth-apps/authorizing-oauth-apps#device-flow
describes what happens here.
Returns an access_code and the number of seconds it expires in.
access_code will have scopes defined in the GitHub app
"""
verification_resp = requests.post(
"https://github.com/login/device/code",
data={"client_id": client_id, "scope": "repo"},
headers={"Accept": "application/json"},
).json()
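    # verification_resp holds device_code, user_code, verification_uri, expires_in and interval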
url = verification_resp["verification_uri"]
code = verification_resp["user_code"]
if in_jupyter:
from IPython.display import display, Javascript
display(Javascript(f'navigator.clipboard.writeText("{code}");'))
print(f'The code {code} has been copied to your clipboard.')
print(f'You have 15 minutes to go to {url} and paste it there.\n')
ans = input("Hit ENTER to open that page in a new tab (type anything to cancel)>")
if ans:
print("Automatic opening canceled!")
else:
display(Javascript(f'window.open("{url}", "_blank");'))
else:
print(f'You have 15 minutes to go to {url} and enter the code: {code}')
print('Waiting...', end='', flush=True)
while True:
time.sleep(verification_resp["interval"])
print('.', end='', flush=True)
access_resp = requests.post(
"https://github.com/login/oauth/access_token",
data={
"client_id": client_id,
"device_code": verification_resp["device_code"],
"grant_type": "urn:ietf:params:oauth:grant-type:device_code",
},
headers={"Accept": "application/json"},
).json()
if "access_token" in access_resp:
print()
return access_resp["access_token"], access_resp["expires_in"]
def main(in_jupyter=False):
argparser = argparse.ArgumentParser()
argparser.add_argument(
"--client-id",
default=os.environ.get("GITHUB_APP_CLIENT_ID"),
help="""
Client ID of the GitHub app to authenticate with as the user
""".strip(),
)
argparser.add_argument(
"--git-credentials-path",
default="/tmp/github-app-git-credentials",
help="""
Path to write the git-credentials file to. Current contents will be overwritten!
""".strip(),
)
args = argparser.parse_args()
if not args.client_id:
print(
"--client-id must be specified or GITHUB_APP_CLIENT_ID environment variable must be set",
file=sys.stderr,
)
sys.exit(1)
access_token, expires_in = do_authenticate_device_flow(args.client_id, in_jupyter)
expires_in_hours = expires_in / 60 / 60
success = (f"Success! Authentication will expire in {expires_in_hours:0.1f} hours.\n<br>"
f"Process completed on: {time.asctime()}.")
if in_jupyter:
from IPython.display import display, HTML
display(HTML(f'<p style="background-color:lightgreen;">{success}</p>'))
else:
print(success)
# Create the file with appropriate permissions (0600) so other users can't read it
with open(os.open(args.git_credentials_path, os.O_WRONLY | os.O_CREAT, 0o600), "w") as f:
f.write(f"https://x-access-token:{access_token}@github.com\n")
| 3,568 |
info_scraper2.py
|
walmonte/info_scraper
| 0 |
2022945
|
# This app takes links from a file 'users.txt', retrieves
# profile info from instagram.com, and saves it in 'profile_info.json'.
import requests
import urllib.request
import urllib.parse
import urllib.error
from bs4 import BeautifulSoup
import ssl
import json
class Insta_Info_Scraper:
def get_info(self, url):
html = urllib.request.urlopen(url, context=self.ctx).read()
soup = BeautifulSoup(html, 'html.parser')
data = soup.find_all('meta', attrs={'property': 'og:description'})
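        # og:description typically looks like "<followers> Followers, <following> Following, <posts> Posts - ... <name> (@handle)"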
text = data[0].get('content').split()
user = '%s %s %s' % (text[-3], text[-2], text[-1])
followers = text[0]
following = text[2]
posts = text[4]
info={}
info["User"] = user
info["Followers"] = followers
info["Following"] = following
info["Posts"] = posts
self.info_arr.append(info)
def main(self):
self.ctx = ssl.create_default_context()
self.ctx.check_hostname = False
self.ctx.verify_mode = ssl.CERT_NONE
self.info_arr=[]
with open('users.txt') as f:
self.content = f.readlines()
self.content = [x.strip() for x in self.content]
for url in self.content:
self.get_info(url)
with open('profile_info.json', 'w') as outfile:
json.dump(self.info_arr, outfile, indent=4)
print("Json file containing required info has been created............")
if __name__ == '__main__':
obj = Insta_Info_Scraper()
obj.main()
| 1,585 |
test/yacc_error5.py
|
pyarnold/ply
| 1 |
2022962
|
# -----------------------------------------------------------------------------
# yacc_error5.py
#
# Lineno and position tracking with error tokens
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path:
sys.path.insert(0, "..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE'),
('right', 'UMINUS'),
)
# dictionary of names
names = {}
def p_statement_assign(t):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_assign_error(t):
'statement : NAME EQUALS error'
line_start, line_end = t.linespan(3)
pos_start, pos_end = t.lexspan(3)
print("Assignment Error at %d:%d to %d:%d" %
(line_start, pos_start, line_end, pos_end))
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+':
t[0] = t[1] + t[3]
elif t[2] == '-':
t[0] = t[1] - t[3]
elif t[2] == '*':
t[0] = t[1] * t[3]
elif t[2] == '/':
t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
line_start, line_end = t.linespan(2)
pos_start, pos_end = t.lexspan(2)
print("Group at %d:%d to %d:%d" %
(line_start, pos_start, line_end, pos_end))
t[0] = t[2]
def p_expression_group_error(t):
'expression : LPAREN error RPAREN'
line_start, line_end = t.linespan(2)
pos_start, pos_end = t.lexspan(2)
print("Syntax error at %d:%d to %d:%d" %
(line_start, pos_start, line_end, pos_end))
t[0] = 0
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t):
print("Syntax error at '%s'" % t.value)
parser = yacc.yacc()
import calclex
calclex.lexer.lineno = 1
parser.parse("""
a = 3 +
(4*5) +
(a b c) +
+ 6 + 7
""", tracking=True)
| 2,400 |
ramp/tests/test_utils.py
|
Marigold/ramp
| 1 |
2025888
|
import sys
sys.path.append('../..')
from ramp.utils import *
from ramp.features.base import *
import unittest
from pandas import *
import tempfile
class TestUtils(unittest.TestCase):
def test_np_hashes(self):
a = np.random.randn(20)
h = get_np_hash(a)
a[0] = 200000
h2 = get_np_hash(a)
self.assertNotEqual(h, h2)
b = a[0]
a[0] = b
self.assertEqual(h2, get_np_hash(a))
def test_stable_repr(self):
f = F('test')
f2 = F('test')
        # equivalent objects should have the same repr
self.assertEqual(stable_repr(f), stable_repr(f2))
# repr is independent of object ids
class Test: pass
f.f = Test()
r1 = stable_repr(f)
f.f = Test()
r2 = stable_repr(f)
self.assertEqual(r1, r2)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 890 |
examples/middleware/misc.py
|
dotX12/waio
| 24 |
2025759
|
from waio.bot import Bot, Dispatcher
from waio.logs import loguru_filter
from examples.middleware.example_middlewares import BanMiddleware, DatabaseMiddleware
loguru_filter.set_level('DEBUG')
bot = Bot(
apikey='API_KEY',
src_name='SRC_NAME',
phone_number=7928994433
)
dp = Dispatcher(bot=bot)
dp.labeler.register_middleware(BanMiddleware())
dp.labeler.register_middleware(DatabaseMiddleware())
| 411 |
dcinside/api/post/__init__.py
|
Fr0zens/dc_test
| 0 |
2026016
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from .create import main as create # noqa
from .delete import main as delete # noqa
from .read import main as read # noqa
from .vote import downvote, upvote # noqa
| 412 |
teams/views.py
|
gennadis/dvmn_teams
| 0 |
2024866
|
from django.shortcuts import render
from django.http import HttpResponse
from django.db.models import Count
from .models import PM, TimeSlot, Student, Team
def index(request):
"""View function for home page of site."""
populated_teams = (
Team.objects.annotate(students_in_team=Count("students"))
.filter(
students_in_team__gt=0,
)
.all()
)
context = {
"populated_teams": populated_teams,
}
# Render the HTML template index.html with the data in the context variable
return render(request, "index.html", context=context)
| 607 |
app/handle_intent/__init__.py
|
drabekj/OttoBotServer
| 2 |
2023409
|
import app.intent_stock_price as intent_stock
from app import handle_end
from app.intent_education import handle_education
from app.intent_help import handle_help_intent
from app.intent_investing_strategy import handle_investing_strategy
from app.intent_market_cap import handle_market_cap
from app.intent_news import handle_news
from app.intent_recommendation import handle_recommendation
from app.intent_watchlist.add import handle_add_to_watchlist
from app.intent_watchlist.remove import handle_remove_from_watchlist
from app.intent_watchlist.report import handle_report_stock_watchlist
from app.utils.MyError import UnknownIntentError
def handle_intent(request):
"""
    Delegate intent requests to the appropriate handler package based on the intent type.
:param request: incoming parsed Alexa request
:return: Final JSON response generated by the package
"""
intent_name = request.intent_name()
if intent_name == 'WhatsTheStockPriceIntent':
return intent_stock.handle_get_stock_price_intent(request)
elif intent_name == 'ReportStockWatchlistIntent':
return handle_report_stock_watchlist(request)
elif intent_name == 'AddStockToWatchlistIntent':
return handle_add_to_watchlist(request)
elif intent_name == 'RemoveStockFromWatchlistIntent':
return handle_remove_from_watchlist(request)
elif intent_name == 'EducateIntent':
return handle_education(request)
elif intent_name == 'NewsAboutCompanyIntent':
return handle_news(request)
elif intent_name == 'MarketCapIntent':
return handle_market_cap(request)
elif intent_name == 'InvestingStrategyIntent':
return handle_investing_strategy(request)
elif intent_name == 'RecommendationIntent':
return handle_recommendation(request)
elif intent_name == 'AMAZON.HelpIntent':
return handle_help_intent(request)
elif intent_name == 'AMAZON.StopIntent':
return handle_end(request)
else:
        raise UnknownIntentError("Can't handle intent: " + intent_name)
| 2,057 |