max_stars_repo_path (string, 4-182 chars) | max_stars_repo_name (string, 6-116 chars) | max_stars_count (int64, 0-191k) | id (string, 7 chars) | content (string, 100-10k chars) | size (int64, 100-10k)
---|---|---|---|---|---
src/rooms/flask/app.py
|
gomyar/rooms
| 0 |
2025310
|
import os
import traceback
import uuid
import logging
import logging.config
log = logging.getLogger("rooms")
from geventwebsocket.handler import WebSocketHandler
from gevent.pywsgi import WSGIServer
from werkzeug.debug import DebuggedApplication
from rooms.master import Master
from rooms.node import Node
from rooms.geography.basic_geography import BasicGeography
from rooms.geography.pointmap_geography import PointmapGeography
from rooms.geography.polygon_funnel import PolygonFunnelGeography
from rooms.room_builder import RoomBuilder
from rooms.room_builder import FileMapSource
from rooms.container import Container
from rooms.dbase.mongo_dbase import MongoDBase
from rooms.item_registry import ItemRegistry
_mongo_host = os.environ.get('ROOMS_MONGO_HOST', 'localhost')
_mongo_port = int(os.environ.get('ROOMS_MONGO_PORT', '27017'))
_mongo_dbname = os.environ.get('ROOMS_MONGO_DBNAME', 'rooms')
_node_hostname = os.environ.get('ROOMS_NODE_HOSTNAME', 'localhost:5000')
_node_name = os.environ.get('ROOMS_NODE_NAME', 'local')
_node_host = os.environ.get('ROOMS_NODE_HOST', 'localhost')
_node_port = int(os.environ.get('ROOMS_NODE_PORT', 5000))
_rooms_projectdir = os.environ.get('ROOMS_PROJECTDIR', os.getcwd())
mapdir = os.path.join(_rooms_projectdir, "maps")
itemdir = os.path.join(_rooms_projectdir, "items")
if os.path.exists(os.path.join(_rooms_projectdir, "logging.conf")):
logging.config.fileConfig(os.path.join(_rooms_projectdir,
"logging.conf"))
else:
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
dbase = MongoDBase(host=_mongo_host, port=_mongo_port, dbname=_mongo_dbname)
dbase.init_mongo()
container = Container(dbase, None)
node = Node(container, _node_name, _node_hostname)
container.node = node
container.player_script_name = "scripts.player_script"
container.room_script_name = "scripts.room_script"
room_builder = RoomBuilder(FileMapSource(mapdir), node)
item_registry = ItemRegistry()
if os.path.exists(itemdir):
item_registry.load_from_directory(itemdir)
container.geography = PolygonFunnelGeography
container.room_builder = room_builder
container.item_registry = item_registry
master = Master(container)
node.container = container
GEOGRAPHIES = {
'basic': BasicGeography,
'pointmap': PointmapGeography,
'polygon_funnel': PolygonFunnelGeography,
}
def start_rooms_app(app, container_type='pointmap'):
try:
container.geography = GEOGRAPHIES[container_type]
container.start_container()
http_server = WSGIServer((_node_host, _node_port), app,
handler_class=WebSocketHandler)
http_server.serve_forever()
except KeyboardInterrupt as ke:
log.debug("Server interrupted")
node.shutdown()
master.shutdown()
container.stop_container()
except:
traceback.print_exc()
log.exception("Exception starting server")
| 2,964 |
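A minimal usage sketch for the start_rooms_app entry point defined in the file above. Only start_rooms_app and GEOGRAPHIES come from that file; the Flask application object and the rooms.flask.app import path are assumptions for illustration, and importing the module requires a reachable MongoDB as configured by the ROOMS_* environment variables.

from flask import Flask  # assumed web framework; not imported by the file above
from rooms.flask.app import GEOGRAPHIES, start_rooms_app

app = Flask(__name__)

if __name__ == "__main__":
    # Any key of GEOGRAPHIES ('basic', 'pointmap', 'polygon_funnel') is accepted.
    start_rooms_app(app, container_type='pointmap')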
ziggurat_foundations/models/user_group.py
|
timgates42/ziggurat_foundations
| 59 |
2025104
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declared_attr
from ziggurat_foundations.models.base import BaseModel
__all__ = ["UserGroupMixin"]
class UserGroupMixin(BaseModel):
"""
Mixin for UserGroup model
"""
__table_args__ = {"mysql_engine": "InnoDB", "mysql_charset": "utf8"}
@declared_attr
def __tablename__(self):
return "users_groups"
@declared_attr
def group_id(self):
return sa.Column(
sa.Integer,
sa.ForeignKey("groups.id", onupdate="CASCADE", ondelete="CASCADE"),
primary_key=True,
)
@declared_attr
def user_id(self):
return sa.Column(
sa.Integer,
sa.ForeignKey("users.id", onupdate="CASCADE", ondelete="CASCADE"),
primary_key=True,
)
def __repr__(self):
return "<UserGroup: g:%s, u:%s>" % (self.group_id, self.user_id)
| 986 |
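A hedged sketch of attaching UserGroupMixin above to a concrete declarative model, which is how ziggurat_foundations mixins are typically consumed. The declarative Base and the "users"/"groups" tables referenced by the mixin's foreign keys are assumptions; they are not defined in the file above.

from sqlalchemy.ext.declarative import declarative_base
from ziggurat_foundations.models.user_group import UserGroupMixin

Base = declarative_base()  # assumed application-level declarative base


class UserGroup(UserGroupMixin, Base):
    """Concrete association model; relies on 'users' and 'groups' tables existing."""
    pass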
zehao_origin/pltgausscenter.py
|
extrsun/dusty_plasma
| 0 |
2023640
|
import pyatomdb, numpy, pickle,pylab,dust
import matplotlib.pyplot as plt
from constants import *
dust_init=input('enter initial dust percentage, e.g., 95:\n')
print('percentage of dust=',dust_init)
band=input('what band? 1. 6.35--7.15(kalpha including lyman); 2. 6.35--6.75(excluding lyman)(press enter->default=2):\n')
if band=="":
band=2
band=int(band)
print('band=',band)
precision=input('precision(just press enter->default=0.2):\n')
# eV; this value should be < 0.5 in order to get a high-resolution spectrum and the right centroid
# (it avoids taking the continuum spectrum into account, because the criterion line < 0.01*maxflux is used)
###Zehao's note:0.2 for pltmicurve.py,pltgausscenter.py,plot_ionfrac.py
### 1.5 for pltspec.py dust percent=95
### 2.5 for pltspec.py dust percent=0
if precision=="":
precision=0.2
precision=float(precision)
print('precision=',precision)
print('initializing...')
if band==1:
dat2=pickle.load(open('data_package/eperionperbin_6.35-7.15_v%s_p%s.pkl' %(version,precision),'rb'))
if band==2:
dat2=pickle.load(open('data_package/eperionperbin_6.35-6.75_v%s_p%s.pkl' %(version,precision),'rb'))
# dat2=pickle.load(open('data_package/eperionperbin_7.0-7.9_v%s_p%s.pkl' %(version,precision),'rb'))
dat=pickle.load(open('data_package/ionfrac_%s_v%s.pkl' %(str(dust_init),version),'rb'))
dat0=pickle.load(open('data_package/ionfrac_0_v%s.pkl' %version,'rb'))
telist=dat['telist']
taulist=dat['taulist']
print('full tau list=\n',taulist)
tauind=numpy.logspace(1,5,100) #there are 10^5 tau data points in ionfrac.pkl,choose 100, log spaced, just to reduce calculation time
tauind=numpy.array(list([1,2,3,4,5,6,7,8,9,10])+list(tauind),dtype=int)
print('selected tau index to calculate=\n',tauind)
shorttaulist=taulist[tauind]
print('selected tau to calculate=\n',shorttaulist,'\ntotal count of selected tau=',len(shorttaulist))
print('selected temperature(keV) to calculate=',telist)
input('press enter to start plotting...')
dust_dblline_center=E_keV
dust_dblline_yield=yie
dust_center=(E_keV[1]*yie[1]+E_keV[2]*yie[2])/(yie[1]+yie[2])
fig=pylab.figure(1)
#kcent stores dust model result, kcent0 stores pure-gas model result
kcent=numpy.zeros([len(telist),len(tauind)])
kcent0=numpy.zeros([len(telist),len(tauind)])
diff=numpy.zeros([len(telist),len(tauind)])
for ite,te in enumerate(telist):
for iiindex,index in enumerate(tauind):
print('calculating... temperature list=',telist)
print('temperature=',te,'keV','index of tau=',index)
#numerator stores dust model result, numer pure-gas.
numerator=0.
denominator=0.
numer=0.
denom=0.
numerator_array=numpy.zeros(len(dat2['ecent']),dtype=float)
denominator_array=numpy.zeros(len(dat2['ecent']),dtype=float)
numer_array=numpy.zeros(len(dat2['ecent']),dtype=float)
denom_array=numpy.zeros(len(dat2['ecent']),dtype=float)
for iebin,ebin in enumerate(dat2['ecent']):
#dust model
numerator_array[iebin]=ebin*sum(dat2['eperionperbin'][ite,:,iebin]*dat['ionfrac'][ite,index,:])
denominator_array[iebin]=sum(dat2['eperionperbin'][ite,:,iebin]*dat['ionfrac'][ite,index,:])
#pure-gas model
numer_array[iebin]=ebin*sum(dat2['eperionperbin'][ite,:,iebin]*dat0['ionfrac'][ite,index,:])
denom_array[iebin]=sum(dat2['eperionperbin'][ite,:,iebin]*dat0['ionfrac'][ite,index,:])
for iebin,ebin in enumerate(dat2['ecent']):
#choose strong lines to calc centroid
if denominator_array[iebin]> (0.01*max(denominator_array)):
numerator+=numerator_array[iebin]
denominator+=denominator_array[iebin]
if denom_array[iebin]>(0.01*max(denom_array)):
numer+=numer_array[iebin]
denom+=denom_array[iebin]
#dust
dust_emi=dust.calc_dust_kalpha_emi(te)
dust_flux=dat['ionfrac'][ite,index,0]*dust_emi
numerator+=(dust_flux*dust_center)
denominator+=dust_flux
kcent[ite,iiindex]=numerator/denominator
kcent0[ite,iiindex]=numer/denom
diff[ite,iiindex]=kcent0[ite,iiindex]-kcent[ite,iiindex]
ax1=fig.add_subplot(111)
print('plotting...\n')#,len(shorttaulist),len(diff)
for ite,te in enumerate(telist):
plt.plot(shorttaulist,diff[ite,:]*1000,label=repr(te))
plt.semilogx()
#ax.set_ylim([-20,200])
plt.xlabel(r'tau $({\mathrm{cm}^{-3}\mathrm{s}})$')
plt.ylabel('centroid shift(eV)')
plt.xlim(1e9,1e13)
plt.ylim(0,800)
ax1.legend(loc=0)
plt.savefig('results/Gauss_center/centeroid_%s.eps' %(version))
fig2=pylab.figure(2)
ax=fig2.add_subplot(121)
for ite,te in enumerate(telist):
plt.plot(shorttaulist,kcent0[ite,:]*1000,label=repr(te))
plt.semilogx()
print('kcent with 0 dust=\n',kcent0[0,:])
# print('kcent with %s dust=\n' %dust_init/100.0,kcent[0,:])
print('difference=\n',diff[0,:])
plt.xlabel(r'tau $({\mathrm{cm}^{-3}\mathrm{s}})$')
plt.ylabel('centroid (eV)')
plt.xlim(1e9,1e13)
plt.ylim(6900,7950)
plt.legend(loc=0)
ax=fig2.add_subplot(122)
for ite,te in enumerate(telist):
plt.plot(shorttaulist,kcent[ite,:]*1000,label=repr(te))
plt.semilogx()
plt.xlabel(r'tau $({\mathrm{cm}^{-3}\mathrm{s}})$')
plt.ylabel('centroid (eV)')
plt.xlim(1e9,1e13)
plt.ylim(6900,7950)
plt.legend(loc=0)
plt.tight_layout()
plt.savefig('results/Gauss_center/centeroid_shift_%s.eps' %(version),bbox_inches='tight')
plt.show()
| 5,291 |
libs/geGL/scripts/subscripts/generateStaticDeclarations.py
|
dormon/PGRE
| 0 |
2024536
|
#!/usr/bin/python
import sys
import re
import os
import fileinput
from subprocess import Popen, PIPE
data0=""
for line in fileinput.input():
data0+=line
data0=data0.split("\n")[:-1]
def getReturn(type):
if type == "void" or type == "GLvoid":
return ""
return "return "
def printContextDeclaration(data):
params = data.split(",")
args = ",".join(map(lambda x:x[0]+" "+x[1],zip(params[2::2],params[3::2])))
params2 = map(lambda x:re.sub(r"\[.*\]","",x),params);
print " GEGL_EXPORT "+params[0]+" "+params[1]+"("+args+");"
print "#include<geGL/OpenGL.h>"
print "namespace ge{"
print " namespace gl{"
for x in data0:
printContextDeclaration(x)
print " }"
print "}"
| 714 |
TSIS_3/3835.py
|
GMKanat/PP2_spring
| 0 |
2025255
|
a = list(map(int, input().split()))
mx = 10000
for i in range(len(a)):
if a[i] > 0 and mx > a[i]:
mx = a[i]
print(mx)
| 120 |
Commands/CivFR/__init__.py
|
iElden/EldenBot
| 0 |
2023913
|
from .CivFR import CmdCivGeneralFR
from .DynamicDraft import CmdCivFRDDraft
from .FFATournament import CmdFFATournament
from .Draft import CmdCivDraft
from .Voting import CmdCivFRVoting
from .Level import CmdCivFRLevel
from .TeamerTool import CmdTeamerTools
class CmdCivFR(CmdCivGeneralFR, CmdCivFRDDraft, CmdCivDraft, CmdCivFRVoting, CmdCivFRLevel, CmdTeamerTools):
pass
| 376 |
aula01/exec3.py
|
miguelviladev/programming-fundamentals
| 6 |
2023952
|
name = input("Como te chamas? ")
yob = int(input("Em que ano nasceste? "))
print("{} , em 2020 farás {} anos.".format(name, 2020-yob))
| 135 |
src/config.py
|
MiKoMW/R250Project
| 0 |
2025125
|
import os
# Change to DSTC9 dataset
isDSTC = False
# Parameter for the mixer
isMixer = False
mixer_delta = 2
mixer_T = 40
mixer_N_XENT_step = 2000
mixer_N_XENTRL_step = 2000
not_normalise_reward = True
increasing_rl = False
root_dir = os.path.expanduser("~")
root_dir = os.path.join(root_dir, "Desktop")
print_interval = 100
save_model_iter = 1000
train_data_path = "../data/twitter_url/chunked/train_*"
eval_data_path = "../data/twitter_url/chunked/val_*"
decode_data_path = "../data/twitter_url/chunked/test_*"
vocab_path = "../resource/woz3/woz_vocab.txt"
if isDSTC:
vocab_path = "../dstc9/dstc9_vocab.txt"
log_root = "../woz_da_pre_all"
# Hyperparameters
mode = "DAGGER" # other options: MLE/RL/GTI/SO/SIO/DAGGER/DAGGER*/MIXER
alpha = 1.0
beta = 1.0
k1 = 0.9999
k2 = 3000.
hidden_dim= 256
emb_dim= 128
batch_size= 40
sample_size= 4
max_enc_steps= 40
max_dec_steps= 40
beam_size= 8
min_dec_steps= 5
vocab_size= 5000
max_iterations = 50000
lr = 1e-5
pointer_gen = True
is_coverage = False
lr_coverage = 0.15
cov_loss_wt = 1.0
max_grad_norm = 2.0
rand_unif_init_mag = 0.02
trunc_norm_init_std = 1e-4
eps = 1e-12
use_gpu = True
min_earlyStopping = 20000
| 1,172 |
pru/trajectory_sector_intersections.py
|
euctrl-pru/rt-python
| 0 |
2024751
|
# Copyright (c) 2018 Via Technology Ltd. All Rights Reserved.
# Consult your license regarding permissions and restrictions.
"""
Functions to find trajectory sector intersection data.
"""
import numpy as np
import pandas as pd
from via_sphere import global_Point3d, calculate_latitudes, calculate_longitudes
from .AirspaceVolume import AirspaceVolume
from .gis_database_interface import find_horizontal_sector_intersections, \
get_elementary_airspace_name, get_elementary_airspace_altitude_range, \
NotFoundException
from .airspace_intersections import find_3D_airspace_intersections
from pru.logger import logger
log = logger(__name__)
def find_trajectory_section_sector_intersections(smooth_traj, traj_path,
min_altitude, max_altitude,
start_distance, finish_distance):
"""
Find sector intersection positions for a section of a smoothed trajectory.
It calls find_horizontal_sector_intersections to find horizontal intersections
for the trajectory section between start_distance and finish_distance.
If horizontal intersections are found, find_3D_airspace_intersections is
called to find vertical intersections corresponding to the horizontal
intersections.
Parameters
----------
smooth_traj: SmoothedTrajectory
A SmoothedTrajectory containing the flight id, smoothed horizontal path,
time profile and altitude profile.
traj_path : an EcefPath
The EcefPath of the SmoothedTrajectory.
min_altitude: int
The minimum altitude of the trajectory section [feet].
max_altitude: int
The maximum altitude of the trajectory section [feet].
start_distance: float
The distance along the path to the start of the trajectory section
[Nautical Miles].
finish_distance: float
The distance along the path to the end of the trajectory section
[Nautical Miles].
Returns
-------
intersection_positions: a pandas DataFrame
The trajectory user airspace intersection positions.
Empty if no intersections found.
"""
lats = []
lons = []
volume_ids = []
positions = traj_path.subsection_positions(start_distance, finish_distance)
path_lats = calculate_latitudes(positions)
path_lons = calculate_longitudes(positions)
lats, lons, volume_ids = find_horizontal_sector_intersections(smooth_traj.flight_id,
path_lats, path_lons,
int(min_altitude),
int(max_altitude))
if len(lats):
# A dict to hold the intersected volumes
volumes = {}
is_cruising = (min_altitude == max_altitude)
try:
for volume_id in set(volume_ids):
volume_name = get_elementary_airspace_name(volume_id)
bottom_alt = 0
top_alt = 0
if not is_cruising:
bottom_alt, top_alt = get_elementary_airspace_altitude_range(volume_id)
volumes.setdefault(volume_id, AirspaceVolume(volume_name,
bottom_alt, top_alt))
except NotFoundException:
log.exception('sector id: %s not found for flight id: %s',
volume_id, smooth_traj.flight_id)
return pd.DataFrame()
intersection_points = global_Point3d(np.array(lats), np.array(lons))
return find_3D_airspace_intersections(smooth_traj, traj_path,
intersection_points,
volume_ids, volumes, start_distance,
is_cruising)
else:
return pd.DataFrame()
def find_climbing_sector_intersections(smooth_traj, traj_path):
"""
Find airspace sector intersections for a climbing trajectory section.
Parameters
----------
smooth_traj: SmoothedTrajectory
A SmoothedTrajectory containing the flight id, smoothed horizontal path,
time profile and altitude profile.
traj_path : an EcefPath
The EcefPath of the SmoothedTrajectory.
Returns
-------
intersection_positions: a pandas DataFrame
The climbing trajectory user airspace intersection positions.
Empty if no intersections found.
"""
toc_distance = smooth_traj.altp.top_of_climb_distance()
if toc_distance:
min_altitude = smooth_traj.altp.altitudes[0]
max_altitude = smooth_traj.altp.altitudes.max()
return find_trajectory_section_sector_intersections(smooth_traj,
traj_path,
min_altitude, max_altitude,
0.0, toc_distance)
else:
return pd.DataFrame()
def find_cruising_sector_intersections(smooth_traj, traj_path):
"""
Find airspace sector intersections for a cruising trajectory section.
Parameters
----------
smooth_traj: SmoothedTrajectory
A SmoothedTrajectory containing the flight id, smoothed horizontal path,
time profile and altitude profile.
traj_path : an EcefPath
The EcefPath of the SmoothedTrajectory.
Returns
-------
intersection_positions: a pandas DataFrame
The cruising trajectory user airspace intersection positions.
Empty if no intersections found.
"""
toc_distance = smooth_traj.altp.top_of_climb_distance()
tod_distance = smooth_traj.altp.top_of_descent_distance()
if toc_distance < tod_distance:
altitude = smooth_traj.altp.altitudes.max()
return find_trajectory_section_sector_intersections(smooth_traj,
traj_path,
altitude, altitude,
toc_distance, tod_distance)
else:
return pd.DataFrame()
def find_descending_sector_intersections(smooth_traj, traj_path):
"""
Find airspace sector intersections for a descending trajectory section.
Parameters
----------
smooth_traj: SmoothedTrajectory
A SmoothedTrajectory containing the flight id, smoothed horizontal path,
time profile and altitude profile.
traj_path : an EcefPath
The EcefPath of the SmoothedTrajectory.
Returns
-------
intersection_positions: a pandas DataFrame
The descending trajectory user airspace intersection positions.
Empty if no intersections found.
"""
tod_distance = smooth_traj.altp.top_of_descent_distance()
end_distance = smooth_traj.altp.distances[-1]
if tod_distance < end_distance:
max_altitude = smooth_traj.altp.altitudes.max()
min_altitude = smooth_traj.altp.altitudes[-1]
return find_trajectory_section_sector_intersections(smooth_traj,
traj_path,
min_altitude, max_altitude,
tod_distance, end_distance)
else:
return pd.DataFrame()
def find_trajectory_sector_intersections(smooth_traj):
"""
Find airspace sector intersection positions from a smoothed trajectory.
It finds intersections in three sections corresponding to the climbing,
cruising and descending sections of a trajectory.
The resulting intersections are the concatenation of the climbing,
cruising and descending intersections.
Parameters
----------
smooth_traj: SmoothedTrajectory
A SmoothedTrajectory containing the flight id, smoothed horizontal path,
time profile and altitude profile.
Returns
-------
intersection_positions: a pandas DataFrame
The trajectory airspace sector intersection positions.
Empty if no intersections found.
"""
traj_path = smooth_traj.path.ecef_path()
intersections = find_climbing_sector_intersections(smooth_traj, traj_path)
cruise_intersections = find_cruising_sector_intersections(smooth_traj, traj_path)
if not cruise_intersections.empty:
intersections = cruise_intersections if intersections.empty else \
pd.concat([intersections, cruise_intersections], ignore_index=True)
descent_intersections = find_descending_sector_intersections(smooth_traj, traj_path)
if not descent_intersections.empty:
intersections = descent_intersections if intersections.empty else \
pd.concat([intersections, descent_intersections], ignore_index=True)
return intersections
| 8,989 |
problems/da-yin-cong-1dao-zui-da-de-nwei-shu-lcof/solution_3.py
|
MleMoe/LeetCode-1
| 2 |
2024846
|
from typing import List
class Solution:
def __init__(self):
self.nine = 0
self.start = 0
def printNumbers(self, n: int) -> [int]:
def dfs(x):
if x == n:
s = ''.join(num[self.start:])
if s != '0':
res.append(int(s))
if n - self.start == self.nine:
self.start -= 1
return
for i in range(10):
if i == 9:
self.nine += 1
num[x] = str(i)
dfs(x + 1)
self.nine -= 1
if n == 0:
return []
num, res = ['0'] * n, []
self.start = n - 1
dfs(0)
return res
if __name__ == '__main__':
test_cases = [0, 1, 2]
for case in test_cases:
ans = Solution().printNumbers(case)
print(ans)
| 884 |
get_instances2.py
|
ramiamar/sg-manager
| 0 |
2025067
|
import boto3
class InstanceInfo(object):
def __init__(self, instance):
self.id = instance['InstanceId']
self.machine_type = instance['InstanceType']
self.tags = InstanceInfo.parse_tags(instance.get('Tags', []))
self.client = self.tags.get('Client')
self.name = self.tags.get('Name')
self.deployer = self.tags.get('User')
self.service_type = self.tags.get('Type')
self.state = instance['State']['Name']
self.launch_date = instance['LaunchTime']
self.interfaces = list(
[NetworkInterface(ni) for ni in
instance['NetworkInterfaces']])
self.raw = instance
@staticmethod
def parse_tags(tags):
parsed = {}
for k_v_pair in tags:
parsed[k_v_pair['Key']] = k_v_pair['Value']
return parsed
@staticmethod
def get_all():
ec2 = boto3.client('ec2')
res = ec2.describe_instances()
instance_infos = []
for r in res['Reservations']:
for i in r['Instances']:
instance_infos.append(InstanceInfo(i))
return instance_infos
def __str__(self):
return ' '.join(['%s=%s' % (k, v) for k, v in self.__dict__.items()])
def __repr__(self):
return self.__str__()
class NetworkInterface(object):
def __init__(self, network_interface):
self.id = network_interface['NetworkInterfaceId']
self.ip = network_interface['PrivateIpAddress']
self.public_ip = network_interface.get('Association', {}).get(
'PublicIp')
self.subnet = network_interface['SubnetId']
self.vpc = network_interface['VpcId']
self.security_groups = list(
[SecurityGroup(g) for g in network_interface['Groups']])
def __str__(self):
return '[NI id=%s ip=%s pub_ip=%s subnet=%s sgs=%s]' % (
self.id, self.ip, self.public_ip, self.subnet,
self.security_groups)
def __repr__(self):
return self.__str__()
class SecurityGroup(object):
def __init__(self, group):
self.name = group['GroupName']
self.id = group['GroupId']
def __str__(self):
return self.name or self.id
def __repr__(self):
return self.__str__()
def list_nifs_csv():
instance_infos = InstanceInfo.get_all()
print('instance_id,state,nif_id,ip,pub_ip,sgs,tags')
for i in instance_infos:
for ni in i.interfaces:
print('%s,%s,%s,%s,%s,%s,"%s"' % (
i.id, i.state, ni.id, ni.ip, ni.public_ip,
' '.join([str(sg) for sg in ni.security_groups]),
i.tags))
def list_machines_by_sg():
instance_infos = InstanceInfo.get_all()
print('sg,instance_id,instance_type,instance_name')
instances_by_sg = {}
for i in instance_infos:
for ni in i.interfaces:
for sg in ni.security_groups:
if str(sg) not in instances_by_sg:
instances_by_sg[str(sg)] = set()
instances_by_sg[str(sg)].add(i)
for sg, instances in instances_by_sg.items():
for i in instances:
print('%s,%s,%s,%s' % (sg, i.id, i.service_type, i.name))
def list_client_machines():
instance_infos = InstanceInfo.get_all()
for i in instance_infos:
if i.client is None:
continue
print('%s,%s,%s' % (i.id, i.client, i.service_type))
def list_cluster_machines_with_client_tag():
instance_infos = InstanceInfo.get_all()
# ec2resource = boto3.resource('ec2')
for i in instance_infos:
if i.client is None or i.service_type is None:
continue
print('%s,%s,%s,%s' % (i.id, i.name, i.client, i.service_type))
# t = ec2resource.Tag(i.id, 'Client', i.client)
# print('Deleting %r' % t)
# t.delete()
if __name__ == '__main__':
list_machines_by_sg()
#list_nifs_csv()
| 3,929 |
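A small, hedged illustration of the InstanceInfo.parse_tags helper above. The tag list mimics the shape returned by describe_instances; the values and the get_instances2 import path are invented for the example, and importing the module requires boto3.

from get_instances2 import InstanceInfo  # assumes the file above is on sys.path

sample_tags = [  # invented tag data in the EC2 'Tags' shape
    {'Key': 'Name', 'Value': 'web-1'},
    {'Key': 'Client', 'Value': 'acme'},
]
print(InstanceInfo.parse_tags(sample_tags))  # {'Name': 'web-1', 'Client': 'acme'}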
tests/__init__.py
|
gtdavidv/exchangelib
| 0 |
2025079
|
import logging
import sys
import unittest
from exchangelib.util import PrettyXmlHandler
# Always show full repr() output for object instances in unittest error messages
unittest.util._MAX_LENGTH = 2000
if '-v' in sys.argv:
logging.basicConfig(level=logging.DEBUG, handlers=[PrettyXmlHandler()])
else:
logging.basicConfig(level=logging.CRITICAL)
| 356 |
addons/generic_request/migrations/11.0.1.8.0/post-migrate.py
|
marionumza/vocal_v12
| 0 |
2025121
|
from odoo import api, SUPERUSER_ID
from odoo.addons.http_routing.models.ir_http import slugify
def migrate(cr, installed_version):
env = api.Environment(cr, SUPERUSER_ID, {})
# Migrate categories
Category = env['request.category'].with_context(active_test=False)
for record in Category.search([('code', '=', False)]):
record.code = slugify(record.display_name, max_length=0)
# Migrate stage types
StageType = env['request.stage.type'].with_context(active_test=False)
for record in StageType.search([('code', '=', False)]):
record.code = slugify(record.display_name, max_length=0)
| 628 |
gridstographs/3_putittogether/try.py
|
tekjar/fcnd.projects
| 0 |
2024567
|
from enum import Enum
import matplotlib.pyplot as plt
import numpy as np
class Action(Enum):
LEFT = (0, -1, 1)
RIGHT = (0, 1, 1)
UP = (-1, 0, 1)
DOWN = (1, 0, 1)
def __str__(self):
'''
returns string representation of this action
'''
if self == self.LEFT:
return '◀'
elif self == self.RIGHT:
return '▶'
elif self == self.UP:
return '▲'
elif self == self.DOWN:
return '▼'
def move_value(self):
'''
returns the (row, column) delta to add to the current position to perform this action
'''
return self.value[0], self.value[1]
def cost(self):
'''
returns the cost of this action
'''
return self.value[2]
plt.rcParams['figure.figsize'] = 12, 12
path = []
path.append(Action.DOWN)
path.append(Action.DOWN)
path.append(Action.RIGHT)
# Convert the path of actions into an array of (row, column) deltas.
pp = np.array([action.value[:2] for action in path])
x, y = pp[:, 0], pp[:, 1]
print(x)
print(y)
# plt.plot(pp[:, 1], pp[:, 0], 'g')
# plt.xlabel('EAST')
# plt.ylabel('NORTH')
# plt.show()
| 1,107 |
logs/candump2dbc.py
|
AutomotiveDevOps/ctf_tesla_logs
| 0 |
2024273
|
#!/usr/bin/env python3
import sys
from typing import Dict
from typing import List
if len(sys.argv) == 1:
sys.argv.append("final_log")
import json
class CanMsg:
def __init__(self, canid, data):
self.canid = canid
self.data = data
def __repr__(self):
return f"CAN<{self.canid}, {self.data}>"
def parse_log(log_file):
messages = list()
with open(log_file) as f:
for can_msg in f.readlines():
can_id, can_data = can_msg.strip().split(" ")[-1].split("#")
cm = CanMsg(can_id, can_data)
messages.append(cm)
#
unique_datas = dict()
#
for canid in {msg.canid for msg in messages}:
id_datas = {msg.data for msg in messages if msg.canid == canid}
unique_datas[canid] = id_datas
#
return unique_datas
data = parse_log(sys.argv[1])
def gen_dbc_entry(can_id):
dbc_entry = {
"id": int(f"{can_id}", 16),
"is_extended_frame": False,
"name": f"Ox{can_id}",
"signals": list(),
}
def gen_sig(start_bit, bit_length=8):
return {
"bit_length": bit_length,
"factor": "1",
"is_big_endian": True,
"is_float": False,
"is_signed": False,
"name": f"Ox{can_id}_{start_bit}",
"offset": "0",
"start_bit": start_bit,
}
for start_bit in range(0, 64, 8):
sig = gen_sig(start_bit=start_bit)
dbc_entry["signals"].append(sig)
return dbc_entry
dbc = {"messages": list()} # type: Dict[str, List]
unique_message_dbc_threshold = 15
for can_id, can_messages in data.items():
if len(can_messages) > unique_message_dbc_threshold:
continue
dbc["messages"].append(gen_dbc_entry(can_id))
dbc_json = "tesla_autogen.json"
dbc_file = "tesla_autogen.dbc"
print("Machen dbc-json")
with open(dbc_json, "w") as fp:
json.dump(obj=dbc, fp=fp, indent=4, sort_keys=True)
import canmatrix.convert
canmatrix.convert.convert(
infile=dbc_json,
out_file_name=dbc_file,
# ...
)
| 2,078 |
batchkit_examples/speech_sdk/work_item.py
|
arturosorio/batch-processing-kit
| 23 |
2023297
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
from typing import List, Optional
import audiofile
import os
from batchkit.work_item import WorkItemRequest, WorkItemResult
class SpeechSDKWorkItemRequest(WorkItemRequest):
def __init__(self, filepath: str, language: str,
nbest: int, diarization: str, profanity: str,
cache_search_dirs: List[str], output_dir: str,
log_dir: str, allow_resume: bool, enable_sentiment: bool):
"""
:param filepath: input audio file to recognize
:param language: language of the request
:param nbest: how many maximum results to consider per recognition
:param diarization: diarization mode
:param profanity: profanity mode
:param output_dir: where the json result containing the file's transcription
details will be placed.
:param cache_search_dirs: directories where the audio file's json result may
be located if it has already been processed before.
:param log_dir: where per-worker-request Carbon SDK logs will be placed
"""
super().__init__(filepath, language)
self.nbest = nbest
self.diarization = diarization
self.profanity = profanity
self.cache_search_dirs = cache_search_dirs
self.output_folder = output_dir
self.log_dir = log_dir
self.allow_resume = allow_resume
self.enable_sentiment = enable_sentiment
self._cached_duration = None
# override
def priority(self) -> int:
"""
Use the audio's duration as priority, such that longer audio files
commence processing first to potentially lower overall batch processing time.
If the duration cannot be fetched from the audio file's header, a default
priority of -1 is returned signifying the priority could not be determined.
"""
try:
return int(self.duration() * 1000)
except Exception:
return -1
def duration(self) -> float:
"""
Fetch the audio file duration in seconds.
"""
if self._cached_duration:
return self._cached_duration
if not os.path.isfile(self.filepath):
raise FileNotFoundError("Cannot determine duration because file does not exist.")
# Audio file segment ptr file is one kind of work item.
if self.filepath.endswith(".seg.json"):
with open(self.filepath, "r") as f:
meta = json.load(f)
start_offset_secs = float(meta["start_offset"])
end_offset_secs = float(meta["end_offset"])
self._cached_duration = end_offset_secs - start_offset_secs
# Regular audio file is the common kind of work item.
else:
self._cached_duration = audiofile.duration(self.filepath)
return self._cached_duration
class SpeechSDKWorkItemResult(WorkItemResult):
def __init__(self,
request: WorkItemRequest,
passed: bool,
endpoint: str,
latency: float,
attempts: int,
thread: str,
can_retry: bool,
cached: bool,
audio_duration: int,
error_type: Optional[str] = None,
failed_reason: Optional[str] = None,
):
super().__init__(request, passed, endpoint, latency, attempts,
can_retry, thread, cached, error_type, failed_reason)
self.audio_duration = audio_duration
| 3,693 |
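A hedged sketch of the duration/priority logic above, exercising the .seg.json branch of duration(). The segment offsets, the diarization/profanity values and the batchkit_examples.speech_sdk.work_item import path are invented for the example, and batchkit plus audiofile must be installed for the module to import.

import json
import tempfile
from batchkit_examples.speech_sdk.work_item import SpeechSDKWorkItemRequest  # assumed path

# Invented segment metadata in the .seg.json shape parsed by duration().
with tempfile.NamedTemporaryFile('w', suffix='.seg.json', delete=False) as f:
    json.dump({'start_offset': 1.5, 'end_offset': 7.5}, f)

req = SpeechSDKWorkItemRequest(f.name, 'en-US', nbest=1, diarization='None',
                               profanity='Masked', cache_search_dirs=[],
                               output_dir='.', log_dir='.', allow_resume=True,
                               enable_sentiment=False)
print(req.duration())  # 6.0 seconds
print(req.priority())  # 6000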
isw2-master/src/app/core/mensajeEnviado.py
|
marlanbar/academic-projects
| 0 |
2025008
|
from .estadoMensaje import EstadoMensaje
class MensajeEnviado(EstadoMensaje):
# External collaborators
# confirmaciones: Set<ConfirmacionMensaje>
# msg: Mensaje
def __init__(self, unMensaje, unConjDeConfirmaciones):
self.confirmaciones = set(unConjDeConfirmaciones)
self.msg = unMensaje
def puedeEliminarse(self):
return False
def estaPendiente(self):
return False
def enviarSiPendiente(self, unMensajero):
pass
def getConfirmaciones(self):
return set(self.confirmaciones)
def __str__(self):
return str(self.msg)
| 539 |
plugins/shodan/komand_shodan/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
| 46 |
2023457
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .host_information.action import HostInformation
from .shodan_query.action import ShodanQuery
| 138 |
Lib/aescrypt.py
|
evi1hack/viperpython
| 42 |
2025377
|
# -*- coding: utf-8 -*-
# @File : aescrypt.py
# @Date : 2021/2/26
# @Desc :
import base64
from Crypto.Cipher import AES
class Aescrypt(object):
def __init__(self, key, model, iv, encode_):
self.encode_ = encode_
self.model = {'ECB': AES.MODE_ECB, 'CBC': AES.MODE_CBC}[model]
self.key = self.add_16(key)
if model == 'ECB':
self.aes = AES.new(self.key, self.model)  # create an AES cipher object
elif model == 'CBC':
self.aes = AES.new(self.key, self.model, iv)  # create an AES cipher object
def add_16(self, par):
par = par.encode(self.encode_)
while len(par) % 16 != 0:
par += b'\0'
return par
def aesencrypt(self, text):
text = self.add_16(text)
encrypt_text = self.aes.encrypt(text)
return base64.encodebytes(encrypt_text).decode().strip()
def aesdecrypt(self, text):
text = base64.decodebytes(text.encode(self.encode_))
decrypt_text = self.aes.decrypt(text)
return decrypt_text.decode(self.encode_).strip('\0')
| 1,052 |
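A hedged usage sketch of the Aescrypt wrapper above in ECB mode (no IV required); the key and plaintext are illustrative. A fresh instance is used for decryption because some Crypto backends do not allow mixing encrypt and decrypt calls on a single cipher object.

key = 'sixteen byte key'  # illustrative 16-byte key
encryptor = Aescrypt(key, 'ECB', '', 'utf-8')
token = encryptor.aesencrypt('hello world')  # base64-encoded ciphertext

decryptor = Aescrypt(key, 'ECB', '', 'utf-8')
print(decryptor.aesdecrypt(token))  # -> 'hello world'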
src/app.py
|
multa-metrics/multa-metrics-agent
| 1 |
2024464
|
import schedule
import sys
import time
import traceback
from src.handlers.hardware_handler import get_shadow_data
from src.handlers.logging_handler import Logger
from src.handlers.mqtt_handler import (
mqtt_connect_v2,
mqtt_device_defender_publish_v2,
mqtt_shadow_publish_v2,
mqtt_shadow_update_subscribe_v2,
)
from src.handlers.registration_handler import RegistrationHandler, register_agent
from src.settings.app import DEVICE_SYNC_TIME
logs_handler = Logger()
logger = logs_handler.get_logger()
if __name__ == "__main__":
logger.info("Starting application!")
try:
if RegistrationHandler.check_credentials() is False:
logger.info("Unable to find device credentials, starting registration...")
register_agent()
else:
logger.info("Agent is already registered")
except RuntimeError:
logger.error("Error registering and saving credentials...")
sys.exit(1)
try:
logger.info("Starting MQTT connection")
mqtt_connection, shadow_client = mqtt_connect_v2()
except Exception:
logger.error("Error initalizing MQTT Connection... Exiting after a minute")
logger.error(traceback.format_exc())
time.sleep(60)
sys.exit(1)
try:
logger.info("Starting Shadow Subscripitions")
mqtt_shadow_update_subscribe_v2(shadow_client=shadow_client)
except Exception:
logger.error("Error subscribing to Shadow topics... Exiting after a minute")
logger.error(traceback.format_exc())
time.sleep(60)
sys.exit(1)
# logger.info("Scheduling Device Defender publishing...")
# schedule.every(DEVICE_SYNC_TIME).seconds.do(mqtt_device_defender_publish_v2, mqtt_connection)
logger.info("Scheduling Device Shadow publishing...")
schedule.every(DEVICE_SYNC_TIME).seconds.do(
mqtt_shadow_publish_v2, shadow_client=shadow_client, data=get_shadow_data()
)
while True:
schedule.run_pending()
time.sleep(1)
| 2,028 |
logger.py
|
MahiroHoshino/MinutesService
| 0 |
2024271
|
""" Log output class
Created by <NAME>
How to use:
logger = Logger().get_logger()
logger.error("error msg")
logger.debug("debug msg") etc...
@see https://docs.python.jp/3/howto/logging.html
Log output format:
time (year-month-day hour:minute:second,millisecond): function name: line number: log level: message
"""
import logging
class Logger:
def __init__(self):
self._logger = logging.getLogger(__name__)
self._logger.setLevel(logging.DEBUG)
# output file log.txt
file_handler = logging.FileHandler('log.txt')
self._logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
self._logger.addHandler(stream_handler)
# time (year-month-day hour:minute:second,millisecond): function name: line number: log level: message
formatter = logging.Formatter('%(asctime)s:\t%(funcName)s:\t%(lineno)d:\t%(levelname)s:\t%(message)s')
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
def get_logger(self):
return self._logger
| 1,126 |
basic/stack.py
|
salman-kgp/DataStructures
| 0 |
2023964
|
class Stack():
def __init__(self):
self.data = []
def isEmpty(self):
return self.data == []
def push(self,item):
self.data.append(item)
def pop(self):
return self.data.pop()
def peek(self):
return self.data[-1]
def size(self):
return len(self.data)
| 271 |
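A brief usage example for the Stack class above; the pushed values and the basic.stack import path are illustrative.

from basic.stack import Stack  # assumed module path, mirroring the file location

s = Stack()
for item in (1, 2, 3):
    s.push(item)
print(s.peek())     # 3 (top of the stack, not removed)
print(s.pop())      # 3 (removed)
print(s.size())     # 2
print(s.isEmpty())  # False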
sharptable/formatters/cell_formatter.py
|
peterbbryan/sharptable
| 0 |
2024129
|
import matplotlib
from sharptable.tables.matplotlib_table import MatplotlibTable
from .formatter import Formatter
class CellFormatter(Formatter):
"""
ABC for sharptable cells.
"""
def __init__(self, row: int, column: int):
"""
Args:
row: Row number of the cell being formatted.
column: Column number of the cell being formatted
"""
self._row = row
self._column = column
def _get_matplotlib_cell(self, table: MatplotlibTable) -> matplotlib.table.Cell:
"""
Get matplotlib cell at index.
Args:
table: MatplotlibTable containing cell.
Returns:
Matplotlib cell from table at the row, col index.
"""
cell = table.table[self.row, self.column]
return cell
@property
def row(self) -> int:
"""
Row number of the cell being formatted.
"""
return self._row
@property
def column(self) -> int:
"""
Column number of the cell being formatted
"""
return self._column
| 1,108 |
application/pages/training_analysis/views/activity_view.py
|
slamer59/awesome-panel
| 179 |
2024450
|
"""This module provides Views of Activities"""
import panel as pn
import param
class ActivityView(pn.Column):
"""A View of an Activity
Args:
parameters (param.parameterized.Parameters): A set of parameters containing a
file parameter
map_plot (pn.Viewable): A plot of an activity
activity_plots (pn.Viewable): Plots of the activity
"""
def __init__(
self,
parameters: param.parameterized.Parameters,
map_plot: pn.viewable.Viewable,
activity_plots: pn.viewable.Viewable,
):
super().__init__(
pn.Param(
parameters.file,
widgets={
"file": {
"type": pn.widgets.FileInput,
"accept": ".fit",
}
},
),
map_plot,
activity_plots,
sizing_mode="stretch_both",
)
| 985 |
Scripts/dk/resourcepool.py
|
hhg128/DKGL
| 14 |
2025382
|
import _dk_core as core
import os
from . import zipfile
class _DirLocator:
def __init__(self, dir):
self.dir = dir
def getSystemPath(self, file):
file = os.path.normpath(os.path.join(self.dir, file))
if os.path.isfile(file):
return file
def openFile(self, file):
file = os.path.normpath(os.path.join(self.dir, file))
if os.path.isfile(file):
try:
s = core.Data(filemap=file, writable=False)
return s
except:
pass
class _ZipLocator:
def __init__(self, zip, prefix):
assert isinstance(zip, core.ZipUnarchiver)
self.zip = zip
if prefix[-1:] == '/':
self.prefix = prefix[:-1]
else:
self.prefix = prefix
def getSystemPath(self, file):
pass
def openFile(self, file):
files = (self.prefix + '/' + file, self.prefix + '\\' + file)
for f in files:
try:
s = self.zip.openFileStream(f)
return s
except:
pass
_URLPrefix = ('http://', 'ftp://', 'file://')
class ResourcePool(core.ResourceLoader):
"""
ResourcePool
Loads core resources from a file or URL and keeps the objects alive in a pool.
"""
def __init__(self):
super().__init__()
self.resources = {}
self.data = {}
self.locators = {}
def addResource(self, name, res):
'''add resource to pool'''
self.resources[name] = res
def addResourceData(self, name, data):
'''add resource-data to pool'''
self.data[name] = data
def removeAllResources(self):
self.resources = {}
def removeAllResourceData(self):
self.data = {}
def removeResource(self, name):
try:
del(self.resources[name])
except:
pass
def removeResourceData(self, name):
try:
del(self.data[name])
except KeyError:
pass
def findResource(self, name):
return self.resources.get(name, None)
def findResourceData(self, name):
return self.data.get(name, None)
def findResourcePath(self, name):
for loc in self.locators.values():
s = loc.getSystemPath(name)
if s:
return s
def loadResource(self, name):
res = self.findResource(name)
if res:
return res
print('loading resource:', name)
if name.startswith(_URLPrefix):
res = self.resourceFromObject(name)
else:
path = self.findResourcePath(name)
if path:
res = self.resourceFromObject(path, name)
else:
stream = self.openResourceStream(name)
if stream:
res = self.resourceFromObject(stream, name)
if res:
self.addResource(name, res)
return res
def loadResourceData(self, name):
data = self.findResourceData(name)
if data:
return data
print('loading resource data:', name)
if name.startswith(_URLPrefix):
data = core.Data(source = name)
else:
path = self.findResourcePath(name)
if path: # file-system file
try:
data = core.Data(filemap=path, writable=False)
except:
pass
else: # zip file
stream = self.openResourceStream(name)
if stream:
data = core.Data(source=stream)
if data:
self.addResourceData(name, data)
return data
def openResourceStream(self, name):
'''load file or data to byte-like object(buffer),
use mmap (or core.Data) for local file.'''
s = self.findResourceData(name)
if s:
return s
for loc in self.locators.values():
s = loc.openFile(name)
if s:
return s
def addSearchPath(self, path):
'''add a search path; path must be a filesystem directory or a zip file.
a zip file can be given in zip + prefix style,
i.e. addSearchPath("/myfiles.zip/myPrefix"),
in which case only files whose names start with 'myPrefix' will be used.
'''
if path in self.locators:
return True
abspath = os.path.abspath(path)
if os.path.isdir(abspath):
# system directory
loc = _DirLocator(abspath)
self.locators[path] = loc
return True
else:
# zip + prefix
p = path.replace('\\', '/')
index = len(path)
while index >= 0:
p = p[:index]
file = os.path.normpath(p)
rest = path[index+1:]
if os.path.isfile(file):
try:
zip = zipfile.ZipUnarchiver(file)
loc = _ZipLocator(zip, rest)
self.locators[path] = loc
return True
except Exception as e:
print('addSearchPath Error: ', e)
break
index = p.rfind('/')
return False
def removeSearchPath(self, path):
try:
del(self.locators[path])
except KeyError:
pass
def clone(self):
obj = type(self)()
obj.resources = self.resources.copy()
obj.data = self.data.copy()
obj.locators = self.locators.copy()
return obj
| 5,687 |
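A hedged sketch of the search-path mechanism described in the addSearchPath docstring above. The directory, zip archive and resource names are invented, the dk.resourcepool import path is an assumption, and the code only runs inside the DKGL runtime that provides _dk_core.

from dk.resourcepool import ResourcePool  # requires the DKGL runtime (_dk_core)

pool = ResourcePool()
pool.addSearchPath('./resources')            # plain filesystem directory
pool.addSearchPath('./assets.zip/textures')  # zip file + 'textures' prefix
data = pool.loadResourceData('grass.png')    # searched through both locators
if data is None:
    print('grass.png was not found on any search path')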
setup.py
|
shivangtripathi/Topsis
| 0 |
2025237
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
# Here is the module name.
name="Topsis_Shivang_101917183",
# version of the module
version="0.1",
# Name of Author
author="<NAME>",
# your Email address
author_email="<EMAIL>",
# Small description about the module
description='A python package to implement topsis',
long_description_content_type="text/markdown",
# url="https://github.com/username/",
packages=setuptools.find_packages(),
install_requires=[
'numpy',
'pandas',
],
keywords=['topsis', 'Rank', 'Best', 'Model'],
license="MIT",
# classifiers like program is suitable for python3, just leave as it is.
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 857 |
LeetCode_Python/binary_tree/solve_problems_recursively/path_sum.py
|
nlantau/Codewars_2020_2021
| 0 |
2025250
|
# nlantau, 2022-01-01
"""
Given the root of a binary tree and an integer targetSum,
return true if the tree has a >> root-to-leaf << path such that
adding up all the values along the path equals targetSum.
A leaf is a node with no children.
"""
from typing import *
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def hasPathSum(self, root: Optional[TreeNode], targetSum: int) -> bool:
if not root:
return False
elif not root.left and not root.right:
return root.val == targetSum
else:
return self.hasPathSum(root.left, targetSum - root.val) or \
self.hasPathSum(root.right, targetSum - root.val)
def hasPathSum2(self, root: Optional[TreeNode], targetSum: int) -> bool:
stack = [root]
seq = []
tmp = []
while stack:
curr = stack.pop()
print(curr.val)
tmp.append(curr.val)
for child in [curr.left, curr.right]:
if child:
stack.append(child)
elif not child:
seq.append(tmp)
tmp = []
return seq
"""
Notes:
117/117, 50 ms (18.26% faster), 15.2 MB (42.85% better)
Tried a few variants of my own, but somehow input [], 0 resulted in True
but should've been False...
"""
if __name__ == "__main__":
a = TreeNode(5)
al = TreeNode(4)
al_l = TreeNode(11)
al_l_l = TreeNode(7)
al_l_r = TreeNode(2)
ar = TreeNode(8)
ar_l = TreeNode(13)
ar_r = TreeNode(4)
ar_r_r = TreeNode(1)
a.left = al
al.left = al_l
al_l.left = al_l_l
al_l.right = al_l_r
a.right = ar
ar.left = ar_l
ar.right = ar_r
ar_r.right = ar_r_r
print(Solution().hasPathSum(a, 22))
print(Solution().hasPathSum2(a, 22))
z = TreeNode(None)
print(Solution().hasPathSum(z, 0))
| 2,004 |
PLC/Methods/AddIlink.py
|
dreibh/planetlab-lxc-plcapi
| 0 |
2025041
|
#
# <NAME> - INRIA
#
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Auth import Auth
from PLC.TagTypes import TagType, TagTypes
from PLC.Ilinks import Ilink, Ilinks
from PLC.Interfaces import Interface, Interfaces
from PLC.Sites import Sites
from PLC.AuthorizeHelpers import AuthorizeHelpers
class AddIlink(Method):
"""
Create a link between two interfaces
The link has a tag type, that needs be created beforehand
and an optional value.
Returns the new ilink_id (> 0) if successful, faults
otherwise.
"""
roles = ['admin', 'pi', 'tech', 'user']
accepts = [
Auth(),
# refer to either the id or the type name
Ilink.fields['src_interface_id'],
Ilink.fields['dst_interface_id'],
Mixed(TagType.fields['tag_type_id'],
TagType.fields['tagname']),
Ilink.fields['value'],
]
returns = Parameter(int, 'New ilink_id (> 0) if successful')
def call(self, auth, src_if_id, dst_if_id, tag_type_id_or_name, value):
src_if = Interfaces (self.api, [src_if_id],['interface_id'])
if not src_if:
raise PLCInvalidArgument("No such source interface %r"%src_if_id)
dst_if = Interfaces (self.api, [dst_if_id],['interface_id'])
if not dst_if:
raise PLCInvalidArgument("No such destination interface %r"%dst_if_id)
tag_types = TagTypes(self.api, [tag_type_id_or_name])
if not tag_types:
raise PLCInvalidArgument("AddIlink: No such tag type %r"%tag_type_id_or_name)
tag_type = tag_types[0]
# checks for existence - with the same type
conflicts = Ilinks(self.api,
{'tag_type_id':tag_type['tag_type_id'],
'src_interface_id':src_if_id,
'dst_interface_id':dst_if_id,})
if len(conflicts) :
ilink=conflicts[0]
raise PLCInvalidArgument("Ilink (%s,%d,%d) already exists and has value %r"\
%(tag_type['name'],src_if_id,dst_if_id,ilink['value']))
# check authorizations
if 'admin' in self.caller['roles']:
pass
elif not AuthorizeHelpers.caller_may_access_tag_type (self.api, self.caller, tag_type):
raise PLCPermissionDenied("%s, forbidden tag %s"%(self.name,tag_type['tagname']))
elif AuthorizeHelpers.interface_belongs_to_person (self.api, src_if, self.caller):
pass
elif src_if_id != dst_if_id and AuthorizeHelpers.interface_belongs_to_person (self.api, dst_if, self.caller):
pass
else:
raise PLCPermissionDenied("%s: you must one either the src or dst interface"%self.name)
ilink = Ilink(self.api)
ilink['tag_type_id'] = tag_type['tag_type_id']
ilink['src_interface_id'] = src_if_id
ilink['dst_interface_id'] = dst_if_id
ilink['value'] = value
ilink.sync()
self.object_type = 'Interface'
self.object_ids = [src_if_id,dst_if_id]
return ilink['ilink_id']
| 3,142 |
graph_generator.py
|
mihaigalos/netatmo-indicator
| 1 |
2024664
|
from collections import OrderedDict
import plotly, os
import plotly.plotly as py
import json
from plotly.graph_objs import *
import time
class MakePlot:
def __init__(self):
with open(os.path.join(os.path.expanduser('~'), 'netatmo_data.json')) as f:
try:
self.data = json.load(f)
self.data = OrderedDict(sorted(self.data.items(), key=lambda t: t[0]))
except Exception, err:
print(err)
f.close()
def read_data(self):
x= []
y= [ [] for i in range(len(self.data.values()[1]))]
captions = [key for (key, value) in self.data.values()[0].items()]
i = 0
for k,v in self.data.items():
x.append(k.split(" ")[1])
i=0
for location, value in v.items():
y[i].append(value)
i=i+1
return (x, y, captions)
def construct_trace(self, x, y, caption):
return {
"x": x,
"y": y,
"line": {"shape": "spline"},
"mode": "lines+markers",
"type": "scatter",
"name": caption
}
def draw(self):
x, y, captions = self.read_data()
traces = []
i = 0
for y_value in y:
trace = self.construct_trace(x, y_value, captions[i])
traces.append(trace)
i +=1
data = Data(traces)
layout = {
"showlegend":False,
"xaxis" : {"nticks":10}
}
fig = Figure(data=data, layout=layout)
plot_url = plotly.offline.plot(fig)
MakePlot().draw()
| 1,627 |
source/normality_battery.py
|
seemir/normbat
| 1 |
2024493
|
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__email__ = '<EMAIL>'
from source.util.descriptive_statistics import DescriptiveStatistics
from source.util.multivariate_normality import MultivariateNormality
from source.util.univariate_normality import UnivariateNormality
from source.util.result_generator import ResultGenerator
from source.util.assertor import Assertor
from pyfiglet import Figlet
from .version import __version__
import pandas as pd
import numpy as np
import inspect
import datetime
import os
class NormalityBattery:
"""
Battery of univariate normality tests on row or column vectors of pandas.DataFrame
"""
def __init__(self, df: pd.DataFrame):
"""
Constructor / Initiate the class
Parameters
----------
df : pandas.DataFrame
Dataframe for which one wants to test for normality
"""
Assertor.evaluate_pd_dataframe(df)
Assertor.evaluate_numeric_df(df)
if np.prod(df.shape) < 400:
raise ValueError(
"pd.DataFrame must have at least 400 observations, i.e. (20 x 20) in order to "
"conduct any meaningful normality tests, got {}".format(df.shape))
self.df = df
def descriptive_statistics(self, dim: str = 'col', digits: int = 5):
"""
Gets descriptive statistics
Parameters
----------
dim : str
indicate whether one wants to show descriptive statistics along the columns 'col'
or rows 'row', default is 'col'
digits : int
number of decimal places to round down
Returns
-------
Out : str
string containing descriptive statistics
"""
ds = DescriptiveStatistics(self.df, dim=dim, digits=digits)
return ds.generate_descriptive_statistics()
def univariate_normality(self, dim: str = 'col', digits: int = 5):
"""
Checks to see if the values in the rows or columns of a dataframe are univariate normally
distributed using Jarque-Bera, D’Agostino / Pearson’s, Kolmogorov–Smirnov and Shapiro-Wilk.
Parameters
----------
dim : str
indicate whether one wants to test for normality along the columns 'col' or rows
'row', default is 'col'
digits : int
number of decimal places to round down results
Returns
-------
Out : str
string containing test-statistic and p-value of row/col vectors
"""
un = UnivariateNormality(self.df, dim=dim, digits=digits)
return un.generate_univariate_normality_results()
def multivariate_normality(self, digits: int = 5):
"""
Check to see if values of numeric DataFrame follows a multivariate normal distribution
Parameters
----------
digits : int
number of decimal places to round down results
Returns
-------
Out : str
string containing test-statistic and p-value of row/col vectors
"""
mn = MultivariateNormality(self.df, digits=digits)
return mn.generate_multivariate_normality_results()
def result_summary(self, dim: str = 'col', digits: int = 5):
"""
Summarizes the results of the statistical tests
Parameters
----------
dim : str
indicate whether one wants to test for normality along the columns
'col' or rows 'row', default is 'col'
digits : int
number of decimal places to round down
Returns
-------
out : tuple
(summary, mn, un) objects
"""
mn = self.multivariate_normality(digits)
un = self.univariate_normality(dim, digits)
result_summary = ResultGenerator(self.df, mn, un, dim, digits)
return result_summary.generate_result_summary()
def normality_report(self, file_dir: str = "reports/txt", dim: str = 'col', digits: int = 5,
ds: bool = False):
"""
Method that prints a report containing the results of the Normality tests
Parameters
----------
file_dir : str
directory to save the file
dim : str
indicate whether one wants to test for normality along the columns
'col' or rows 'row', default is 'col'
digits : int
number of decimal places to round down
ds : bool
indicating if one wants additional table with descriptive
statistics of the data
"""
Assertor.evaluate_data_type({file_dir: str, dim: str, digits: int, ds: bool})
try:
if not os.path.exists(file_dir):
os.makedirs(file_dir)
except Exception as e:
raise OSError("creation of dir " + file_dir + " failed with: " + str(e))
local_time = datetime.datetime.now().isoformat().replace(":", "-").replace(".", "-")
file = open(os.path.join(file_dir, "NormalityReport_" + local_time + ".txt"), "w")
summary, mn, un = self.result_summary(dim=dim, digits=digits)
figlet = Figlet(font="slant")
title = figlet.renderText("normb")
if ds:
file.write(title)
file.write('Version: ' + __version__ + '\n''\n')
file.write(summary + '\n')
file.write(mn + '\n')
file.write(un + '\n')
file.write(self.descriptive_statistics(dim, digits))
else:
file.write(title)
file.write('Version: ' + __version__ + '\n''\n')
file.write(summary + '\n')
file.write(mn + '\n')
file.write(un + '\n')
file.close()
def __getmethods__(self):
"""
List all methods in class as str
Returns
-------
Out : list of str
names of all methods in class
"""
return [method[0] for method in inspect.getmembers(self, predicate=inspect.ismethod) if
method[0] not in ['__init__', 'normality_report', 'result_summary',
'__getmethods__']]
| 6,452 |
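A short usage sketch of NormalityBattery above. The random 20 x 20 DataFrame satisfies the 400-observation minimum enforced by the constructor; the column names and the source.normality_battery import path are assumptions about the package layout.

import numpy as np
import pandas as pd
from source.normality_battery import NormalityBattery  # assumed package layout

df = pd.DataFrame(np.random.normal(size=(20, 20)),
                  columns=['x%d' % i for i in range(20)])
nb = NormalityBattery(df)
print(nb.descriptive_statistics(dim='col', digits=3))
print(nb.univariate_normality(dim='col', digits=3))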
examples/run_multiple_2016_2017.py
|
fsetti/ProjectMetis
| 0 |
2025191
|
import time
import traceback
from metis.StatsParser import StatsParser
from metis.Utils import send_email
import data2016_94x_v2
import data2017_94x_v2
import mc2017_94x_v2
if __name__ == "__main__":
for i in range(10000):
total_summary = {}
tasks = []
tasks.extend(data2016_94x_v2.get_tasks())
tasks.extend(data2017_94x_v2.get_tasks())
tasks.extend(mc2017_94x_v2.get_tasks())
for task in tasks:
dsname = task.get_sample().get_datasetname()
try:
if not task.complete():
task.process()
except:
traceback_string = traceback.format_exc()
print "Runtime error:\n{0}".format(traceback_string)
send_email(subject="metis error", body=traceback_string)
total_summary[dsname] = task.get_task_summary()
StatsParser(data=total_summary, webdir="~/public_html/dump/metis/", make_plots=False).do()
time.sleep(3.*3600)
| 1,008 |
nstack-cli/data/client/templates/init/python2/service.py
|
nstack/nstack
| 246 |
2022700
|
#!/usr/bin/env python
"""
{{ name }} Service
"""
import nstack
class Module(nstack.Module):
def numChars(self, x):
return len(x)
| 143 |
src/old/diagnose_scripts/plot/plot_contourf.py
|
meteorologytoday/CESM-diagnostic
| 0 |
2025326
|
import Ngl, Nio
import sys, argparse
import numpy as np
def ext(data):
s = data.shape
ndata = np.zeros((s[0], s[1]+1))
ndata[:, 0:-1] = data
ndata[:, -1] = data[:, 0]
return ndata
def ext_axis(lon):
return np.append(lon, 360)
parser = argparse.ArgumentParser()
parser.add_argument('--data-file')
parser.add_argument('--domain-file')
parser.add_argument('--output-dir')
parser.add_argument('--casename')
parser.add_argument('--varname')
parser.add_argument('--title', default="")
parser.add_argument('--colormap')
parser.add_argument('--auto-clevs', action="store_true", default=False)
parser.add_argument('--cmin', type=float)
parser.add_argument('--cmax', type=float)
parser.add_argument('--clevs', type=int)
parser.add_argument('--clabel', default="")
parser.add_argument('--offset', type=float, default=0.0)
parser.add_argument('--scale', default="1.0")
parser.add_argument('--idx-t', type=int, default=-1)
parser.add_argument('--idx-z', type=int, default=-1)
parser.add_argument('--extra-filename', default="")
parser.add_argument('--land-transparent', action="store_true", default=False)
args = parser.parse_args()
f = Nio.open_file(args.data_file, "r")
g = Nio.open_file(args.domain_file, "r")
lon = g.variables["xc"][1, :] #-- read clon
lat = g.variables["yc"][:, 1] #-- read clat
args.scale = eval(args.scale)
var = f.variables[args.varname]
if args.idx_t == -1:
if args.idx_z == -1:
data = var[:, :]
else:
data = var[args.idx_z, :, :]
else:
if args.idx_z == -1:
data = var[args.idx_t, :, :]
else:
data = var[args.idx_t, args.idx_z, :, :]
data -= args.offset
data /= args.scale
missing_value = var._FillValue[0]
data[np.isnan(data)] = missing_value
f.close()
# Extend data to avoid a white stripe on the 0-deg lon
lon = ext_axis(lon)
data = ext(data)
wks_type = "png"
wks = Ngl.open_wks(wks_type, "%s/%s_contourf_%s%s" % (args.output_dir, args.casename, args.varname, args.extra_filename))
cnres = Ngl.Resources()
# Contour resources
cnres.cnFillOn = True
cnres.cnFillPalette = Ngl.read_colormap_file(args.colormap)
#cnres.cnFillPalette = args.colormap
cnres.cnLinesOn = False
cnres.cnLineLabelsOn = False
# Labelbar resource
cnres.lbOrientation = "horizontal"
# Scalar field resources
cnres.sfXArray = lon
cnres.sfYArray = lat
cnres.sfMissingValueV = missing_value
# Map resources
cnres.mpFillOn = True
cnres.mpFillDrawOrder = "PostDraw"
cnres.mpLandFillColor = ("Transparent" if args.land_transparent else "Gray")
cnres.mpOceanFillColor = "Transparent"
cnres.mpInlandWaterFillColor = "Transparent"
cnres.mpCenterLonF = 200.0
if args.auto_clevs == False:
cnres.cnLevelSelectionMode = "ManualLevels"
cnres.cnMinLevelValF = args.cmin
cnres.cnMaxLevelValF = args.cmax
cnres.cnLevelSpacingF = (args.cmax - args.cmin) / args.clevs
cnres.lbOrientation = "horizontal"
cnres.lbTitleString = args.clabel
cnres.lbTitlePosition = "Bottom"
#cnres.lbTitleAngleF = 90.0
cnres.tiMainFontHeightF = 0.01
cnres.tiMainString = args.title
plot = Ngl.contour_map(wks, data, cnres)
Ngl.end()
| 3,278 |
pdbuddy/__init__.py
|
emou/pdbuddy
| 0 |
2023063
|
from __future__ import absolute_import
from pdbuddy.tracer import Tracer
__all__ = ['Tracer']
__version__ = '0.0.1'
| 118 |
hamal/hamal/version.py
|
JackDan9/hamal
| 3 |
2024106
|
# Copyright
from pbr import version as pbr_version
VENDOR = "Open Community"
PRODUCT = "hamal"
loaded = False
version_info = pbr_version.VersionInfo('hamal')
version_string = version_info.version_string
| 206 |
redis_connection.py
|
micael-grilo/E-Cart
| 5 |
2024679
|
import redis
from functools import wraps
class ErrorMessage(Exception):
def __init__(self, msg):
self.value = msg
def __str__(self):
return repr(self.value)
def raise_exception(msg_prefix='', *args, **kwargs):
def deco(f):
@wraps(f)
def decorated_function(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
msg = msg_prefix + str(e)
raise ErrorMessage(msg)
return decorated_function
return deco
class Redis(object):
@raise_exception("Redis connection can't be established due to Error: ")
def get_connections(self, host, port, db=0, password=<PASSWORD>):
REDIS_SERVER = host
REDIS_PORT = port
if REDIS_SERVER and REDIS_PORT:
REDIS_DB = db
REDIS_PASSWORD = password
redis_connection = redis.Redis(
host=REDIS_SERVER, port=REDIS_PORT, db=REDIS_DB, password=<PASSWORD>)
return redis_connection
else:
raise ErrorMessage("REDIS_SERVER or REDIS_PORT not provided")
| 1,133 |
tests/test_sqlite_cache.py
|
brettelliot/ecal
| 5 |
2024339
|
import unittest
import tempfile
import ecal
import pandas as pd
from pandas.util.testing import assert_frame_equal
class TestSqliteCache(unittest.TestCase):
def test_cached_dates_table_created(self):
# GIVEN a database file
f = tempfile.NamedTemporaryFile()
# WHEN it's passed to SqliteCache's constructor
cache = ecal.SqliteCache(f.name)
# THEN the cached_dates table is created
cursor = cache._conn.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
actual = cursor.fetchall()
expected = ('cached_dates',)
self.assertTrue(expected in actual)
f.close()
def test_announcements_table_created(self):
# GIVEN a database file
f = tempfile.NamedTemporaryFile()
# WHEN it's passed to SqliteCache's constructor
cache = ecal.SqliteCache(f.name)
# THEN the announcements table is created
cursor = cache._conn.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
actual = cursor.fetchall()
expected = ('announcements',)
self.assertTrue(expected in actual)
f.close()
def test_create_string_of_rows_for_VALUES_clause(self):
# Given a SqliteCache and list of date strings
f = tempfile.NamedTemporaryFile()
cache = ecal.SqliteCache(f.name)
date_list = ['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05']
# When passed to _create_string_of_rows_for_VALUES_clause()
actual = cache._create_string_of_rows_for_VALUES_clause(date_list)
# Then it should be a string of tuples where each tuple only has one element and each is a row.
# And also, these aren't python tuples with one element so they don't look like: (element,)
expected = "('2018-01-01'),('2018-01-02'),('2018-01-03'),('2018-01-04'),('2018-01-05')"
self.assertEqual(actual, expected)
def test_check_for_missing_dates_that_are_not_in_the_cache(self):
# GIVEN SqliteCache without any cached dates
f = tempfile.NamedTemporaryFile()
cache = ecal.SqliteCache(f.name)
        # When the cache is asked to identify missing dates and none of the dates are in the cache
date_list = ['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05']
actual = cache.check_for_missing_dates(date_list)
        # Then all of the dates are returned as missing
expected = ['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05']
self.assertListEqual(actual, expected)
f.close()
def test_check_for_missing_dates_that_are_in_the_cache(self):
# GIVEN SqliteCache with a cached date
f = tempfile.NamedTemporaryFile()
cache = ecal.SqliteCache(f.name)
cursor = cache._conn.cursor()
cursor.execute("insert into cached_dates values('2018-01-03')")
        # When the cache is asked to identify missing dates and one of the dates is in the cache
date_list = ['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05']
actual = cache.check_for_missing_dates(date_list)
# Then the cached dates are not returned
expected = ['2018-01-01', '2018-01-02', '2018-01-04', '2018-01-05']
self.assertListEqual(actual, expected)
f.close()
def test_add_missing_dates(self):
# Given an SqliteCache
f = tempfile.NamedTemporaryFile()
cache = ecal.SqliteCache(f.name)
# When some missing dates are added
missing_dates = ['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05']
cache.add(missing_dates, None)
# Then they should be found in the cached_dates tables.
expected = [('2018-01-01',), ('2018-01-02',), ('2018-01-03',), ('2018-01-04',), ('2018-01-05',)]
c = cache._conn.cursor()
c.execute('select * from cached_dates;')
actual = c.fetchall()
self.assertListEqual(actual, expected)
def test_add_missing_dates_and_announcements(self):
# Given an SqliteCache
f = tempfile.NamedTemporaryFile()
cache = ecal.SqliteCache(f.name)
# When some missing dates and announcements are added
missing_dates = ['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05']
uncached_announcements = {'ticker': ['AEHR', 'ANGO', 'FC', 'LW', 'PKE', 'PSMT', 'RPM', 'SONC', 'WBA'],
'when': ['amc', 'bmo', 'amc', 'bmo', 'bmo', 'amc', 'bmo', 'amc', 'bmo'],
'date': ['2018-01-05', '2018-01-05', '2018-01-05', '2018-01-05', '2018-01-05', '2018-01-05',
'2018-01-05', '2018-01-05', '2018-01-05']}
uncached_announcements_df = pd.DataFrame.from_dict(uncached_announcements)
uncached_announcements_df = uncached_announcements_df.set_index('date')
uncached_announcements_df = uncached_announcements_df[['ticker', 'when']]
cache.add(missing_dates, uncached_announcements_df)
# Then they should be found in the cached_dates tables.
expected = [('2018-01-01',), ('2018-01-02',), ('2018-01-03',), ('2018-01-04',), ('2018-01-05',)]
c = cache._conn.cursor()
c.execute('select * from cached_dates;')
actual = c.fetchall()
self.assertListEqual(actual, expected)
# And they should be found in the announcements table
expected = [('2018-01-05', 'AEHR', 'amc'), ('2018-01-05', 'ANGO', 'bmo'), ('2018-01-05', 'FC', 'amc'),
('2018-01-05', 'LW', 'bmo'), ('2018-01-05', 'PKE', 'bmo'), ('2018-01-05', 'PSMT', 'amc'),
('2018-01-05', 'RPM', 'bmo'), ('2018-01-05', 'SONC', 'amc'), ('2018-01-05', 'WBA', 'bmo')]
c = cache._conn.cursor()
c.execute('select * from announcements;')
actual = c.fetchall()
self.assertListEqual(actual, expected)
def test_fetch_calendar(self):
# Given an SqliteCache with some data
f = tempfile.NamedTemporaryFile()
cache = ecal.SqliteCache(f.name)
missing_dates = ['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05']
uncached_announcements = {'ticker': ['AEHR', 'ANGO', 'FC', 'LW', 'PKE', 'PSMT', 'RPM', 'SONC', 'WBA'],
'when': ['amc', 'bmo', 'amc', 'bmo', 'bmo', 'amc', 'bmo', 'amc', 'bmo'],
'date': ['2018-01-05', '2018-01-05', '2018-01-05', '2018-01-05', '2018-01-05', '2018-01-05',
'2018-01-05', '2018-01-05', '2018-01-05']}
uncached_announcements_df = pd.DataFrame.from_dict(uncached_announcements)
uncached_announcements_df = uncached_announcements_df.set_index('date')
uncached_announcements_df = uncached_announcements_df[['ticker', 'when']]
cache.add(missing_dates, uncached_announcements_df)
# When we try and fetch data
actual = cache.fetch_calendar('2018-01-01', '2018-01-05')
# Then we get a dataframe containing it.
assert_frame_equal(actual, uncached_announcements_df)
| 7,142 |
python/cross_validation.py
|
micaelverissimo/ringerlowet
| 0 |
2024580
|
__all__ = ['CV_Skeleton']
import numpy as np
from keras import backend as K
from keras.layers import Dense
class CV_Skeleton:
def __init__(self, in_X, in_y, tuning_params, preproc_method, cv_method, ml_model, categorial_y=True):
self.in_X = in_X
self.in_y = in_y
self.tuning_params = tuning_params
self.preproc_method = preproc_method
self.cv_method = cv_method
self.ml_model = ml_model
self.cv_dict = {}
self.cv_dict['model'] = self.ml_model
if categorial_y:
from keras.utils import to_categorical
self.sparse_y = to_categorical(self.in_y)
def get_X(self):
return self.in_X
def get_y(self):
return self.in_y
def create_train_test_indices(self):
return list(self.cv_method.split(self.in_X, self.in_y))
def get_train_test_indices(self):
return self.train_test_indices
def get_preproc_params(self):
return self.preproc_method.get_params()
def reset_model(self):
session = K.get_session()
for layer in self.ml_model.layers:
if isinstance(layer, Dense):
old = layer.get_weights()
layer.weights[0].initializer.run(session=session)
layer.weights[1].initializer.run(session=session)
else:
print(layer, "not reinitialized")
def get_cv_dict(self):
return self.cv_dict
def cv_loop(self):
self.train_test_indices = self.create_train_test_indices()
for idx, ifold in enumerate(self.train_test_indices):
best_loss = 100.0
train_id, test_id = ifold[0], ifold[1]
self.cv_dict[idx] = {}
self.cv_dict[idx]['trn_tst_indices'] = ifold
for init in range(self.tuning_params['n_inits']):
print('Training in the Fold: %i | Init: %i' %(idx+1, init+1))
# reset the weights of model
self.reset_model()
# create a scaler to prepare the data and fit using only the train data
self.preproc_method.fit(self.in_X[train_id])
# transform all data
self.X_norm = self.preproc_method.transform(self.in_X)
self.train_evo = self.ml_model.fit(self.X_norm[train_id], self.sparse_y[train_id],
batch_size=self.tuning_params['batch_size'],
epochs=self.tuning_params['epochs'],
validation_data=(self.X_norm[test_id], self.sparse_y[test_id]))
if np.min(self.train_evo.history['val_loss']) < best_loss:
best_loss = np.min(self.train_evo.history['val_loss'])
self.cv_dict[idx]['history'] = self.train_evo.history
self.cv_dict[idx]['weights'] = self.ml_model.get_weights()
| 2,940 |
Lib/test/test_compiler/test_static/unknown_names.py
|
mananpal1997/cinder
| 0 |
2024165
|
from __static__ import TYPED_DOUBLE
import re
from compiler.errors import TypedSyntaxError
from .common import StaticTestBase
class UnknownNameTests(StaticTestBase):
def test_unknown_name_toplevel(self) -> None:
codestr = """
b = a + 1
"""
self.type_error(codestr, r"Name `a` is not defined.")
def test_unknown_name_class_toplevel(self) -> None:
codestr = """
class C:
b: int = a + 1
"""
self.type_error(codestr, r"Name `a` is not defined.")
def test_unknown_name_method(self) -> None:
codestr = """
class C:
def foo(self) -> int:
b = a + 1
return 0
"""
self.type_error(codestr, r"Name `a` is not defined.")
def test_unknown_name_function(self) -> None:
codestr = """
def foo() -> int:
return a
"""
self.type_error(codestr, r"Name `a` is not defined.")
def test_builtins_ok(self) -> None:
codestr = """
def foo() -> None:
a = open("sourcefile.hs")
"""
self.compile(codestr)
def test_no_unknown_name_error_assignments(self) -> None:
codestr = """
def foo() -> None:
a: int = 1
b = 2
"""
self.compile(codestr)
def test_unknown_name_error_augassign(self) -> None:
codestr = """
def foo() -> None:
a += 1
"""
self.type_error(codestr, r"Name `a` is not defined.")
def test_with_optional_vars_are_known(self) -> None:
codestr = """
def foo(x) -> None:
with x() as y:
pass
"""
self.compile(codestr)
def test_inline_import_supported(self) -> None:
codestr = """
def f():
import math
return math.isnan
"""
self.compile(codestr)
def test_inline_import_as_supported(self) -> None:
codestr = """
def f():
import os.path as road # Modernization.
return road.exists
"""
self.compile(codestr)
def test_inline_from_import_names_supported(self) -> None:
acode = """
x: int = 42
"""
bcode = """
def f():
from a import x
return x
"""
bcomp = self.compiler(a=acode, b=bcode).compile_module("b")
def test_inline_from_import_names_supported_alias(self) -> None:
acode = """
x: int = 42
"""
bcode = """
def f():
from a import x as y
return y
"""
bcomp = self.compiler(a=acode, b=bcode).compile_module("b")
def test_unknown_decorated_functions_declared(self) -> None:
codestr = """
def foo(x):
return x
def bar():
baz()
@foo
def baz():
pass
"""
self.compile(codestr)
def test_cellvars_known(self) -> None:
codestr = """
def use(x):
return x
def foo(x):
use(x)
def nested():
return x
return nested
"""
self.compile(codestr)
def test_name_defined_in_except_and_else_known(self) -> None:
codestr = """
def foo(self):
try:
pass
except Exception:
a = None
else:
a = None
return a
"""
self.compile(codestr)
    def test_name_defined_only_in_try_else_unknown(self) -> None:
codestr = """
def foo(self):
try:
pass
except Exception:
pass
else:
a = None
return a
"""
self.type_error(codestr, r"Name `a` is not defined.")
def test_name_defined_only_in_if_unknown(self) -> None:
codestr = """
def foo(self, p):
if p:
a = None
return a
"""
self.type_error(codestr, r"Name `a` is not defined.")
def test_name_defined_only_in_else_unknown(self) -> None:
codestr = """
def foo(self, p):
if p:
pass
else:
a = None
return a
"""
self.type_error(codestr, r"Name `a` is not defined.")
def test_name_defined_terminal_except_raises(self) -> None:
codestr = """
def foo(self):
try:
a = None
except:
raise Exception
return a
"""
self.compile(codestr)
def test_name_defined_terminal_except_returns(self) -> None:
codestr = """
def foo(self):
try:
a = None
except:
return None
return a
"""
self.compile(codestr)
| 5,184 |
tests/test_app_wsgi.py
|
mirekdlugosz/reader
| 205 |
2023468
|
import os
def dummy_plugin(reader):
reader._dummy_was_here = True
def test_app_wsgi(monkeypatch, db_path):
# This assumes no-one else imports reader._app.wsgi.app.
# Also, further imports will yield the same app from this test.
monkeypatch.setitem(os.environ, 'READER_DB', db_path)
monkeypatch.setitem(os.environ, 'READER_PLUGIN', 'test_app_wsgi:dummy_plugin')
monkeypatch.setitem(os.environ, 'READER_APP_PLUGIN', 'test_app_wsgi:dummy_plugin')
from reader._app.wsgi import app
from reader._app import get_reader
with app.app_context():
assert get_reader()._dummy_was_here
assert app._dummy_was_here
| 654 |
mt_metadata/transfer_functions/emtf_xml/data_type.py
|
kujaku11/mt_metadata
| 10 |
2025223
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 21:30:36 2020
:copyright:
<NAME> (<EMAIL>)
:license: MIT
"""
# =============================================================================
# Imports
# =============================================================================
from mt_metadata.base.helpers import write_lines
from mt_metadata.base import get_schema, Base
from .standards import SCHEMA_FN_PATHS
# =============================================================================
attr_dict = get_schema("data_type", SCHEMA_FN_PATHS)
# =============================================================================
class DataType(Base):
__doc__ = write_lines(attr_dict)
def __init__(self, **kwargs):
self.name = None
self.type = None
self.description = None
self.tag = None
self.external_url = None
self.intention = None
self.input = None
self.output = None
self.units = None
super().__init__(attr_dict=attr_dict, **kwargs)
| 1,038 |
project/users/migrations/0003_auto_20210320_2303.py
|
nicoeft/django-starter-api
| 0 |
2024611
|
# Generated by Django 2.2.19 on 2021-03-20 23:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_user_issued_at'),
]
operations = [
migrations.AlterField(
model_name='user',
name='issued_at',
field=models.DateTimeField(auto_now_add=True, help_text='Date time after wich tokens are valid .', verbose_name='issued at'),
),
]
| 469 |
theape/plugins/compositeplugin.py
|
rsnakamura/theape
| 0 |
2024788
|
# python standard library
from collections import OrderedDict
# this package
from theape import BasePlugin
#from ape.parts.watchers import TheWatcher
from theape.parts.storage.filestorage import FileStorage
SECTION = 'WATCHER'
INTERVAL_OPTION = 'interval'
TOTAL_OPTION = 'total'
VERBOSE_OPTION = 'verbose'
configuration = """
[{0}]
#
""".format(SECTION,
TOTAL_OPTION,
INTERVAL_OPTION,
VERBOSE_OPTION)
sections = OrderedDict()
sections['name'] = '{bold}sleep{reset} -- a countdown timer that blocks until time is over'
sections['description'] = '{bold}sleep{reset} is a verbose no-op (by default) meant to allow the insertion of a pause in the execution of the APE. At this point all calls to sleep will get the same configuration.'
sections['configuration'] = configuration
sections['see also'] = 'EventTimer, RelativeTime, AbsoluteTime'
sections['options'] = """
The configuration options --
{bold}end{reset} : an absolute time given as a time-stamp that can be interpreted by `dateutil.parser.parse`. This is for the cases where you have a specific time that you want the sleep to end.
{bold}total{reset} : a relative time given as pairs of '<amount> <units>' -- e.g. '3.4 hours'. Most units only use the first letter, but since `months` and `minutes` both start with `m`, you have to use two letters to specify them. The sleep will stop at the start of the sleep + the total time given.
{bold}interval{reset} : The amount of time between reports of the time remaining (default = 1 second). Use the same formatting as the `total` option.
{bold}verbose{reset} : If True (the default) then report time remaining at specified intervals while the sleep runs.
One of {bold}end{reset} or {bold}total{reset} needs to be specified. Everything else is optional.
"""
sections['author'] = 'ape'
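# A hedged illustration of a configuration section using the options described
# above (section name and values are examples only; the section name the parser
# actually keys on is whatever this plugin's SECTION constant / wiring expects):
#
#   [WATCHER]
#   total = 2 minutes
#   interval = 5 seconds
#   verbose = True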
class Sleep(BasePlugin):
"""
A plugin for TheBigSleep
"""
def __init__(self, *args, **kwargs):
"""
Constructor for Sleep
"""
super(Sleep, self).__init__(*args, **kwargs)
return
def fetch_config(self):
"""
prints a config-file sample
"""
print(configuration)
@property
def sections(self):
"""
Help dictionary
"""
if self._sections is None:
self._sections = sections
return self._sections
@property
def product(self):
"""
A built TheBigSleep object
:return: TheBigSleep
"""
if self._product is None:
end = self.configuration.get_datetime(section=SLEEP_SECTION,
option=END_OPTION,
optional=True)
total = self.configuration.get_relativetime(section=SLEEP_SECTION,
option=TOTAL_OPTION,
optional=True)
interval = self.configuration.get_relativetime(section=SLEEP_SECTION,
option=INTERVAL_OPTION,
optional=True,
default=1)
if interval != 1:
interval = interval.total_seconds()
verbose = self.configuration.get_boolean(section=SLEEP_SECTION,
option=VERBOSE_OPTION,
optional=True,
default=True)
self._product = TheBigSleep(end=end,
total=total,
interval=interval,
verbose=verbose)
return self._product
| 3,898 |
fibo.py
|
AShish7208/python-data-structure
| 0 |
2024982
|
def fibo(n,lookup):
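    # Memoized Fibonacci: lookup caches each value so it is computed only once.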
if n<=1:
lookup[n]=n
if lookup[n] is None:
lookup[n] = fibo(n-1,lookup) + fibo(n-2,lookup)
return lookup[n]
def main():
n=34
lookup = [None]*101
print("Fibonacci Number is ",fibo(n,lookup))
if __name__ == "__main__":
main()
| 336 |
verres/architecture/local_error_cnn.py
|
csxeba/Verres
| 0 |
2024695
|
import tensorflow as tf
from verres.architecture.layers import local_error as le_layers
from verres.methods.local_error_model_factory import LocalErrorModelFactory
class LocalErrorCNN:
def __init__(self,
input_shape=(32, 32, 3),
output_dim=10,
output_activation="softmax",
output_loss="categorical_crossentropy",
optimizer="adam",
use_label_prediction_loss=True,
use_similarity_loss=True,
use_gradient_barrier=True,
backbone_trainable=True,
alpha=0.5):
self.backbone_trainable = backbone_trainable
self.use_gradient_barrier = use_gradient_barrier
self.use_similarity_loss = use_similarity_loss
self.use_label_prediction_loss = use_label_prediction_loss
self.optimizer = optimizer
self.output_loss = output_loss
self.output_activation = output_activation
self.input_shape = input_shape
self.output_dim = output_dim
self.local_error_model_factory = None
self.alpha = alpha
def build_experiment_default(self):
inputs = tf.keras.Input(self.input_shape)
llkwargs = dict(use_gradient_barrier=self.use_gradient_barrier,
use_label_prediction_loss=self.use_label_prediction_loss,
use_similarity_loss=self.use_similarity_loss,
num_output_classes=self.output_dim,
label_prediction_activation=self.output_activation,
trainable=self.backbone_trainable)
layers = [
le_layers.LocalErrorConvContainer(
32, (5, 5), padding="same", activation="relu", strides=2, **llkwargs),
le_layers.LocalErrorConvContainer(
32, (3, 3), padding="same", activation="relu", **llkwargs),
le_layers.LocalErrorConvContainer(
64, (5, 5), padding="same", activation="relu", strides=2, **llkwargs),
le_layers.LocalErrorConvContainer(
64, (3, 3), padding="same", activation="relu", **llkwargs),
le_layers.LocalErrorConvContainer(
128, (5, 5), padding="same", activation="relu", strides=2, **llkwargs),
tf.keras.layers.GlobalAveragePooling2D(),
le_layers.LocalErrorDenseContainer(
32, activation="relu", **llkwargs),
tf.keras.layers.Dense(self.output_dim, activation=self.output_activation)
]
x = inputs
for layer in layers:
x = layer(x)
self.local_error_model_factory = LocalErrorModelFactory(
input_tensor=inputs,
hidden_layers=layers[:-1],
output_layer=layers[-1]
)
self.local_error_model_factory.compile(
optimizer=self.optimizer, loss=self.output_loss, metrics=["acc"], alpha=self.alpha
)
def build_vgg8b(self):
inputs = tf.keras.Input(self.input_shape)
llkwargs = dict(use_gradient_barrier=self.use_gradient_barrier,
use_label_prediction_loss=self.use_label_prediction_loss,
use_similarity_loss=self.use_similarity_loss,
num_output_classes=self.output_dim,
label_prediction_activation=self.output_activation,
trainable=self.backbone_trainable)
layers = [
le_layers.LocalErrorConvContainer(
128, (3, 3), padding="same", activation="relu", **llkwargs),
le_layers.LocalErrorConvContainer(
256, (3, 3), padding="same", activation="relu", **llkwargs),
tf.keras.layers.MaxPool2D(),
le_layers.LocalErrorConvContainer(
256, (3, 3), padding="same", activation="relu", **llkwargs),
le_layers.LocalErrorConvContainer(
512, (3, 3), padding="same", activation="relu", **llkwargs),
tf.keras.layers.MaxPool2D(),
le_layers.LocalErrorConvContainer(
512, (3, 3), padding="same", activation="relu", **llkwargs),
tf.keras.layers.MaxPool2D(),
le_layers.LocalErrorConvContainer(
512, (3, 3), padding="same", activation="relu", **llkwargs),
tf.keras.layers.MaxPool2D(),
tf.keras.layers.Flatten(),
le_layers.LocalErrorDenseContainer(
1024, activation="relu", **llkwargs),
tf.keras.layers.Dense(self.output_dim, activation=self.output_activation)]
x = inputs
for layer in layers:
x = layer(x)
self.local_error_model_factory = LocalErrorModelFactory(inputs, layers[:-1], output_layer=layers[-1])
self.local_error_model_factory.compile(
optimizer=self.optimizer, loss=self.output_loss, metrics=["acc"], alpha=self.alpha
)
def fit_generator(self,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
kwargs = locals()
if "self" in kwargs:
kwargs.pop("self")
kwargs["generator"] = self.local_error_model_factory.adapt_data_generator(
kwargs["generator"]
)
if kwargs["validation_data"] is not None:
kwargs["validation_data"] = self.local_error_model_factory.adapt_data_generator(
kwargs["validation_data"]
)
return self.local_error_model_factory.training_model.fit_generator(**kwargs)
def predict(self, x, **kwargs):
return self.local_error_model_factory.inference_model.predict(x, **kwargs)
| 6,126 |
tests/tree/sparse_merkle_tree/test_sparse_merkle_tree_performance.py
|
ricott1/dingus
| 0 |
2023197
|
from typing import Coroutine
from dingus.tree.sparse_merkle_tree import SparseMerkleTree
from dingus.tree.constants import EMPTY_HASH
import time
import asyncio
from tests.tree.utils import create_test_case
KEY_LENGTH = 32
def test_large_update(capsys) -> None:
start_time = time.time()
initial_keys, initial_values = create_test_case(1000000)
extra_keys, extra_values = create_test_case(10000)
with capsys.disabled():
print(f"\ncreate test cases: {time.time() - start_time:.2f}s")
asyncio.run(
case_testing_batch(
initial_keys, initial_values, extra_keys, extra_values, capsys
)
)
async def case_testing_batch(
initial_keys, initial_values, extra_keys, extra_values, capsys
) -> Coroutine[None, None, SparseMerkleTree]:
_smt = SparseMerkleTree(KEY_LENGTH)
assert _smt.root.hash == EMPTY_HASH
start_time = time.time()
new_root = await _smt.update(initial_keys, initial_values)
assert _smt.root.hash == new_root.hash
with capsys.disabled():
print(
f"create tree with {len(initial_keys)} leaves: {time.time() - start_time:.2f}s"
)
start_time = time.time()
extra_new_root = await _smt.update(extra_keys, extra_values)
assert _smt.root.hash == extra_new_root.hash
with capsys.disabled():
print(
f"update tree with {len(extra_keys)} leaves: {time.time() - start_time:.2f}s"
)
return _smt
| 1,455 |
example/PosePrediction/dualLSTM/network.py
|
ddddwee1/SULT
| 18 |
2024951
|
import numpy as np
import tensorflow as tf
import model3 as M
import config
import util
from tensorflow.python.training.tracking.data_structures import NoDependency
LSTM_DIM = config.LSTM_DIM
IN_DIM = config.IN_DIM
class Decoder(M.Model):
def initialize(self):
self.c1 = M.Dense(512, activation=M.PARAM_LRELU)
self.c2 = M.Dense(256, activation=M.PARAM_LRELU)
self.c3 = M.Dense(IN_DIM)
def forward(self, x):
return self.c3(self.c2(self.c1(x)))
class LSTM2(M.Model):
def initialize(self, outdim):
self.hc = NoDependency({})
self.outdim = outdim
self.LSTMCell = M.LSTMCell(outdim)
def forward(self, x, branch, init_hc=False):
branch = str(branch)
if (not (branch in self.hc)) or (init_hc):
self.hc[branch] = [tf.zeros([x.shape[0], self.outdim]), tf.zeros([x.shape[0], self.outdim])]
h,c = self.hc[branch]
# print('h',h.shape,'c',c.shape,'x',x.shape)
next_h, next_c = self.LSTMCell(x, h, c)
self.hc[branch] = [next_h, next_c]
return next_h
class PosePredNet(M.Model):
def initialize(self):
self.lstm_l1 = LSTM2(LSTM_DIM)
self.lstm_l2 = LSTM2(LSTM_DIM)
self.dec = Decoder()
def forward(self, enc_in, pred_step):
vs = util.convert_velocity(enc_in)
out_layer1 = [self.lstm_l1(vs[i], 0, init_hc=i==0) for i in range(len(vs))]
out_layer2a = [self.lstm_l2(x, 0, init_hc=i==0) for i,x in enumerate(out_layer1[0::2])]
out_layer2b = [self.lstm_l2(x, 1, init_hc=i==0) for i,x in enumerate(out_layer1[1::2])]
out_layer2 = [out_layer2a, out_layer2b]
predictions = []
pred_pos = tf.convert_to_tensor(enc_in[-1])
for i in range(pred_step):
step = len(vs) + i - 1
pred_v = self.dec(tf.concat([out_layer1[step], out_layer2[step%2][step//2]], axis=1))
pred_pos = pred_pos + pred_v
predictions.append(pred_pos)
if i!=(pred_step-1):
out_layer1.append(self.lstm_l1(pred_v, 0))
step += 1
# print(step, len(out_layer1))
out_layer2[step%2].append(self.lstm_l2(out_layer1[step], step%2))
return predictions
| 1,985 |
modules/storage/mlstorage.py
|
imdatsolak/bender
| 0 |
2023384
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from modules.mlbendermodule import MLBenderModule
"""
MLStorage is an abstract (parent) class for all Storage classes
Copyright (c) 2019 <NAME>
All Rights Reserved.
"""
class MLStorage(MLBenderModule):
def __init__(self, configDictionary):
super(MLStorage, self).__init__(configDictionary)
self.profile = {
"name" : "mlstorage-abstract",
"class" : "storage",
"version" : "1.0"
}
def capabilities(self):
return self.profile
def initForBender(self, benderInstance):
self.benderCore = benderInstance
def storeDataForKey(self, data, key, ownerID = "NO_OWNER", timeLimitInSeconds = 1000*365*24*60*60):
        # Stores arbitrary data in the permanent storage that can be accessed by any module
# Returns 'True' if successful, otherwise 'False'
return False
def updateLastAccessTimeOfDataForKeyAndOwner(self, key, ownerID):
# There is a "last access" time for each entry in the permanent storage module.
# Using this method, you can update the last access time of that entry.
# The time used is "now"
return False
def updateTimelimitOfDataWithKeyAndOwner(self, newTimeLimitInSeconds, key, ownerID):
# Updates the timelimit of the entry to "now+timeLimitInSeconds"
# returns True if the entry existed and update was successful
# returns False otherwise
return False
def updateTimelimitOfAllDataForOwner(self, newTimeLimitInSeconds, ownerID):
# Updates the timelimit of all entries of the ownerID to "now+newTimeLimitInSeconds"
# This method is mainly used by the session module as it tracks when the last
# communication within a session was.
return False
def deleteDataForKey(self, key, ownerID = "NO_OWNER"):
# Deletes the data stored with the key and ownerID
# If no data exists, it does nothing
return False
def deleteDataForKey(self, key):
# Deletes the data associated with the key that has NO ownerID
return False
def deleteAllDataForOwner(self, ownerID):
# Deletes all data (with all keys) for the owner "ownerID"
return False
def dataForKey(self, key, ownerID = "NO_OWNER"):
# Returns the data associated with key and that has no ownerID
return False
def lastAccessTimeForKeyAndOwner(self, key, ownerID):
# Returns the last time this entry was accessed (based on "updateLastAccessTime...")
        # returns None if the entry doesn't exist
return False
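# Hedged usage sketch for a hypothetical concrete subclass (class name and values
# below are illustrative only; the calls mirror the abstract stubs above):
#
#   storage = SomeConcreteStorage(configDictionary)
#   storage.initForBender(bender)
#   storage.storeDataForKey({"state": 1}, "session-key", ownerID="session-module")
#   data = storage.dataForKey("session-key", ownerID="session-module")
#   storage.deleteAllDataForOwner("session-module")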
| 2,819 |
personalwebsite/item_parser.py
|
JaredsWebApplications/personal-website
| 0 |
2024360
|
#!/usr/bin/env python
import json
import pathlib
import typing
from models import DemoItem, PortfolioItem
class ItemParsingException(Exception):
"""
An exception if an item cannot be parsed
"""
def __init__(self, message: typing.Text):
if not isinstance(message, typing.Text):
raise ValueError("[ERROR] Failed to create an Exception")
self.message = message
def parse_contents(
json_path: pathlib.Path,
) -> typing.List[typing.Union[DemoItem, PortfolioItem]]:
"""
Given a JSON file, parse the contents to
obtain either a list of DemoItems or PortfolioItem
Returns:
typing.List[typing.Union[DemoItem, PortfolioItem]]: a container of parsed objects
"""
if not json_path.is_file():
raise FileNotFoundError(f"cannot load {json_path}, it does not exist")
container: typing.List[typing.Union[DemoItem, PortfolioItem]] = []
with open(json_path, "r", encoding="utf-8") as file_pointer:
contents = json.load(file_pointer)
for item in contents["items"]:
match item:
case {
"authors": authors,
"demo_link": demo_link,
"information": information,
"languages": languages,
"name": name,
"source_code_link": source_code_link,
}:
container.append(
DemoItem(
name,
demo_link,
source_code_link,
information,
authors,
languages,
)
)
case {
"authors": authors,
"description": description,
"documenation_link": doc_link,
"image_path": path,
"languages": languages,
"name": name,
"source_code_link": source_code_link,
}:
container.append(
PortfolioItem(
name,
description,
path,
doc_link,
source_code_link,
authors,
languages,
)
)
case _:
raise ItemParsingException(f"Failed to parse {item}")
return container
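# A hedged illustration of the JSON layout parse_contents() expects; the field
# names come from the match patterns above, while the values are made up:
#
# {
#   "items": [
#     {
#       "authors": ["Jane Doe"],
#       "demo_link": "https://example.com/demo",
#       "information": "A short blurb",
#       "languages": ["Python"],
#       "name": "demo-item",
#       "source_code_link": "https://example.com/source"
#     }
#   ]
# }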
| 2,461 |
assignment1/admin.py
|
chriskarlsson/WebIntelligence
| 0 |
2025188
|
from django.contrib import admin
from .models import User, Movie, Rating
# Register your models here.
admin.site.register(User)
admin.site.register(Movie)
admin.site.register(Rating)
| 186 |
mixtape/base.py
|
ashwoods/gst-py-awesome-mix-vol-1
| 3 |
2024392
|
import logging
from typing import Tuple, Type, TypeVar
import attr
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
from .exceptions import PlayerSetStateError
logger = logging.getLogger(__name__)
BasePlayerType = TypeVar("BasePlayerType", bound="BasePlayer")
@attr.s
class BasePlayer:
"""Player base player"""
# TODO: configuration for set_auto_flush_bus
# as the application depends on async bus messages
# we might want to handle flushing the bus ourselves,
# otherwise setting the pipeline to `Gst.State.NULL`
# flushes the bus including the state change messages
# self.pipeline.set_auto_flush_bus(False)
pipeline: Gst.Pipeline = attr.ib()
init: bool = attr.ib(init=False, default=False)
def __del__(self) -> None:
"""
Make sure that the gstreamer pipeline is always cleaned up
"""
if self.state is not Gst.State.NULL:
logger.warning("Player cleanup on destructor")
self.teardown()
@property
def bus(self) -> Gst.Bus:
"""Convenience property for the pipeline Gst.Bus"""
return self.pipeline.get_bus()
@property
def state(self) -> Gst.State:
"""Convenience property for the current pipeline Gst.State"""
return self.pipeline.get_state(0)[1]
def set_state(self, state: Gst.State) -> Tuple[Gst.StateChangeReturn, Gst.State, Gst.State]:
"""Set pipeline state"""
if not self.init:
logger.warning("Calling set_state without calling setup. Trying to do this now.")
self.setup()
ret = self.pipeline.set_state(state)
if ret == Gst.StateChangeReturn.FAILURE:
raise PlayerSetStateError
return ret
def setup(self) -> None:
"""
Player setup: meant to be used with hooks or subclassed
Call super() after custom code.
"""
self.init = True
def teardown(self) -> None:
"""Player teardown: by default sets the pipeline to Gst.State.NULL"""
if self.state is not Gst.State.NULL:
self.set_state(Gst.State.NULL)
def ready(self) -> Tuple[Gst.StateChangeReturn, Gst.State, Gst.State]:
"""Set pipeline to state to Gst.State.READY"""
return self.set_state(Gst.State.READY)
def play(self) -> Tuple[Gst.StateChangeReturn, Gst.State, Gst.State]:
"""Set pipeline to state to Gst.State.PLAY"""
return self.set_state(Gst.State.PLAYING)
def pause(self) -> Tuple[Gst.StateChangeReturn, Gst.State, Gst.State]:
"""Set pipeline to state to Gst.State.PAUSED"""
return self.set_state(Gst.State.PAUSED)
# fmt: off
def stop(self, send_eos: bool = False, teardown: bool = False) -> Tuple[Gst.StateChangeReturn, Gst.State, Gst.State]:
"""Set pipeline to state to Gst.State.NULL, with the option of sending eos and teardown"""
# fmt: on
if send_eos:
self.send_eos()
ret = self.set_state(Gst.State.NULL)
if teardown:
self.teardown()
return ret
def send_eos(self) -> bool:
"""Send a eos event to the pipeline"""
return self.pipeline.send_event(Gst.Event.new_eos())
@classmethod
def create(cls: Type[BasePlayerType], pipeline: Gst.Pipeline) -> BasePlayerType:
"""Player factory from a given pipeline that calls setup by default"""
player = cls(pipeline)
player.setup()
return player
@classmethod
def from_description(cls: Type[BasePlayerType], description: str) -> BasePlayerType:
"""Player factory from a pipeline description"""
pipeline = Gst.parse_launch(description)
assert isinstance(pipeline, Gst.Pipeline)
return cls.create(pipeline=pipeline)
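# Minimal usage sketch (assumes the application has already called Gst.init(None)
# and that the pipeline description is valid for the installed GStreamer plugins):
#
#   player = BasePlayer.from_description("videotestsrc ! autovideosink")
#   player.play()
#   ...
#   player.stop(send_eos=True, teardown=True)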
| 3,794 |
Python/File_handling.py
|
Paimon-food/hacktoberfest2021
| 1 |
2023165
|
# This is an example of file handling using Python
def game():
return int(input("enter score "))
score=game()
with open("Highscore.txt") as f:
hiScoreStr=f.read()
if hiScoreStr=='':
with open("Highscore.txt","w") as f:
f.write(str(score))
print("updated")
elif int(hiScoreStr)<score:
with open("Highscore.txt","w") as f:
f.write(str(score))
print("updated")
elif int(hiScoreStr)>=score:
print(hiScoreStr)
print("do you want to reset?")
choice = input("enter yes or no in lowercase ")
if choice == "yes":
score = int(input("enter new value "))
with open("Highscore.txt","w") as f:
f.write(str(score))
print("updated")
else:
print("Appreciate your patience")
print(f.read())
| 695 |
0101-0200/0128-Longest Consecutive Sequence/0128-Longest Consecutive Sequence.py
|
jiadaizhao/LeetCode
| 49 |
2024907
|
class Solution:
def longestConsecutive(self, nums: List[int]) -> int:
table = set(nums)
maxLen = 0
for num in nums:
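            # Only start counting from numbers that begin a run (num - 1 absent).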
if num - 1 not in table:
nextNum = num + 1
while nextNum in table:
nextNum += 1
maxLen = max(maxLen, nextNum - num)
return maxLen
| 362 |
Chapter02/unittests/testExercise2_04.py
|
lmcayo/The-Data-Analysis-Workshop
| 37 |
2023536
|
import unittest
from scipy.stats import ttest_ind, pearsonr, ks_2samp
from utils import prepare_data
class TestExercise2_04(unittest.TestCase):
def setUp(self):
self.data = prepare_data()
self.data["Disease"] = self.data["Reason for absence"].apply(self.in_icd)
def in_icd(self, val):
return "Yes" if val >= 1 and val <= 21 else "No"
def test_pearson(self):
pearson_test = pearsonr(self.data["Age"], self.data["Absenteeism time in hours"])
self.assertAlmostEqual(pearson_test[0], 0.065, places=2)
self.assertAlmostEqual(pearson_test[1], 0.073, places=2)
def test_means(self):
means = self.data[["Disease", "Age"]].groupby("Disease").mean()
self.assertAlmostEqual(means["Age"]["Yes"], 36.652, places=2)
self.assertAlmostEqual(means["Age"]["No"], 36.338, places=2)
def test_ks(self):
disease_mask = self.data["Disease"] == "Yes"
disease_ages = self.data["Age"][disease_mask]
no_disease_ages = self.data["Age"][~disease_mask]
test_res = ttest_ind(disease_ages, no_disease_ages)
ks_res = ks_2samp(disease_ages, no_disease_ages)
self.assertAlmostEqual(test_res[0], 0.629, places=2)
self.assertAlmostEqual(test_res[1], 0.529, places=2)
self.assertAlmostEqual(ks_res[0], 0.056, places=2)
self.assertAlmostEqual(ks_res[1], 0.618, places=2)
| 1,407 |
models.py
|
txWang/MOGONET
| 39 |
2024571
|
""" Componets of the model
"""
import torch.nn as nn
import torch
import torch.nn.functional as F
def xavier_init(m):
if type(m) == nn.Linear:
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
m.bias.data.fill_(0.0)
class GraphConvolution(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = nn.Parameter(torch.FloatTensor(out_features))
nn.init.xavier_normal_(self.weight.data)
if self.bias is not None:
self.bias.data.fill_(0.0)
def forward(self, x, adj):
support = torch.mm(x, self.weight)
output = torch.sparse.mm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
class GCN_E(nn.Module):
def __init__(self, in_dim, hgcn_dim, dropout):
super().__init__()
self.gc1 = GraphConvolution(in_dim, hgcn_dim[0])
self.gc2 = GraphConvolution(hgcn_dim[0], hgcn_dim[1])
self.gc3 = GraphConvolution(hgcn_dim[1], hgcn_dim[2])
self.dropout = dropout
def forward(self, x, adj):
x = self.gc1(x, adj)
x = F.leaky_relu(x, 0.25)
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc2(x, adj)
x = F.leaky_relu(x, 0.25)
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc3(x, adj)
x = F.leaky_relu(x, 0.25)
return x
class Classifier_1(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.clf = nn.Sequential(nn.Linear(in_dim, out_dim))
self.clf.apply(xavier_init)
def forward(self, x):
x = self.clf(x)
return x
class VCDN(nn.Module):
def __init__(self, num_view, num_cls, hvcdn_dim):
super().__init__()
self.num_cls = num_cls
self.model = nn.Sequential(
nn.Linear(pow(num_cls, num_view), hvcdn_dim),
nn.LeakyReLU(0.25),
nn.Linear(hvcdn_dim, num_cls)
)
self.model.apply(xavier_init)
def forward(self, in_list):
num_view = len(in_list)
for i in range(num_view):
in_list[i] = torch.sigmoid(in_list[i])
x = torch.reshape(torch.matmul(in_list[0].unsqueeze(-1), in_list[1].unsqueeze(1)),(-1,pow(self.num_cls,2),1))
for i in range(2,num_view):
x = torch.reshape(torch.matmul(x, in_list[i].unsqueeze(1)),(-1,pow(self.num_cls,i+1),1))
vcdn_feat = torch.reshape(x, (-1,pow(self.num_cls,num_view)))
output = self.model(vcdn_feat)
return output
def init_model_dict(num_view, num_class, dim_list, dim_he_list, dim_hc, gcn_dopout=0.5):
model_dict = {}
for i in range(num_view):
model_dict["E{:}".format(i+1)] = GCN_E(dim_list[i], dim_he_list, gcn_dopout)
model_dict["C{:}".format(i+1)] = Classifier_1(dim_he_list[-1], num_class)
if num_view >= 2:
model_dict["C"] = VCDN(num_view, num_class, dim_hc)
return model_dict
def init_optim(num_view, model_dict, lr_e=1e-4, lr_c=1e-4):
optim_dict = {}
for i in range(num_view):
optim_dict["C{:}".format(i+1)] = torch.optim.Adam(
list(model_dict["E{:}".format(i+1)].parameters())+list(model_dict["C{:}".format(i+1)].parameters()),
lr=lr_e)
if num_view >= 2:
optim_dict["C"] = torch.optim.Adam(model_dict["C"].parameters(), lr=lr_c)
return optim_dict
| 3,711 |
p6.py
|
daicang/Euler
| 0 |
2024699
|
# difference between the square of the sum of the first 100 natural numbers and the sum of their squares
import math
def solve(n):
return math.pow(sum(range(n+1)), 2) - sum([x ** 2 for x in range(n+1)])
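# Sanity check: for n = 10 the result is 3025 - 385 = 2640.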
print(solve(100))
| 217 |
app/controllers/static.py
|
FenrirUnbound/too-lazy-decrease-reading
| 0 |
2025312
|
from __future__ import absolute_import, unicode_literals
from flask import Blueprint, send_from_directory
static_route = Blueprint('static', __name__)
@static_route.route('/')
def index():
return send_from_directory('static', 'index.html')
@static_route.route('/static/js/<path:path>')
def javascript(path):
return send_from_directory('static/js', path)
@static_route.route('/static/css/<path:path>')
def css(path):
return send_from_directory('static/css', path)
@static_route.route('/static/media/<path:path>')
def media(path):
return send_from_directory('static/media', path)
| 601 |
bin/extract_significative_references.py
|
GuilleGorines/nfcore-pikavirus-legacy
| 0 |
2025358
|
#!/usr/bin/env python
#
# USAGE:
# Checks MASH result, extracts the name of the significative references found in the given directory,
# and creates a symlink for nextflow to pick for future references.
#
# INPUTS:
# - 1: MASH file result (tsv format, obtained from mash dist)
# - 2: path to directory containing the reference data
#
#
# DISCLAIMER: this script has exclusively been developed for the correct functioning of nf-core pikavirus,
# and therefore is not guaranteed to function properly in other settings. Despite this, feel free to use it
# at will.
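#
# Example invocation (file names and paths are illustrative only):
#   python extract_significative_references.py mash_distances.tsv /path/to/reference_dir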
import sys
import os
mashresult = sys.argv[1]
refdir = sys.argv[2]
realpath = os.path.realpath(refdir)
with open(mashresult) as infile:
infile = infile.readlines()
# remove header
infile = [line.split() for line in infile if not line.startswith("#")]
# get name, remove extension of file if p-val < 0.05
infile = [line[0].split()[0] for line in infile if float(line[3]) < 0.05]
# files without extension
reference_dict = {item.split()[0]:[f"{realpath}/{item}",f"Final_fnas/{item}"] for item in os.listdir(refdir)}
os.mkdir(f"Final_fnas", 0o777)
for filename in infile:
if filename in reference_dict.keys():
os.symlink(reference_dict[filename][0],reference_dict[filename][1])
| 1,286 |
agreements/urls.py
|
cu-library/mellyn
| 0 |
2025034
|
"""
This module defines the urls provided by this application.
https://docs.djangoproject.com/en/3.0/ref/urls/
"""
from django.urls import path
from . import views
urlpatterns = [ # pylint: disable=invalid-name
path('resources/', views.ResourceList.as_view(), name='resources_list'),
path('resources/create/', views.ResourceCreate.as_view(), name='resources_create'),
path('resources/<slug:slug>/', views.ResourceRead.as_view(), name='resources_read'),
path('resources/<slug:slug>/update/', views.ResourceUpdate.as_view(), name='resources_update'),
path('resources/<slug:slug>/delete/', views.ResourceDelete.as_view(), name='resources_delete'),
path('resources/<slug:slug>/permissions/', views.ResourcePermissions.as_view(), name='resources_permissions'),
path('resources/<slug:slug>/permissions/groups/',
views.ResourcePermissionsGroups.as_view(),
name='resources_permissions_groups'),
path('resources/<slug:slug>/permissions/groups/<slug:groupdescriptionslug>/',
views.ResourcePermissionsGroupUpdate.as_view(),
name='resources_permissions_groups_update'),
path('resources/<slug:slug>/codes/', views.ResourceLicenseCode.as_view(), name='resources_codes_list'),
path('resources/<slug:slug>/codes/add/', views.ResourceLicenseCodeAdd.as_view(), name='resources_codes_create'),
path('resources/<slug:slug>/codes/update/',
views.ResourceLicenseCodeUpdate.as_view(),
name='resources_codes_update'),
path('resources/<slug:slug>/access/', views.ResourceAccess.as_view(), {'accesspath': ''}, name='resources_access'),
path('resources/<slug:slug>/access/<path:accesspath>', views.ResourceAccess.as_view(), name='resources_access'),
path('resources/<slug:slug>/filestats/', views.ResourceAccessFileStats.as_view(), name='resources_file_stats'),
path('faculties/', views.FacultyList.as_view(), name='faculties_list'),
path('faculties/create/', views.FacultyCreate.as_view(), name='faculties_create'),
path('faculties/<slug:slug>/', views.FacultyRead.as_view(), name='faculties_read'),
path('faculties/<slug:slug>/update/', views.FacultyUpdate.as_view(), name='faculties_update'),
path('faculties/<slug:slug>/delete/', views.FacultyDelete.as_view(), name='faculties_delete'),
path('departments/', views.DepartmentList.as_view(), name='departments_list'),
path('departments/create/', views.DepartmentCreate.as_view(), name='departments_create'),
path('departments/create/partof/<slug:facultyslug>/',
views.DepartmentCreateUnderFaculty.as_view(),
name='departments_create_under_faculty'),
path('departments/<slug:slug>/', views.DepartmentRead.as_view(), name='departments_read'),
path('departments/<slug:slug>/update/', views.DepartmentUpdate.as_view(), name='departments_update'),
path('departments/<slug:slug>/delete/', views.DepartmentDelete.as_view(), name='departments_delete'),
path('agreements/', views.AgreementList.as_view(), name='agreements_list'),
path('agreements/create/', views.AgreementCreate.as_view(), name='agreements_create'),
path('agreements/<slug:slug>/', views.AgreementRead.as_view(), name='agreements_read'),
path('agreements/<slug:slug>/update/', views.AgreementUpdate.as_view(), name='agreements_update'),
path('agreements/<slug:slug>/delete/', views.AgreementDelete.as_view(), name='agreements_delete'),
path('agreements/<slug:slug>/permissions/', views.AgreementPermissions.as_view(), name='agreements_permissions'),
path('agreements/<slug:slug>/permissions/groups/', views.AgreementPermissionsGroups.as_view(),
name='agreements_permissions_groups'),
path('agreements/<slug:slug>/permissions/groups/<slug:groupdescriptionslug>/',
views.AgreementPermissionsGroupUpdate.as_view(),
name='agreements_permissions_groups_update'),
path('agreements/<slug:slug>/signatures/',
views.AgreementSignatureList.as_view(),
name='agreements_signatures_list'),
path('agreements/<slug:slug>/signatures/csv/',
views.AgreementSignatureCSV.as_view(),
name='agreements_signatures_csv'),
]
| 4,163 |
Scripts/auth-server-script.py
|
srimannaarayana/boxtestpy
| 0 |
2025299
|
#!C:\Python27\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'auth==0.5.3','console_scripts','auth-server'
__requires__ = 'auth==0.5.3'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('auth==0.5.3', 'console_scripts', 'auth-server')()
)
| 307 |
thinc/tests/layers/test_softmax.py
|
richardpaulhudson/thinc
| 0 |
2022999
|
from typing import Tuple, cast
import numpy
from numpy.testing import assert_allclose
import pytest
from thinc.api import Model, NumpyOps, Softmax_v2
from thinc.types import Floats2d, Ints1d
from thinc.util import has_torch, torch2xp, xp2torch
OPS = NumpyOps()
inputs = OPS.xp.asarray([[4, 2, 3, 4], [1, 5, 3, 1], [9, 8, 5, 7]], dtype="f")
outputs = OPS.xp.asarray(
[
[0.39948627, 0.05406459, 0.14696279, 0.39948627],
[0.01562812, 0.8532666, 0.11547707, 0.01562812],
[0.657233, 0.24178252, 0.01203764, 0.08894681],
],
dtype="f",
)
def test_unnormalized_softmax_backprop():
model = Softmax_v2(normalize_outputs=False)
model.initialize(inputs, outputs)
_, backprop = model(inputs, is_train=False)
with pytest.raises(ValueError, match="backprop is not supported"):
backprop(OPS.xp.zeros_like(outputs))
# Backprop should not fail when training.
_, backprop = model(inputs, is_train=True)
dX = backprop(OPS.xp.zeros_like(outputs))
assert OPS.xp.all(dX == 0.0)
def torch_softmax_with_temperature(
model: Model, X: Floats2d, targets: Ints1d
) -> Tuple[Floats2d, Floats2d]:
import torch
Wt = xp2torch(model.get_param("W"))
bt = xp2torch(model.get_param("b"))
temperature = model.attrs["softmax_temperature"]
Xt = xp2torch(X, requires_grad=True)
Yt_gold = xp2torch(targets).long()
XWbt = (Xt @ Wt) + bt
XWbt_temp = XWbt / temperature
loss = torch.nn.CrossEntropyLoss()
output = loss(XWbt_temp, Yt_gold)
output.backward()
return cast(
Floats2d, torch2xp(torch.nn.functional.softmax(XWbt_temp, dim=-1))
), cast(Floats2d, torch2xp(Xt.grad))
@pytest.mark.skipif(not has_torch, reason="needs PyTorch")
@pytest.mark.parametrize("temperature", [0.5, 1.0, 2.0])
def test_softmax_temperature(temperature):
model = Softmax_v2(
temperature=temperature,
init_W=lambda ops, shape: ops.xp.eye(shape[1], dtype="f"),
init_b=lambda ops, shape: ops.xp.zeros(shape, dtype="f"),
)
X = OPS.xp.arange(-1, 1, 0.2, dtype="f").reshape(1, 10)
targets = OPS.asarray1i([4])
Y_gold = OPS.xp.eye(10, dtype="f")[targets]
model.initialize(X, Y_gold)
Yt, dXt = torch_softmax_with_temperature(model, X, targets)
Y, backprop = model(X, is_train=True)
dX = backprop(Y - Y_gold)
assert_allclose(Y, Yt, atol=1e-4)
assert_allclose(dX, dXt, atol=1e-4)
def test_reject_incorrect_temperature():
with pytest.raises(ValueError, match=r"softmax temperature.*zero"):
Softmax_v2(normalize_outputs=False, temperature=0.0)
model = Softmax_v2(normalize_outputs=False)
model.attrs["softmax_temperature"] = 0.0
model.initialize(inputs, outputs)
with pytest.raises(ValueError, match=r"softmax temperature.*zero"):
model(inputs, is_train=False)
| 2,849 |
nilearn/tests/test_init.py
|
chouhanaryan/nilearn
| 2 |
2024939
|
import sys
import warnings
import pytest
# import time warnings don't interfere with warning's tests
with warnings.catch_warnings(record=True):
from nilearn import _py35_deprecation_warning
from nilearn import _python_deprecation_warnings
def test_py35_deprecation_warning():
with pytest.warns(FutureWarning,
match='Python 3.5 support is deprecated'
):
_py35_deprecation_warning()
def test_python_deprecation_warnings():
if sys.version_info.major == 3 and sys.version_info.minor == 5:
with pytest.warns(FutureWarning,
match='Python 3.5 support is deprecated'
):
_python_deprecation_warnings()
def test_warnings_filter_scope():
"""
Tests that warnings generated at Nilearn import in Python 3.5 envs
do not change the warnings filter for subsequent warnings.
"""
with warnings.catch_warnings(record=True) as raised_warnings:
warnings.warn('Dummy warning 1') # Will be raised.
warnings.filterwarnings("ignore", message="Dummy warning")
warnings.warn('Dummy warning 2') # Will not be raised.
import nilearn # noqa: F401 # Irrespective of warning raised in py3.5
warnings.warn('Dummy warning 3') # ...this should not be raised.
assert str(raised_warnings[0].message) == 'Dummy warning 1'
assert str(raised_warnings[-1].message) != 'Dummy warning 3'
| 1,464 |
flatland/node_subsystem/diagram_layout_specification.py
|
lelandstarr/flatland-model-diagram-editor
| 10 |
2025064
|
"""
diagram_layout_specification.py
"""
from flatland.database.flatlanddb import FlatlandDB as fdb
from flatland.datatypes.geometry_types import Padding, Position, Alignment, HorizAlign, VertAlign
from sqlalchemy import select
# To convert db string values to our alignment enums
# We can't just use enum values themselves since int values are used by linear geometry, so we need this additional map
halign_map = {"left": HorizAlign.LEFT, "center": HorizAlign.CENTER, "right": HorizAlign.RIGHT}
valign_map = {"top": VertAlign.TOP, "center": VertAlign.CENTER, "bottom": VertAlign.BOTTOM}
class DiagramLayoutSpecification:
"""
Diagram Layout Specification
Defines a set of values that determine how a Diagram and Grid is positioned on a Canvas and
how Nodes are positioned relative to the Diagram and Grid.
Attributes
- Default margin -- The Canvas area surrounding the Diagram, can be zero
- Default diagram origin -- The default lower left corner of the Diagram in Canvas coordinates
- Default cell padding -- The minimum cell area surrounding a Node, can be zero, but shouldn't be since it
prevents nodes in adjacent cells from touching
- Default cell alignment -- Default alignment of Node within its Cell, typically center, center
"""
Default_margin = None
Default_diagram_origin = None
Default_cell_padding = None
Default_cell_alignment = None
def __init__(self):
"""
Constructor - Load values from database
"""
spec = fdb.MetaData.tables['Diagram Layout Specification']
q = select([spec])
i = fdb.Connection.execute(q).fetchone()
assert i, "No Diagram Layout Specification in database"
DiagramLayoutSpecification.Default_margin = Padding(
top=i['Default margin top'], bottom=i['Default margin bottom'],
left=i['Default margin left'], right=i['Default margin right']
)
DiagramLayoutSpecification.Default_diagram_origin = Position(
x=i['Default diagram origin x'], y=i['Default diagram origin y']
)
DiagramLayoutSpecification.Default_cell_padding = Padding(
top=i['Default cell padding top'], bottom=i['Default cell padding bottom'],
left=i['Default cell padding left'], right=i['Default cell padding right']
)
DiagramLayoutSpecification.Default_cell_alignment = Alignment(
            vertical=valign_map[i['Default cell alignment vertical']],
            horizontal=halign_map[i['Default cell alignment horizontal']]
)
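# Hedged usage sketch (assumes FlatlandDB has been initialized and the
# 'Diagram Layout Specification' table is populated):
#
#   DiagramLayoutSpecification()                      # loads the class-level defaults
#   DiagramLayoutSpecification.Default_cell_padding   # -> Padding(top=.., bottom=.., left=.., right=..)
#   DiagramLayoutSpecification.Default_cell_alignment # -> Alignment(vertical=..., horizontal=...)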
| 2,605 |
trash/halu_oop.py
|
muhiqsimui/PyTraining
| 6 |
2025047
|
class User:
def __init__(self,name,tipe):
self.name=name
self.tipe=tipe
def upgradeTipe(self,tipe):
self.tipe=tipe
user1=User("Iqbal","Prince")
user2=User("<NAME>","Idol")
user3=User("Tzuyu","Singer")
user4=[User("Joy","RV member"),User("Irene","RV member"),User("Wendy","RV Member")]
def bias():
for i in range(len(user4)):
print(user4[i].name)
print(user1.name+" Have "+str(len(user4))+" bias then ")
bias()
user4[2].upgradeTipe("Girlfriend")
tipeA=user4[2].tipe
print(user1.name+" <3 "+user4[2].name+" Cause she is my "+tipeA)
if(tipeA=="Girlfriend"):
condition=True
else:
condition=False
x="Lovely" if condition else "good"
print(x)
| 675 |
OfficialTutorial/03_1_3_3_list.py
|
koichi210/Python
| 0 |
2025111
|
# -*- coding: utf-8 -*-
# リスト型のスライスの代入
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
# 置き換え
letters[2:5] = ['C', 'D', 'E']
print(letters)
| 140 |
ada_loss/chainer_impl/sanity_checker.py
|
kumasento/gradient-scaling
| 7 |
2024618
|
""" Check whether the application of adaptive loss scale is correct or not. """
import chainer
import pandas as pd
class SanityChecker(object):
""" Sanity checker """
def __init__(self, check_per_n_iter=1000):
self.check_per_n_iter = check_per_n_iter
self.history = []
self.counter = 0
self.curr_iter = 0
def check(self, gy, W, g1, g2, g3, loss_scale, n_uf, curr_iter):
""" Check whether loss scale can help reliefing the underflow. """
xp = chainer.backend.get_array_module(g1)
if self.curr_iter != curr_iter:
self.curr_iter = curr_iter
self.counter = 0
if (
xp.isnan(g1.array).any()
or xp.isnan(g2.array).any()
or xp.isnan(gy.array).any()
or xp.isnan(W.array).any()
):
return
nnz1 = xp.count_nonzero(g1.array)
nnz2 = xp.count_nonzero(g2.array) # fp16
nnz3 = xp.count_nonzero(g3.array) # fp32
nuf1 = nnz1 - nnz3
nuf2 = nnz2 - nnz3
self.history.append(
[
self.curr_iter,
self.counter,
loss_scale.item(),
nnz1.item(),
nnz2.item(),
nnz3.item(),
(nuf1 / g3.size * 100).item(),
(nuf2 / g3.size * 100).item(),
]
)
# print(self.history[-1])
self.counter += 1
# variance calculation
# gy = gy.array.astype('float32')
# W = W.array.astype('float32')
# g2 = g2.array.astype('float32')
# mu1, sigma1 = gy.mean(), gy.std()
# mu2, sigma2 = W.mean(), W.std()
# mu3, sigma3 = g2.mean(), g2.std()
# mu3_ = mu1 * mu2
# sigma3_ = xp.sqrt(((sigma1**2 + mu1**2) * (sigma2**2 + mu2**2) - (mu3_)**2))
# print('mu1: {} mu2: {} sigma1: {} sigma2: {}'.format(mu1, mu2, sigma1, sigma2))
# print('mu3: {} {} sigma3: {} {}'.format(mu3, mu3_, sigma3, sigma3_))
# print('scale: {} NNZ scaled: {} ({:.4f}%) base: {} ({:.4f}%) float32: {} ({:.4f}%) n_uf: {:.4f}%'.format(
# loss_scale.item(),
# nnz1.item(),
# (100 - nnz1 / g1.size * 100).item(),
# nnz2.item(),
# (100 - nnz2 / g2.size * 100).item(),
# nnz3.item(),
# (100 - nnz3 / g3.size * 100).item(),
# n_uf * 100))
def export(self):
return pd.DataFrame(
self.history,
columns=[
"iter",
"id",
"loss_scale",
"nnz_ls",
"nnz_fp16",
"nnz_fp32",
"nuf_ls",
"nuf_fp16",
],
)
| 2,772 |
roles/upload-logs-base1/library/test_zuul_google_storage_upload.py
|
OpenTelekomCloud/otc-zuul-jobs
| 1 |
2022905
|
# Copyright (C) 2018-2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import testtools
from .zuul_google_storage_upload import Credentials
FIXTURE_DIR = os.path.join(os.path.dirname(__file__),
'test-fixtures')
class TestCredential(testtools.TestCase):
def test_credential(self):
path = os.path.join(FIXTURE_DIR, 'gcs', 'auth.json')
headers = {}
c = Credentials(path)
c.before_request(None, None, None, headers)
self.assertEqual("Bearer something", headers['authorization'])
| 1,201 |
models/action.py
|
z1pti3/jimiPlugin-humio
| 3 |
2024380
|
import urllib
from datetime import datetime, timedelta
import socket,sys,json,requests
from pathlib import Path
from functools import reduce
import operator
from core import settings, logging, helpers, auth, cache, db
from core.models import action, conduct
from plugins.humio.includes import humio
humioSettings = settings.config["humio"]
class _humioSearch(action._action):
searchQuery = str()
searchRepository = str()
searchStart = str()
searchEnd = str()
searchLive = bool()
humioOverrideSettings = bool()
humioJob = str()
humioHost = str()
humioPort = int()
humioAPIToken = str()
humioTimeout = int()
def run(self,data,persistentData,actionResult):
searchQuery = helpers.evalString(self.searchQuery,{"data" : data})
searchRepository = helpers.evalString(self.searchRepository,{"data" : data})
searchStart = helpers.evalString(self.searchStart,{"data" : data})
searchEnd = helpers.evalString(self.searchEnd,{"data" : data})
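        # Use the globally configured Humio connection unless this action defines its own override settings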
if not self.humioOverrideSettings:
if "ca" in humioSettings:
h = humio.humioClass(humioSettings["host"],humioSettings["port"],humioSettings["apiToken"],humioSettings["secure"],humioSettings["ca"],humioSettings["requestTimeout"])
else:
h = humio.humioClass(humioSettings["host"],humioSettings["port"],humioSettings["apiToken"],humioSettings["secure"],requestTimeout=humioSettings["requestTimeout"])
else:
humioTimeout = 30
if self.humioTimeout > 0:
humioTimeout = self.humioTimeout
if not hasattr(self,"plain_humioAPIToken"):
self.plain_humioAPIToken = auth.getPasswordFromENC(self.humioAPIToken)
if "ca" in humioSettings:
h = humio.humioClass(self.humioHost,self.humioPort,self.plain_humioAPIToken,True,humioSettings["ca"],humioTimeout)
else:
h = humio.humioClass(self.humioHost,self.humioPort,self.plain_humioAPIToken,True,requestTimeout=humioTimeout)
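        # Non-live searches create a one-off job, wait for it to complete and return its events immediately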
if not self.searchLive:
kwargs = { }
# Skipping any undefined search values
if searchQuery:
kwargs["searchQuery"] = searchQuery
if searchStart:
kwargs["searchStart"] = searchStart
kwargs["searchLive"] = self.searchLive
if searchEnd:
kwargs["searchEnd"] = searchEnd
createJobResult = h.createJob(searchRepository,**kwargs)
if createJobResult[0] == 200:
humioJob = createJobResult[1]
wait = True
pollResult = h.pollJob(searchRepository,humioJob,wait)
if pollResult[0] == 200 and "events" in pollResult[1]:
actionResult["events"] = pollResult[1]["events"]
actionResult["rc"] = 0
actionResult["result"] = True
return actionResult
else:
if not self.humioJob:
logging.debug("Humio No Existing Job Found, class={0}".format(self.parse(True)),10)
kwargs = { }
# Skipping any undefined search values
if self.searchQuery:
kwargs["searchQuery"] = self.searchQuery
if self.searchStart:
kwargs["searchStart"] = self.searchStart
if self.searchLive:
kwargs["searchLive"] = self.searchLive
if self.searchEnd:
kwargs["searchEnd"] = self.searchEnd
createJobResult = h.createJob(self.searchRepository,**kwargs)
if createJobResult[0] == 200:
self.humioJob = createJobResult[1]
self.update(["humioJob"])
logging.debug("Humio Job Created, jobID={0}, class={1}".format(self.humioJob,self.parse(True)),8)
else:
raise humio.jobCreateException(self._id,self.name,self.searchQuery)
if self.humioJob:
logging.debug("Humio polling..., class={0}".format(self.parse(True)),15)
wait = False
if not self.searchLive:
wait = True
pollResult = h.pollJob(self.searchRepository,self.humioJob,wait)
if pollResult[0] == 200 and "events" in pollResult[1]:
actionResult["events"] = pollResult[1]["events"]
actionResult["humio"] = {"searchQuery" : self.searchQuery, "searchRepository" : str(self.searchRepository)}
actionResult["rc"] = 0
actionResult["result"] = True
else:
self.humioJob = ""
self.update(["humioJob"])
actionResult["msg"] = "Error: Unable to poll humio job. job='{0}'".format(self.humioJob)
actionResult["rc"] = -1
actionResult["result"] = False
def setAttribute(self,attr,value,sessionData=None):
if attr == "searchQuery":
if db.fieldACLAccess(sessionData,self.acl,attr,accessType="write"):
self.humioJob = ""
self.searchQuery = value
return True
return False
if attr == "humioAPIToken" and not value.startswith("ENC "):
if db.fieldACLAccess(sessionData,self.acl,attr,accessType="write"):
self.humioAPIToken = "ENC {0}".format(auth.getENCFromPassword(value))
return True
return False
return super(_humioSearch, self).setAttribute(attr,value,sessionData=sessionData)
class _humioIngest(action._action):
humio_ingest_token = str()
humio_repo = str()
field = list()
custom_data = dict()
custom_time = bool()
time_field = str()
flatten_field = str()
def run(self,data,persistentData,actionResult):
if not hasattr(self,"plain_humioAPIToken"):
self.plain_humio_ingest_token = auth.getPasswordFromENC(self.humio_ingest_token)
# Get data dict
if len(self.custom_data) > 0:
dataToSend = helpers.evalDict(self.custom_data,{"data" : data})
else:
if len(self.field) > 0:
dataToSend = helpers.getDictValue(self.field[0],{"data" : data})
else:
dataToSend = data
# Apply flatten
if self.flatten_field:
for key,value in data[self.flatten_field].items():
dataToSend[key] = value
            del dataToSend[self.flatten_field]
# Send events
if type(dataToSend) is list:
events = []
for entry in dataToSend:
events.append(self.buildEvents(entry))
if not self.shippingHandlerBulk(events):
actionResult["result"] = False
actionResult["rc"] = 1
return actionResult
elif type(dataToSend) is dict:
if not self.shippingHandler(dataToSend):
actionResult["result"] = False
actionResult["rc"] = 2
return actionResult
actionResult["result"] = True
actionResult["rc"] = 0
return actionResult
def shippingHandler(self,entry):
if self.custom_time:
timing = entry[self.time_field]
else:
timing = datetime.now().timestamp()
if self.humio_repo != "":
return self.shipHumio(entry,timing)
def buildEvents(self,event):
if self.custom_time:
timing = event[self.time_field]
else:
timing = datetime.now().timestamp()
return { "timestamp": timing * 1000, "attributes" : event }
def shipHumio(self,event,timing):
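        # POST a single event to the Humio ingest API; the timestamp is converted from seconds to milliseconds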
api_url = "https://{}:443/api/v1/dataspaces/{}/ingest".format(humioSettings["host"],self.humio_repo)
headers = {"Authorization":"Bearer {}".format(self.plain_humio_ingest_token),"Content-Type":"application/json","Accept":"application/json"}
data = [{
"tags" : {},
"events": [ { "timestamp": timing * 1000, "attributes" : event } ]
}]
if "ca" in humioSettings:
r=requests.post(api_url,headers=headers,data=json.dumps(data),verify=Path(humioSettings["ca"]))
else:
r=requests.post(api_url,headers=headers,data=json.dumps(data))
if r.status_code != 200:
print(r.status_code)
return False
return True
def shippingHandlerBulk(self,events):
api_url = "https://{}:443/api/v1/dataspaces/{}/ingest".format(humioSettings["host"],self.humio_repo)
headers = {"Authorization":"Bearer {}".format(self.plain_humio_ingest_token),"Content-Type":"application/json","Accept":"application/json"}
data = [{
"tags" : {},
"events": events
}]
if "ca" in humioSettings:
r=requests.post(api_url,headers=headers,data=json.dumps(data),verify=Path(humioSettings["ca"]))
else:
r=requests.post(api_url,headers=headers,data=json.dumps(data))
if r.status_code != 200:
print(r.status_code)
return False
return True
def getFromDict(self, dataDict, mapList):
return reduce(operator.getitem, mapList, dataDict)
def setAttribute(self,attr,value,sessionData=None):
if attr == "humio_ingest_token" and not value.startswith("ENC "):
if db.fieldACLAccess(sessionData,self.acl,attr,accessType="write"):
self.humio_ingest_token = "ENC {0}".format(auth.getENCFromPassword(value))
return True
return False
return super(_humioIngest, self).setAttribute(attr,value,sessionData=sessionData)
| 9,877 |
photos/models.py
|
victorlomi/Gallery
| 0 |
2023860
|
from django.db import models
class Location(models.Model):
name = models.CharField(max_length=60)
def __str__(self):
return self.name
def save_location(self, *args, **kwargs):
self.save(*args, **kwargs)
def update_location(self, name):
self.name = name
def delete_location(self):
self.delete()
class Category(models.Model):
name = models.CharField(max_length=60)
def __str__(self):
return self.name
def save_category(self, *args, **kwargs):
self.save(*args, **kwargs)
def update_category(self, name):
self.name = name
def delete_category(self):
self.delete()
class Image(models.Model):
image = models.ImageField(upload_to="photos/")
name = models.CharField(max_length=60)
description = models.TextField()
location = models.ForeignKey(Location, on_delete=models.CASCADE)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
def __str__(self):
return self.name
@classmethod
def search_image(cls, category):
category_images = cls.objects.filter(category__name__icontains=category)
return category_images
@classmethod
def filter_by_location(cls, location):
location_images = cls.objects.filter(location__name__icontains=location)
return location_images
def save_image(self):
self.save()
def delete_image(self):
self.delete()
def update_image(self, img):
self.image = img.image
self.name = img.name
self.description = img.description
self.location = img.location
self.category = img.category
@classmethod
def get_image_by_id(cls, id):
return cls.objects.get(id=id)
| 1,567 |
qwerty/app/models.py
|
josip-milic/asc_qwerty_test
| 0 |
2025133
|
from django.db import models
# Create your models here.
from django.db import models
class Event(models.Model):
title = models.CharField(max_length=255)
date = models.DateTimeField()
description = models.TextField()
location_lat = models.DecimalField(max_digits=9, decimal_places=6)
location_lng = models.DecimalField(max_digits=9, decimal_places=6)
marker_type = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return u'%s' % self.title
def __str__(self):
return ("{} | {} | {}...".format(self.title, self.date, self.description[:20])).encode('ascii', errors='replace')
| 693 |
python/spdm/util/RefResolver.py
|
simpla-fusion/spdb
| 0 |
2025264
|
import collections
import contextlib
import inspect
import json
import os
import pathlib
import pkgutil
import pprint
import re
import sys
import uuid
from copy import copy
import jsonschema
from . import io
from .Alias import Alias
from .dict_util import format_string_recursive
from .logger import logger
from .Multimap import Multimap
from .sp_export import sp_pkg_data_path
from .urilib import getvalue_r, uridefrag, urijoin, urisplit, uriunsplit
def _extend_with_default(validator_class):
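    # Extend the validator so that schema "default" values are filled into the instance during validation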
validate_properties = validator_class.VALIDATORS["properties"]
def _properties(validator, properties, instance, schema):
for p, subschema in properties.items():
if isinstance(subschema, collections.abc.Iterable) \
and "default" in subschema \
and hasattr(instance, "setdefault"):
instance.setdefault(p, subschema["default"])
for error in validate_properties(validator, properties, instance, schema,):
yield error
return jsonschema.validators.extend(
validator_class, {"properties": _properties},
)
_DefaultValidatingValidator = _extend_with_default(jsonschema.Draft7Validator)
class RefResolver(object):
""" Resolve and fetch '$ref' in the document (json,yaml,https ...)
schemas:
pkgdata : pkgutil.get_data(o.authority or __package__,o.path)
https,http : requests.get(uri).json()
        json : json.load(open(uri))
yaml : json.load(open(uri))
Example(code):
>>> repo = RefResolver()
>>> repo.alias.append("https://fusionyun.org/schemas/draft-00/",
"pkgdata:///../schemas")
>>> repo.alias.append(f"/modules/", "file://workspaces/FyPackages/modules/")
@note
* compatible with jsonschema.RefResolver
TODO (salmon 20190915): support XML,XSD,XSLT
"""
def __init__(self, *,
base_uri="",
encode='UTF-8',
prefetch=None,
enable_remote=False,
enable_validate=True,
enable_envs_template=True,
default_file_ext='yaml',
default_schema="http://json-schema.org/draft-07/schema#",
alias=None,
**kwargs
):
super().__init__()
self._alias = Alias(glob_pattern_char='*')
self._encode = encode
        self._scopes_stack = [base_uri] if len(base_uri) > 0 and base_uri[-1] == '/' else [base_uri + "/"]
self._default_file_ext = default_file_ext
self._default_schema = urijoin(base_uri, default_schema)
self._enable_remote = enable_remote
self._enable_validate = enable_validate
self._enable_envs_template = enable_envs_template
if prefetch is not None:
# if not isinstance(prefetch, pathlib.Path):
# prefetch = pathlib.Path(prefetch)
# if prefetch.is_dir():
prefetch = f"{prefetch}/" if prefetch[-1] != '/' else prefetch
self._alias.prepend("https://", prefetch)
self._alias.prepend("http://", prefetch)
if alias is None:
pass
elif isinstance(alias, collections.abc.Mapping):
for k, v in alias.items():
self._alias.append(self.normalize_uri(k), v)
elif isinstance(alias, collections.abc.Sequence):
for k, v in alias:
self._alias.append(self.normalize_uri(k), v)
else:
raise TypeError(f"Require list or map, not [{type(alias)}]")
self._cache = {}
self._validator = {"http://json-schema.org/draft-07/schema#":
_DefaultValidatingValidator}
@property
def alias(self):
return self._alias
def normalize_uri(self, uri):
if uri is None:
uri = None
elif type(uri) is str:
pass
elif isinstance(uri, collections.abc.Sequence):
uri = "/".join(uri)
else:
raise TypeError(f"Illegal type {type(uri).__name__}")
return urijoin(self.resolution_scope, uri)
def remove_prefix(self, p: str):
return self.relative_path(p)
def relative_path(self, p: str):
# FIXME: not complete , only support sub-path
prefix = self.resolution_scope
if p is None:
return None
elif p.startswith(prefix):
return p[len(prefix):].strip("/.")
else:
raise NotImplementedError()
_normalize_ids = ["$schema", "$id", "$base"]
def validate(self, doc):
if doc is None:
raise ValueError(f"Try to validate an empty document!")
for nid in RefResolver._normalize_ids:
if nid not in doc:
continue
doc[nid] = self.normalize_uri(doc[nid])
schema = doc.get("$schema", None)
if isinstance(schema, str):
schema_id = schema
schema = {"$id": schema_id}
elif isinstance(schema, collections.abc.Mapping):
schema_id = schema.get("$id", None)
else:
schema_id = None
schema = {"$id": schema_id}
validator = self._validator.get(schema_id, None)
if validator is None:
try:
schema = self.fetch(schema, no_validate=True)
except Exception:
logger.error(f"Can not find schema : {schema}")
else:
validator = _DefaultValidatingValidator(schema, resolver=self)
self._validator[schema["$id"]] = validator
if validator is not None:
validator.validate(doc)
return doc
def _do_fetch(self, uri):
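        # Try each alias expansion of the URI in order and cache the first document that loads successfully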
uri = self.normalize_uri(uri)
new_doc = self._cache.get(uri, None)
if new_doc is not None:
return new_doc
for a_uri in self._alias.match(uri):
new_doc = io.read(a_uri)
if not new_doc:
pass
else:
new_doc["$id"] = uri
new_doc["$source_file"] = a_uri
self._cache[uri] = new_doc
break
return new_doc
def fetch(self, doc, no_validate=False) -> dict:
""" fetch document from source, then validate and fill default
value basing on $schema in document
"""
if isinstance(doc, (str, collections.abc.Sequence)):
new_doc = self._do_fetch(doc)
elif isinstance(doc, collections.abc.Mapping):
new_doc = copy(doc)
else:
raise TypeError(type(doc))
if isinstance(new_doc, collections.abc.Mapping):
# new_doc["$schema"] = self.normalize_uri(new_doc.get("$schema", ""))
if not no_validate and self._enable_validate:
self.validate(new_doc)
return new_doc
def clear_cache(self):
self._cache.clear() # pylint: disable= no-member
def glob(self, mod=None):
mod_prefix = self.normalize_uri(f"{mod or ''}%_PATH_%")
for n_uri in self._alias.match(mod_prefix):
for p, f in io.glob(n_uri):
yield p, f
##########################################################################
# Begin: compatible with jsonschema.RefResolver
@classmethod
def from_schema(cls_, schema, base_uri=None):
# if schema is None:
# return cls_()
# s_id = schema.get("$id", "")
# base_uri = base_uri or cls_._base_uri
# res = cls_(base_uri=base_uri)
# res.insert(s_id, schema)
return cls_()
def push_scope(self, scope):
self._scopes_stack.append(urijoin(self.resolution_scope, scope))
def pop_scope(self):
try:
self._scopes_stack.pop()
except IndexError:
raise IndexError("Failed to pop from an empty stack")
@property
def resolution_scope(self):
return self._scopes_stack[-1]
@contextlib.contextmanager
def in_scope(self, scope):
self.push_scope(scope)
try:
yield
finally:
self.pop_scope()
@contextlib.contextmanager
def resolving(self, ref):
uri, resolved = self.resolve(ref)
self.push_scope(uri)
try:
yield resolved
finally:
self.pop_scope()
def resolve(self, ref):
""" Parse reference or description, return URI and full schema"""
uri = self.normalize_uri(ref)
return uri, self.fetch(uri, no_validate=True)
def resolve_from_uri(self, uri):
return self.fetch(uri, no_validate=True)
def resolve_remote(self, uri):
return self.fetch(uri, no_validate=True)
def resolve_local(self, local_path):
return self.fetch(urijoin("local://", local_path), no_validate=True)
def resolve_fragment(self, obj, fragment):
return getvalue_r(obj, fragment)
# END :compatible with jsonschema.RefResolver
##########################################################################
# RefResolver.HANDLERS["pyobject"] = lambda p: {
# "$schema": "PyObject", "$class": p}
| 9,301 |
parsl/tests/test_checkpointing/test_periodic.py
|
daheise/parsl
| 0 |
2025053
|
import argparse
import datetime
import time
import pytest
from dateutil.parser import parse
import parsl
from parsl.app.app import App
from parsl.tests.configs.local_threads_checkpoint_periodic import config
def local_setup():
global dfk
dfk = parsl.load(config)
def local_teardown():
# explicit clear without dfk.cleanup here, because the
# test does that already
parsl.clear()
@App('python', cache=True)
def slow_double(x, sleep_dur=1):
import time
time.sleep(sleep_dur)
return x * 2
def tstamp_to_seconds(line):
print("Parsing line: ", line)
parsed = parse(line[0:23], fuzzy=True)
epoch = datetime.datetime.utcfromtimestamp(0)
f = (parsed - epoch).total_seconds()
return f
@pytest.mark.local
def test_periodic(n=4):
"""Test checkpointing with task_periodic behavior
"""
d = {}
print("Launching : ", n)
for i in range(0, n):
d[i] = slow_double(i)
print("Done launching")
for i in range(0, n):
d[i].result()
print("Done sleeping")
time.sleep(16)
dfk.cleanup()
    # Check that the checkpoint log lines came back with roughly 5-second deltas
print("Rundir: ", dfk.run_dir)
with open("{}/parsl.log".format(dfk.run_dir), 'r') as f:
log_lines = f.readlines()
expected_msg = "] Done checkpointing"
expected_msg2 = "] No tasks checkpointed in this pass"
lines = [line for line in log_lines if expected_msg in line or expected_msg2 in line]
assert len(lines) >= 3, "Insufficient checkpoint lines in logfile"
deltas = [tstamp_to_seconds(line) for line in lines]
assert deltas[1] - deltas[0] < 5.5, "Delta between checkpoints exceeded period"
assert deltas[2] - deltas[1] < 5.5, "Delta between checkpoints exceeded period"
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default="10",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
if args.debug:
parsl.set_stream_logger()
x = test_periodic(n=4)
| 2,219 |
bot/command.py
|
meooow25/cp-discord-bot
| 11 |
2025272
|
class Command:
"""An executable bot command."""
def __init__(self, func, name=None, usage=None, desc=None, hidden=False,
allow_guild=True, allow_dm=False):
"""
:param func: the function that actually does the work
:param name: the command name
:param usage: command usage information
:param desc: command description
:param hidden: whether the command is hidden
:param allow_guild: whether the command is allowed in guild channels
:param allow_dm: whether the command is allowed in DM channels
"""
self.func = func
self.name = func.__name__ if name is None else name
self.usage = func.__name__ if usage is None else usage
self.desc = func.__name__ if desc is None else desc
self.hidden = hidden
self.allow_guild = allow_guild
self.allow_dm = allow_dm
async def execute(self, *args, **kwargs):
"""Execute the command."""
await self.func(*args, **kwargs)
def embed_field_rep(self):
"""Returns a Discord embed field representing this command."""
return {
'name': self.usage,
'value': self.desc,
}
class IncorrectUsageException(Exception):
"""Represents an exception raised when a command is used incorrectly."""
def __init__(self, msg=None, cmd=None):
"""
:param msg: a message to be displayed
:param cmd: the command in context
"""
if cmd:
msg = f'Command "{cmd}": {msg}' if msg else f'Command "{cmd}"'
if msg:
super().__init__(msg)
else:
super().__init__()
def command(func=None, **kwargs):
"""Wraps an async function in a Command object, intended for use as a decorator"""
if func is not None:
return Command(func, **kwargs)
return lambda fun: Command(fun, **kwargs)
def assert_true(val, msg=None, cmd=None):
if val is not True:
msg = msg or f'Expected True, found {val}'
raise IncorrectUsageException(msg, cmd)
def assert_none(val, msg=None, cmd=None):
if val is not None:
msg = msg or f'Expected None, found {val}'
raise IncorrectUsageException(msg, cmd)
def assert_not_none(val, msg=None, cmd=None):
if val is None:
msg = msg or f'Expected not None, found None'
raise IncorrectUsageException(msg, cmd)
def assert_int(val, msg=None, cmd=None):
try:
int(val)
except ValueError:
msg = msg or f'Expected int, found {val}'
raise IncorrectUsageException(msg, cmd)
def assert_arglen(args, num, msg=None, cmd=None):
if len(args) != num:
msg = msg or f'Expected {num} arguments, found {len(args)}'
raise IncorrectUsageException(msg, cmd)
| 2,807 |
custom_visualisation_example.py
|
FrancescoSaverioZuppichini/mirror
| 234 |
2025011
|
from mirror import mirror
from PIL import Image
from torchvision.models import vgg16
from torchvision.transforms import ToTensor, Resize, Compose
from mirror.visualisations.core import Visualisation
from mirror.visualisations.web import WebInterface
from functools import partial
class RepeatInput(Visualisation):
def __call__(self, inputs, layer, repeat=1):
return inputs.repeat(repeat, 1, 1, 1), None
params = {'repeat' : {
'type' : 'slider',
'min' : 1,
'max' : 100,
'value' : 2,
'step': 1,
'params': {}
}
}
visualisation = partial(WebInterface.from_visualisation, RepeatInput, params=params, name='Visualisation')
# create a model
model = vgg16(pretrained=True)
# open some images
cat = Image.open("./cat.jpg")
dog_and_cat = Image.open("./dog_and_cat.jpg")
# resize the image and make it a tensor
to_input = Compose([Resize((224, 224)), ToTensor()])
# call mirror with the inputs and the model
mirror([to_input(cat), to_input(dog_and_cat)], model,
visualisations=[visualisation])
| 1,157 |
acme/RegistrationManager.py
|
reinaortega/ACME-oneM2M-CSE
| 0 |
2025232
|
#
# RegistrationManager.py
#
# (c) 2020 by <NAME>
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# Managing resource / AE registrations
#
from Logging import Logging
from Constants import Constants as C
from Configuration import Configuration
import CSE, Utils
from resources import ACP
acpPrefix = 'acp_'
class RegistrationManager(object):
def __init__(self):
Logging.log('RegistrationManager initialized')
def shutdown(self):
Logging.log('RegistrationManager shut down')
#########################################################################
#
# Handle new resources in general
#
def checkResourceCreation(self, resource, originator, parentResource=None):
if resource.ty in [ C.tAE ]:
if (originator := self.handleAERegistration(resource, originator, parentResource)) is None:
return (originator, C.rcOK)
# Test and set creator attribute.
if (rc := self.handleCreator(resource, originator)) != C.rcOK:
return (None, rc)
return (originator, C.rcOK)
# Check for (wrongly) set creator attribute as well as assign it to allowed resources.
def handleCreator(self, resource, originator):
# Check whether cr is set. This is wrong
if resource.cr is not None:
Logging.logWarn('Setting "creator" attribute is not allowed.')
return C.rcBadRequest
# Set cr for some of the resource types
if resource.ty in C.tCreatorAllowed:
resource['cr'] = Configuration.get('cse.originator') if originator in ['C', 'S', '', None ] else originator
return C.rcOK
def checkResourceDeletion(self, resource, originator):
if resource.ty in [ C.tAE ]:
if not self.handleAEDeRegistration(resource):
return (False, originator)
return (True, originator)
#########################################################################
#
# Handle AE registration
#
def handleAERegistration(self, ae, originator, parentResource):
if originator == 'C':
originator = Utils.uniqueAEI('C')
elif originator == 'S':
originator = Utils.uniqueAEI('S')
elif originator is None or len(originator) == 0:
originator = Utils.uniqueAEI('S')
Logging.logDebug('Registering AE. aei: %s ' % originator)
# set the aei to the originator
ae['aei'] = originator
# Verify that parent is the CSEBase, else this is an error
if parentResource is None or parentResource.ty != C.tCSEBase:
return None
# Create an ACP for this AE-ID if there is none set
if Configuration.get("cse.ae.createACP"):
if ae.acpi is None or len(ae.acpi) == 0:
Logging.logDebug('Adding ACP for AE')
cseOriginator = Configuration.get('cse.originator')
acp = ACP.ACP(pi=parentResource.ri, rn=acpPrefix + ae.rn)
acp.addPermissionOriginator(originator)
acp.addPermissionOriginator(cseOriginator)
acp.setPermissionOperation(Configuration.get('cse.acp.pv.acop'))
acp.addSelfPermissionOriginator(cseOriginator)
acp.setSelfPermissionOperation(Configuration.get('cse.acp.pvs.acop'))
if not (res := self.checkResourceCreation(acp, originator, parentResource))[0]:
return None
CSE.dispatcher.createResource(acp, parentResource=parentResource, originator=originator)
# Set ACPI (anew)
ae['acpi'] = [ acp.ri ]
else:
ae['acpi'] = [ Configuration.get('cse.defaultACPI') ]
return originator
#
# Handle AE deregistration
#
def handleAEDeRegistration(self, resource):
        # remove the ACP created earlier, if it exists
        Logging.logDebug('DeRegistering AE. aei: %s ' % resource.aei)
if Configuration.get("cse.ae.removeACP"):
Logging.logDebug('Removing ACP for AE')
acpi = '%s/%s%s' % (Configuration.get("cse.rn"), acpPrefix, resource.rn)
if (res := CSE.dispatcher.retrieveResource(acpi))[1] != C.rcOK:
Logging.logWarn('Could not find ACP: %s' % acpi)
return False
CSE.dispatcher.deleteResource(res[0])
return True
| 3,851 |
Generative_Models/Variational_AE/VAE.py
|
Romit-Maulik/Tutorials-Demos-Practice
| 8 |
2025014
|
import numpy as np
import tensorflow as tf
# Set seeds
np.random.seed(10)
tf.random.set_seed(10)
from tensorflow.keras.layers import Input, Dense, LSTM, Lambda, Dropout, Flatten, Reshape
from tensorflow.keras.layers import Conv2D, UpSampling2D, MaxPooling2D
from tensorflow.keras import optimizers, models, regularizers
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.models import load_model, Sequential, Model
from tensorflow.keras.regularizers import l1
from tensorflow.keras.utils import plot_model
from tensorflow.keras.losses import binary_crossentropy, mse
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import matplotlib.pyplot as plt
lrate = 0.001
weights_filepath = 'best_weights_vae.h5'
mode = 'train' # train, test
num_latent = 2
def model_def():
def coeff_determination(y_pred, y_true): #Order of function inputs is important here
SS_res = K.sum(K.square( y_true-y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
# reparameterization trick
# instead of sampling from Q(z|X), sample eps = N(0,I)
# then z = z_mean + sqrt(var)*eps
def sampling(args):
"""Reparameterization trick by sampling fr an isotropic unit Gaussian.
Arguments
args (tensor): mean and log of variance of Q(z|X)
Returns
z (tensor): sampled latent vector
"""
epsilon_mean = 0.0
epsilon_std = 1.0
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
# by default, random_normal has mean=0 and std=1.0
epsilon = K.random_normal(shape=(batch, dim), mean=epsilon_mean, stddev=epsilon_std)
return z_mean + K.exp(0.5 * z_log_var) * epsilon
## Encoder
encoder_inputs = Input(shape=(64,64,1),name='Field')
# Encode
x = Conv2D(30,kernel_size=(3,3),activation='relu',padding='same')(encoder_inputs)
enc_l2 = MaxPooling2D(pool_size=(2, 2),padding='same')(x)
x = Conv2D(25,kernel_size=(3,3),activation='relu',padding='same')(enc_l2)
enc_l3 = MaxPooling2D(pool_size=(2, 2),padding='same')(x)
x = Conv2D(20,kernel_size=(3,3),activation='relu',padding='same')(enc_l3)
enc_l4 = MaxPooling2D(pool_size=(2, 2),padding='same')(x)
x = Conv2D(15,kernel_size=(3,3),activation='relu',padding='same')(enc_l4)
enc_l5 = MaxPooling2D(pool_size=(2, 2),padding='same')(x)
x = Conv2D(10,kernel_size=(3,3),activation=None,padding='same')(enc_l5)
encoded = MaxPooling2D(pool_size=(2, 2),padding='same')(x)
x = Flatten()(x)
z_mean = Dense(num_latent, name='z_mean')(x)
z_log_var = Dense(num_latent, name='z_log_var')(x)
# use reparameterization trick to push the sampling out as input
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(num_latent,), name='z')([z_mean, z_log_var])
# instantiate encoder model
encoder = Model(encoder_inputs, z, name='encoder')
# build decoder model
latent_inputs = Input(shape=(num_latent,), name='z_sampling')
x = Dense(8)(latent_inputs)
x = Reshape((2, 2, 2))(x)
x = Conv2D(2,kernel_size=(3,3),activation=None,padding='same')(x)
dec_l1 = UpSampling2D(size=(2, 2))(x)
x = Conv2D(15,kernel_size=(3,3),activation='relu',padding='same')(dec_l1)
dec_l2 = UpSampling2D(size=(2, 2))(x)
x = Conv2D(20,kernel_size=(3,3),activation='relu',padding='same')(dec_l2)
dec_l3 = UpSampling2D(size=(2, 2))(x)
x = Conv2D(25,kernel_size=(3,3),activation='relu',padding='same')(dec_l3)
dec_l4 = UpSampling2D(size=(2, 2))(x)
x = Conv2D(30,kernel_size=(3,3),activation='relu',padding='same')(dec_l4)
dec_l5 = UpSampling2D(size=(2, 2))(x)
decoded = Conv2D(1,kernel_size=(3,3),activation=None,padding='same')(dec_l5)
decoder = Model(inputs=latent_inputs,outputs=decoded)
decoder.summary()
# instantiate VAE model
ae_outputs = decoder(encoder(encoder_inputs))
model = Model(inputs=encoder_inputs,outputs=ae_outputs,name='VAE')
# Losses and optimization
my_adam = optimizers.Adam(lr=lrate, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
reconstruction_loss = mse(K.flatten(encoder_inputs), K.flatten(ae_outputs))
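    # KL divergence between the approximate posterior N(z_mean, exp(z_log_var)) and the standard normal prior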
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
model.add_loss(vae_loss)
model.compile(optimizer=my_adam,metrics=[coeff_determination])
model.summary()
return model, decoder, encoder
# Grab data
swe_train = np.load('./snapshot_matrix_pod.npy').T[:,:4096]
swe_valid = np.load('./snapshot_matrix_test.npy').T[:,:4096]
preproc = Pipeline([('stdscaler', StandardScaler())])
swe_train = preproc.fit_transform(swe_train)
swe_valid = preproc.transform(swe_valid)
swe_train = swe_train.reshape(900,64,64,1)
swe_valid = swe_valid.reshape(100,64,64,1)
# Shuffle copies so the original arrays keep their time ordering
swe_train_data = np.copy(swe_train)
swe_valid_data = np.copy(swe_valid)
np.random.shuffle(swe_train_data)
np.random.shuffle(swe_valid_data)
if __name__ == '__main__':
model,decoder,encoder = model_def()
# CNN training stuff
num_epochs = 5000
batch_size = 4
# fit network
if mode == 'train':
checkpoint = ModelCheckpoint(weights_filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min',save_weights_only=True)
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='auto', baseline=None, restore_best_weights=False)
callbacks_list = [checkpoint,earlystopping]
train_history = model.fit(x=swe_train_data, y=swe_train_data, epochs=num_epochs, batch_size=batch_size, callbacks=callbacks_list, validation_split=0.1)
# model.load_weights(weights_filepath)
# Encode the training data to generate time-series information
encoded_t = K.eval(encoder(swe_train[:,:,:,:].astype('float32')))[0].numpy()
encoded_v = K.eval(encoder(swe_valid[:,:,:,:].astype('float32')))[0].numpy()
encoded_t = encoded_t.reshape(900,num_latent)
encoded_v = encoded_v.reshape(100,num_latent)
plt.figure()
plt.plot(encoded_t[0:10,0],label='Dimension 1')
plt.plot(encoded_t[0:10,1],label='Dimension 2')
plt.legend()
plt.show()
np.save('VAE_Coefficient_Training_Data.npy',encoded_t)
np.save('VAE_Coefficient_Testing_Data.npy',encoded_v)
if mode == 'test':
# Visualize fields
model.load_weights(weights_filepath)
# Metric calculation
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
predicted = preproc.inverse_transform(model.predict(swe_valid).reshape(100,4096))
true = preproc.inverse_transform(swe_valid.reshape(100,4096))
print('R2 score:',r2_score(true,predicted))
print('MSE score:',mean_squared_error(true,predicted))
print('MAE score:',mean_absolute_error(true,predicted))
for time in range(0,10):
recoded = model.predict(swe_valid[time:time+1,:,:,:])
true = preproc.inverse_transform(swe_valid[time:time+1,:,:,:].reshape(1,4096)).reshape(64,64)
recoded = preproc.inverse_transform(recoded.reshape(1,4096)).reshape(64,64)
np.save('True_'+str(time)+'.npy',true)
np.save('Rec_'+str(time)+'.npy',recoded)
fig, ax = plt.subplots(nrows=1,ncols=2,figsize=(6,6))
cs1 = ax[0].imshow(true,label='input')
cs2 = ax[1].imshow(recoded,label='decoded')
for i in range(2):
ax[i].set_xlabel('x')
ax[i].set_ylabel('y')
fig.colorbar(cs1,ax=ax[0],fraction=0.046, pad=0.04)
fig.colorbar(cs2,ax=ax[1],fraction=0.046, pad=0.04)
ax[0].set_title(r'True $q_1$')
ax[1].set_title(r'Reconstructed $q_1$')
plt.subplots_adjust(wspace=0.5,hspace=-0.3)
plt.tight_layout()
plt.show()
| 8,339 |
release/stubs.min/System/Net/__init___parts/HttpListenerRequest.py
|
YKato521/ironpython-stubs
| 0 |
2025061
|
class HttpListenerRequest(object):
""" Describes an incoming HTTP request to an System.Net.HttpListener object. This class cannot be inherited. """
def BeginGetClientCertificate(self, requestCallback, state):
"""
BeginGetClientCertificate(self: HttpListenerRequest,requestCallback: AsyncCallback,state: object) -> IAsyncResult
Begins an asynchronous request for the client's X.509 v.3 certificate.
requestCallback: An System.AsyncCallback delegate that references the method to invoke when the operation is
complete.
state: A user-defined object that contains information about the operation. This object is passed to
the callback delegate when the operation completes.
Returns: An System.IAsyncResult that indicates the status of the operation.
"""
pass
def EndGetClientCertificate(self, asyncResult):
"""
EndGetClientCertificate(self: HttpListenerRequest,asyncResult: IAsyncResult) -> X509Certificate2
Ends an asynchronous request for the client's X.509 v.3 certificate.
asyncResult: The pending request for the certificate.
Returns: The System.IAsyncResult object that is returned when the operation started.
"""
pass
def GetClientCertificate(self):
"""
GetClientCertificate(self: HttpListenerRequest) -> X509Certificate2
Retrieves the client's X.509 v.3 certificate.
Returns: A System.Security.Cryptography.X509Certificates object that contains the client's X.509 v.3
certificate.
"""
pass
def GetClientCertificateAsync(self):
""" GetClientCertificateAsync(self: HttpListenerRequest) -> Task[X509Certificate2] """
pass
AcceptTypes = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the MIME types accepted by the client.
Get: AcceptTypes(self: HttpListenerRequest) -> Array[str]
"""
ClientCertificateError = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets an error code that identifies a problem with the System.Security.Cryptography.X509Certificates.X509Certificate provided by the client.
Get: ClientCertificateError(self: HttpListenerRequest) -> int
"""
ContentEncoding = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the content encoding that can be used with data sent with the request
Get: ContentEncoding(self: HttpListenerRequest) -> Encoding
"""
ContentLength64 = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the length of the body data included in the request.
Get: ContentLength64(self: HttpListenerRequest) -> Int64
"""
ContentType = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the MIME type of the body data included in the request.
Get: ContentType(self: HttpListenerRequest) -> str
"""
Cookies = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the cookies sent with the request.
Get: Cookies(self: HttpListenerRequest) -> CookieCollection
"""
HasEntityBody = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a System.Boolean value that indicates whether the request has associated body data.
Get: HasEntityBody(self: HttpListenerRequest) -> bool
"""
Headers = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the collection of header name/value pairs sent in the request.
Get: Headers(self: HttpListenerRequest) -> NameValueCollection
"""
HttpMethod = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the HTTP method specified by the client.
Get: HttpMethod(self: HttpListenerRequest) -> str
"""
InputStream = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a stream that contains the body data sent by the client.
Get: InputStream(self: HttpListenerRequest) -> Stream
"""
IsAuthenticated = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a System.Boolean value that indicates whether the client sending this request is authenticated.
Get: IsAuthenticated(self: HttpListenerRequest) -> bool
"""
IsLocal = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a System.Boolean value that indicates whether the request is sent from the local computer.
Get: IsLocal(self: HttpListenerRequest) -> bool
"""
IsSecureConnection = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a System.Boolean value that indicates whether the TCP connection used to send the request is using the Secure Sockets Layer (SSL) protocol.
Get: IsSecureConnection(self: HttpListenerRequest) -> bool
"""
IsWebSocketRequest = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Get: IsWebSocketRequest(self: HttpListenerRequest) -> bool
"""
KeepAlive = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a System.Boolean value that indicates whether the client requests a persistent connection.
Get: KeepAlive(self: HttpListenerRequest) -> bool
"""
LocalEndPoint = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Get the server IP address and port number to which the request is directed.
Get: LocalEndPoint(self: HttpListenerRequest) -> IPEndPoint
"""
ProtocolVersion = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the HTTP version used by the requesting client.
Get: ProtocolVersion(self: HttpListenerRequest) -> Version
"""
QueryString = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the query string included in the request.
Get: QueryString(self: HttpListenerRequest) -> NameValueCollection
"""
RawUrl = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the URL information (without the host and port) requested by the client.
Get: RawUrl(self: HttpListenerRequest) -> str
"""
RemoteEndPoint = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the client IP address and port number from which the request originated.
Get: RemoteEndPoint(self: HttpListenerRequest) -> IPEndPoint
"""
RequestTraceIdentifier = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the request identifier of the incoming HTTP request.
Get: RequestTraceIdentifier(self: HttpListenerRequest) -> Guid
"""
ServiceName = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the Service Provider Name (SPN) that the client sent on the request.
Get: ServiceName(self: HttpListenerRequest) -> str
"""
TransportContext = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the System.Net.TransportContext for the client request.
Get: TransportContext(self: HttpListenerRequest) -> TransportContext
"""
Url = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the System.Uri object requested by the client.
Get: Url(self: HttpListenerRequest) -> Uri
"""
UrlReferrer = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the Uniform Resource Identifier (URI) of the resource that referred the client to the server.
Get: UrlReferrer(self: HttpListenerRequest) -> Uri
"""
UserAgent = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the user agent presented by the client.
Get: UserAgent(self: HttpListenerRequest) -> str
"""
UserHostAddress = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the server IP address and port number to which the request is directed.
Get: UserHostAddress(self: HttpListenerRequest) -> str
"""
UserHostName = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the DNS name and,if provided,the port number specified by the client.
Get: UserHostName(self: HttpListenerRequest) -> str
"""
UserLanguages = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the natural languages that are preferred for the response.
Get: UserLanguages(self: HttpListenerRequest) -> Array[str]
"""
| 9,318 |
rdm/test_formatters/xml_util.py
|
dheater/rdm
| 76 |
2023182
|
from xml.etree import ElementTree
DISABLED_PREFIX = 'DISABLED_'
def xml_load(xml_path):
return ElementTree.parse(xml_path)
def check_disabled(name):
if name.startswith(DISABLED_PREFIX):
return name[len(DISABLED_PREFIX):], True
else:
return name, False
def flattened_gtest_results(test_results):
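    # Walk every <testsuite>/<testcase> pair and normalise each outcome to pass, fail or skip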
flattened_results = {}
for test_suite in test_results.iter('testsuite'):
suite_name, suite_disabled = check_disabled(test_suite.get('name', '_'))
for test_case in test_suite.iter('testcase'):
case_name, case_disabled = check_disabled(test_case.get('name', '_'))
test_name = ".".join([suite_name, case_name])
status = test_case.get('status')
if suite_disabled or case_disabled or (status is not None and status != 'run'):
result = "skip"
message = None
else:
result = test_case.get('result')
failure = test_case.find('failure')
if failure is None:
message = None
if result is None:
result = "pass"
else:
message = failure.get('message')
result = 'fail'
flattened_results[test_name] = {
'name': test_name,
'result': result,
'message': message,
}
return flattened_results
def flattened_qttest_results(test_results):
flattened_results = {}
for test_case in test_results.iter('TestCase'):
case_name = test_case.get('name', '_')
for test_function in test_case.iter('TestFunction'):
function_name = test_function.get('name', '_')
test_name = ".".join([case_name, function_name])
for incident in test_function.iter('Incident'):
result = incident.get("type")
description = incident.find('Description')
if description is None:
message = None
else:
message = description.text
flattened_results[test_name] = {
'name': test_name,
'result': result,
'message': message,
}
return flattened_results
def auto_translator(test_results):
if test_results.find('Environment'):
return flattened_qttest_results(test_results)
else:
return flattened_gtest_results(test_results)
| 2,529 |
steem_simplemap.py
|
AusPrinzip/Steem-Maps
| 0 |
2024547
|
from flask import Flask, jsonify, render_template, request, Response
from geopy.geocoders import Nominatim
from steemdata import SteemData
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
geolocator = Nominatim(timeout=10)
app = Flask(__name__)
s = SteemData()
@app.route('/geolocation')
def geolocation():
user = request.args.get('user', 0, type=str)
s = SteemData()
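    # Look up the account's profile location in MongoDB, then geocode it to latitude/longitude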
data = s.Accounts.aggregate(
[
{
"$match": {
"name": user
}
},
{
"$project":
{
"account":"$account",
"location":"$profile.location",
"_id":0
}},
{"$limit":1},
])
for elemento in data:
location = geolocator.geocode(elemento["location"])
print((elemento["account"], elemento["location"],location.latitude, location.longitude))
return jsonify(result= str(user) + str("/") + str(location.longitude) + str("/") + str(location.latitude))
@app.route('/')
def index():
return render_template('index_simplemap.html')
| 1,015 |
Services/ApiAddressService.py
|
AiursoftWeb/Kahla.CLI
| 10 |
2022877
|
from package import stable
class ApiAddressService(object):
def __init__(self):
if stable:
self.serverAddress = "https://server.kahla.app"
else:
self.serverAddress = "https://staging.server.kahla.app"
def getaddress(self):
return self.serverAddress
| 308 |
1 Python Basics/5_attributes.py
|
narayanants/python-mega-course
| 0 |
2025302
|
student_grades = [9.1,8.2,7.3,9.1,9.2,9.1]
print(student_grades.count(9.1))
mysum = sum(student_grades)
mylength = len(student_grades)
print(mysum)
print(mylength)
mean = mysum/mylength
print(mean)
| 203 |
preprocessor/feature_extractor.py
|
AhsanAliLodhi/statistical_data_preprocessing
| 0 |
2025416
|
import pandas as pd
import numpy as np
from tqdm import tqdm
from pyod.models.hbos import HBOS
from .misc import print_c, is_number, make_conf
from .imputer import remove_single_value_features, collapse_category
from .constants import DATE_TIME_FEATURES
def extract_numericals(df: pd.DataFrame,col: str, pbar :tqdm = None, verbose: bool = True):
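    # Add a parallel <col>_numerical column holding float values parsed from numeric-looking entries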
df = df.copy(deep = True)
msg = 'creating '+col+'_numerical'
if pbar is None:
print_c(verbose,msg)
else:
pbar.set_description(msg)
df[col+'_numerical'] = np.NaN
df.loc[is_number(df[col]) ,col+'_numerical'] = [float(item) for item in df[col][is_number(df[col])]]
return df
def extract_numericals_forall(df: pd.DataFrame, verbose: bool = True):
df = df.copy(deep=True)
df.columns = [column.lower() for column in df.columns]
conf = make_conf(df)
conf = [col for col in list(conf.keys()) if conf[col]['type'] == 'categorical']
cols = tqdm(conf)
new_cols = []
for col in cols:
df = extract_numericals(df,col,cols,verbose)
new_cols.append(col+'_numerical')
df = remove_single_value_features(df,verbose,include = new_cols)
return df
def extract_is_nan(df: pd.DataFrame, col: str, pbar: tqdm = None, verbose: bool = True) -> pd.DataFrame:
"""
Create a null column
:param df: the data
:param col: the column name
    :param pbar: tqdm progress bar
    :param verbose: verbosity
    :return: the updated dataframe
"""
df = df.copy(deep=True)
nulls = df[col].isnull()
if nulls.sum() == 0:
return df
msg = "Adding "+col+'_'+'isnull column'
if pbar is None:
print_c(verbose, msg)
else:
pbar.set_description(msg)
df[col+'_'+'isnull'] = df[col].isnull()
return df
def extract_is_nan_forall(df: pd.DataFrame, verbose: bool = True):
df = df.copy(deep=True)
df.columns = [column.lower() for column in df.columns]
conf = make_conf(df)
conf = [col for col in list(conf.keys()) if conf[col]['type'] == 'numerical']
cols = tqdm(conf)
new_cols = []
for col in cols:
df = extract_is_nan(df,col,cols,verbose)
new_cols.append(col+'_isnull')
df = remove_single_value_features(df,verbose,include = new_cols)
return df
def extract_is_inf(df: pd.DataFrame, col: str, pbar = None, verbose: bool = True, seperate_ninf: bool = False) -> pd.DataFrame:
"""
Create an is_inf column
:param df: the data
:param col: the column name
    :param pbar: tqdm progress bar
    :param verbose: verbosity
    :param seperate_ninf: flag negative infinity as -1 instead of 1
    :return: the updated dataframe
"""
df = df.copy(deep=True)
msg = "Adding "+col+'_'+'isinf column'
if pbar is None:
print_c(verbose,msg)
else:
pbar.set_description(msg)
df[col+'_'+'isinf'] = 0
df.loc[np.isinf(df[col]) ,col+'_'+'isinf'] = 1
if seperate_ninf:
df.loc[np.isneginf(df[col]) ,col+'_'+'isinf'] = -1
else:
df.loc[np.isneginf(df[col]) ,col+'_'+'isinf'] = 1
return df
def extract_is_inf_forall(df: pd.DataFrame, verbose: bool = True, seperate_ninf: bool = False):
df = df.copy(deep=True)
df.columns = [column.lower() for column in df.columns]
conf = make_conf(df)
conf = [col for col in list(conf.keys()) if conf[col]['type'] == 'numerical']
cols = tqdm(conf)
new_cols = []
for col in cols:
df = extract_is_inf(df,col,cols,verbose,seperate_ninf=seperate_ninf)
new_cols.append(col+'_isinf')
df = remove_single_value_features(df,verbose,include = new_cols)
return df
def extract_datetime_features(df: pd.DataFrame, col: str, pbar: tqdm = None, verbose: bool = True)\
-> pd.DataFrame:
"""
Process a datetime column for machine learning
:param df: the data
:param col: the column
:param pbar: tqdm progress bar
:param verbose: verbosity
:return: updated dataframe
"""
df = df.copy(deep=True)
numbers = DATE_TIME_FEATURES['NUMERICS']
booleans = DATE_TIME_FEATURES['BOOLEANS']
msg = "Extracting 15 datetime features from "+str(col)
if pbar is None:
print_c(verbose, msg)
else:
pbar.set_description(msg)
mask = ~df[col].isnull()
for feature in numbers:
df[col+'_'+feature] = 0
df.loc[mask ,col+'_'+feature] = [getattr(x, feature) for x in df[col][mask]]
for feature in booleans:
df[col+'_'+feature] = -1
df.loc[mask ,col+'_'+feature] = [getattr(x, feature) for x in df[col][mask]]
df[col+'_'+feature] = df[col+'_'+feature].astype('int32')
return df
def extract_datetime_features_forall(df: pd.DataFrame, verbose: bool = True):
df = df.copy(deep=True)
df.columns = [column.lower() for column in df.columns]
conf = make_conf(df)
conf = [col for col in list(conf.keys()) if conf[col]['type'] == 'datetime']
cols = tqdm(conf)
features = DATE_TIME_FEATURES['NUMERICS'] + DATE_TIME_FEATURES['BOOLEANS']
new_cols = []
for col in cols:
df = extract_datetime_features(df,col,cols,verbose)
for feature in features:
new_cols.append(col+'_'+feature)
df = remove_single_value_features(df,verbose,include = new_cols)
return df
def onehot_encode(df: pd.DataFrame, col: str, pbar: tqdm = None, verbose: bool = True, replace = True,
allow_na: bool = True ,cutoff: int = 50, cutoff_class: str = 'other' ) -> pd.DataFrame:
"""
One hot encode
:param df: the data
:param col: the column name
    :param pbar: tqdm progress bar
    :param verbose: verbosity
    :return: the updated dataframe
"""
df = df.copy(deep=True)
msg = "One hot encoding "+ str(col)
if pbar is None:
print_c(verbose, msg)
else:
pbar.set_description(msg)
if cutoff > 0:
df = collapse_category(df = df, col = col, pbar = pbar, verbose = verbose, cutoff_class = cutoff_class, cutoff = cutoff)
one_hot = pd.get_dummies(df[col], prefix=col+'_onehot_', dummy_na = allow_na)
try:
df = df.join(one_hot)
except Exception as e:
msg = "Ignoring "+str(col)
if pbar is None:
print_c(verbose, msg)
else:
pbar.set_description(msg)
if replace:
df.drop(col, axis=1, inplace=True)
msg = "Dropping "+col
if pbar is None:
print_c(verbose, msg)
else:
pbar.set_description(msg)
return df
def onehot_encode_all(df: pd.DataFrame, verbose: bool = True, allow_na: bool = True, cutoff: int = 50,
cutoff_class: str = 'other') -> pd.DataFrame:
df = df.copy(deep=True)
df.columns = [column.lower() for column in df.columns]
conf = make_conf(df)
# Filter categorical columns
conf = [col for col in list(conf.keys()) if conf[col]['type'] == 'categorical']
cols = tqdm(conf)
for col in cols:
df = onehot_encode(df,col,cols,verbose,allow_na = allow_na,cutoff = cutoff, cutoff_class = cutoff_class)
new_cols = [col for col in df.columns if '_onehot_' in col]
df = remove_single_value_features(df,verbose,include = new_cols)
return df
def extract_is_outlier(df: pd.DataFrame, col: str, pbar = None, verbose: bool = True, model = None,
outliers_fraction: float = 0.05, replace_with = None) -> pd.DataFrame:
"""
Create an is_outlier column
:param df: the data
:param col: the column name
    :param pbar: tqdm progress bar
    :param verbose: verbosity
    :return: the updated dataframe
"""
df = df.copy(deep=True)
msg = "Trying to find outliers in "+str(col)
if pbar is None:
print_c(verbose,msg)
else:
pbar.set_description(msg)
if model is None:
model = HBOS(contamination=outliers_fraction)
X = df[col].astype(np.float32)
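    # Fit and score the detector only on finite values; NaN and infinite entries are masked out and left unflagged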
mask = ~( np.isnan(X) | np.isinf(X) | np.isneginf(X))
model.fit(X[mask].to_frame())
preds = model.predict(X[mask].to_frame())
df[col+'_'+'isoutlier'] = 0
df.loc[mask,col+'_'+'isoutlier'] = preds
if replace_with is not None:
msg = "Replacing outliers in "+str(col)+" with "+str(replace_with)
if pbar is None:
print_c(verbose,msg)
else:
pbar.set_description(msg)
df.loc[df[col+'_'+'isoutlier'] == 1, col ] = replace_with
return df
def extract_is_outlier_forall(df: pd.DataFrame, verbose: bool = True, model = None,
outliers_fraction: float = 0.05, replace_with = None) -> pd.DataFrame:
df = df.copy(deep=True)
df.columns = [column.lower() for column in df.columns]
conf = make_conf(df)
if model is None:
model = HBOS(contamination=outliers_fraction)
# Filter numerical columns
conf = [col for col in list(conf.keys()) if conf[col]['type'] == 'numerical']
cols = tqdm(conf)
for col in cols:
df = extract_is_outlier(df, col, cols, verbose, model = model,
outliers_fraction = outliers_fraction, replace_with = replace_with)
new_cols = [col for col in df.columns if '_isoutlier' in col]
df = remove_single_value_features(df,verbose,include = new_cols)
return df
| 9,160 |
integration_tests/src/main/python/json_fuzz_test.py
|
NVnavkumar/spark-rapids
| 0 |
2023838
|
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A JSON generator based on the context-free grammar from https://www.json.org/json-en.html
import random
from marks import allow_non_gpu, fuzz_test
from typing import List
from data_gen import *
from asserts import assert_gpu_and_cpu_are_equal_collect
from marks import approximate_float
_name_gen = StringGen(pattern= "[a-zA-Z]{1,30}",nullable= False)
_name_gen.start(random.Random(0))
def gen_top_schema(depth):
return gen_object_type(depth)
def gen_schema(depth):
"""
Abstract data type of JSON schema
type Schema = Object of Fields
| Array of Schema
| String
| Number
| Bool
type Fields = Field list
type Field = {Name, Schema}
type Name = String
"""
if depth > 1 and random.randint(1, 100) < 90:
return random.choice([gen_object_type, gen_array_type])(depth)
else:
return random.choice([gen_string_type, gen_number_type, gen_bool_type])()
def gen_object_type(depth):
return StructType(gen_fields_type(depth-1))
def gen_array_type(depth):
return ArrayType(gen_schema(depth-1))
def gen_fields_type(depth):
length = random.randint(1, 5)
return [gen_field_type(depth) for _ in range(length)]
def gen_field_type(depth):
return StructField(gen_name(), gen_schema(depth-1))
def gen_string_type():
return StringType()
def gen_number_type():
return random.choice([IntegerType(), FloatType(), DoubleType()])
def gen_bool_type():
return BooleanType()
def gen_name():
return _name_gen.gen()
# This is just a simple prototype of a JSON generator.
# You need to generate a JSON schema before using it.
#
# #Example
# ```python
# schema = gen_schema()
# with open("./temp.json", 'w') as f:
# for t in gen_json(schema):
# f.write(t)
# ```
# to generate a random JSON file.
def gen_json(schema: DataType):
"""
JSON -> ELEMENT
"""
lines = random.randint(0, 10)
for _ in range(lines):
for t in gen_element(schema):
yield t
yield '\n'
def gen_value(schema: DataType):
"""
VALUE -> OBJECT
| ARRAY
| STRING
| NUMBER
| BOOL
"""
if isinstance(schema, StructType):
for t in gen_object(schema):
yield t
elif isinstance(schema, ArrayType):
for t in gen_array(schema.elementType):
yield t
elif isinstance(schema, StringType):
for t in gen_string():
yield t
elif isinstance(schema, BooleanType):
for t in gen_bool():
yield t
elif isinstance(schema, IntegerType):
for t in gen_integer():
yield t
elif isinstance(schema, (FloatType, DoubleType)):
for t in gen_number():
yield t
else:
raise Exception("not supported schema")
def gen_object(schema: StructType):
"""
OBJECT -> '{' WHITESPACE '}'
| '{' MEMBERS '}'
"""
yield "{"
if len(schema) == 0:
for t in gen_whitespace():
yield t
else:
for t in gen_members(schema.fields):
yield t
yield "}"
def gen_members(schema: List[StructField]):
"""
MEMBERS -> MEMBER
| MEMBER ',' MEMBERS
"""
if len(schema) == 1:
for t in gen_member(schema[0]):
yield t
else:
for t in gen_member(schema[0]):
yield t
yield ","
for t in gen_members(schema[1:]):
yield t
def gen_member(schema: StructField):
"""
MEMBER -> WHITESPACE STRING WHITESPACE ':' ELEMENT
"""
for t in gen_whitespace():
yield t
yield '"' + schema.name + '"'
for t in gen_whitespace():
yield t
yield ":"
for t in gen_element(schema.dataType):
yield t
def gen_array(schema: DataType):
yield '['
for t in random.choices([gen_whitespace(), gen_elements(schema)], [10, 90], k=1)[0]:
yield t
yield ']'
def gen_elements(schema: DataType):
"""
ELEMENTS -> ELEMENT
| ELEMENT ',' ELEMENTS
"""
for t in gen_element(schema):
yield t
if random.randint(1, 100) < 80:
yield ','
for t in gen_elements(schema):
yield t
def gen_element(schema: DataType):
"""
ELEMENT -> WHITESPACE VALUE WHITESPACE
"""
for t in gen_whitespace():
yield t
for t in gen_value(schema):
yield t
for t in gen_whitespace():
yield t
def gen_string():
"""
STRING -> '"' CHARACTERS '"'
"""
yield '"'
for t in gen_characters():
yield t
yield '"'
def gen_characters():
"""
CHARACTERS -> ''
| CHAR CHARACTERS
"""
if random.randint(0,100) < 30:
yield ''
else:
for t in gen_char():
yield t
for t in gen_characters():
yield t
def gen_char():
"""
CHAR -> 0x0020 .. 0x10ffff (exclude 0x0022 and 0x005c)
| '\\' ESCAPE
"""
if random.randint(0, 99) < 80:
unicode = random.randint(0x0020, 0x10ffff)
while unicode == 0x22 or unicode == 0x5c:
unicode = random.randint(0x0020, 0x10ffff)
yield chr(unicode)
else:
yield '\\'
for t in gen_escape():
yield t
def gen_escape():
"""
ESCAPE -> '"' | '\\' | '/' | 'b' | 'f' | 'n' | 'r' | 't'
| 'u' HEX HEX HEX HEX
"""
if random.randint(0, 8) < 8:
yield random.choice(['"', '\\', '/', 'b', 'f', 'n', 'r', 't'])
else:
yield 'u'
for _ in range(4):
for t in gen_hex():
yield t
def gen_hex():
"""
HEX -> DIGIT
| 'a' .. 'f'
| 'A' .. 'F'
"""
path = random.randint(0, 2)
if path == 0:
for t in gen_digit():
yield t
elif path == 1:
yield chr(random.randint(0x41, 0x46))
else:
yield chr(random.randint(0x61, 0x66))
def gen_number():
"""
NUMBER -> INTEGER FRACTION EXPONENT
"""
for t in gen_integer():
yield t
for t in gen_fraction():
yield t
for t in gen_exponent():
yield t
def gen_integer():
"""
INTEGER -> DIGIT
| ONENINE DIGITS
| '-' DIGIT
| '-' ONENINE DIGITS
"""
if random.randint(1, 100) <= 50:
yield '-'
if random.randint(1, 100) <= 50:
for t in gen_digit():
yield t
else:
for t in gen_onenine():
yield t
for t in gen_digits():
yield t
def gen_digits():
"""
DIGITS -> DIGIT
| DIGIT DIGITS
"""
for t in gen_digit():
yield t
if random.randint(1, 100) < 70:
for t in gen_digits():
yield t
def gen_digit():
"""
DIGIT -> '0'
| ONENINE
"""
if random.randint(0, 9) == 0:
yield '0'
else:
for t in gen_onenine():
yield t
def gen_onenine():
"""
ONENINE -> '1' .. '9'
"""
yield chr(random.randint(0x31, 0x39))
def gen_fraction():
"""
FRACTION -> "" | '.' DIGITS
"""
if random.randint(1, 100) < 50:
yield ""
else:
yield '.'
for t in gen_digits():
yield t
def gen_exponent():
"""
EXPONENT -> ""
| 'E' SIGN DIGITS
| 'e' SIGN DIGITS
"""
if random.randint(1, 100) < 20:
yield ""
else:
yield random.choice(['E', 'e'])
for t in gen_sign():
yield t
for t in gen_digits():
yield t
def gen_sign():
"""
SIGN -> ""
| '+'
| '-'
"""
yield random.choice(["", '+', '-'])
def gen_whitespace():
"""
WHITESPACE -> ''
| 0x0020 WHITESPACE
| 0x000a WHITESPACE (todo)
| 0x000d WHITESPACE (todo)
| 0x0009 WHITESPACE (todo)
"""
if random.randint(0, 4) > 3:
yield chr(random.choice([0x0020]))
for t in gen_whitespace():
yield t
else:
yield ''
def gen_bool():
"""
BOOL -> "true"
| "null"
| "false"
"""
yield random.choice(["true", "null", "false"])
_enable_all_types_conf = {
'spark.rapids.sql.format.json.enabled': 'true',
'spark.rapids.sql.format.json.read.enabled': 'true'}
@approximate_float
@allow_non_gpu('FileSourceScanExec')
@fuzz_test
def test_json_read_fuzz(enable_fuzz_test, spark_tmp_path):
depth = random.randint(1, 5)
schema = gen_top_schema(depth)
data_path = spark_tmp_path + '/JSON_FUZZ_DATA'
schema_path = spark_tmp_path + '/JSON_SCHEMA'
# write the schema for debugging
with open(schema_path, 'w') as f:
f.write("{}".format(schema))
with open(data_path, 'w') as f:
for c in gen_json(schema):
f.write(c)
assert_gpu_and_cpu_are_equal_collect(
lambda spark: spark.read.schema(schema).json(data_path),
_enable_all_types_conf
)
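# A minimal standalone sketch (not part of the original test): it drives the
# grammar above outside of pytest so the generated JSON can be inspected by eye.
# The output path, depth, and seed are illustrative assumptions, and the imports
# at the top of this file still require the integration-test environment on the path.
if __name__ == "__main__":
    random.seed(0)
    demo_schema = gen_top_schema(3)
    with open("./fuzz_demo.json", "w") as demo_file:
        for chunk in gen_json(demo_schema):
            demo_file.write(chunk)
    print(demo_schema)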
| 9,703 |
20-fs-ias-lec/groups/01-dev2dev/Code/BTonly/test.py
|
Kyrus1999/BACnet
| 8 |
2024923
|
import impexp
import lib.pcap as pcap
try:
print("eigene DB")
pcap.dump("test.pcap")
except Exception as e:
print(e)
try:
print("eigener Payload")
pcap.dump("payload.pcap")
except Exception as e:
print(e)
try:
print("peer Payload")
pcap.dump("peerPayload.pcap")
except Exception as e:
print(e)
| 332 |
96_Word/python/word.py
|
Omega7379/basic-computer-games
| 1 |
2024691
|
#!/usr/bin/env python3
# WORD
#
# Converted from BASIC to Python by <NAME>
import random
words = [
"DINKY",
"SMOKE",
"WATER",
"GRASS",
"TRAIN",
"MIGHT",
"FIRST",
"CANDY",
"CHAMP",
"WOULD",
"CLUMP",
"DOPEY",
]
def play_game():
"""Play one round of the game"""
random.shuffle(words)
target_word = words[0]
guess_count = 0
guess_progress = ["-"] * 5
print("You are starting a new game...")
while True:
guess_word = ""
while guess_word == "":
guess_word = input("\nGuess a five letter word. ").upper()
if guess_word == "?":
break
elif not guess_word.isalpha() or len(guess_word) != 5:
guess_word = ""
print("You must guess a five letter word. Start again.")
guess_count += 1
if guess_word == "?":
print("The secret word is", target_word)
break
else:
common_letters = ""
matches = 0
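            # Compare every guess position with every target position: count each
            # matching pair of letters (repeated letters are counted more than once)
            # and reveal the letters that sit in exactly the right spot.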
for i in range(5):
for j in range(5):
if guess_word[i] == target_word[j]:
matches += 1
common_letters = common_letters + guess_word[i]
if i == j:
guess_progress[j] = guess_word[i]
print(
"There were",
matches,
"matches and the common letters were... " + common_letters,
)
print(
"From the exact letter matches, you know............ "
+ "".join(guess_progress)
)
if "".join(guess_progress) == guess_word:
print("\nYou have guessed the word. It took", guess_count, "guesses!")
break
elif matches == 0:
print("\nIf you give up, type '?' for you next guess.")
def main():
print(" " * 33 + "WORD")
print(" " * 15 + "CREATIVE COMPUTING MORRISTOWN, NEW JERSEY\n")
print("I am thinking of a word -- you guess it. I will give you")
print("clues to help you get it. Good luck!!\n")
keep_playing = True
while keep_playing:
play_game()
keep_playing = input("\nWant to play again? ").lower().startswith("y")
if __name__ == "__main__":
main()
| 2,360 |
gitops-coordination/jBot-CD/on-new-version/src/start-build.py
|
JimmyDqv/jBot-gitops
| 1 |
2025399
|
import json
import logging
import boto3
import os
import uuid
log = logging.getLogger('jBot')
log.setLevel(logging.DEBUG)
def handler(event, context):
log.debug(json.dumps(event, indent=2))
codebuildProject = os.environ['CODE_BUILD_PROJECT']
environment = os.environ['ENV_ALIAS']
    artifactLocation = "{0}/{1}".format(event['bucket']['name'],
                                        event['object']['key'])
client = boto3.client('codebuild')
response = client.start_build(
projectName=codebuildProject,
sourceTypeOverride='S3',
sourceLocationOverride=artifactLocation,
environmentVariablesOverride=[
{
'name': 'ENV_ALIAS',
'value': environment,
'type': 'PLAINTEXT'
}
]
)
return event
| 834 |
base/convert_integer.py
|
vvvvcp/NeverMore
| 0 |
2024826
|
import socket
def convert_integer():
data = 1234
print "Original: %s => Long host byte order: %s, Network byte order: %s" %(data, socket.ntohl(data), socket.htonl(data))
print "Original: %s => Long host byte order: %s, Network byte order: %s" %(data, socket.ntohs(data), socket.htons(data))
if __name__ == '__main__':
convert_integer()
| 359 |
ch08-supervised/decision_tree/decision_tree_logit.py
|
GaoX2015/intro_ds
| 314 |
2023223
|
# -*- coding: UTF-8 -*-
"""
此脚本用于展示决策树联结逻辑回归模型
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
def generateData(n):
"""
生成训练数据
"""
X, y = make_classification(n_samples=n, n_features=4)
data = pd.DataFrame(X, columns=["x1", "x2", "x3", "x4"])
data["y"] = y
return data
def trainModel(data, features, label):
"""
分别使用逻辑回归、决策树和决策树+逻辑回归建模
"""
res = {}
trainData, testData = train_test_split(data, test_size=0.5)
    # Logistic regression on its own
logitModel = LogisticRegression()
logitModel.fit(trainData[features], trainData[label])
logitProb = logitModel.predict_proba(testData[features])[:, 1]
res["logit"] = roc_curve(testData[label], logitProb)
    # Decision tree on its own
dtModel = DecisionTreeClassifier(max_depth=2)
dtModel.fit(trainData[features], trainData[label])
dtProb = dtModel.predict_proba(testData[features])[:, 1]
res["DT"] = roc_curve(testData[label], dtProb)
    # Chain the decision tree with the logistic regression
    ## To avoid overfitting, train the decision tree and the logistic regression on different data
trainDT, trainLR = train_test_split(trainData, test_size=0.5)
    ## Use the decision tree to transform the first two variables
m = 2
_dt = DecisionTreeClassifier(max_depth=2)
_dt.fit(trainDT[features[:m]], trainDT[label])
leafNode = _dt.apply(trainDT[features[:m]]).reshape(-1, 1)
coder = OneHotEncoder()
coder.fit(leafNode)
newFeature = np.c_[
coder.transform(_dt.apply(trainLR[features[:m]]).reshape(-1, 1)).toarray(),
trainLR[features[m:]]]
_logit = LogisticRegression()
_logit.fit(newFeature[:, 1:], trainLR[label])
testFeature = np.c_[
coder.transform(_dt.apply(testData[features[:m]]).reshape(-1, 1)).toarray(),
testData[features[m:]]]
dtLogitProb = _logit.predict_proba(testFeature[:, 1:])[:, 1]
res["DT + logit"] = roc_curve(testData[label], dtLogitProb)
return res
def visualize(re):
"""
将模型结果可视化
"""
# 为在Matplotlib中显示中文,设置特殊字体
plt.rcParams["font.sans-serif"]=["SimHei"]
    # Create a figure
fig = plt.figure(figsize=(6, 6), dpi=80)
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
styles = ["k--", "r-.", "b"]
model = ["logit", "DT", "DT + logit"]
for i, s in zip(model, styles):
fpr, tpr, _ = re[i]
_auc = auc(fpr, tpr)
        # In Python 3, str does not need to be decoded
if sys.version_info[0] == 3:
ax.plot(fpr, tpr, s, label="%s:%s; %s=%0.2f" % ("模型", i,
"曲线下面积(AUC)", _auc))
else:
ax.plot(fpr, tpr, s, label="%s:%s; %s=%0.2f" % ("模型".decode("utf-8"),
i, "曲线下面积(AUC)".decode("utf-8"), _auc))
legend = plt.legend(loc=4, shadow=True)
plt.show()
if __name__ == "__main__":
np.random.seed(4040)
data = generateData(4000)
re = trainModel(data, ["x1", "x2", "x3", "x4"], "y")
visualize(re)
| 3,112 |
Algorithms/0152_Maximum_Product_Subarray/Python/Maximum_Product_Subarray_Solution_1.py
|
lht19900714/Leetcode_Python
| 0 |
2024920
|
# Space: O(1)
# Time: O(n)
class Solution:
def maxProduct(self, nums):
if len(nums) == 1: return nums[0]
current_max = nums[0]
current_min = nums[0]
res = nums[0]
for i in range(1, len(nums)):
temp = current_max
current_max = max(current_max * nums[i], current_min * nums[i], nums[i])
current_min = min(temp * nums[i], current_min * nums[i], nums[i])
res = max(current_max,res)
return res
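# A hedged usage sketch (the example inputs are illustrative, not from the original):
# tracking both a running maximum and minimum product lets a negative number turn
# the current minimum into a new maximum.
if __name__ == "__main__":
    assert Solution().maxProduct([2, 3, -2, 4]) == 6   # best subarray is [2, 3]
    assert Solution().maxProduct([-2, 3, -4]) == 24    # two negatives cancel out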
| 503 |
tools/common/name.py
|
dmsteck/Fusion360GalleryDataset
| 193 |
2025147
|
"""
Give and get names for Fusion 360 entities
"""
import adsk.core
import adsk.fusion
import uuid
import json
import math
def get_uuid(entity, group_name="Dataset"):
if isinstance(entity, adsk.fusion.Profile):
return get_profile_uuid(entity)
elif isinstance(entity, adsk.fusion.BRepFace):
return get_brep_face_uuid(entity, group_name)
else:
uuid_att = entity.attributes.itemByName(group_name, "uuid")
if uuid_att is not None:
return uuid_att.value
else:
# Return None to allow for workarounds
return None
def get_brep_face_uuid(entity, group_name):
"""Handle the special case of split brep faces with the same uuid"""
uuid_att = entity.attributes.itemByName(group_name, "uuid")
return get_brep_face_uuid_from_attribute(entity, uuid_att)
def get_brep_face_uuid_from_attribute(entity, uuid_att):
"""Handle the special case of split brep faces with the same uuid"""
if uuid_att is None:
return None
# First check if this Face was previously split
if (uuid_att.otherParents is not None and
uuid_att.otherParents.count > 0):
# Now we know we have a split face
# because it has another parent
# Next lets see if this face is the original face
# or if was newly created from the split
for parent in uuid_att.otherParents:
if isinstance(parent, adsk.fusion.BRepFace):
is_original = entity.tempId == parent.tempId
# The original face keeps its uuid
if is_original:
return uuid_att.value
# Now we know we are the newly created split face
# so we have to make a uuid
# Due to a bug in Fusion we can't assign a new id
# as an attribute on the split face, so we append the
# number of parents at the end of the uuid
uuid_concat = f"{uuid_att.value}_{uuid_att.otherParents.count}"
return str(uuid.uuid3(uuid.NAMESPACE_URL, uuid_concat))
else:
# The face was not split, so we are good to go
return uuid_att.value
def get_profile_uuid(profile):
"""Sketch profiles don"t support attributes
so we cook up a UUID from the curves UUIDs"""
profile_curves = []
for loop in profile.profileLoops:
for curve in loop.profileCurves:
sketch_ent = curve.sketchEntity
profile_curves.append(get_uuid(sketch_ent))
# Concat all the uuids from the curves
curve_uuids = "_".join(profile_curves)
# Generate a UUID by hashing the curve_uuids
return str(uuid.uuid3(uuid.NAMESPACE_URL, curve_uuids))
def set_uuid(entity, group_name="Dataset"):
"""Set a uuid of an entity
Returns the new or existing uuid of the entity"""
if isinstance(entity, adsk.fusion.BRepFace):
return set_brep_face_uuid(entity, group_name)
uuid_att = entity.attributes.itemByName(group_name, "uuid")
if uuid_att is None:
unique_id = uuid.uuid1()
entity.attributes.add(group_name, "uuid", str(unique_id))
return str(unique_id)
else:
return uuid_att.value
def set_brep_face_uuid(entity, group_name="Dataset"):
"""Handle the special case of split brep faces with a parent"""
uuid_att = entity.attributes.itemByName(group_name, "uuid")
entity_uuid = get_brep_face_uuid_from_attribute(entity, uuid_att)
# uuid will always be returned if this is a split face
# as a special version of the parent uuid is returned
if entity_uuid is not None:
# We already have a uuid, so use it
return entity_uuid
# Add a uuid directly to the face
unique_id = uuid.uuid1()
entity.attributes.add(group_name, "uuid", str(unique_id))
return str(unique_id)
def reset_uuid(entity, group_name="Dataset"):
"""Reset a uuid of an entity
Returns the reset uuid of the entity"""
unique_id = uuid.uuid1()
entity.attributes.add(group_name, "uuid", str(unique_id))
return str(unique_id)
def set_custom_uuid(entity, custom_uuid, group_name="Dataset"):
entity.attributes.add(group_name, "uuid", custom_uuid)
def set_uuids_for_collection(entities, group_name="Dataset"):
for ent in entities:
        # Strange -- We sometimes get a None entity in the constraints array
        # when we have a SketchFixedSpline in the sketch. We guard against
        # that crashing the threads here
if ent is not None:
set_uuid(ent, group_name)
def get_uuids_for_collection(entities, group_name="Dataset"):
"""Return a list of uuids from a collection"""
uuids = []
for ent in entities:
        # Strange -- We sometimes get a None entity in the constraints array
        # when we have a SketchFixedSpline in the sketch. We guard against
        # that crashing the threads here
if ent is not None:
            ent_uuid = get_uuid(ent)
            uuids.append(ent_uuid)
return uuids
def set_uuids_for_sketch(sketch, group_name="Dataset"):
# Work around to ensure the profiles are populated
# on a newly opened design
sketch.isComputeDeferred = True
sketch.isVisible = False
sketch.isVisible = True
sketch.isComputeDeferred = False
# We are only interested points and curves
set_uuids_for_collection(sketch.sketchCurves)
set_uuids_for_collection(sketch.sketchPoints)
def get_temp_ids_from_collection(collection):
"""From a collection, make a set of the tempids"""
id_set = set()
for entity in collection:
if entity is not None:
temp_id = entity.tempId
id_set.add(temp_id)
return id_set
| 5,671 |
scripts/src/imports.py
|
2320sharon/segmentation_zoo-1
| 7 |
2024894
|
# Written by Dr <NAME>, Marda Science LLC
# for the USGS Coastal Change Hazards Program
#
# MIT License
#
# Copyright (c) 2021, Marda Science LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from model_imports import *
# import os, shutil, json
# from skimage.io import imsave, imread
# from skimage.filters.rank import median
# from skimage.morphology import disk
# from scipy.ndimage import rotate
from glob import glob
# import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
##========================================================
def fromhex(n):
""" hexadecimal to integer """
return int(n, base=16)
##========================================================
def label_to_colors(
img,
mask,
alpha,#=128,
colormap,#=class_label_colormap, #px.colors.qualitative.G10,
color_class_offset,#=0,
do_alpha,#=True
):
"""
Take MxN matrix containing integers representing labels and return an MxNx4
matrix where each label has been replaced by a color looked up in colormap.
colormap entries must be strings like plotly.express style colormaps.
alpha is the value of the 4th channel
color_class_offset allows adding a value to the color class index to force
use of a particular range of colors in the colormap. This is useful for
example if 0 means 'no class' but we want the color of class 1 to be
colormap[0].
"""
colormap = [
tuple([fromhex(h[s : s + 2]) for s in range(0, len(h), 2)])
for h in [c.replace("#", "") for c in colormap]
]
cimg = np.zeros(img.shape[:2] + (3,), dtype="uint8")
minc = np.min(img)
maxc = np.max(img)
for c in range(minc, maxc + 1):
cimg[img == c] = colormap[(c + color_class_offset) % len(colormap)]
cimg[mask==1] = (0,0,0)
if do_alpha is True:
return np.concatenate(
(cimg, alpha * np.ones(img.shape[:2] + (1,), dtype="uint8")), axis=2
)
else:
return cimg
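# A hedged usage sketch (the label array and hex colormap below are illustrative
# assumptions, not values taken from this project):
#
#   labels = np.random.randint(0, 3, (8, 8))
#   rgba = label_to_colors(labels, labels == 0, alpha=128,
#                          colormap=["#3366CC", "#DC3912", "#FF9900"],
#                          color_class_offset=0, do_alpha=True)
#   rgba.shape  # -> (8, 8, 4); masked pixels come out black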
##========================================================
def rescale_array(dat,
mn,
mx):
'''
rescales an input dat between mn and mx
'''
m = min(dat.flatten())
M = max(dat.flatten())
return (mx-mn)*(dat-m)/(M-m)+mn
##====================================
def standardize(img):
#standardization using adjusted standard deviation
N = np.shape(img)[0] * np.shape(img)[1]
s = np.maximum(np.std(img), 1.0/np.sqrt(N))
m = np.mean(img)
img = (img - m) / s
del m, s, N
#
if np.ndim(img)==2:
img = np.dstack((img,img,img))
return img
# ##========================================================
def inpaint_nans(im):
ipn_kernel = np.array([[1,1,1],[1,0,1],[1,1,1]]) # kernel for inpaint_nans
nans = np.isnan(im)
while np.sum(nans)>0:
im[nans] = 0
vNeighbors = convolve2d((nans==False),ipn_kernel,mode='same',boundary='symm')
im2 = convolve2d(im,ipn_kernel,mode='same',boundary='symm')
im2[vNeighbors>0] = im2[vNeighbors>0]/vNeighbors[vNeighbors>0]
im2[vNeighbors==0] = np.nan
im2[(nans==False)] = im[(nans==False)]
im = im2
nans = np.isnan(im)
return im
#-----------------------------------
def plot_seg_history_iou(history, train_hist_fig):
"""
"plot_seg_history_iou(history, train_hist_fig)"
This function plots the training history of a model
INPUTS:
* history [dict]: the output dictionary of the model.fit() process, i.e. history = model.fit(...)
* train_hist_fig [string]: the filename where the plot will be printed
OPTIONAL INPUTS: None
GLOBAL INPUTS: None
OUTPUTS: None (figure printed to file)
"""
n = len(history.history['val_loss'])
plt.figure(figsize=(20,10))
plt.subplot(121)
plt.plot(np.arange(1,n+1), history.history['mean_iou'], 'b', label='train accuracy')
plt.plot(np.arange(1,n+1), history.history['val_mean_iou'], 'k', label='validation accuracy')
plt.xlabel('Epoch number', fontsize=10); plt.ylabel('Mean IoU Coefficient', fontsize=10)
plt.legend(fontsize=10)
plt.subplot(122)
plt.plot(np.arange(1,n+1), history.history['loss'], 'b', label='train loss')
plt.plot(np.arange(1,n+1), history.history['val_loss'], 'k', label='validation loss')
plt.xlabel('Epoch number', fontsize=10); plt.ylabel('Loss', fontsize=10)
plt.legend(fontsize=10)
# plt.show()
plt.savefig(train_hist_fig, dpi=200, bbox_inches='tight')
#
# #-----------------------------------
# def crf_refine(label, img, nclasses = 2, theta_col=100, theta_spat=3, compat=120):
# """
# "crf_refine(label, img)"
# This function refines a label image based on an input label image and the associated image
# Uses a conditional random field algorithm using spatial and image features
# INPUTS:
# * label [ndarray]: label image 2D matrix of integers
# * image [ndarray]: image 3D matrix of integers
# OPTIONAL INPUTS: None
# GLOBAL INPUTS: None
# OUTPUTS: label [ndarray]: label image 2D matrix of integers
# """
#
# gx,gy = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))
# # print(gx.shape)
# img = np.dstack((img,gx,gy))
#
# H = label.shape[0]
# W = label.shape[1]
# U = unary_from_labels(1+label,nclasses,gt_prob=0.51)
# d = dcrf.DenseCRF2D(H, W, nclasses)
# d.setUnaryEnergy(U)
#
# # to add the color-independent term, where features are the locations only:
# d.addPairwiseGaussian(sxy=(theta_spat, theta_spat),
# compat=3,
# kernel=dcrf.DIAG_KERNEL,
# normalization=dcrf.NORMALIZE_SYMMETRIC)
# feats = create_pairwise_bilateral(
# sdims=(theta_col, theta_col),
# schan=(2,2,2),
# img=img,
# chdim=2)
#
# d.addPairwiseEnergy(feats, compat=compat,kernel=dcrf.DIAG_KERNEL,normalization=dcrf.NORMALIZE_SYMMETRIC)
# Q = d.inference(20)
# kl1 = d.klDivergence(Q)
# return np.argmax(Q, axis=0).reshape((H, W)).astype(np.uint8), kl1
# #
#
# ###############################################################
# ### DATA FUNCTIONS
# ###############################################################
#
#
# #-----------------------------------
# def seg_file2tensor(f):
# """
# "seg_file2tensor(f)"
# This function reads a jpeg image from file into a cropped and resized tensor,
# for use in prediction with a trained segmentation model
# INPUTS:
# * f [string] file name of jpeg
# OPTIONAL INPUTS: None
# OUTPUTS:
# * image [tensor array]: unstandardized image
# GLOBAL INPUTS: TARGET_SIZE
# """
# bits = tf.io.read_file(f)
# if 'jpg' in f:
# image = tf.image.decode_jpeg(bits)
# elif 'png' in f:
# image = tf.image.decode_png(bits)
#
# w = tf.shape(image)[0]
# h = tf.shape(image)[1]
# tw = TARGET_SIZE[0]
# th = TARGET_SIZE[1]
# resize_crit = (w * th) / (h * tw)
# image = tf.cond(resize_crit < 1,
# lambda: tf.image.resize(image, [w*tw/w, h*tw/w]), # if true
# lambda: tf.image.resize(image, [w*th/h, h*th/h]) # if false
# )
#
# nw = tf.shape(image)[0]
# nh = tf.shape(image)[1]
# image = tf.image.crop_to_bounding_box(image, (nw - tw) // 2, (nh - th) // 2, tw, th)
# # image = tf.cast(image, tf.uint8) #/ 255.0
#
# return image
#
#
| 8,569 |
app/api/serializers.py
|
MLH-Sprint-2/MailSafe
| 0 |
2025253
|
# Serialize the Alias Model
from rest_framework import serializers
from .models import Aliases
class AliasesSerializers(serializers.ModelSerializer):
class Meta:
model = Aliases
        # use the "__all__" keyword rather than listing each field
fields = "__all__"
| 261 |
domain/controller/kbrd_controller.py
|
gajoc/indexing-api
| 0 |
2025155
|
from domain.controller.common_controller import CommonController
from utils.constants import UserAction, KEYBOARD_BUTTON_2_ACTION
class KeyboardController(CommonController):
def __init__(self):
super().__init__()
self._to_command = KEYBOARD_BUTTON_2_ACTION
@property
def user_prompt_info(self):
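        # Prompt is in Polish; in English: "What do we do? 1.data 2.copy 3.next 4.previous 5.unreadable 6.end"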
return 'Co robimy? 1.dane 2.kopia 3.nastepny 4.poprzedni 5.nieczytelny 6.koniec\n'
def wait_for_user_action(self) -> UserAction:
user_choice = input(self.user_prompt_info)
return self._to_command.get(user_choice)
| 569 |
db/core/log/logbase.py
|
adriangrepo/qreservoir
| 2 |
2024235
|
from sqlalchemy import Boolean, Column, Integer, String
from sqlalchemy import REAL
from sqlalchemy import ForeignKey
from db.base import Base
class LogBase(Base):
""" generated source for class LogBase """
__tablename__ = 'log'
qr_classname = "Log"
id = Column(Integer, primary_key=True, nullable = False)
#Parents
log_set_id = Column(Integer, ForeignKey('log_set.id'))
log_domain_id = Column(Integer, ForeignKey('log_domain.id'))
log_service_id = Column(Integer, ForeignKey('log_service.id'))
parameter_set_id = Column(Integer, ForeignKey('parameter_set.id'))
well_id = Column(Integer, ForeignKey('well.id'))
#LogType.name property
#TODO change this to store log_type_uid
log_type_name = Column(String(), nullable=False)
#ZType
#TODO change this to store z_measure_type_uid
z_measure_type_name = Column(String(), nullable = False)
#stored in log service
z_measure_reference = Column(String(), nullable = True)
#JSON string
log_data_str = Column(String(), nullable=True)
#JSON string
z_measure_data_str = Column(String(), nullable=True)
consistent_step = Column(Boolean)
#depth calculated as per header start/stop/step
honour_las_depth_values = Column(Boolean)
z_measure_min = Column(REAL, nullable = False)
z_measure_max = Column(REAL, nullable = False)
z_measure_step = Column(REAL, nullable = True)
total_samples = Column(Integer, nullable=True)
# even though run number is in logService, could be multiple runs stored in the las file (eg las v3.0)
run_number = Column(Integer, nullable=True)
value_min = Column(REAL, nullable = True)
value_max = Column(REAL, nullable = True)
is_logarithmic = Column(Boolean)
#instead of a global null store it per log?
null = Column(REAL, nullable = True)
#statistics
mean = Column(REAL, nullable = True)
median = Column(REAL, nullable = True)
stdev = Column(REAL, nullable = True)
# rest are for display purposes
blocked = Column(Boolean)
active = Column(Boolean)
pseudo_well_log = Column(Boolean)
'''
plot_min = Column(REAL, nullable = True)
plot_max = Column(REAL, nullable = True)
rgba = Column(String(), nullable = True)
trace_width = Column(REAL, nullable = True)
trace_style = Column(String(), nullable = True)
'''
log_plot_left = Column(REAL, nullable = True)
log_plot_right = Column(REAL, nullable = True)
log_plot_default = Column(REAL, nullable = True)
#if a logarithmic plot
log_plot_log_cycles = Column(Integer, nullable=True)
log_plot_points_on = Column(Boolean)
histogram_left = Column(REAL, nullable = True)
histogram_right = Column(REAL, nullable = True)
histogram_default = Column(REAL, nullable = True)
cross_plot_left = Column(REAL, nullable = True)
cross_plot_right = Column(REAL, nullable = True)
cross_plot_default = Column(REAL, nullable = True)
#MPL uses float width, Qt uses int so need to convert using NumberUtils
line_width = Column(REAL, nullable = True)
line_style = Column(String(), nullable = True)
point_size = Column(REAL, nullable = True)
point_style = Column(String(), nullable = True)
#hexcode
rgb = Column(String(), nullable = True)
alpha = Column(String(), nullable = True)
source = Column(String(), nullable = True)
history_set_id = Column(Integer, ForeignKey('history_set.id'))
name = Column(String(), nullable = False)
comments = Column(String(), nullable = True)
def __init__(self):
self.importLog = bool()
#convert to SI
self.unit = str()
self.validName = bool()
self.isDuplicate = bool()
self.fileMnemonic = str()
self.fileUnit = str()
self.fileDescription = str()
self.rowIndex = int()
#Real list
self.log_data = []
#Real list
self.z_measure_data = []
| 3,994 |
CodeGenerator/CodeGeneratorBase.py
|
zhang15780/Code-generator
| 1 |
2024610
|
# -*- coding: utf-8 -*-
# @Time : 2019/10/16 12:48
# @Author :
# @Site :
# @File : CodeGeneratorBase.py
# @Software: PyCharm
from abc import ABCMeta, abstractmethod
import os
from jinja2 import TemplateNotFound
from CodeGenerator.CodeGeneratorCore import CoreCodeGenerator
from dbConnect.sqlutils import mysql_db
class GeneratorBase(metaclass=ABCMeta):
"""
模板代码生成器基类
子类继承该类,实现formatter_data方法格式化数据到模板对象需要的数据类型
template 指定模板路径及名称
template_dir jinja查找模板目录
"""
template = None
BaseDir = os.path.dirname(os.path.abspath(__name__))
template_dir = os.path.join(BaseDir, 'template')
def __init__(self, db=None):
self._generator = None
self.db = db
self.tb_name = None
self.table_info = None
self._init()
def _init(self):
"""
初始化代码生成器核心对象,指定模板目录
:return:
"""
self._generator = CoreCodeGenerator(self.template_dir)
if self.db is None:
self.db = mysql_db
def render(self, *args, **kwargs):
"""
渲染模板
:param args:
:param kwargs:
:return:
"""
if not self.template:
            raise TemplateNotFound('Internal error: template file not found')
self._generator.set_template(self.template)
self._generator.render(*args, **kwargs)
def save(self, out_file_path, encoding='utf-8'):
"""
保存文件
:param out_file_path:
:param encoding:
:return:
"""
save_path = os.path.join(out_file_path, self.template.replace('.tpl', ''))
self._generator.save(save_path, encoding)
def query_data(self, db_name, tb_name):
self.tb_name = tb_name
query_sql = r"""SELECT COLUMN_NAME as name,COLUMN_COMMENT as content,DATA_TYPE as datatype
FROM information_schema.COLUMNS
WHERE TABLE_NAME='{}' AND TABLE_SCHEMA='{}';""".format(tb_name, db_name)
self.table_info = self.db.query(query_sql)
@abstractmethod
def formatter_data(self, *args, **kwargs):
"""
针对子类进行数据格式化
:param args:
:param kwargs:
:return:
"""
pass
class GeneratorTeseHtml(GeneratorBase):
"""
test.html模板渲染类
"""
template = 'test.html.tpl'
def __init__(self, db=None):
super(GeneratorTeseHtml, self).__init__(db)
def formatter_data(self, other_data, expect_field=['ID']):
filter_data = []
for column in self.table_info:
if column['name'] not in expect_field:
filter_data.append(column)
data = {
'columns': filter_data
}
data.update(dict(other_data))
return data
class GeneratorTesePY(GeneratorBase):
"""
test.py模板文件渲染类
"""
template = 'test.py.tpl'
def __init__(self, db=None):
super(GeneratorTesePY, self).__init__(db)
def formatter_data(self, other_data, expect_field=['ID']):
filter_data = []
for column in self.table_info:
if column['name'] not in expect_field:
filter_data.append(column)
data = {
'columns': filter_data
}
data.update(dict(other_data))
return data
class GeneratorViewPY(GeneratorBase):
"""
test.py模板文件渲染类
"""
template = 'views.py.tpl'
def __init__(self, db=None):
super(GeneratorViewPY, self).__init__(db)
def formatter_data(self, other_data, expect_field=['ID']):
filter_data = []
time_list = []
for column in self.table_info:
if column['name'] not in expect_field:
filter_data.append(column)
if column['name'].lower().find('time') != -1:
time_list.append(column['name'])
capitalize_name = self.tb_name.split('_')[-1].capitalize()
data = {
'columns': filter_data,
'tbname': self.tb_name,
'BaseName': capitalize_name + 'Base',
'queryName': 'Query' + capitalize_name,
'createName': 'Create' + capitalize_name,
'removeName': 'Remove' + capitalize_name,
'formatter_time_list': time_list,
}
data.update(dict(other_data))
return data
class GeneratorUrlPY(GeneratorBase):
"""
test.py模板文件渲染类
"""
template = 'url.py.tpl'
def __init__(self, db=None):
super(GeneratorUrlPY, self).__init__(db)
def formatter_data(self, other_data, expect_field=['ID']):
filter_data = []
time_list = []
for column in self.table_info:
if column['name'] not in expect_field:
filter_data.append(column)
if column['name'].lower().find('time') != -1:
time_list.append(column['name'])
capitalize_name = self.tb_name.split('_')[-1].capitalize()
data = {
'columns': filter_data,
'baseurl': self.tb_name.split('_')[-1].lower(),
'BaseName': capitalize_name + 'Base',
'queryName': 'Query' + capitalize_name,
'createName': 'Create' + capitalize_name,
'removeName': 'Remove' + capitalize_name,
}
data.update(dict(other_data))
return data
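# A hedged usage sketch (the database name, table name, and output directory are
# illustrative assumptions, and it presumes mysql_db is a working connection):
if __name__ == '__main__':
    generator = GeneratorViewPY()
    generator.query_data('demo_db', 'tb_user')
    context = generator.formatter_data({'author': 'demo'})
    generator.render(**context)
    generator.save('./generated')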
| 5,293 |
students/K33402/practical_works/Malinina_Anna/simple_django_web_project/django_project_malinina/project_first_app/forms.py
|
ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021
| 4 |
2024831
|
from django import forms
from .models import CarOwner
class CarOwnerForm(forms.ModelForm):
class Meta:
model = CarOwner
fields = [
"name",
"surname",
"birthday",
"user_info"
]
| 256 |
src/proytit1/pages/algoritmo.py
|
prodriguez12/proytit1
| 0 |
2025345
|
import os
def organizador(ruta_entrada, ruta_salida):
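    # Reads a target time ("hours minutes") and a lap count N from ruta_entrada,
    # accumulates the lap times ("minutes seconds") that follow, stops early once
    # the target time is exceeded, and writes the total after M laps to ruta_salida.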
archivo = open(ruta_entrada,'r')
linea = archivo.readline()
aux=linea.split(" ")
horaTotal=int(aux[0])
minutoTotal=int(aux[1])
h = 0
m = 0
s = 0
linea = archivo.readline()
N = int(linea)
M = 0
while N>0:
N = N-1
M = M+1
linea = archivo.readline()
aux = linea.split(" ")
s = s + int(aux[1])
if s > 60:
m = m + 1
s = s - 60
if int(aux[0])>60:
            h = h + int(aux[0])//60
m = m + int(aux[0])%60
else:
m = m + int(aux[0])
if m > 60:
h = h + 1
m = m - 60
if horaTotal<h and minutoTotal<m:
N=0
salida = open(ruta_salida, 'w')
salida.write("Despues de "+str(M)+" vueltas el tiempo total del recorrido es: "
+str(h)+":"+str(m)+":"+str(s)+".")
| 936 |
order/tests.py
|
abdukhashimov/django-rest-gsoft
| 0 |
2024771
|
from django.test import TestCase
from order.models import Order
class TestOrder(TestCase):
def test_orders_string_repr(self):
        order = Order.objects.create(
chat_id='199945',
first_name='Max',
last_name='Max',
company_name='Greatsoft',
phone_number='+998935789768',
comment='this is the comment'
)
        self.assertEqual(str(order), '+998935789768')
| 446 |
latex_converter.py
|
davi-juliano/ClassPack
| 0 |
2025330
|
import matplotlib.pyplot as plt
import numpy as np
def convert_tex_document(filename):
import tex2pix
print("A")
f = open(filename)
print("B")
r = tex2pix.Renderer(f)
print("C")
r.mkpdf(filename.replace(".tex", ".pdf"))
print("D")
def convert_coords_map(mat, timestamp):
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LogNorm
fig, ax = plt.subplots(1, 1)
c = ax.pcolor(mat, edgecolors='w', cmap='inferno', linewidths=4)
ax.set_title('Representação do mapeamento de sala')
fig.tight_layout()
plt.savefig(str(timestamp)+'.pdf')
if __name__=="__main__":
distances_draw(" 0011209.json")
| 643 |
pymysql/tests/test_nextset.py
|
carsonip/PyMySQL
| 53 |
2025146
|
import pytest
import pymysql
from pymysql import util
from pymysql.tests import base
from pymysql.constants import CLIENT
class TestNextset(base.PyMySQLTestCase):
def test_nextset(self):
con = self.connect(
init_command='SELECT "bar"; SELECT "baz"',
client_flag=CLIENT.MULTI_STATEMENTS)
cur = con.cursor()
cur.execute("SELECT 1; SELECT 2;")
self.assertEqual([(1,)], list(cur))
r = cur.nextset()
self.assertTrue(r)
self.assertEqual([(2,)], list(cur))
self.assertIsNone(cur.nextset())
def test_skip_nextset(self):
cur = self.connect(client_flag=CLIENT.MULTI_STATEMENTS).cursor()
cur.execute("SELECT 1; SELECT 2;")
self.assertEqual([(1,)], list(cur))
cur.execute("SELECT 42")
self.assertEqual([(42,)], list(cur))
def test_nextset_error(self):
con = self.connect(client_flag=CLIENT.MULTI_STATEMENTS)
cur = con.cursor()
for i in range(3):
cur.execute("SELECT %s; xyzzy;", (i,))
self.assertEqual([(i,)], list(cur))
with self.assertRaises(pymysql.ProgrammingError):
cur.nextset()
self.assertEqual((), cur.fetchall())
def test_ok_and_next(self):
cur = self.connect(client_flag=CLIENT.MULTI_STATEMENTS).cursor()
cur.execute("SELECT 1; commit; SELECT 2;")
self.assertEqual([(1,)], list(cur))
self.assertTrue(cur.nextset())
self.assertTrue(cur.nextset())
self.assertEqual([(2,)], list(cur))
self.assertFalse(bool(cur.nextset()))
@pytest.mark.xfail
def test_multi_cursor(self):
con = self.connect(client_flag=CLIENT.MULTI_STATEMENTS)
cur1 = con.cursor()
cur2 = con.cursor()
cur1.execute("SELECT 1; SELECT 2;")
cur2.execute("SELECT 42")
self.assertEqual([(1,)], list(cur1))
self.assertEqual([(42,)], list(cur2))
r = cur1.nextset()
self.assertTrue(r)
self.assertEqual([(2,)], list(cur1))
self.assertIsNone(cur1.nextset())
def test_multi_statement_warnings(self):
con = self.connect(
init_command='SELECT "bar"; SELECT "baz"',
client_flag=CLIENT.MULTI_STATEMENTS)
cursor = con.cursor()
try:
cursor.execute('DROP TABLE IF EXISTS a; '
'DROP TABLE IF EXISTS b;')
except TypeError:
self.fail()
#TODO: How about SSCursor and nextset?
# It's very hard to implement correctly...
| 2,579 |