hexsha
stringlengths 40
40
| size
int64 6
782k
| ext
stringclasses 7
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
237
| max_stars_repo_name
stringlengths 6
72
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
list | max_stars_count
int64 1
53k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
184
| max_issues_repo_name
stringlengths 6
72
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
list | max_issues_count
int64 1
27.1k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
184
| max_forks_repo_name
stringlengths 6
72
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
list | max_forks_count
int64 1
12.2k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 6
782k
| avg_line_length
float64 2.75
664k
| max_line_length
int64 5
782k
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0fc917207e27b82f9f95bd908b31449427f8a5d4
| 17,042 |
py
|
Python
|
marsyas-vamp/marsyas/scripts/Python/scheduler_libs.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/scripts/Python/scheduler_libs.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/scripts/Python/scheduler_libs.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) 1998-2007 George Tzanetakis <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#######
# /author Neil Burroughs [email protected]
# /version 1 January 10, 2007
#
# This script generates library functions for the scheduler expression parser
# language stuff. This script is not guaranteed to be bug free or even useful.
#
# Usage:
#-----------------------------------
# 1: lib Foo|F.Bar|B
# 2:
# 3: pure mrs_string fun|alt(mrs_real a, mrs_natural b)
# 4: mrs_string s="hello";
# 5: mrs_bool x;
# 6: {
# 7: mrs_natural z=a+b;
# 8: if (x) { s=s+" "+ltos(z); }
# 9: x = z<0;
# 10: return s;
# 11: }
#-----------------------------------
# 1. library definition starts with keyword 'lib' the names following denote a
# path to the library. The true path is Foo.Bar, all functions defined after
# this statement until a new lib definition will be in this library. This
# means that the function fun is called by 'Foo.Bar.fun'. Alternate names or
# aliases for portions of the path can be defined using the | symbol. In the
# above example F is an alias for Foo so the path to fun could also be
# written as 'Foo.B.fun' or 'F.B.fun' etc.
#
# 3. the function definition may start with 'pure' where pure implies that if
# the parameters to the function are constants then the function can be
# evaluated at parse time to a constant, ie no side-effects. If pure isn't
# specified then the function is not pure. the return type must be a type
# supported by the ExVal class (names starting with 'mrs_'). The function
# name can also have aliases divided by the | symbol where the first name
# is the true name. Parameters must be defined using the 'mrs_' names.
# 4. Normally functions do not have state but as a bonus variables whose values
# persist may defined after the parameters definition and prior to the
# opening function body brace. These types can be the 'mrs_' types or valid
# C++ types.
# 6. The function body begins with a opening brace {.
# 7-10. The function body contains valid C++ code and will likely use the
# parameter values defined on line 3.
# 11. The function body ends with a closing brace }.
#
# Built in functions:
# A few built in functions for string conversions can be used:
# ltos(v) - converts v from a mrs_natural value to a std::string
# dtos(v) - converts v from a mrs_real value to a std::string
# btos(v) - converts v from a mrs_bool value to a std::string
# stol(v) - converts v from a std::string value to a mrs_natural
#
import re
import getopt
import sys

# C/C++ type spellings grouped by the ExVal category they map onto.
natural_t=['char','unsigned char','signed char',
           'short','unsigned short','signed short',
           'int','unsigned int','signed int',
           'long','unsigned long','signed long',
           'mrs_natural']
real_t=['float','double','mrs_real']
bool_t=['mrs_bool','bool']
string_t=['mrs_string','string','std::string']
timer_t=['mrs_timer','TmTimer**']
# ExVal accessor method name used to extract each parameter category.
methods=[
    ('toNatural',['mrs_natural']),#natural_t),
    ('toReal',['mrs_real']), #real_t),
    ('toBool',bool_t),
    ('toString',string_t),
    ('toTimer',timer_t)
]
# mrs_* types that must be spelled differently in the generated C++.
conversions=[
    ('std::string',string_t),
    ('bool',bool_t),
    ('TmTimer**',timer_t)
]
# Default initializer literal for each type category.
defaults=[
    ('0', natural_t),
    ('0.0',real_t),
    ('false',bool_t),
    ('""',string_t),
    ('NULL',timer_t)
]
# Invert the grouped tables above into flat lookup dicts keyed by type name.
default_value={}
for (a,b) in defaults:
    for t in b: default_value[t]=a
to_method={}
for (t,xs) in methods:
    for x in xs:
        to_method[x]=t
conversion={}
for (t,xs) in conversions:
    for x in xs:
        conversion[x]=t
# Types accepted as a function return type in a declaration.
valid_types=['mrs_string','mrs_bool','mrs_natural','mrs_real','mrs_timer']
def trim_nl(line):
    """Return *line* truncated at the first newline and stripped of
    surrounding whitespace.

    None is treated as an empty string so callers can pass optional text.
    """
    if line is None:  # 'is None', not '== None'
        return ''
    # partition() keeps only the text before the first '\n' (or the whole
    # string when there is none), replacing the manual find()/slice dance.
    return line.partition('\n')[0].strip()
def count_leading_whsp(line):
    """Return the number of leading space characters in *line*.

    A line consisting only of spaces (or an empty line) yields len(line).
    """
    # The original used the Python-2-only '<>' operator; '!=' behaves
    # identically and also works on Python 3.
    for i, ch in enumerate(line):
        if ch != ' ':
            return i
    return len(line)
class Reader:
    """Parses a library-definition file into ("lib", ...)/("fun", ...) tuples.

    The input format is described in the header of this file: a 'lib' line
    names the current library path (with '|'-separated aliases), and each
    function is declared with an optional 'pure' flag, an mrs_* return
    type, aliased names, parameters, optional persistent member variables
    and a braced C++ body.
    """

    def __init__(self):
        self.me = 0
        self.totypes = {}
        self.toctypes = {}
        self.lnum = 0      # current input line, reported by complain()
        self.errors = 0    # number of complaints issued so far

    def complain(self, msg):
        """Report a parse error tagged with the current line number."""
        # Single-argument parenthesized print works identically under
        # Python 2 and Python 3 (no tuple is formed).
        # NOTE(review): self.lnum is never advanced while reading, so this
        # always reports line 0 — confirm whether that is intended.
        print("Error(line:" + str(self.lnum) + "): " + str(msg))
        self.errors += 1

    def reduce_indent(self, lines):
        """Strip the common leading-space indent shared by all lines."""
        # Discover the minimum leading-whitespace amount.
        lwsp = -1
        for line in lines:
            x = count_leading_whsp(line)
            if lwsp < 0 or x < lwsp:
                lwsp = x
        # Remove that much leading whitespace from every line, in place.
        if lwsp > 0:
            i = 0
            while i < len(lines):
                lines[i] = lines[i][lwsp:]
                i += 1
        return lines

    def un_indent(self, lines, body_start, min_indent):
        """Strip min_indent leading characters from lines[body_start:]."""
        i = len(lines) - 1
        while i >= body_start:
            lines[i] = lines[i][min_indent:]
            i -= 1

    def split_brackets(self, line, state):
        """Feed one line into the brace-matching scanner.

        *state* is a (pos, count, quotes, body) tuple: pos is 0 before the
        opening '{', 1 inside the body, 2 after the matching '}'; count is
        the current brace depth; quotes tracks whether we are inside a
        string literal; body accumulates collected text pieces.  Returns
        the updated state tuple.
        """
        # Python 3 removed tuple parameters (PEP 3113); unpack explicitly.
        # Callers still pass the same 4-tuple positionally.
        (pos, count, quotes, body) = state
        new_line = ''
        # Re-open the last partial piece when continuing a line.
        if (pos == 0 and len(body) > 0) or pos == 2:
            new_line = body.pop()
        for c in line:
            if not quotes:
                if c == '{':
                    count += 1
                    if pos == 0:
                        pos = 1  # discard the function's opening '{'
                        if len(new_line) > 0:
                            body.append(new_line)
                            new_line = ''
                    else:
                        new_line += c
                elif c == '}':
                    count -= 1
                    if count > 0:
                        new_line += c
                    elif count == 0:
                        # Outermost '}' closes the function body.
                        pos = 2
                        if len(new_line) > 0:
                            body.append(new_line)
                            new_line = ''
                        body.append('##__FUNCTION_END__')
                    else:
                        self.complain("Too many '}' brackets")
                elif c == '"':
                    quotes = True
                    new_line += c
                else:
                    new_line += c
            elif c == '"':
                quotes = False
                new_line += c
            else:
                new_line += c
        if len(new_line) > 0:
            body.append(new_line)
        return (pos, count, quotes, body)

    def parse_fun_name(self, line):
        """Parse 'name|alias|...' and return (true_name, alias_spec).

        Returns None on error (callers test the result against None).
        """
        # Bugfix: the replacement must be a raw string — in a plain string
        # '\1\2' is the characters chr(1)chr(2), not group backreferences.
        line = re.sub(r'(\w)\s+(\|)', r'\1\2', line)  # 'name |'  -> 'name|'
        line = re.sub(r'(\|)\s+(\w)', r'\1\2', line)  # '| alias' -> '|alias'
        # Bugfix: str.strip() returns a new string; the result was discarded.
        line = line.strip()
        if line.find(' ') >= 0:
            self.complain("invalid function name declaration")
            # Bugfix: return None, not '' — callers check for None.
            return None
        p = line.find('|')
        if p < 0:
            fun_name = line
        else:
            fun_name = line[:p]
        return (fun_name, line)

    def parse_lib_name(self, line):
        """Parse 'Foo|F.Bar|B' and return (class_prefix, dotted_path, spec).

        class_prefix capitalizes and concatenates the true path components
        ('FooBar'); dotted_path is the true path ('Foo.Bar').  Returns None
        on error.
        """
        # Raw replacement strings, as in parse_fun_name.
        line = re.sub(r'(\w)\s+(\||\.)', r'\1\2', line)
        line = re.sub(r'(\||\.)\s+(\w)', r'\1\2', line)
        line = line.strip()  # bind the stripped result
        if line.find(' ') >= 0:
            self.complain("invalid function name declaration")
            return None
        xs = line.split('.')
        # Keep only the true (first) name of each '|'-aliased component.
        ys = []
        for x in xs:
            p = x.find('|')
            if p < 0:
                ys.append(x)
            else:
                ys.append(x[:p])
        lib_name = ''
        lib_path = ''
        for y in ys:
            lib_name = lib_name + y.capitalize()
            if len(lib_path) == 0:
                lib_path = y
            else:
                lib_path = lib_path + '.' + y
        return (lib_name, lib_path, line)

    def chop(self, regex, split_char, params):
        """Split *params* on *split_char* and match each piece with *regex*.

        Returns a list of trimmed group-lists, [] for empty input, or None
        when any non-empty piece fails to match.
        """
        r = re.compile(regex)
        params = trim_nl(params)
        if len(params) == 0:
            return []
        ps = params.split(split_char)
        nps = []
        for p in ps:
            p = trim_nl(p)
            if len(p) > 0:
                m = r.match(p)
                if m:
                    nps.append([trim_nl(a) for a in m.groups()])
                else:
                    return None
        return nps

    def fix_to_methods(self, params):
        """Append the ExVal accessor name to each [type, name] pair.

        (Currently unused — form_params_tuple performs the same fix-up.)
        """
        new_params = []
        for p in params:
            if p[0] in to_method:
                # Bugfix: to_method is a dict — index it, don't call it —
                # and list.append returns None, so mutate p then append p.
                p.append(to_method[p[0]])
                new_params.append(p)
            else:
                self.complain("Invalid parameter type " + p[0])
                return None
        return new_params

    def form_params_tuple(self, params):
        """Return ('(t1,t2,...)', params) with each parameter list extended
        by its ExVal accessor name, or None on an invalid type."""
        s = '('
        for p in params:
            if p[0] in to_method:
                # Bugfix: the original collected append()'s None return
                # value; mutate the parameter list in place instead.
                p.append(to_method[p[0]])
            else:
                self.complain("Invalid parameter type " + p[0])
                return None
            if len(s) > 1:
                s += ','
            s += p[0]
        s += ')'
        return (s, params)

    def form_predefs(self, predefs):
        """Normalize persistent-member declarations to (type, name, init).

        An omitted initializer gets the category default; an unknown type
        yields None.
        """
        new_predefs = []
        for p in predefs:
            if p[3] == '':
                if p[0] in default_value:
                    new_predefs.append((p[0], p[1], default_value[p[0]]))
                else:
                    self.complain("invalid pre-defined parameter")
                    return None
            else:
                new_predefs.append((p[0], p[1], p[3]))
        return new_predefs

    def form_fun_tuple(self, data):
        """Parse a declaration line plus body lines into a 6-tuple:
        (pure, ret_type, (name, aliases), (param_sig, params), predefs,
        body).  Returns None on any parse error.
        """
        # e.g.: mrs_natural huh|who(mrs_real c) mrs_natural a; mrs_natural b=5;
        regex_fun = re.compile(r'^\s*(pure\ |)\s*(mrs_\w+)\s+(\w[\w|\|\ ]*)\(([^\)]*)\)\s*(.*)')
        a = data[0]
        body = data[1:]
        m = regex_fun.match(a)
        if not m:
            self.complain("invalid function declaration")
            return None
        g = m.groups()
        pure = 'false'
        if g[0] == 'pure ':
            pure = 'true'
        ret_type = g[1]
        if not (ret_type in valid_types):
            self.complain("invalid function return type")
            return None
        fname = self.parse_fun_name(g[2])
        if fname is None:
            return None
        params = self.chop(r'(\w+)\s*(\w+)', ',', g[3])
        if params is None:
            self.complain("invalid parameter definition")
            return None
        params = self.form_params_tuple(params)
        if params is None:  # propagate invalid-type errors
            return None
        predefs = self.chop(r'(\w+)\s+(\w+)\s*(\=\s*(.*))?', ';', g[4])
        if predefs is None:
            self.complain("invalid class var definition")
            return None
        predefs = self.form_predefs(predefs)
        if predefs is None:  # propagate invalid-default errors
            return None
        return (pure, ret_type, fname, params, predefs, body)

    def rd(self, in_file):
        """Read one definition file and return a list of ("lib", ...) /
        ("fun", ...) tuples, or None on a parse error."""
        regex_fun = re.compile(r'^\s*(pure\ |)\s*(mrs_\w+)\s+(.*)')
        regex_lib = re.compile(r'^\s*lib\s+(.+)')
        fh = open(in_file, "r")
        try:
            in_function = False
            fbody = (0, 0, False, [])
            result = []
            for line in fh:
                line = trim_nl(line)
                if in_function:
                    fbody = self.split_brackets(line, fbody)
                    if fbody[0] == 2:  # body complete
                        data = fbody[3]
                        # '!=' replaces the Python-2-only '<>' operator.
                        if data[len(data) - 1] != '##__FUNCTION_END__':
                            c = data.pop()  # trailing text after '}'; discarded
                        data.pop()  # drop the ##__FUNCTION_END__ marker
                        x = self.form_fun_tuple(data)
                        if x is None:
                            return None
                        result.append(("fun", x))
                        in_function = False
                elif len(line) > 0:
                    m = regex_fun.match(line)
                    if m:
                        fbody = self.split_brackets(line, (0, 0, False, []))
                        if fbody[0] == 2:
                            # Declaration and body fit on a single line.
                            x = self.form_fun_tuple(fbody[3])
                            if x is None:
                                return None
                            result.append(("fun", x))
                        else:
                            in_function = True
                    else:
                        m = regex_lib.match(line)
                        if m:
                            x = self.parse_lib_name(m.groups()[0])
                            if x is None:
                                return None
                            result.append(("lib", x))
            return result
        finally:
            # Bugfix: the handle was never closed (leak on every call).
            fh.close()
class Gen:
    """Generates C++ source from the tuples produced by Reader.

    Tuple layout (mirrors Reader.form_fun_tuple / parse_lib_name):
      ('lib', (class_prefix, dotted_path, alias_spec))
      ('fun', (pure, ret_type, (name, alias_spec),
               (param_sig, params), predefs, body_lines))
    """

    def __init__(self):
        self.me = 0

    def gen_lib(self, lib, fun):
        """Return the st->addReserved(...) argument string registering fun,
        e.g.:
        "Real|R.cos(mrs_real)",new ExFun_RealCos("mrs_real","Real.cos(mrs_real)")
        """
        (l1, l2, l3) = lib
        (f1, f2, (f3, f4), (f5, f6), f7, f8) = fun
        fname = "ExFun_" + l1 + f3.capitalize()
        s = '"'
        if l3 != '':
            s += l3 + '.'
        s += f4 + f5 + '",new ' + fname + '("' + f2 + '","'
        if l2 != '':
            s += l2 + '.'
        s += f3 + f5 + '")'
        return s

    def gen_fun(self, lib, fun):
        """Return the C++ class definition implementing fun as an ExFun
        subclass (persistent members, constructor, calc(), copy())."""
        (l1, l2, l3) = lib
        (f1, f2, (f3, f4), (f5, f6), f7, f8) = fun
        fname = "ExFun_" + l1 + f3.capitalize()
        s = "class " + fname + " : public ExFun { public:\n"
        # Persistent member declarations (predefs).
        for p in f7:
            s += " " + p[0] + " " + p[1] + ";\n"
        s += " " + fname + '() : ExFun("' + f2 + '","' + f5 + '",' + f1 + ') {'
        if len(f7) == 0:
            s += ' }\n'
        else:
            # Initialize each persistent member to its declared or default value.
            s += "\n "
            for p in f7:
                s += " " + p[1] + "="
                if p[2] == '':
                    s += default_value[p[0]] + ";"
                else:
                    s += p[2] + ";"
            s += '\n }\n'
        s += ' virtual ExVal calc() {\n'
        # Unpack each parameter via the ExVal accessor recorded in p[2].
        px = 0
        for p in f6:
            t = p[0]
            if t in ('mrs_string', 'string'):
                t = 'std::string'
            elif t == 'mrs_bool':
                t = 'bool'
            s += ' ' + t + ' ' + p[1] + '=(params[' + str(px) + ']->eval()).' + p[2] + '();\n'
            px += 1
        for x in f8:
            s += x + '\n'
        s += ' }\n'
        s += ' ExFun* copy() { return new ' + fname + '(); }\n'
        s += '};\n'
        return s

    def format_libs(self, data):
        """Build (load_symbols body, loadlib_* function definitions).

        *data* is a flat list where '#Name' markers start a new library and
        every other entry is an argument string for st->addReserved.
        """
        v = 'void Marsyas::load_symbols(ExRecord* st)\n{\n'
        s = ''
        opened = False
        for d in data:
            if d[0] == '#':
                # Bugfix: this previously read "ret+='}'", raising a
                # NameError as soon as a second library appeared; the
                # previous loadlib_* body must be closed on s.
                if opened:
                    s += '}\n'
                nm = 'loadlib_' + d[1:]
                s += 'void Marsyas::' + nm + '(ExRecord* st)\n{\n'
                v += ' ' + nm + '(st);\n'
                opened = True
            else:
                s += ' st->addReserved(' + d + ');\n'
        if opened:
            s += '}\n'
        v += '}\n'
        return (v, s)

    def format_funs(self, data):
        """Concatenate the generated function-class definitions."""
        s = ''
        for d in data:
            s += d
        return s

    def gen(self, data):
        """Render parsed tuples: returns ((load_symbols, loadlibs), funs)."""
        funs = []
        libs = []
        lib = ('', '', '')  # functions before any 'lib' line get no path
        for (a, b) in data:
            if a == "lib":
                lib = b
                libs.append("#" + b[0])
            elif a == "fun":
                funs.append(self.gen_fun(lib, b))
                libs.append(self.gen_lib(lib, b))
        return (self.format_libs(libs), self.format_funs(funs))
def usage():
    """Print a short usage banner for command-line invocation."""
    # Parenthesized single-argument print behaves identically under
    # Python 2 (no tuple is formed) and Python 3, unlike the original
    # Python-2-only print statements.
    print("Generate Scheduler Expression Functions")
    print("Usage: python scheduler_libs.py infile")
# Explanatory C-comment banners prepended to the generated output; the
# generated code is meant to be hand-merged into ExNode.cpp / new headers.
loadsym_h="""/**
* The load_symbols function is the master function called by the parser to load
* all libraries. It already exists in ExNode.cpp. Therefore, you need to copy
* the lines within it into the Marsyas::load_symbols(..) function in ExNode.cpp
*/"""
loadlibs_h="""/***
* These are the new library functions you defined and should be placed in
* their own header file, ie "NewLibs.h". You will need to add the line
* #include "NewLibs.h" at the top of ExNode.cpp so that the load_symbols
* function can find your header.
*/"""
funs_h="""/***
* These functions may be placed in their own header file, ie "Fun.h". That
* header file must be included prior to the definition of the loadlib functions
* that use these functions, so if the loadlib functions are in "NewLibs.h" then
* add the lib \'#include "Fun.h"\' at the top of "NewLibs.h"
*/"""
def main():
    """Command-line entry point.

    Parses each input file with Reader, renders C++ with Gen, and writes
    'Append_to_ExLibs.h' and 'Append_to_ExFuns.h' in the current directory.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "o", ["output="])
    except getopt.GetoptError:
        # Unknown option: print help information and exit.
        usage()
        sys.exit(2)
    if len(args) == 0:
        usage()
        sys.exit(0)
    xs = []
    r = Reader()
    for a in args:
        parsed = r.rd(a)
        if parsed is None:
            # Bugfix: rd() returns None on a parse error; extending with it
            # raised TypeError.  rd() already reported the error — just stop.
            sys.exit(1)
        xs.extend(parsed)
    g = Gen()
    ((loadsym, loadlibs), funs) = g.gen(xs)
    loadsym = loadsym_h + loadsym
    loadlibs = loadlibs_h + loadlibs
    funs = funs_h + funs
    # Close the output handles even if a write fails (originally leaked on
    # error paths).
    fh = open('Append_to_ExLibs.h', 'w')
    try:
        fh.write(loadlibs)
        fh.write(loadsym)
    finally:
        fh.close()
    fh = open('Append_to_ExFuns.h', 'w')
    try:
        fh.write(funs)
    finally:
        fh.close()


if __name__ == '__main__':
    # Guard so importing this module no longer runs the generator.
    main()
| 34.152305 | 110 | 0.51508 |
d798f6b9cf9af2b70c67f441f4759c5a3eef9eb4
| 1,026 |
py
|
Python
|
priklad.py
|
Bertik23/spg
|
f6449f1ca8f3a869f0f493f3988b3d84901c1be0
|
[
"MIT"
] | null | null | null |
priklad.py
|
Bertik23/spg
|
f6449f1ca8f3a869f0f493f3988b3d84901c1be0
|
[
"MIT"
] | null | null | null |
priklad.py
|
Bertik23/spg
|
f6449f1ca8f3a869f0f493f3988b3d84901c1be0
|
[
"MIT"
] | null | null | null |
from random import randint, choice
def priklad(a, b):
    """Generate one random arithmetic exercise.

    Draws two integers in [a, b] and a random operator, and returns a
    (question, answer) pair where the question always places the larger
    operand first (so '-' and '//' never go negative / fractional).
    """
    num0, num1 = randint(a, b), randint(a, b)
    hi, lo = max(num0, num1), min(num0, num1)
    operation = choice(['//', '+', '*', '-'])
    # Bugfix: with a == 0 the smaller operand can be 0, and '//' then
    # raised ZeroDivisionError; fall back to addition in that case.
    if operation == '//' and lo == 0:
        operation = '+'
    if operation == '+':
        value = hi + lo
    elif operation == '-':
        value = hi - lo
    elif operation == '*':
        value = hi * lo
    else:
        value = hi // lo
    return f"{hi} {operation} {lo}", value
# Interactive quiz: ask how many exercises to run, quiz the user on each,
# then print the percentage correct and a 1-5 grade (Czech school scale).
prikladu = range(int(input("Příkladů? ")))
spravne = 0
for _ in prikladu:
    p, v = priklad(0,20)
    if int(input(f"Kolik je {p}? ")) == v:
        print("Správně")
        spravne += 1
    else:
        print("Špatně")
# NOTE(review): entering 0 exercises makes len(prikladu) zero and this
# division raises ZeroDivisionError — confirm whether that input is allowed.
procent = (spravne/len(prikladu))*100
print(f"Máš {procent} % správně. Tvoje znamka je {1 if procent > 80 else 2 if procent > 60 else 3 if procent > 40 else 4 if procent > 20 else 5}")
| 54 | 476 | 0.610136 |
ad3efe94360d629d0ea213e7d7c36f81ca3548e0
| 6,924 |
py
|
Python
|
intelligence/anthill/src/managers/Prod_Prod_Manager.py
|
FoxComm/highlander
|
1aaf8f9e5353b94c34d574c2a92206a1c363b5be
|
[
"MIT"
] | 10 |
2018-04-12T22:29:52.000Z
|
2021-10-18T17:07:45.000Z
|
intelligence/anthill/src/managers/Prod_Prod_Manager.py
|
FoxComm/highlander
|
1aaf8f9e5353b94c34d574c2a92206a1c363b5be
|
[
"MIT"
] | null | null | null |
intelligence/anthill/src/managers/Prod_Prod_Manager.py
|
FoxComm/highlander
|
1aaf8f9e5353b94c34d574c2a92206a1c363b5be
|
[
"MIT"
] | 1 |
2018-07-06T18:42:05.000Z
|
2018-07-06T18:42:05.000Z
|
from recommenders.Prod_Prod import Prod_Prod
from util.InvalidUsage import InvalidUsage
from util.response_utils import (
    products_list_from_response,
    zip_responses,
    format_es_response
)
from util.neo4j_utils import (
    add_purchase_event,
    get_all_channels,
    get_purchased_products,
    get_declined_products,
    get_all_by_channel,
    get_popular_products
)

# Canonical "no recommendations" payload.
# NOTE(review): this dict is shared and mutable; the fallback_* helpers call
# response.update(...) only on non-empty responses, so EMPTY is never mutated
# in practice today — but that invariant is fragile. Confirm before reusing.
EMPTY = {'products': []}
def start_pprec_from_db(channel_id, neo4j_client):
    """start_pprec_from_db

    Build a fresh Prod_Prod recommender for one channel by replaying every
    (customer, product) pair recorded for that channel in neo4j.
    """
    recommender = Prod_Prod()
    for customer_id, product_id in get_all_by_channel(channel_id, neo4j_client):
        recommender.add_point(customer_id, product_id)
    return recommender
class Prod_Prod_Manager(object):
    """Prod_Prod_Manager

    provides an interface for several Prod_Prod recommenders — one per
    sales channel — falling back first to popular products (from neo4j)
    and finally to an elasticsearch match-all when a recommender has
    nothing to offer.
    """
    def __init__(self, neo4j_client, es_client):
        # channel_id -> Prod_Prod recommender, warmed from neo4j history.
        self.recommenders = {}
        self.neo4j_client = neo4j_client
        self.es_client = es_client
        for channel_id in get_all_channels(self.neo4j_client):
            self.update_pprec(channel_id, start_pprec_from_db(channel_id, self.neo4j_client))

    def get_recommender(self, channel_id):
        """get_recommender

        Return Prod_Prod object at channel_id if found else create new
        (empty, unregistered) Prod_Prod.
        """
        if channel_id in self.recommenders.keys():
            return self.recommenders[channel_id]
        else:
            return Prod_Prod()

    def update_pprec(self, channel_id, pprec):
        """update_pprec

        Update the Prod_Prod dictionary for the given channel_id
        """
        self.recommenders[channel_id] = pprec

    def validate_channel(self, channel_id):
        """validate_channel

        Raise InvalidUsage (HTTP 400) for a negative (error 100) or
        unknown (error 101) channel id.
        """
        if channel_id < 0:
            raise InvalidUsage('Invalid Channel ID', status_code=400,
                               payload={'error_code': 100})
        elif channel_id not in self.recommenders.keys():
            raise InvalidUsage('Channel ID not found', status_code=400,
                               payload={'error_code': 101})

    def is_valid_channel(self, channel_id):
        # Non-raising counterpart of validate_channel.
        return channel_id in self.recommenders.keys()

    def fallback_to_popular(self, response, source):
        """fallback_to_popular

        if response contains no products, instead use popular products
        This still requires data to be in neo4j
        """
        if len(response['products']) > 0:
            # Tag the payload with which engine produced it.
            response.update(source=source)
            return response
        else:
            # Popular products may themselves be empty; fallback_to_all
            # handles that final case.
            return self.fallback_to_all(
                get_popular_products(self.neo4j_client),
                source='anthill-popular')

    def fallback_to_popular_full(self, response, source, from_param, size_param):
        """fallback_to_popular_full

        full es response version of fallback_to_popular: hydrates the
        recommended product ids through elasticsearch with paging.
        """
        es_resp = self.es_client.get_products_list(
            products_list_from_response(response),
            from_param,
            size_param)
        if len(es_resp['result']) > 0:
            response = zip_responses(response, es_resp)
            response.update(source=source)
            return response
        else:
            # Nothing hydrated: retry the same pipeline with popular products.
            popular_response = get_popular_products(self.neo4j_client)
            es_resp = self.es_client.get_products_list(
                products_list_from_response(popular_response),
                from_param,
                size_param)
            return self.fallback_to_all(
                zip_responses(popular_response, es_resp),
                source='anthill-popular',
                only_ids=False)

    def fallback_to_all(self, response, source, only_ids=False):
        """fallback_to_all

        Last-resort fallback: when *response* is still empty, matches all
        products in elasticsearch (first 10) and tags the source as
        'es-match-all'.
        """
        if len(response['products']) > 0:
            response.update(source=source)
            return response
        else:
            response = format_es_response(
                self.es_client.get_products_list([], 0, 10),
                only_ids=only_ids)
            response.update(source='es-match-all')
            return response

    def recommend(self, prod_id, channel_id):
        """recommend

        take a product id
        get list of product ids from the recommender
        """
        if not self.is_valid_channel(channel_id):
            # Unknown channel: skip straight to the popular-products path.
            return self.fallback_to_popular(EMPTY, source='')
        rec = self.get_recommender(channel_id)
        if prod_id in rec.product_ids():
            resp = rec.recommend([prod_id])
        else:
            resp = EMPTY
        return self.fallback_to_popular(response=resp, source='anthill-similar')

    def recommend_full(self, prod_id, channel_id, from_param, size_param):
        """recommend_full

        take a product id
        get a list of full products from elasticsearch based on
        product ids from the recommender
        """
        recommender_output = self.recommend(prod_id, channel_id)
        return self.fallback_to_popular_full(
            response=recommender_output,
            source=recommender_output['source'],
            from_param=from_param,
            size_param=size_param)

    def cust_recommend(self, cust_id, channel_id):
        """cust_recommend

        take a customer id
        get list of product ids from the recommender, seeded by the
        customer's purchases and excluding products they declined
        """
        if not self.is_valid_channel(channel_id):
            return self.fallback_to_popular(EMPTY, source='')
        prod_ids = get_purchased_products(cust_id, channel_id, self.neo4j_client)
        excludes = get_declined_products(cust_id, self.neo4j_client)
        resp = self.recommenders[channel_id].recommend(prod_ids, excludes)
        return self.fallback_to_popular(response=resp, source='anthill-similar')

    def cust_recommend_full(self, cust_id, channel_id, from_param, size_param):
        """cust_recommend_full

        get a list of full products from elasticsearch based on
        product ids from the recommender
        """
        recommender_output = self.cust_recommend(cust_id, channel_id)
        return self.fallback_to_popular_full(
            response=recommender_output,
            source=recommender_output['source'],
            from_param=from_param,
            size_param=size_param)

    def add_point(self, cust_id, prod_id, channel_id):
        """add_point

        add a purchase event to the recommender (registering the channel's
        recommender if it did not exist yet)
        """
        pprec = self.get_recommender(channel_id)
        pprec.add_point(cust_id, prod_id)
        self.update_pprec(channel_id, pprec)

    def train(self, payload):
        """train

        train a recommender with a set of purchase events; the event is
        also persisted to neo4j so future restarts can replay it
        """
        add_purchase_event(payload, self.neo4j_client)
        cust_id = payload.get('cust_id')
        channel_id = payload.get('channel_id')
        for prod_id in payload.get('prod_ids'):
            self.add_point(cust_id, prod_id, channel_id)
0e951142d175163d89ded69a3954c3c63779ef45
| 741 |
py
|
Python
|
backend/python/django/rest/blog/serializer.py
|
Untanky/bachelor-thesis
|
01b3c00765006ab6b140607e426533a7eed1508b
|
[
"MIT"
] | 1 |
2021-02-08T17:07:54.000Z
|
2021-02-08T17:07:54.000Z
|
backend/python/django/rest/blog/serializer.py
|
Untanky/bachelor-thesis
|
01b3c00765006ab6b140607e426533a7eed1508b
|
[
"MIT"
] | null | null | null |
backend/python/django/rest/blog/serializer.py
|
Untanky/bachelor-thesis
|
01b3c00765006ab6b140607e426533a7eed1508b
|
[
"MIT"
] | 1 |
2021-02-08T17:08:01.000Z
|
2021-02-08T17:08:01.000Z
|
from rest_framework import serializers
import sys, os
# Make the shared DAO package importable without installing it as a package.
sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/../../../dao/src"))
from Post import Post
class PostSerializer(serializers.BaseSerializer):
    """Serializes Post objects to/from plain dicts for the REST API."""

    def to_representation(self, instance):
        """Render a Post as a JSON-friendly dict including its detail URL."""
        return {
            'id': instance.id,
            'title': instance.title,
            'description': instance.description,
            'url': "/post/" + str(instance.id)
        }

    def to_internal_value(self, instance):
        """Build a Post from incoming data; 'id' is optional on create."""
        # Membership test directly on the dict instead of .keys().
        if 'id' in instance:
            return Post(id=instance['id'], title=instance['title'],
                        description=instance['description'])
        return Post(title=instance['title'],
                    description=instance['description'])
| 37.05 | 110 | 0.623482 |
38cd592ea722aa8b0b3fc4da1492b3f76124dd2b
| 694 |
py
|
Python
|
flipui/gui/settingsdialog.py
|
julianschick/flipdot-brose-code
|
b2caf2c52c55a0d5b80dbf3d5adbbf5aec1d79c2
|
[
"MIT"
] | null | null | null |
flipui/gui/settingsdialog.py
|
julianschick/flipdot-brose-code
|
b2caf2c52c55a0d5b80dbf3d5adbbf5aec1d79c2
|
[
"MIT"
] | null | null | null |
flipui/gui/settingsdialog.py
|
julianschick/flipdot-brose-code
|
b2caf2c52c55a0d5b80dbf3d5adbbf5aec1d79c2
|
[
"MIT"
] | null | null | null |
from PySide2.QtWidgets import QDialog
from PySide2.QtCore import QSettings
from gui.settingsdialog_ui import Ui_SettingsDialog
class SettingsDialog(QDialog):
    """Modal dialog for editing the connection settings (hostname + port),
    persisted via QSettings."""

    def __init__(self):
        super().__init__()
        self._ui = Ui_SettingsDialog()
        self._ui.setupUi(self)
        # Persist the values only when the user confirms the dialog.
        self.accepted.connect(self.__accepted)
        # Pre-fill the widgets from stored settings; port defaults to 3000.
        settings = QSettings()
        self._ui.hostLineEdit.setText(settings.value("hostname", ""))
        self._ui.portSpinBox.setValue(settings.value("port", 3000, int))

    def __accepted(self):
        """Write the edited values back to the application settings."""
        settings = QSettings()
        settings.setValue("hostname", self._ui.hostLineEdit.text())
        settings.setValue("port", self._ui.portSpinBox.value())
| 31.545455 | 72 | 0.690202 |
2db8daa5700affbf3da62664cc96e7a80f615bd9
| 626 |
py
|
Python
|
python/python_backup/PRAC_PYTHON/dd.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/python_backup/PRAC_PYTHON/dd.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/python_backup/PRAC_PYTHON/dd.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
class society:
    """A housing-society household: allocates a flat type from income.

    The original source did not parse (missing spaces in 'def__init__',
    'self flat=...', a method name containing a space, and stray colons
    after print); this is a faithful, working reconstruction.
    Allocation bands: income >= 25000 -> "a type"; 20000-24999 -> "b type";
    below 20000 -> "c type".
    """

    def __init__(self):
        # Defaults for a newly registered household.
        self.society_name = "surya apartment"
        # Unified on 'house_no' (the original mixed house_number/house_no).
        self.house_no = "20"
        self.no_of_members = 0
        self.income = 25000
        self.flat = "a type"

    def inputdata(self, nm, no, nom, ic):
        """Record household details, then (re)allocate the flat type."""
        self.society_name = nm
        self.house_no = no
        self.no_of_members = nom
        self.income = ic
        self.allocate_flat()

    def allocate_flat(self):
        """Set self.flat from self.income using the allocation bands."""
        if self.income >= 25000:
            self.flat = "a type"
        elif self.income >= 20000:
            self.flat = "b type"
        else:
            self.flat = "c type"

    def showdata(self):
        """Print every recorded attribute, one per line."""
        print(self.society_name)
        print(self.house_no)
        print(self.no_of_members)
        print(self.income)
        print(self.flat)
| 21.586207 | 47 | 0.678914 |
facdb786714b75ab38116a12bd3cd78d1f359f27
| 1,133 |
py
|
Python
|
project/api/dump/schemas.py
|
DanielGrams/gsevp
|
e94034f7b64de76f38754b56455e83092378261f
|
[
"MIT"
] | 1 |
2021-06-01T14:49:18.000Z
|
2021-06-01T14:49:18.000Z
|
project/api/dump/schemas.py
|
DanielGrams/gsevp
|
e94034f7b64de76f38754b56455e83092378261f
|
[
"MIT"
] | 286 |
2020-12-04T14:13:00.000Z
|
2022-03-09T19:05:16.000Z
|
project/api/dump/schemas.py
|
DanielGrams/gsevpt
|
a92f71694388e227e65ed1b24446246ee688d00e
|
[
"MIT"
] | null | null | null |
from marshmallow import fields
from project.api import marshmallow
from project.api.event.schemas import EventDumpSchema
from project.api.event_category.schemas import EventCategoryDumpSchema
from project.api.event_reference.schemas import EventReferenceDumpSchema
from project.api.image.schemas import ImageDumpSchema
from project.api.location.schemas import LocationDumpSchema
from project.api.organization.schemas import OrganizationDumpSchema
from project.api.organizer.schemas import OrganizerDumpSchema
from project.api.place.schemas import PlaceDumpSchema
class DumpResponseSchema(marshmallow.Schema):
    """Marshmallow schema for the full-database dump response.

    Each field is a list of the corresponding entity's dump schema.
    """
    events = fields.List(fields.Nested(EventDumpSchema))
    places = fields.List(fields.Nested(PlaceDumpSchema))
    locations = fields.List(fields.Nested(LocationDumpSchema))
    event_categories = fields.List(fields.Nested(EventCategoryDumpSchema))
    organizers = fields.List(fields.Nested(OrganizerDumpSchema))
    images = fields.List(fields.Nested(ImageDumpSchema))
    organizations = fields.List(fields.Nested(OrganizationDumpSchema))
    event_references = fields.List(fields.Nested(EventReferenceDumpSchema))
| 49.26087 | 75 | 0.835834 |
faf84d3ace88f8d66f04d6fb40d3f4c0d98fe72a
| 1,126 |
py
|
Python
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/khmer-1.1-py2.7-linux-x86_64.egg/khmer/tests/khmer_tst_utils.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/khmer-1.1-py2.7-linux-x86_64.egg/khmer/tests/khmer_tst_utils.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/khmer-1.1-py2.7-linux-x86_64.egg/khmer/tests/khmer_tst_utils.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 2 |
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: [email protected]
#
import tempfile
import os
import shutil
from pkg_resources import Requirement, resource_filename, ResolutionError
def get_test_data(filename):
    """Locate a test-data file, preferring the installed khmer package.

    Falls back to the test-data directory next to this module when the
    package resource cannot be resolved or does not exist on disk.
    """
    found = None
    try:
        found = resource_filename(
            Requirement.parse("khmer"), "khmer/tests/test-data/" + filename)
    except ResolutionError:
        found = None
    if not found or not os.path.isfile(found):
        found = os.path.join(os.path.dirname(__file__), 'test-data',
                             filename)
    return found
# Scratch directories created by get_temp_filename, removed by cleanup().
cleanup_list = []


def get_temp_filename(filename, tempdir=None):
    """Return a path for *filename* inside a scratch directory.

    When no tempdir is given, a fresh khmertest_* directory is created and
    remembered in cleanup_list so cleanup() can remove it later.
    """
    if tempdir is not None:
        return os.path.join(tempdir, filename)
    scratch = tempfile.mkdtemp(prefix='khmertest_')
    cleanup_list.append(scratch)
    return os.path.join(scratch, filename)


def cleanup():
    """Delete every remembered scratch directory and reset the list."""
    global cleanup_list
    for scratch in cleanup_list:
        shutil.rmtree(scratch, ignore_errors=True)
    cleanup_list = []
| 26.809524 | 76 | 0.685613 |
ea923edc9eb9bb6aa37571eba6b3ec52fc17a94c
| 1,867 |
py
|
Python
|
data/parquet-testing/lineitem-to-parquet.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 2,816 |
2018-06-26T18:52:52.000Z
|
2021-04-06T10:39:15.000Z
|
data/parquet-testing/lineitem-to-parquet.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 1,310 |
2021-04-06T16:04:52.000Z
|
2022-03-31T13:52:53.000Z
|
data/parquet-testing/lineitem-to-parquet.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 270 |
2021-04-09T06:18:28.000Z
|
2022-03-31T11:55:37.000Z
|
import tempfile
import os
from pyspark.sql import SparkSession
from pyspark.sql.types import *
import glob
# Output/compression knobs and a scratch directory for Spark spill files.
parquet_compression = 'snappy'
outdir = tempfile.mkdtemp()
parquet_folder = os.path.join(outdir, "out.parquet")
nthreads = 8
memory_gb = 10

# Local Spark session: snappy-compressed parquet output, UI disabled,
# spilling directed at the scratch directory.
builder = (
    SparkSession.builder
    .master("local[%d]" % nthreads)
    .config('spark.sql.parquet.compression.codec', parquet_compression)
    .config("spark.ui.enabled", "false")
    .config("spark.local.dir", outdir)
    .config("spark.driver.memory", "%dg" % memory_gb)
    .config("spark.executor.memory", "%dg" % memory_gb)
)
spark = builder.getOrCreate()
sc = spark.sparkContext

# TPC-H lineitem columns, in table order, all declared NOT NULL.
_columns = [
    ("l_orderkey", LongType()),
    ("l_partkey", LongType()),
    ("l_suppkey", LongType()),
    ("l_linenumber", IntegerType()),
    ("l_quantity", IntegerType()),
    ("l_extendedprice", DoubleType()),
    ("l_discount", DoubleType()),
    ("l_tax", DoubleType()),
    ("l_returnflag", StringType()),
    ("l_linestatus", StringType()),
    ("l_shipdate", StringType()),
    ("l_commitdate", StringType()),
    ("l_receiptdate", StringType()),
    ("l_shipinstruct", StringType()),
    ("l_shipmode", StringType()),
    ("l_comment", StringType()),
]
schema = StructType([StructField(col, dtype, False) for col, dtype in _columns])

# Load the pipe-delimited TPC-H dump and coalesce to one partition so that
# exactly one parquet part file is produced.
df = (
    spark.read.format("csv")
    .schema(schema)
    .option("header", "false")
    .option("delimiter", "|")
    .load("lineitem-sf1.tbl.gz")
    .repartition(1)
)
df.write.mode('overwrite').format("parquet").save(parquet_folder)

# Move the single generated part file to a stable, descriptive name.
part_file = glob.glob(os.path.join(parquet_folder, '*.parquet'))[0]
os.rename(part_file, "lineitem-sf1.%s.parquet" % parquet_compression)
| 41.488889 | 310 | 0.684521 |
57934881090cbcc6954030700380de5e96fc26f1
| 1,707 |
py
|
Python
|
resources/mechanics_lib/api/graphs/shapes.py
|
PRECISE/ROSLab
|
2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d
|
[
"Apache-2.0"
] | 7 |
2016-01-20T02:33:00.000Z
|
2021-02-04T04:06:57.000Z
|
resources/mechanics_lib/api/graphs/shapes.py
|
PRECISE/ROSLab
|
2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d
|
[
"Apache-2.0"
] | null | null | null |
resources/mechanics_lib/api/graphs/shapes.py
|
PRECISE/ROSLab
|
2a6a295b71d4c73bc5c6ae2ec0330274afa31d0d
|
[
"Apache-2.0"
] | 3 |
2016-10-05T07:20:30.000Z
|
2017-11-20T10:36:50.000Z
|
from face import Face
from math import sin, cos, pi
class RegularNGon(Face):
    """A regular polygon face with n sides of the given edge length.

    Vertices are generated by walking successive edge vectors rotated by
    the exterior angle 2*pi/n, starting from the origin.
    """
    def __init__(self, name, n, length, edgeNames=None, allEdges=None):
        pts = []
        lastpt = (0, 0)
        dt = (2 * pi / n)
        for i in range(n):
            # Scale each unit edge vector by the requested edge length.
            # (Previously `length` was accepted but ignored, so every
            # polygon was built with edge length 1 regardless.)
            lastpt = (lastpt[0] + length * cos(i * dt), lastpt[1] + length * sin(i * dt))
            pts.append(lastpt)
        Face.__init__(self, name, pts, edgeNames=edgeNames, allEdges=allEdges)
class Square(RegularNGon):
    """A four-sided regular polygon (special case of RegularNGon)."""
    def __init__(self, name, length, edgeNames=None, allEdges=None):
        RegularNGon.__init__(self, name, 4, length,
                             edgeNames=edgeNames, allEdges=allEdges)
class Rectangle(Face):
    """An axis-aligned l-by-w rectangular face anchored at the origin."""
    def __init__(self, name, l, w, edgeNames=None, allEdges=None):
        corners = ((l, 0), (l, w), (0, w), (0, 0))
        Face.__init__(self, name, corners, edgeNames=edgeNames, allEdges=allEdges)
class RightTriangle(Face):
    """A right triangle with legs of length l and w along the axes."""
    def __init__(self, name, l, w, edgeNames=None, allEdges=None):
        vertices = ((l, 0), (0, w), (0, 0))
        Face.__init__(self, name, vertices, edgeNames=edgeNames, allEdges=allEdges)
if __name__ == "__main__":
    # Demo (Python 2 syntax): build a pentagon, then assemble six squares
    # into a cube by naming the shared edges, and print each face's
    # neighbors.
    r = Rectangle("r1", 15, 10)   # NOTE(review): unused — presumably a leftover smoke test
    p = RegularNGon("pent", 5, 1)
    print p.pts
    print [e.name for e in p.edges]
    s = Square("square", 1)
    # Shared edge registry: edges with the same name in `ae` are joined.
    ae = []
    f1 = s.copy("one").setEdges(("12", "13", "15", "14"), allEdges = ae)
    f2 = s.copy("two").setEdges(("12", "24", "26", "23"), allEdges = ae)
    f3 = s.copy("three").setEdges(("13", "23", "36", "35"), allEdges = ae)
    f4 = s.copy("four").setEdges(("14", "45", "46", "24"), allEdges = ae)
    f5 = s.copy("five").setEdges(("15", "35", "56", "45"), allEdges = ae)
    f6 = s.copy("six").setEdges(("26", "46", "56", "36"), allEdges = ae)
    print f1.neighbors()
    print f2.neighbors()
    print f3.neighbors()
    print f4.neighbors()
    print f5.neighbors()
    print f6.neighbors()
| 34.836735 | 102 | 0.616872 |
aa3ecc781eff302254fd24495c7387348d079ecc
| 2,539 |
py
|
Python
|
main.py
|
Incompleteusern/Spork-Automation
|
b6d6b908443301c3c7c8c6983cceaeb316884dbe
|
[
"Apache-2.0"
] | 2 |
2020-11-24T16:07:47.000Z
|
2020-11-24T16:07:50.000Z
|
main.py
|
Incompleteusern/Spork-Automation
|
b6d6b908443301c3c7c8c6983cceaeb316884dbe
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
Incompleteusern/Spork-Automation
|
b6d6b908443301c3c7c8c6983cceaeb316884dbe
|
[
"Apache-2.0"
] | 1 |
2020-11-20T20:14:44.000Z
|
2020-11-20T20:14:44.000Z
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.common import exceptions
import json
def parse_json(json_file_path):
    """Load the JSON file at *json_file_path* and return the parsed object."""
    with open(json_file_path, 'r') as handle:
        return json.load(handle)
class SporkInstance:
    """Thin Selenium wrapper that logs into spork.school and clicks the
    green "join" buttons on the schedule page."""
    def __init__(self, driver_path, is_headless, json_creds_path= 'creds.json'):
        # Start Chrome (optionally headless), open the schedule page and
        # load credentials from the JSON file at json_creds_path.
        if is_headless:
            option = webdriver.ChromeOptions()
            option.add_argument('headless')
        else: option = None
        self.driver = webdriver.Chrome(driver_path, options=option)
        self.driver.get('https://spork.school/schedule')
        self.credentials = parse_json(json_creds_path)
    def enter_credentials(self):
        """Fill in and submit the login form.

        Returns True on apparent success; on failure prints a message,
        quits the driver and returns False so callers can bail out.
        """
        try:
            usernameField = WebDriverWait(self.driver, 10).until(ec.presence_of_element_located((By.NAME, 'username')))
            usernameField.clear()
            usernameField.send_keys(self.credentials.get('username'))
            passwordField = WebDriverWait(self.driver, 10).until(ec.presence_of_element_located((By.NAME, 'password')))
            passwordField.clear()
            passwordField.send_keys(self.credentials.get('password'))
            passwordField.send_keys(Keys.ENTER)
            # wait (up to 5s) for the password field to detach from the DOM,
            # which signals the login navigation happened
            staleness = WebDriverWait(self.driver, 5).until(ec.staleness_of(passwordField))
            # whether to continue, as it would produce an error if it tried to use webdriver and it quit
            return True
        except (exceptions.NoSuchElementException, exceptions.TimeoutException):
            print('Unable to enter credentials')
            self.driver.quit()
            return False
    def click_join_button(self):
        """Click every green "join" button on the page; quits the driver
        if none appear within the timeout."""
        try:
            joinButtons = WebDriverWait(self.driver, 10).until(ec.presence_of_all_elements_located((By.CSS_SELECTOR, 'button.ui.green.compact.button')))
            for button in joinButtons:
                button.click()
        except (exceptions.NoSuchElementException, exceptions.TimeoutException):
            print('No button to press')
            self.driver.quit()
if __name__ == '__main__':
    # Entry point: log in with creds.json and join every available session.
    client = SporkInstance('chromedriver.exe', False, 'creds.json')
    status = client.enter_credentials()
    if status:
        client.click_join_button()
| 43.033898 | 152 | 0.680977 |
a4ad6577353cb7d08273aba0f044aba46c9d5607
| 705 |
py
|
Python
|
site_generation/pelicanconf.py
|
globalwatchpost/CloudRegionsList
|
f428b39fc797403cf6ed5d38196a5df29f59d8af
|
[
"MIT"
] | null | null | null |
site_generation/pelicanconf.py
|
globalwatchpost/CloudRegionsList
|
f428b39fc797403cf6ed5d38196a5df29f59d8af
|
[
"MIT"
] | null | null | null |
site_generation/pelicanconf.py
|
globalwatchpost/CloudRegionsList
|
f428b39fc797403cf6ed5d38196a5df29f59d8af
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# Pelican static-site configuration for the "List Cloud Regions" site.
AUTHOR = 'Global Watchpost LLC'
SITENAME = 'List Cloud Regions'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'UTC'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
# Site content comes from the local list_cloud_regions plugin.
PLUGINS = [ 'list_cloud_regions', ]
PLUGIN_PATHS = [ 'plugins', ]
THEME = "./theme"
# Disable default index/tag/category/author/archive pages entirely.
TAGS_SAVE_AS = None
CATEGORIES_SAVE_AS = None
AUTHORS_SAVE_AS = None
ARCHIVES_SAVE_AS = None
INDEX_SAVE_AS = None
| 19.583333 | 77 | 0.751773 |
1066bbc68a180d11ac71ef3706f783f677b9a3f0
| 800 |
py
|
Python
|
Lineare Regression/lineare_regression/drei_punkte_beispiel.py
|
severinhaller/einf-machinelearning
|
4dfc8f1da0d81c5aa800d1459f81b72d1bf6dd9b
|
[
"MIT"
] | null | null | null |
Lineare Regression/lineare_regression/drei_punkte_beispiel.py
|
severinhaller/einf-machinelearning
|
4dfc8f1da0d81c5aa800d1459f81b72d1bf6dd9b
|
[
"MIT"
] | null | null | null |
Lineare Regression/lineare_regression/drei_punkte_beispiel.py
|
severinhaller/einf-machinelearning
|
4dfc8f1da0d81c5aa800d1459f81b72d1bf6dd9b
|
[
"MIT"
] | null | null | null |
# Worked example: simple linear regression (least squares) fitted to
# three hard-coded points.
points = [(1,1), (2,2), (2.5,1)]
print(points[2][0])
y=0
x=0
# point -> a new variable bound to each list entry in turn
for point in points:
    print(point[1])
    y = y+point[1]
    x = x+point[0]
print(x,y)
# Coordinate means of the sample.
avg_x = x / len(points)
avg_y = y / len(points)
print(avg_x,avg_y)
w_enumerator = 0
w_denominator = 0
for point in points:
    w_enumerator += (point[0]-avg_x)*(point[1]-avg_y) # += accumulates across iterations; plain '=' would overwrite the previous value on every pass
print(w_enumerator)
for point in points:
    w_denominator += (point[0]-avg_x)**2
print(w_denominator)
# Slope w1 and intercept w0 of the least-squares line y = w0 + w1*x.
w1 = w_enumerator/w_denominator
print(w1)
w0 = 0
w0 = avg_y - w1 * avg_x
print(w0)
| 20 | 274 | 0.68125 |
f4d59d140e90d92aa8fcf71270fab5cc567e3bda
| 176 |
py
|
Python
|
python/python_backup/PRAC_PYTHON/ab12.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/python_backup/PRAC_PYTHON/ab12.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/python_backup/PRAC_PYTHON/ab12.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
#sum of square root upto n
# Python 2 script: prints the square roots of the first n odd numbers
# (1, 3, 5, ...).  NOTE(review): despite the header comment nothing is
# summed, and Python 2 input() eval()s whatever the user types.
import math
n=input("Enter range:");
j=1
for i in range(1,n,1):
    print "Number:",j,"SquareRoot",math.sqrt(j),
    j=j+2
| 14.666667 | 51 | 0.545455 |
877792f6657423c040e4c6f5b8e1b958b9627700
| 449 |
py
|
Python
|
LeetCode_problems/First_Unique_Character_in_a_String/solution.py
|
gbrls/CompetitiveCode
|
b6f1b817a655635c3c843d40bd05793406fea9c6
|
[
"MIT"
] | 165 |
2020-10-03T08:01:11.000Z
|
2022-03-31T02:42:08.000Z
|
LeetCode_problems/First_Unique_Character_in_a_String/solution.py
|
gbrls/CompetitiveCode
|
b6f1b817a655635c3c843d40bd05793406fea9c6
|
[
"MIT"
] | 383 |
2020-10-03T07:39:11.000Z
|
2021-11-20T07:06:35.000Z
|
LeetCode_problems/First_Unique_Character_in_a_String/solution.py
|
gbrls/CompetitiveCode
|
b6f1b817a655635c3c843d40bd05793406fea9c6
|
[
"MIT"
] | 380 |
2020-10-03T08:05:04.000Z
|
2022-03-19T06:56:59.000Z
|
from collections import Counter
class Solution:
    def firstUniqChar(self, s: str) -> int:
        """Return the index of the first character that occurs exactly
        once in s, or -1 if every character repeats."""
        counts = Counter(s)
        for idx, ch in enumerate(s):
            if counts[ch] == 1:
                return idx
        return -1
# Counter makes a frequency map of letters in string
# we iterate over the letters of the string and check if it has only 1 count, then return the first one
# else if no count is 1, we return -1 because no letter is unique
| 34.538462 | 103 | 0.632517 |
8777fed7ecae5743a5d50ef386c96297ed0c37c1
| 1,073 |
py
|
Python
|
distance.py
|
Kingpin007/SC-Lab
|
ba8fa9dfca4b950ebcb8c8d409f7e215c7971925
|
[
"MIT"
] | null | null | null |
distance.py
|
Kingpin007/SC-Lab
|
ba8fa9dfca4b950ebcb8c8d409f7e215c7971925
|
[
"MIT"
] | null | null | null |
distance.py
|
Kingpin007/SC-Lab
|
ba8fa9dfca4b950ebcb8c8d409f7e215c7971925
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 09:18:34 2018
@author: VP LAB

Interactive demo: reads two fuzzy sets of equal size from stdin and
prints their Euclidean and Hamming distances, plus normalized variants.
"""
import numpy
# Each fuzzy set is stored as (element_index, membership_value) tuples.
fuzzySet1 = list(tuple())
fuzzySet2 = list(tuple())
l1 = list()  # membership values of set 1; later overwritten with per-element differences
l2 = list()  # NOTE(review): never used
x = int(input("Enter the number of elements: "))
element = 1;
print("Enter elements for the set1: ")
for i in range(x):
    a = float(input("Enter element for fuzzy set 1:"))
    l1.append(a)
    fuzzySet1.append((element,a))
    element += 1
element = 1
print("Enter elements for the set2: ")
for i in range(x):
    a = float(input("Enter element for fuzzy set 2: "))
    fuzzySet2.append((element,a))
    l1[i] = l1[i] - a  # l1 now holds the difference set1[i] - set2[i]
    element += 1
print("FUZZY SET 1: ")
for currentSet in fuzzySet1:
    print(currentSet,end=",")
print("\nFUZZY SET 2: ")
for currentSet in fuzzySet2:
    print(currentSet,end=",")
# Euclidean distance = L2 norm of the difference vector, rounded to 4 dp.
ed = round((numpy.linalg.norm(numpy.array(l1))),4)
hd = 0
# Hamming distance = sum of absolute per-element differences.
for i in l1:
    hd += round(abs(i),4)
print("\nEuclidean distance : " + str(ed))
print("\nNormalized Euclidean distance : " + str(ed/x))
print("\nHamming distance : " + str(hd))
print("\nNormalized Hamming distance : " + str(hd/x))
| 26.170732 | 55 | 0.637465 |
21c6526d025492ba368ebcdd3366e667d2438a2f
| 15,440 |
py
|
Python
|
Co-Simulation/Sumo/sumo-1.7.0/tools/traci/_person.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 4 |
2020-11-13T02:35:56.000Z
|
2021-03-29T20:15:54.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/traci/_person.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 9 |
2020-12-09T02:12:39.000Z
|
2021-02-18T00:15:28.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/traci/_person.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 1 |
2020-11-20T19:31:26.000Z
|
2020-11-20T19:31:26.000Z
|
# -*- coding: utf-8 -*-
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2011-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file _person.py
# @author Jakob Erdmann
# @date 2015-02-06
from __future__ import absolute_import
from .domain import Domain
from . import constants as tc
from . import _simulation as simulation
class Reservation(object):
    """Plain record for one taxi reservation as reported by the SUMO
    server (see getTaxiReservations)."""
    def __init__(self, id, persons, group, fromEdge, toEdge, departPos, arrivalPos,
                 depart, reservationTime):
        self.id = id
        self.persons = persons
        self.group = group
        self.fromEdge = fromEdge
        self.toEdge = toEdge
        self.departPos = departPos
        self.arrivalPos = arrivalPos
        self.depart = depart
        self.reservationTime = reservationTime
    def __attr_repr__(self, attrname, default=""):
        # Render one attribute as "name=value"; empty string when the
        # attribute still has its default, "INVALID" for sentinel doubles.
        value = getattr(self, attrname)
        if value == default:
            return ""
        if value == tc.INVALID_DOUBLE_VALUE:
            value = "INVALID"
        return "%s=%s" % (attrname, value)
    def __repr__(self):
        attrs = ("id", "persons", "group", "fromEdge", "toEdge",
                 "departPos", "arrivalPos", "depart", "reservationTime")
        rendered = [self.__attr_repr__(name) for name in attrs]
        return "Reservation(%s)" % ', '.join([part for part in rendered if part != ""])
def _readReservation(result):
    """Deserialize one Reservation from a TraCI result buffer.

    NOTE(review): the read sequence below must match the wire format sent
    by the SUMO server exactly — do not reorder.
    """
    # compound size and type
    assert(result.read("!i")[0] == 9)
    id = result.readTypedString()
    persons = result.readTypedStringList()
    group = result.readTypedString()
    fromEdge = result.readTypedString()
    toEdge = result.readTypedString()
    departPos = result.readTypedDouble()
    arrivalPos = result.readTypedDouble()
    depart = result.readTypedDouble()
    reservationTime = result.readTypedDouble()
    return Reservation(id, persons, group, fromEdge, toEdge, departPos, arrivalPos, depart, reservationTime)
# Custom decoders for variables whose payload is not a primitive:
# VAR_STAGE results are turned into Stage objects by the simulation module.
_RETURN_VALUE_FUNC = {tc.VAR_STAGE: simulation._readStage,
                      }
class PersonDomain(Domain):
    """TraCI access to persons (pedestrians/passengers) in a running SUMO
    simulation: value retrieval, plan manipulation and state changes.
    Getters query the last simulation step; setters take effect in the
    next step."""
    def __init__(self):
        Domain.__init__(self, "person", tc.CMD_GET_PERSON_VARIABLE, tc.CMD_SET_PERSON_VARIABLE,
                        tc.CMD_SUBSCRIBE_PERSON_VARIABLE, tc.RESPONSE_SUBSCRIBE_PERSON_VARIABLE,
                        tc.CMD_SUBSCRIBE_PERSON_CONTEXT, tc.RESPONSE_SUBSCRIBE_PERSON_CONTEXT,
                        _RETURN_VALUE_FUNC)
    def getSpeed(self, personID):
        """getSpeed(string) -> double
        Returns the speed in m/s of the named person within the last step.
        """
        return self._getUniversal(tc.VAR_SPEED, personID)
    def getPosition(self, personID):
        """getPosition(string) -> (double, double)
        Returns the position of the named person within the last step [m,m].
        """
        return self._getUniversal(tc.VAR_POSITION, personID)
    def getPosition3D(self, personID):
        """getPosition3D(string) -> (double, double, double)
        Returns the position of the named person within the last step [m,m,m].
        """
        return self._getUniversal(tc.VAR_POSITION3D, personID)
    def getAngle(self, personID):
        """getAngle(string) -> double
        Returns the angle in degrees of the named person within the last step.
        """
        return self._getUniversal(tc.VAR_ANGLE, personID)
    def getSlope(self, personID):
        """getSlope(string) -> double
        Returns the slope at the current position of the person in degrees
        """
        return self._getUniversal(tc.VAR_SLOPE, personID)
    def getRoadID(self, personID):
        """getRoadID(string) -> string
        Returns the id of the edge the named person was at within the last step.
        """
        return self._getUniversal(tc.VAR_ROAD_ID, personID)
    def getLaneID(self, personID):
        """getLaneID(string) -> string
        Returns the id of the lane the named person was at within the last step.
        If the current person stage does not provide a lane, "" is returned.
        """
        return self._getUniversal(tc.VAR_LANE_ID, personID)
    def getTypeID(self, personID):
        """getTypeID(string) -> string
        Returns the id of the type of the named person.
        """
        return self._getUniversal(tc.VAR_TYPE, personID)
    def getLanePosition(self, personID):
        """getLanePosition(string) -> double
        The position of the person along the lane measured in m.
        """
        return self._getUniversal(tc.VAR_LANEPOSITION, personID)
    def getColor(self, personID):
        """getColor(string) -> (integer, integer, integer, integer)
        Returns the person's rgba color.
        """
        return self._getUniversal(tc.VAR_COLOR, personID)
    def getLength(self, personID):
        """getLength(string) -> double
        Returns the length in m of the given person.
        """
        return self._getUniversal(tc.VAR_LENGTH, personID)
    def getWaitingTime(self, personID):
        """getWaitingTime() -> double
        The waiting time of a person is defined as the time (in seconds) spent with a
        speed below 0.1m/s since the last time it was faster than 0.1m/s.
        (basically, the waiting time of a person is reset to 0 every time it moves).
        """
        return self._getUniversal(tc.VAR_WAITING_TIME, personID)
    def getWidth(self, personID):
        """getWidth(string) -> double
        Returns the width in m of this person.
        """
        return self._getUniversal(tc.VAR_WIDTH, personID)
    def getMinGap(self, personID):
        """getMinGap(string) -> double
        Returns the offset (gap to front person if halting) of this person.
        """
        return self._getUniversal(tc.VAR_MINGAP, personID)
    def getNextEdge(self, personID):
        """getNextEdge() -> string
        If the person is walking, returns the next edge on the persons route
        (including crossing and walkingareas). If there is no further edge or the
        person is in another stage, returns the empty string.
        """
        return self._getUniversal(tc.VAR_NEXT_EDGE, personID)
    def getEdges(self, personID, nextStageIndex=0):
        """getEdges(string, int) -> list(string)
        Returns a list of all edges in the nth next stage.
        For waiting stages this is a single edge
        For walking stages this is the complete route
        For driving stages this is [origin, destination]
        nextStageIndex 0 retrieves value for the current stage.
        nextStageIndex must be lower then value of getRemainingStages(personID)
        """
        return self._getUniversal(tc.VAR_EDGES, personID, "i", nextStageIndex)
    def getStage(self, personID, nextStageIndex=0):
        """getStage(string, int) -> int
        Returns the type of the nth next stage
          0 for not-yet-departed
          1 for waiting
          2 for walking
          3 for driving
          4 for access to busStop or trainStop
          5 for personTrip
        nextStageIndex 0 retrieves value for the current stage.
        nextStageIndex must be lower then value of getRemainingStages(personID)
        """
        return self._getUniversal(tc.VAR_STAGE, personID, "i", nextStageIndex)
    def getRemainingStages(self, personID):
        """getRemainingStages(string) -> int
        Returns the number of remaining stages (at least 1)
        """
        return self._getUniversal(tc.VAR_STAGES_REMAINING, personID)
    def getVehicle(self, personID):
        """getVehicle(string) -> string
        Returns the id of the current vehicle if the person is in stage driving
        and has entered a vehicle.
        Return the empty string otherwise
        """
        return self._getUniversal(tc.VAR_VEHICLE, personID)
    def getTaxiReservations(self, onlyNew):
        """getTaxiReservations(int) -> list(Stage)
        Returns all reservations. If onlyNew is 1, each reservation is returned
        only once
        """
        answer = self._getCmd(tc.VAR_TAXI_RESERVATIONS, "", "i", onlyNew)
        answer.read("!B")  # Type
        result = []
        # The payload is a counted list of compound Reservation objects.
        for _ in range(answer.readInt()):
            answer.read("!B")  # Type
            result.append(_readReservation(answer))
        return tuple(result)
    def removeStages(self, personID):
        """removeStages(string)
        Removes all stages of the person. If no new phases are appended,
        the person will be removed from the simulation in the next simulationStep().
        """
        # remove all stages after the current and then abort the current stage
        while self.getRemainingStages(personID) > 1:
            self.removeStage(personID, 1)
        self.removeStage(personID, 0)
    def add(self, personID, edgeID, pos, depart=tc.DEPARTFLAG_NOW, typeID="DEFAULT_PEDTYPE"):
        """add(string, string, double, double, string)
        Inserts a new person to the simulation at the given edge, position and
        time (in s). This function should be followed by appending Stages or the person
        will immediately vanish on departure.
        """
        format = "tssdd"
        values = [4, typeID, edgeID, depart, pos]
        self._setCmd(tc.ADD, personID, format, *values)
    def appendWaitingStage(self, personID, duration, description="waiting", stopID=""):
        """appendWaitingStage(string, float, string, string)
        Appends a waiting stage with duration in s to the plan of the given person
        """
        format = "tidss"
        values = [4, tc.STAGE_WAITING, duration, description, stopID]
        self._setCmd(tc.APPEND_STAGE, personID, format, *values)
    def appendWalkingStage(self, personID, edges, arrivalPos, duration=-1, speed=-1, stopID=""):
        """appendWalkingStage(string, stringList, double, double, double, string)
        Appends a walking stage to the plan of the given person
        The walking speed can either be specified, computed from the duration parameter (in s) or taken from the
        type of the person
        """
        if isinstance(edges, str):
            edges = [edges]
        format = "tilddds"
        values = [6, tc.STAGE_WALKING, edges, arrivalPos, duration, speed, stopID]
        self._setCmd(tc.APPEND_STAGE, personID, format, *values)
    def appendDrivingStage(self, personID, toEdge, lines, stopID=""):
        """appendDrivingStage(string, string, string, string)
        Appends a driving stage to the plan of the given person
        The lines parameter should be a space-separated list of line ids
        """
        format = "tisss"
        values = [4, tc.STAGE_DRIVING, toEdge, lines, stopID]
        self._setCmd(tc.APPEND_STAGE, personID, format, *values)
    def appendStage(self, personID, stage):
        """appendStage(string, stage)
        Appends a stage object to the plan of the given person
        Such an object is obtainable using getStage
        """
        format, values = simulation._writeStage(stage)
        self._setCmd(tc.APPEND_STAGE, personID, format, *values)
    def replaceStage(self, personID, stageIndex, stage):
        """replaceStage(string, int, stage)
        Replaces the nth subsequent stage with the given stage object
        Such an object is obtainable using getStage
        """
        format, values = simulation._writeStage(stage)
        # Prepend the compound header and the target stage index.
        format = "ti" + format
        values = [2, stageIndex] + values
        self._setCmd(tc.REPLACE_STAGE, personID, format, *values)
    def removeStage(self, personID, nextStageIndex):
        """removeStage(string, int)
        Removes the nth next stage
        nextStageIndex must be lower then value of getRemainingStages(personID)
        nextStageIndex 0 immediately aborts the current stage and proceeds to the next stage
        """
        self._setCmd(tc.REMOVE_STAGE, personID, "i", nextStageIndex)
    def rerouteTraveltime(self, personID):
        """rerouteTraveltime(string) -> None Reroutes a pedestrian (walking person).
        """
        self._setCmd(tc.CMD_REROUTE_TRAVELTIME, personID, "t", 0)
    def moveToXY(self, personID, edgeID, x, y, angle=tc.INVALID_DOUBLE_VALUE, keepRoute=1):
        '''Place person at the given x,y coordinates and force it's angle to
        the given value (for drawing).
        If the angle is set to INVALID_DOUBLE_VALUE, the person assumes the
        natural angle of the edge on which it is moving.
        If keepRoute is set to 1, the closest position
        within the existing route is taken. If keepRoute is set to 0, the person may move to
        any edge in the network but it's route then only consists of that edge.
        If keepRoute is set to 2 the person has all the freedom of keepRoute=0
        but in addition to that may even move outside the road network.
        edgeID is an optional placement hint to resolve ambiguities'''
        format = "tsdddb"
        values = [5, edgeID, x, y, angle, keepRoute]
        self._setCmd(tc.MOVE_TO_XY, personID, format, *values)
    def setSpeed(self, personID, speed):
        """setSpeed(string, double) -> None
        Sets the maximum speed in m/s for the named person for subsequent step.
        """
        self._setCmd(tc.VAR_SPEED, personID, "d", speed)
    def setType(self, personID, typeID):
        """setType(string, string) -> None
        Sets the id of the type for the named person.
        """
        self._setCmd(tc.VAR_TYPE, personID, "s", typeID)
    def setWidth(self, personID, width):
        """setWidth(string, double) -> None
        Sets the width in m for this person.
        """
        self._setCmd(tc.VAR_WIDTH, personID, "d", width)
    def setHeight(self, personID, height):
        """setHeight(string, double) -> None
        Sets the height in m for this person.
        """
        self._setCmd(tc.VAR_HEIGHT, personID, "d", height)
    def setLength(self, personID, length):
        """setLength(string, double) -> None
        Sets the length in m for the given person.
        """
        self._setCmd(tc.VAR_LENGTH, personID, "d", length)
    def setMinGap(self, personID, minGap):
        """setMinGap(string, double) -> None
        Sets the offset (gap to front person if halting) for this person.
        """
        self._setCmd(tc.VAR_MINGAP, personID, "d", minGap)
    def setColor(self, personID, color):
        """setColor(string, (integer, integer, integer, integer))
        Sets the color for the person with the given ID, i.e. (255,0,0) for the color red.
        The fourth component (alpha) is optional.
        """
        self._setCmd(tc.VAR_COLOR, personID, "c", color)
| 39.287532 | 112 | 0.646049 |
df2c593ddd9ef3aebdacbb0478b893eabfe1dcaa
| 2,611 |
py
|
Python
|
backend/api/btb/api/schema/types/company.py
|
prototypefund/project-c
|
a87a49d7c1317b1e3ec03ddd0ce146ad0391b5d2
|
[
"MIT"
] | 4 |
2020-04-30T16:11:24.000Z
|
2020-06-02T10:08:07.000Z
|
backend/api/btb/api/schema/types/company.py
|
prototypefund/project-c
|
a87a49d7c1317b1e3ec03ddd0ce146ad0391b5d2
|
[
"MIT"
] | 291 |
2020-04-20T13:11:13.000Z
|
2022-02-10T21:54:46.000Z
|
backend/api/btb/api/schema/types/company.py
|
prototypefund/project-c
|
a87a49d7c1317b1e3ec03ddd0ce146ad0391b5d2
|
[
"MIT"
] | 2 |
2020-04-19T14:56:01.000Z
|
2020-04-19T18:09:34.000Z
|
from graphene import ID, String, ObjectType, List, Field, Float, Int, NonNull, Boolean
from btb.api.schema.resolvers import (
demands_by_company,
supplies_by_company,
company_by_id,
)
from .skills import Skill
from flask import g
from .industry import Industry
class CompanyContact(ObjectType):
    """GraphQL type for the person acting as a company's contact."""
    id = ID(required=True)
    first_name = String(required=True)
    last_name = String(required=True)
    picture_url = String(required=False)
class Company(ObjectType):
    """GraphQL type for a company, with lazily resolved demand/supply
    lists and a batched industry lookup via flask.g dataloaders."""
    id = ID(required=True)
    name = String(required=True)
    address_line1 = String(required=False)
    address_line2 = String(required=False)
    address_line3 = String(required=False)
    postal_code = String(required=True)
    city = String(required=True)
    industry = Field(Industry, required=False)
    contact = Field(CompanyContact, required=True)
    # lazy
    demands = List(lambda: NonNull(Demand), resolver=demands_by_company)
    supplies = List(lambda: NonNull(Supply), resolver=supplies_by_company)
    def resolve_industry(root, info):
        # A singular Field must resolve to None when absent; the previous
        # `return []` handed graphene an empty list where an Industry
        # object (or None) was expected.
        if root.industry_id is None:
            return None
        # Batched lookup via the request-scoped industry dataloader.
        return g.industry_loader.load(root.industry_id)
class Demand(ObjectType):
    """GraphQL type for a company's demand (workers sought)."""
    id = ID(required=True)
    is_active = Boolean(required=True)
    name = String(required=True)
    description = String(required=False)
    skills = List(NonNull(Skill), required=True)
    quantity = Int(required=True)
    max_hourly_salary = Float(required=False)
    company = Field(lambda: Company, required=True, resolver=company_by_id)
    # we only have this for now
    def resolve_description(root, info):
        # Description is currently backed by the extended-description column.
        if root.description_ext is None:
            return None
        return root.description_ext
    def resolve_skills(root, info):
        # Batched lookup via the request-scoped skill dataloader.
        if root.skills is None:
            return []
        return g.skill_loader.load_many(root.skills)
class Supply(ObjectType):
    """GraphQL type for a company's supply (workers offered)."""
    id = ID(required=True)
    is_active = Boolean(required=True)
    name = String(required=True)
    description = String(required=False)
    skills = List(NonNull(Skill), required=True)
    quantity = Int(required=True)
    hourly_salary = Float(required=False)
    company = Field(lambda: Company, required=True, resolver=company_by_id)
    # we only have this for now
    def resolve_description(root, info):
        # Description is currently backed by the extended-description column.
        if root.description_ext is None:
            return None
        return root.description_ext
    def resolve_skills(root, info):
        # Batched lookup via the request-scoped skill dataloader.
        if root.skills is None:
            return []
        return g.skill_loader.load_many(root.skills)
| 27.197917 | 86 | 0.687476 |
df6479427b8d08154922a0b1dea90ac48e63fb48
| 331 |
py
|
Python
|
exercises/en/test_02_05_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/en/test_02_05_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/en/test_02_05_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
def test():
    """Course-exercise checker; relies on grader-injected globals:
    __solution__ (submitted source text), doc and __msg__."""
    assert (
        "from spacy.tokens import Doc" in __solution__
    ), "Are you importing the Doc class correctly?"
    assert doc.text == "spaCy is cool!", "Are you sure you created the Doc correctly?"
    assert "print(doc.text)" in __solution__, "Are you printing the Doc's text?"
    __msg__.good("Well done!")
| 41.375 | 86 | 0.667674 |
10dc2b5ab9cd5868010c4d5edd939dd8d285be21
| 1,157 |
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch06_arrays/ex11_array_min_max_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch06_arrays/ex11_array_min_max_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch06_arrays/ex11_array_min_max_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
import pytest
from ch06_arrays.solutions.ex11_array_min_max_and_pos import find_min, find_max, find_min_pos, find_max_pos
def test_find_min_and_max():
    """find_min/find_max locate the extrema of an unsorted list."""
    data = [2, 3, 4, 5, 6, 7, 8, 9, 1, 10]
    assert find_min(data) == 1
    assert find_max(data) == 10
# Each case: (lower, upper) search window plus the expected index/value —
# presumably lower inclusive, upper exclusive; consistent with all cases.
@pytest.mark.parametrize("lower, upper, expected_pos, expected_value",
                         [(0, 10, 8, 1), (2, 7, 3, 2), (0, 7, 3, 2)])
def test_find_min_pos(lower, upper, expected_pos, expected_value):
    """find_min_pos returns the index of the minimum within the window."""
    values = [ 5, 3, 4, 2, 6, 7, 8, 9, 1, 10 ]
    result_pos = find_min_pos(values, lower, upper)
    assert expected_pos == result_pos
    assert expected_value == values[result_pos]
# Each case: (lower, upper) search window plus the expected index/value —
# presumably lower inclusive, upper exclusive; consistent with all cases.
@pytest.mark.parametrize("lower, upper, expected_pos, expected_value",
                         [(0, 10, 9, 49), (2, 7, 5, 10), (0, 7, 1, 22)])
def test_find_max_pos(lower, upper, expected_pos, expected_value):
    """find_max_pos returns the index of the maximum within the window."""
    values = [ 1, 22, 3, 4, 5, 10, 7, 8, 9, 49 ]
    result_pos = find_max_pos(values, lower, upper)
    assert expected_pos == result_pos
    assert expected_value == values[result_pos]
| 30.447368 | 107 | 0.65255 |
10eebbaaaac06a8b84894140b3ab093624b4ca8b
| 2,886 |
py
|
Python
|
quark_core_api/data/storage/validation.py
|
arcticle/Quark
|
17aa5b5869a9e9c7a04c1a371fef5998f33dc319
|
[
"MIT"
] | null | null | null |
quark_core_api/data/storage/validation.py
|
arcticle/Quark
|
17aa5b5869a9e9c7a04c1a371fef5998f33dc319
|
[
"MIT"
] | null | null | null |
quark_core_api/data/storage/validation.py
|
arcticle/Quark
|
17aa5b5869a9e9c7a04c1a371fef5998f33dc319
|
[
"MIT"
] | null | null | null |
from future.utils import viewitems
# Default collection schema: id/name/dir are all mandatory, must be
# unique within the collection, and carry the types listed under "$fields".
_schema = {
    "$required" : ["id", "name", "dir"],
    "$unique" : ["id", "name", "dir"],
    "$fields": {
        "id" : {
            "$type" : int
        },
        "name" : {
            "$type" : str
        },
        "dir" : {
            "$type" : str
        }
    }
}
class Validator(object):
    """Validates a plain data dict against a declarative schema.

    The schema maps validation keywords ("$required", "$unique",
    "$fields", "$type") to constraints; each keyword dispatches to a
    dedicated check, and failures accumulate in a ValidationResult.
    """
    def __init__(self, schema):
        self.schema = schema
        # Dispatch on the storage object's class name.
        self._validator_funcs = {
            "CollectionObject" : self._validate_collection_object
        }
        # Dispatch on the schema keyword.
        self._validation_funcs = {
            "$required" : self._required_validation,
            "$unique" : self._uniqueness_validation,
            "$fields" : self._field_validation,
            "$type" : self._type_validation
        }
    def validate(self, data, storage_object):
        """Validate *data* against the schema; returns a ValidationResult."""
        object_type = type(storage_object).__name__
        func = self._validator_funcs[object_type]
        return func(data, storage_object)
    def _validate_collection_object(self, data, storage_object):
        """Run every schema keyword's check and collect the errors."""
        result = ValidationResult()
        for validation, constraint in viewitems(self.schema):
            func = self._validation_funcs[validation]
            func(data, constraint, storage_object, result)
        return result
    def _uniqueness_validation(self, data, constraint, storage_object, result):
        """Fail if a stored entry already uses any of the constrained fields."""
        fields = {}
        for field in constraint:
            if field in data:
                fields[field] = data[field]
        query = {"$or": fields}
        obj = storage_object.find_one(query)
        if obj:
            result.add_error("Uniqueness validation failed for fields {}. Existing entry: {}".format(fields, obj))
    def _required_validation(self, data, constraint, storage_object, result):
        """Fail for every required field missing from *data*."""
        for field in constraint:
            if field not in data:
                result.add_error("Required validation failed for field \"{}\".".format(field))
    def _field_validation(self, data, constraint, storage_object, result):
        """Apply each per-field rule (e.g. "$type") to the matching value."""
        for field in constraint:
            for validation, constr in viewitems(constraint[field]):
                func = self._validation_funcs[validation]
                if field in data:
                    func(data[field], constr, storage_object, result)
    def _type_validation(self, data, constraint, storage_object, result):
        """Fail unless *data* is, or can be cast to, the required type."""
        if not isinstance(data, constraint):
            try:
                # Try type casting to test if the value can be converted
                constraint(data)
            except (TypeError, ValueError):
                # Narrowed from a bare `except:` which also swallowed
                # SystemExit/KeyboardInterrupt.
                result.add_error("Type validation failed for \"{}\". Expected \"{}\"".format(data, constraint.__name__))
class ValidationResult(object):
    """Accumulates validation error messages."""
    def __init__(self):
        self.errors = []
    def add_error(self, error):
        """Record one error message."""
        self.errors.append(error)
    @property
    def has_error(self):
        """True once at least one error has been recorded."""
        return bool(self.errors)
| 31.032258 | 120 | 0.581774 |
33c393d9f9c6c44d861c89e628fa2fd432646d1c
| 1,171 |
py
|
Python
|
easy/5/python/app.py
|
carlan/dailyprogrammer
|
f8448c6a35277c567d0f1ecab781d45b294c8d0f
|
[
"MIT"
] | 1 |
2019-02-26T16:34:06.000Z
|
2019-02-26T16:34:06.000Z
|
easy/5/python/app.py
|
carlan/dailyprogrammer
|
f8448c6a35277c567d0f1ecab781d45b294c8d0f
|
[
"MIT"
] | null | null | null |
easy/5/python/app.py
|
carlan/dailyprogrammer
|
f8448c6a35277c567d0f1ecab781d45b294c8d0f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""app.py: challenge #4"""
__author__ = "Carlan Calazans"
__copyright__ = "Copyright 2016, Carlan Calazans"
__credits__ = ["Carlan Calazans"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Carlan Calazans"
__email__ = "carlancalazans at gmail dot com"
__status__ = "Development"
class Protected(object):
    """Tiny username/password gate backed by a ``secret.txt`` credential file.

    Each line of ``secret.txt`` is expected to look like ``username:password``.
    """

    def __init__(self):
        # Maps username -> password as read from secret.txt.
        self.auth_info = {}
        # Outcome of the most recent login() attempt.
        self.logged_in = False

    def load_login_and_password_from_file(self):
        """Populate ``auth_info`` from ``secret.txt`` in the working directory.

        Raises OSError if the file is missing.
        """
        with open('secret.txt', 'r') as file:
            for line in file:
                # maxsplit=1 keeps passwords that themselves contain ':' intact
                # (the original split(':') raised ValueError on such lines).
                user, passwd = line.rstrip("\n").split(':', 1)
                self.auth_info[user] = passwd

    def get_user_input(self):
        """Prompt for credentials on stdin, then attempt and report a login."""
        self.load_login_and_password_from_file()
        username = input('What\'s your username? ')
        password = input('What\'s your password? ')
        self.login(username, password)
        self.authorize()

    def login(self, username, password):
        """Set ``logged_in`` to True only when the pair matches a stored entry.

        NOTE(review): plain ``==`` comparison is used; for anything
        security-sensitive prefer hmac.compare_digest to avoid timing leaks.
        """
        # Membership test on the dict itself — no need for .keys().
        if username in self.auth_info:
            self.logged_in = (self.auth_info[username] == password)
        else:
            self.logged_in = False

    def authorize(self):
        """Print the outcome of the last login() attempt."""
        if self.logged_in:
            print('Logged in')
        else:
            print('Not allowed to login')
# Script entry point: build the gate and run the interactive login flow.
protected = Protected()
protected.get_user_input()
| 26.022222 | 58 | 0.701964 |
d54e67750524efac03bb6fd2c25b5c9728d6d744
| 2,405 |
py
|
Python
|
spo/utils/payrexx.py
|
libracore/spo
|
efff6da53a776c4483f06d9ef1acc8a7aa96b28e
|
[
"MIT"
] | null | null | null |
spo/utils/payrexx.py
|
libracore/spo
|
efff6da53a776c4483f06d9ef1acc8a7aa96b28e
|
[
"MIT"
] | 6 |
2019-08-23T18:36:26.000Z
|
2019-11-12T13:12:12.000Z
|
spo/utils/payrexx.py
|
libracore/spo
|
c6617a4624d683e27ee3fde745313c30504f3fd1
|
[
"MIT"
] | 1 |
2021-08-14T22:22:43.000Z
|
2021-08-14T22:22:43.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import urllib.request
import requests
import hmac
import hashlib
import base64
import json
API_BASE = "https://api.payrexx.com/v1.0/"
def create_payment(title, description, reference, purpose, amount,
    vat_rate, sku, currency, success_url):
    """Create a Payrexx invoice/payment link and return the invoice record.

    Reads the API key and instance name from the "Einstellungen Onlinetermin"
    single doctype; aborts via frappe.throw when no API key is configured.
    Returns the first element of the API response's ``data`` list.

    NOTE(review): ``amount`` is passed straight through — Payrexx presumably
    expects minor units (cents); confirm with the caller.
    """
    post_data = {
        "title": title,
        "description": description,
        # Hard-coded payment service provider id — presumably the one
        # configured in the Payrexx instance; verify.
        "psp": 1,
        "referenceId": reference,
        "purpose": purpose,
        "amount": amount,
        "vatRate": vat_rate,
        "currency": currency,
        "sku": sku,
        "preAuthorization": 0,
        "reservation": 0,
        "successRedirectUrl": success_url
    }
    # Signature is computed over the default (quote_plus) url-encoding ...
    data = urllib.parse.urlencode(post_data).encode('utf-8')
    settings = frappe.get_doc("Einstellungen Onlinetermin", "Einstellungen Onlinetermin")
    if not settings.payrexx_api_key:
        frappe.throw( _("Bitte Payrexx Einstellungen in den Einstellungen Onlinetermin eintragen") )
    # HMAC-SHA256 over the encoded payload, base64-encoded, as required by
    # the Payrexx API signature scheme.
    dig = hmac.new(settings.payrexx_api_key.encode('utf-8'), msg=data, digestmod=hashlib.sha256).digest()
    post_data['ApiSignature'] = base64.b64encode(dig).decode()
    # ... but the request body is re-encoded with quote_via=quote.
    # NOTE(review): values containing spaces or '+' are therefore signed and
    # sent in different encodings — confirm against Payrexx signature rules.
    data = urllib.parse.urlencode(post_data, quote_via=urllib.parse.quote).encode('utf-8')
    r = requests.post("{0}Invoice/?instance={1}".format(API_BASE, settings.payrexx_instance), data=data)
    response = json.loads(r.content.decode('utf-8'))
    invoice = response['data'][0]
    return invoice
def get_payment_status(payrexx_id):
    """Fetch the current state of an existing Payrexx invoice by id.

    Returns the first element of the API response's ``data`` list.
    Aborts via frappe.throw when no API key is configured.
    """
    # Empty payload: the signature below is the HMAC of an empty encoded body.
    post_data = {}
    data = urllib.parse.urlencode(post_data).encode('utf-8')
    settings = frappe.get_doc("Einstellungen Onlinetermin", "Einstellungen Onlinetermin")
    if not settings.payrexx_api_key:
        frappe.throw( _("Bitte Payrexx Einstellungen in den Einstellungen Onlinetermin eintragen") )
    dig = hmac.new(settings.payrexx_api_key.encode('utf-8'), msg=data, digestmod=hashlib.sha256).digest()
    post_data['ApiSignature'] = base64.b64encode(dig).decode()
    data = urllib.parse.urlencode(post_data, quote_via=urllib.parse.quote).encode('utf-8')
    # NOTE(review): sending a body with requests.get is unusual — Payrexx may
    # expect the signature as a query parameter instead; confirm.
    r = requests.get("{0}Invoice/{2}/?instance={1}".format(API_BASE, settings.payrexx_instance, payrexx_id), data=data)
    response = json.loads(r.content.decode('utf-8'))
    invoice = response['data'][0]
    return invoice
| 41.465517 | 119 | 0.701455 |
d555c1d98c79b5e5631ee28f4d125e73080c465a
| 503 |
py
|
Python
|
project/views/frontend.py
|
DanielGrams/cityservice
|
c487c34b5ba6541dcb441fe903ab2012c2256893
|
[
"MIT"
] | null | null | null |
project/views/frontend.py
|
DanielGrams/cityservice
|
c487c34b5ba6541dcb441fe903ab2012c2256893
|
[
"MIT"
] | 35 |
2022-01-24T22:15:59.000Z
|
2022-03-31T15:01:35.000Z
|
project/views/frontend.py
|
DanielGrams/cityservice
|
c487c34b5ba6541dcb441fe903ab2012c2256893
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, request
from project import app
# Serves the pre-built single-page frontend out of ../static/frontend,
# mounted at the site root.
frontend = Blueprint(
    "frontend", __name__, static_folder="../static/frontend", static_url_path="/"
)
@frontend.route("/")
def index(path=None): # pragma: no cover
    """Deliver the SPA entry point.

    ``path`` is unused (the route has no parameter) — presumably kept for
    symmetry with a catch-all route; verify before removing.
    """
    return frontend.send_static_file("index.html")
@frontend.errorhandler(404)
def not_found(e): # pragma: no cover
    """Return a bare 404 for API paths; hand everything else to the SPA
    so client-side routing can resolve it.

    NOTE(review): Blueprint.errorhandler(404) only fires for errors raised
    inside this blueprint; an app-wide 404 catch-all normally needs
    app_errorhandler — confirm routing behaves as intended.
    """
    if request.path.startswith("/api"):
        return "", 404
    return frontend.send_static_file("index.html")
app.register_blueprint(frontend)
| 20.958333 | 81 | 0.713718 |
895a69cce0fa63c5d26f53bf65e1e851a1d94e0b
| 27,891 |
py
|
Python
|
Programm/nebenfunktionen.py
|
christopher-chandler/emoticon_emoji_recognizer
|
4cc5ed3ab97fff320c75e7f3f5affb0d4be6d038
|
[
"MIT"
] | null | null | null |
Programm/nebenfunktionen.py
|
christopher-chandler/emoticon_emoji_recognizer
|
4cc5ed3ab97fff320c75e7f3f5affb0d4be6d038
|
[
"MIT"
] | null | null | null |
Programm/nebenfunktionen.py
|
christopher-chandler/emoticon_emoji_recognizer
|
4cc5ed3ab97fff320c75e7f3f5affb0d4be6d038
|
[
"MIT"
] | null | null | null |
'''
Hier sind zusaetzliche Funktionen gespeichert, die in das Hauptprogramm via Import importiert werden.
Die entsprechenden Beschreibung der Funktion sind bei der jeweiligen Funktionen.
'''
#########################
# Notwendige Pythonmodule
#########################
import re,os,shutil,tkinter
from datetime import datetime
from tkinter import filedialog, Tk, messagebox
#########################
# Optional matplotlib import
#########################
# Try to import matplotlib; the program can (with restrictions) run without it.
try:
    import matplotlib.pyplot as plt
    from matplotlib import figure
except ImportError:
    # matplotlib is not installed: tell the user and let them decide whether
    # to continue with reduced functionality or abort.
    print(
        "\nDa das Modul 'matplotlib' nicht installiert ist bzw. nicht importiert werden kann,\nkann das Programm nicht wie vorgesehen ausgefuehrt werden. ")
    while True:
        # Ask whether to run the program anyway.
        program_continue = input("\nWollen Sie das Programm trotzdem ausfuehren? (y/n) ").lower()
        # User agrees: continue without matplotlib (plotting will not work).
        if program_continue == "y":
            print("Die Weiternutzung des Programms ist eingeschraenkt, da 'matplotlib' nicht vorhanden ist.")
            break
        # User declines: terminate immediately.
        elif program_continue == "n":
            print("Das Programm wird sofort beendet.")
            raise SystemExit
        # Unknown / invalid answer: re-prompt.
        else:
            print(f"{program_continue} ist keine gueltige Antwort. Entweder 'y' oder 'n' eingeben.")
#########################
# Loading the emoticon/emoji databases
#########################
# Database paths are kept in module-level variables so they need not be
# repeated throughout the program.  Note: the user database is only a path
# here — it is created/read elsewhere.
program_emoticon_database = "Ressourcen/Emoticons_emoji_datenbank/emoticon_basisdatenbank.tsv" # program database
user_emoticon_database = "Ressourcen/Emoticons_emoji_datenbank/emoticon_benutzerdatenbank.tsv" # user database
emoji_database = "Ressourcen/Emoticons_emoji_datenbank/emoji_datenbank.tsv" # emoji database
emoticon_dict, emoji_dict = dict(), dict()
# Open the program and emoji databases (relative to the working directory).
with open(program_emoticon_database, mode="r", encoding="utf-8") as emoticon_file, \
        open(emoji_database, mode="r", encoding="utf-8") as emoji_file:
    '''
    Die Dateien werden eingelesen und die entsprechenden Dictionaries werden ergaenzt.
    Es wird immer das erste Element bzw. [0] an die entsprechende Liste angehangen,
    da das Emoticon bzw. Emoticon dort gespeichert ist. Danach werden die Dateien zugemacht.
    '''
    # Key = first whitespace-separated field (the emoticon/emoji itself),
    # value = the complete database line.
    for emoticon in emoticon_file: emoticon_dict[emoticon.split()[0]] = emoticon # Emoticons
    for emoji in emoji_file: emoji_dict[emoji.split()[0]] = emoji # Emojis
#########################
# Nebenfunktionen
#########################
def menu(output_menu, menu_name, menu_information):
    """Interactive console menu.

    ``output_menu`` maps a display name to a menu entry.  Entries are usually
    zero-argument callables, but pre-computed (non-callable) values are
    returned as-is.  The user may pick an entry either by its name
    (case-insensitive — the input is Title-cased before lookup) or by its
    1-based position in the dict's insertion order.

    Returns whatever the chosen callable returns, or the chosen value itself
    when it is not callable.
    """
    invalid_option = f'Leider ist ein Fehler ist aufgetreten. Mit der Eingabetaste gelangen Sie wieder in das {menu_name}.'
    while True:
        print(f'\n\t\t~ {menu_name} ~\n')
        print(f'{menu_information}\n')
        print("Hinweis: Gross- und Kleinschreibung muessen nicht bei der Eingabe beruecksichtigt werden\n")
        # List the menu entries, numbered from 1.
        for num, elem in enumerate(output_menu, start=1):
            print(f'{num}: {elem}')
        choice_str = input("\nEntweder die Nummer des Menuepunkts oder dessen Namen bitte eingeben:").strip()
        # Selection by name.  BUG FIX: a membership test (instead of .get()
        # plus truthiness) makes entries with falsy values selectable, and the
        # chosen entry is bound before the break — the original crashed with
        # NameError when an entry was picked by name.
        if choice_str.title() in output_menu:
            chosen = output_menu[choice_str.title()]
            break
        # Otherwise try a numeric selection.
        try:
            choice_num = int(choice_str)
        except ValueError:
            # Not a number either: show the error and re-display the menu.
            input(invalid_option)
        else:
            if 0 < choice_num <= len(output_menu):
                # Map the 1-based choice onto the dict's insertion order.
                chosen = list(output_menu.values())[choice_num - 1]
                break
            # Number out of range: show the error and re-display the menu.
            input(invalid_option)
    # Callables are invoked; anything else (e.g. a pre-computed result) is
    # handed straight back to the caller.
    if callable(chosen):
        return chosen()
    return chosen
'''
token_filter taggt die Tokens nach den angegebenen Regexausdruecken.
Es wird im Hauptprogramm eingesetzt, um zu verhindern, dass Woerter faelscherweise als Emoticons
getaggt werden. Die Regextags in dieser Funktion sind somit eher intern und tauchen nicht im Programm auf.
'''
def token_filter(token_list):
    """Tag each token with an internal word-class so that ordinary words,
    usernames, times, e-mail addresses, URLs, hashtags, numbers etc. are not
    mistaken for emoticons.

    Tokens matching none of the patterns are tagged
    "potenzielles_emoticon" for the downstream emoticon/emoji recognizers.
    Returns a list of (token, tag) pairs.
    """
    # Results: (token, word-class) pairs.
    potenzielle_emoticons = list()
    # Special letters that occur in other European languages (Spanish,
    # Italian, ...).
    special_letters=r'ÀÂÄÇÈÉÊÎÔÖÙÛÜssÀÂÄÇÈÉÊÎÔÖÙÛÜÝĀÁÍŁÓŚĆĄÃĚŻàâäçèéêîôöùûüssàâäçèéêîôöùûüýāáíłóśćąãěż'
    # Common punctuation characters.
    punct_symbol="[\\/«»<>+&?='#%١٢٤!*()%;:\-+,\.]"
    # One big alternation; the named group that matched yields the tag.
    # (The German inline '#' comments below are part of the VERBOSE pattern
    # string and are left untouched on purpose.)
    regex = re.compile(rf'''(?P<basic_word>^[/<[]*[A-Za-z{special_letters}]+[A-Za-z{special_letters}]$)|# Normale Woerter
    (?P<mixed_word>[\/[<>\-][\w+]|[\w{3}]{punct_symbol}$)|# Woerter mit Zeichensetzungen
    (?P<word_contraction>^[A-Za-z{special_letters}]+\b[',!.?]*[A-Za-z{special_letters}]*$)|#Abkuerzungen
    (?P<user>^(@|@/|)[A-Za-z{special_letters}]+(:|[0-9])(.+)$)|# Benutzernamen
    (?P<time>([0-9]+)(:[0-9])(:[0-9]+)*)|# Uhrzeit
    (?P<email>(\w+[.])*\w+@\w+[.]\w+)|# Emailaddressen
    (?P<website>^(https://|http://)*(www)*[A-Za-z]+[.][A-Za-z]+(.+)$)|# Webseite
    (?P<hashtag>[#]\w{3,}|(\[)[#](.+)(]))|# Hashtags
    (?P<numbers>[%]*[\d+])|# Ziffern
    (?P<abbreviations>^[A-Z]+(.+)[0-9]*|[0-9]([amAM]|[pmPM]))|# Abkuerzungen
    (?P<common_commbinations>(->)+|(--)+|(,-)+$|(amp)|(:&))|# Kombination, die in den Texten haeufig vorkommen.
    (?P<punct_sym>^{punct_symbol}$|(=))+# Zeichensetzung ''', re.VERBOSE) # VERBOSE allows comments inside the pattern
    # Tag every token.
    for token in token_list:
        # NOTE(review): regex.search() is run twice per matching token;
        # the first call could be reused.
        if regex.search(token):
            # Build the groupdict of the match.
            regex_search_dict = regex.search(token).groupdict()
            '''
            Da es nur ein gueltiges Schluessel-Wert-Paar im Dictionary geben kann, sind die anderen == None .
            Z.B. {'cap': None, 'date': None, 'card': None,etc.'}
            Mit der If-Bedinung werden diese None-Objekte ignoriert. es wird als das Paar gewaehlt, dass eine Wortklasse hat.
            '''
            # Only one named group can have matched; skip the None entries
            # and keep the (token, group-name) pair that did.
            for (wert, schluessel) in regex_search_dict.items():
                if schluessel != None:potenzielle_emoticons.append((token, wert))
        else:
            # No pattern matched: candidate emoticon for the recognizers.
            potenzielle_emoticons.append((token, "potenzielles_emoticon"))
    # Return the tagged tokens.
    return potenzielle_emoticons
'''
Die Funktion hat ein Argument 'text' und tokenisiert und paarst sie entsprechend.
Personbezogene Daten z.b. Hashtags,Webseiten, etc, die oft in Tweets gefunden werden, werden entsprechend anonymisiert.
'''
def tokenizer(text):
    """Tokenize an iterable of text lines (e.g. tweets).

    Personal data often found in tweets (e-mail addresses, @user handles,
    hashtags, links) is replaced by anonymising placeholders; emoticons,
    emojis, punctuation and brackets are padded with spaces so that a plain
    ``str.split`` yields them as separate tokens.  Returns the token list.
    """
    # Collected tokens across all input lines.
    tokens = list()
    for row in text:
        # Pre-scan for punctuation glued to words and for brackets, so they
        # can be split off below without damaging emoticons.
        # Regular expressions used while tokenizing:
        # punctuation glued to words
        punctuation = re.findall(r"[a-zA-Z]{2,}[:]+|[,.!?*\"\']", row)
        # brackets attached to words
        brackets = re.findall(r"\([a-zA-ZÄÖÜäöüß]{2,}\)*|\([a-zA-ZÄÖÜäöüß]{2,}|[a-zA-ZÄÖÜäöüß]{2,}\)", row)
        # Time/date detector, so e.g. "12:30" is not parsed as an emoticon.
        time_date = re.compile("[0-9]{2}:[0-9]{2}:*[0-9]{0,2}|^\d\d?\.\d\d?\.*(\d{2}|\d{4})*")
        # Anonymise hashtags, usernames, e-mail addresses and web links with
        # naive regular expressions.
        row = re.sub("(\w+[.])*\w+@\w+[.]\w+", "emailadresse-entfernt", row)
        row = re.sub("(@\w+)|(@[/]+[\w]+)", "Benutzer-entfernt", row) # usernames
        row = re.sub("#\w{3,}", "Hashtag-entfernt", row) # hashtags
        row = re.sub("(https://|www.)[/w\w+.]+", "Link-entfernt", row) # web links
        # Separate known emoticons from neighbouring tokens.
        for emoticon in emoticon_dict.keys():
            # Pad the emoticon with spaces.
            if emoticon in row.split():
                # NOTE(review): the comment intent is to *exclude* times/dates,
                # but this pads the emoticon only when a time IS found in the
                # row — the condition looks inverted; verify against the
                # intended behaviour before changing.
                if time_date.findall(row):
                    row = row.replace(emoticon, " " + emoticon + " ")
        # Separate known emojis from neighbouring tokens.
        for emoji in emoji_dict.keys():
            # Pad the emoji with spaces.
            if emoji in row:
                row = row.replace(emoji, " " + emoji + " ")
        # Split punctuation off the tokens.
        for word in punctuation:
            # Colons are handled separately so emoticons like ":)" survive.
            if ":" in word and word in row and word[0].isnumeric() == False:
                # "Hello:" becomes "Hello :".
                colon = word.index(":")
                row = row.replace(word, word[:colon] + " " + word[colon:])
            else:
                # Other punctuation is simply padded: "Hey." -> "Hey . ".
                row = row.replace(word, f" {word} ")
        # Split brackets off the tokens.
        for word in brackets:
            # Bracket at both the start and the end.
            if "(" == word[0] and ")" == word[-1]:
                row = row.replace(word, word[0] + " " + word[1:-1] + " " + word[-1])
            elif "(" == word[0] and ")" != word[-1]:
                # Bracket only at the start.
                row = row.replace(word, " ( " + word[1:])
            else:
                # Bracket only at the end.
                row = row.replace(word, word[:-1] + " ) ")
        # Split the processed line into tokens.
        tokens.extend(row.split())
    # Return the tokens.
    return tokens
def file_finder():
    """Open a native file-selection dialog and return the chosen path.

    The selected file is returned as a path string (tkinter returns an
    empty string when the dialog is cancelled); the caller is responsible
    for opening it.
    """
    # Create a Tk root only to host the dialog.
    dialog_root = Tk()
    # Keep it on top so the user cannot overlook the dialog.
    dialog_root.attributes("-topmost", True)
    # Hide the bare root window itself; only the dialog should be visible.
    dialog_root.withdraw()
    # Native "open file" dialog.
    chosen_path = filedialog.askopenfilename()
    dialog_root.withdraw()
    return chosen_path
# Tkinter information pop-up.
def information_window(message):
    """Show *message* in a modal tkinter information dialog."""
    hidden_root = tkinter.Tk()
    # Raise above other windows so the user cannot miss the pop-up.
    hidden_root.attributes("-topmost", True)
    # Hide the bare root window; only the message box should show.
    hidden_root.withdraw()
    messagebox.showinfo("Information", message)
# Diese Funktion nimmt ein Dictionary als Argument an. Die Ergebnisse werden dann in Idle spaltenweise ausgegeben.
#wenn rel_has_run = True, werden zusaetzliche Ergebnisse mitausgegeben werden.
''' Bei True:
Anzahl saemtlicher Tags 3004
Anzahl falscher Tags 0
uebereinstimmung 100.0
Fehlerrate 0.0
Token Goldtag Programmtag
'''
def idle_results(res_dict,rel_has_run=False):
    """Pretty-print a result dict column-wise on the console.

    Keys are presumably (index/token, token) tuples whose second element is
    printed — TODO confirm against the callers.  With ``rel_has_run`` True,
    the first five entries are treated as header/summary lines and printed
    verbatim.  Tab counts vary with key length to keep columns aligned.
    """
    if rel_has_run:
        i = 0 # counter for the loop below
        for key in res_dict:
            # The first five entries describe the file structure/summary and
            # are printed as-is; the remaining entries are token/tag pairs.
            if i < 5:
                i += 1
                print(f"{key}\t\t{res_dict[key]}\n")
            # From the sixth entry on, print the token column.
            else:
                # Key length decides how many tabs separate the columns.
                if len(key) >= 8:
                    print(f"{key[1]}\t\t{res_dict[key]}\n")
                else:
                    print(f"{key[1]}\t\t\t{res_dict[key]}\n")
    else:
        # Print each token (or emoticon/emoji) with its classification,
        # numbered from 1.
        for line,key in enumerate(res_dict,start=1):
            # Key length decides how many tabs separate the columns.
            if len(key) >= 8:
                print(f"{line}\t{key[1]}\t{res_dict.get(key)}")
            elif len(key) >= 4:
                print(f"{line}\t{key[1]}\t\t{res_dict.get(key)}")
            else:
                print(f"{line}\t{key[1]}\t\t\t{res_dict.get(key)}")
# Prints static program metadata.
def program_information():
    """Print name/version/authors and wait for the user to press Enter."""
    print("\nProgramminformation\n")
    # Static metadata shown to the user.
    details = {
        "Name:": "Emoji- und Emoticon-Erkenner",
        "Version:": "1.0",
        "Versionsdatum:": "10.09.2020",
        "Programmierer:": "Christopher Chandler, Gianluca Cultraro",
    }
    for label, value in details.items():
        print(label, value)
    input("\nDruecken Sie die Eingabetaste, um wieder in das Hauptmenue zu gelangen.")
# Terminates the program after an explicit confirmation.
def program_end():
    """Ask for confirmation; raise SystemExit on 'y', return normally on 'n'.

    Any other answer re-prompts.
    """
    confirmed = None
    while confirmed is None:
        reply = input("⚠️ Wollen Sie das Programm wirklich beenden?(y/n) ⚠️").lower()
        if reply == "y":
            confirmed = True
        elif reply == "n":
            confirmed = False
        else:
            # Invalid answer: explain and ask again.
            print(f"{reply} ist keine gueltige Antwort. Entweder 'y' oder 'n' eingeben.")
    if confirmed:
        print("Das Programm wird jetzt beendet.")
        # Terminate the program.
        raise SystemExit
    print("Das Programm wird nicht beendet. Sie werden zum Hauptmenue weitergeleitet.")
    input("Druecken Sie die Eingabetaste, um fortzufahren: ")
# Deletes the user's emoticon database after confirmation.
def delete_user_emoticons():
    """Delete the user emoticon database file, after asking for confirmation.

    If the file does not exist, only an informational message is shown.
    """
    # Path of the user database (relative to the working directory).
    user_emoticon_database = "Ressourcen/Emoticons_emoji_datenbank/emoticon_benutzerdatenbank.tsv"
    # Loop until the user gives a valid answer.
    while True:
        # Nothing to delete: inform the user and leave.
        if os.path.exists(user_emoticon_database) is not True:
            print( "Die Benutzerdatenbank ist momentan nicht vorhanden. Diese wird erst nach der ersten Textanalyse erstellt.")
            input("\nDruecken Sie die Eingabetaste, um wieder in das Hauptmenue zu gelangen.")
            break
        # Ask the user to confirm the (irreversible) deletion.
        answer=input("⚠️ Sind Sie sich sicher, dass Sie Ihre Emoticondatenbank wirklich loeschen wollen ? Dies kann nicht rueckgaengig gemacht werden.⚠ (y/n)").lower()
        if os.path.exists(user_emoticon_database) and answer == "y":
            os.remove(user_emoticon_database)# remove the user database
            print("Ihre Datenbank wurde geloescht.")
            input("\nDruecken Sie die Eingabetaste, um wieder in das Hauptmenue zu gelangen.")
            break
        # Declined: keep the database.
        elif answer == "n":
            print("Ihre Datenbank wurde nicht geloescht.")
            input("\nDruecken Sie die Eingabetaste, um wieder in das Hauptmenue zu gelangen.")
            break
# Deletes the results folder ("Ergebnisse") after confirmation.
def delete_result_folder():
    """Delete the 'Ergebnisse' folder next to this module, after confirmation.

    If the folder does not exist, only an informational message is shown.
    """
    # Locate the results folder relative to this file.
    dirname = os.path.dirname(__file__)
    output_folder = os.path.join(dirname, "Ergebnisse")
    # Loop until the user gives a valid answer.
    while True:
        # Nothing to delete: inform the user and leave.
        if os.path.exists(output_folder) is not True:
            print("Dieser Ordner ist momentan nicht vorhanden. Ihr Ergebnisordner wird erst nach der ersten Ergebnisausgabe erstellt.")
            input("\nDruecken Sie die Eingabetaste, um wieder in das Hauptmenue zu gelangen.")
            break
        # Ask the user to confirm the (irreversible) deletion.
        answer = input("⚠️ Sind Sie sich sicher, dass Sie Ihren Ergebnisordner wirklich loeschen wollen ? Dies kann nicht rueckgaengig gemacht werden.⚠ (y/n)").lower()
        # Folder exists and the user confirmed:
        if os.path.exists(output_folder) and answer == "y":
            # remove the folder tree (errors are ignored on purpose).
            shutil.rmtree(output_folder, ignore_errors=True)
            print("Ihr Ergebnisordner wurde geloescht.")
            input("\nDruecken Sie die Eingabetaste, um wieder in das Hauptmenue zu gelangen.")
            break
        # Declined: keep the folder.
        elif answer == "n":
            print("Ihr Ergebnisordner wurde nicht geloescht.")
            input("\nDruecken Sie die Eingabetaste, um wieder in das Hauptmenue zu gelangen.")
            break
# Console menu around the recorded processing times.
# All output goes to the console; durations are shown with 4 decimal places.
def time_analysis_menu(running_time):
    """Offer console options for the list of recorded durations.

    ``running_time`` is a mutable list of floats; one processing run consists
    of three measurements, so "last run" sums the final three entries.
    """
    # Show the duration of the most recent run (sum of the last 3 entries).
    def current_running_time():
        print(f"Die Dauer des letzten Vorgangs betraegt: {sum(running_time[-3:]):0.4f}")
        input("\nDruecken Sie die Eingabetaste, um wieder in das Hauptenue zu gelangen.")
    # Show the total duration of all recorded runs.
    def all_running_time():
        print(f"Die Dauer aller Vorgaenge betraegt: {sum(running_time):0.4f}\n")
        input("\nDruecken Sie die Eingabetaste, um wieder in das Hauptenue zu gelangen.")
    # Clear all recorded times (after confirmation).
    def delete_times():
        while True:
            # NOTE(review): the confirmation is asked *before* checking
            # whether any times exist, so an empty list still prompts once.
            choice = input("⚠ Sind Sie sicher, dass Sie die bisherigen Zeiten loeschen wollen? Dies kann nicht rueckgaengig gemacht werden.⚠ (y/n)").lower()
            # Nothing recorded yet: inform the user and leave.
            if not running_time:
                print("Es sind noch keine Zeiten zum Loeschen vorhanden.")
                input("\nDruecken Sie die Eingabetaste, um wieder in das Hauptmenue zu gelangen.")
                break
            # Confirmed: clear the list in place and leave.
            elif choice == "y":
                running_time.clear()
                break
            # Declined: keep the times and leave.
            elif choice == "n":
                print("Ihre Zeiten wurden nicht geloescht.")
                input("\nDruecken Sie die Eingabetaste, um wieder in das Hauptmenue zu gelangen.")
                break
    # Menu entries (display name -> callable).
    time_menu = {"Die Dauer des letzten Vorgangs": current_running_time,
                 "Die Dauer aller Vorgaenge": all_running_time,
                 "Alle Zeiten loeschen": delete_times}
    # Menu title.
    menu_name = f"\nZeit Berechnungen\n"
    # Hint shown inside the menu.
    menu_information = "Wie soll die berechnete Zeit ausgegeben werden?"
    # Hand control to the shared menu loop.
    menu(time_menu, menu_name, menu_information)
# Result output: lets the user choose console, file, or both.
def output_results(res_dict, twit_has_run = False, fiit_has_run = False, rel_has_run = False):
    """Ask the user how the analysis results should be emitted.

    ``res_dict`` holds the tagged results; exactly one of the three mode
    flags is expected to be True and selects the output file's name prefix.
    """
    # Print the results on the console via the shared pretty-printer.
    def display_results():
        print("Analyseergebnisse:\n")
        idle_results(res_dict, rel_has_run)
    # Write the results into a timestamped file inside ./Ergebnisse.
    def file_results(res_dict):
        # Pick the file-name prefix from whichever analysis mode ran.
        if twit_has_run: # Twitter files
            save_name = "Twitter"
        elif fiit_has_run: # plain text files
            save_name = "Text"
        elif rel_has_run: # reliability analyses
            save_name = "Verlässlichkeits"
        else:
            # Defensive fallback: previously an unset flag combination raised
            # NameError when building the file name below.
            save_name = "Analyse"
        # Timestamp in hour-minute_day_month_year format.
        fulldate = datetime.now()
        timestamp = fulldate.strftime("%H-%M_%d_%B_%Y")
        filename = f"{save_name}_Ergebnisse_{timestamp}.txt"
        # Results folder lives next to this module; create it if needed.
        dirname = os.path.dirname(__file__)
        folderpath = os.path.join(dirname, "Ergebnisse")
        os.makedirs(folderpath, exist_ok=True)
        savefile = os.path.join(dirname, "Ergebnisse", filename)
        # BUG FIX: use a context manager — the original leaked the handle
        # when neither branch below matched.
        with open(savefile, mode="w", encoding="utf-8") as file:
            if rel_has_run:
                # The first five entries describe structure/summary and are
                # written verbatim; the rest are token/tag columns whose tab
                # count depends on the key length.
                for i, key in enumerate(res_dict):
                    if i < 5:
                        file.write(f"{key}\t\t{res_dict[key]}\n")
                    else:
                        if len(key) >= 8:
                            file.write(f"{key[1]}\t\t{res_dict[key]}\n")
                        else:
                            file.write(f"{key[1]}\t\t\t{res_dict[key]}\n")
            # Tweet/text results use a slightly different column layout.
            elif twit_has_run or fiit_has_run:
                for key in res_dict:
                    if len(key) >= 8:
                        # key[0] drops the numbering from the dict key.
                        file.write(f"{key[0]}\t{res_dict.get(key)}\n")
                    else:
                        file.write(f"{key[1]}\t\t\t{res_dict.get(key)}\n")
    # Convenience: both console and file output.
    def both_results():
        display_results()
        file_results(res_dict)
    # BUG FIX: the original dict contained ``file_results(res_dict)`` — the
    # function was called *eagerly* while building the menu, writing a file
    # regardless of the user's choice; defer the call with a lambda.
    output_menu = {"Konsole": display_results,
                   "Datei": lambda: file_results(res_dict),
                   "Konsole + Datei": both_results}
    # Menu title.
    menu_name = '\nAusgabemenue\n'
    # Menu hint.
    menu_information = 'Wie sollen die Ergebnisse ausgegeben werden?\n'
    # Hand control to the shared menu loop.
    menu(output_menu, menu_name, menu_information)
'''
Mit dieser Funktion wird eine graphische Darstellung der Ergebnisse erzeugt.
Diese erscheint anschliessend im Ergebnisordner.
'''
def bar_chart(values_list):
    """Render a 4-bar summary chart and save it as a PNG in ./Ergebnisse.

    ``values_list`` holds the counts for emoticons, emojis, new coinages and
    the rest, in that order.  Requires matplotlib (``plt``) to have been
    imported successfully at module load.
    """
    # X-axis labels: category name plus its count.
    x_achse = [f'Emoticons\n{values_list[0]}', f'Emojis\n{values_list[1]}',
               f'Neubildung\n{values_list[2]}', f'Rest\n{values_list[3]}']
    # Bar colours.
    colors = ["gold", "royalblue", "purple", "teal"]
    # Bar heights.
    values = values_list
    # X positions, one per category.
    x_pos = [i for i, _ in enumerate(x_achse)]
    # Draw the bars.
    plt.bar(x_pos, values, color=colors)
    # Axis and title labels.
    plt.xlabel("Tokenkategorie")
    plt.ylabel("Anzahl der Tokens")
    plt.title(f"Auswertung")
    # Timestamp in hour-minute_day_month_year format, matching the
    # reliability results file so both share the same base name.
    fulldate = datetime.now()
    timestamp = fulldate.strftime("%H-%M_%d_%B_%Y")
    # NOTE(review): this path is relative to the working directory and the
    # folder is created elsewhere — savefig fails if it does not exist yet.
    filename = f"Ergebnisse/Verlässlichkeits_Ergebnisse_{timestamp}.png"
    plt.xticks(x_pos, x_achse)
    # Extra bottom margin so the x-axis labels are fully visible.
    plt.gcf().subplots_adjust(bottom=0.15)
    # Write the image file.
    plt.savefig(filename, dpi=500)
| 47.113176 | 167 | 0.650353 |
98aba963ec6fdd466b1b18b9f660e46771e7d742
| 1,068 |
py
|
Python
|
SurveyManager/survey/urls.py
|
javiervar/SurveyManager
|
bbe2ed356654c32586c587f58c609c8ce014e96b
|
[
"MIT"
] | null | null | null |
SurveyManager/survey/urls.py
|
javiervar/SurveyManager
|
bbe2ed356654c32586c587f58c609c8ce014e96b
|
[
"MIT"
] | null | null | null |
SurveyManager/survey/urls.py
|
javiervar/SurveyManager
|
bbe2ed356654c32586c587f58c609c8ce014e96b
|
[
"MIT"
] | null | null | null |
from django.urls import path
from survey import views
from rest_framework import routers
from rest_framework.urlpatterns import format_suffix_patterns
# URL routes for the survey app: server-rendered pages first, then the
# class-based endpoints used by the constructor front end.
urlpatterns=[
    path('',views.index,name="index"),
    path('constructor/',views.Constructor,name="constructor"),
    path('egresados/',views.Egresados,name="egresados"),
    # Detail pages keyed by an integer survey id.
    path('encuesta/<int:id>/',views.GetEncuesta,name="encuesta"),
    path('respuestas/<int:id>/',views.Respuestas,name="respuestas"),
    path('responder/',views.Responder,name="responder"),
    # Class-based views (API-style endpoints).
    path('survey/',views.Survey.as_view(),name="survey"),
    path('saveQuestion/',views.SaveQuestion.as_view(),name="saveQuestion"),
    path('saveAnswer/',views.SaveAnswer.as_view(),name="saveAnswer"),
    path('deleteQuestion/',views.DeleteQuestion.as_view(),name="deleteQuestion"),
    path('getAlumnos/',views.GetAlumnos.as_view(),name="getAlumnos"),
    path('guardarRespuesta/',views.GuardarRespuesta.as_view(),name="guardarRespuesta"),
    path('guardarCarrera/',views.GuardarCarrera.as_view(),name="guardarCarrera"),
    path('enviar/',views.MandarCorreo.as_view(),name="enviar"),
]
| 48.545455 | 84 | 0.764045 |
123bbab6612de06257016d7efa358b1547b9ba86
| 310 |
py
|
Python
|
hidden_server_data.py
|
I4-Projektseminar-HHU-2016/seminar-project-marionline03
|
ec2b2d2b588baa647c7a5e542e1d20a39cd0fc09
|
[
"CC0-1.0"
] | null | null | null |
hidden_server_data.py
|
I4-Projektseminar-HHU-2016/seminar-project-marionline03
|
ec2b2d2b588baa647c7a5e542e1d20a39cd0fc09
|
[
"CC0-1.0"
] | null | null | null |
hidden_server_data.py
|
I4-Projektseminar-HHU-2016/seminar-project-marionline03
|
ec2b2d2b588baa647c7a5e542e1d20a39cd0fc09
|
[
"CC0-1.0"
] | null | null | null |
# Name or IP of the machine running the vocabulary pet server
HOST = 'localhost'
# Port of the vocabulary pet server (kept as a string)
PORT = '8080'
# Replace the text between the angle brackets with the root directory of
# vocabulary pet.
# Example: STATIC_PATH = '/home/user/vocabulary_pet/static'
STATIC_PATH = '/<vocabulary_pet_directory>/static'
| 31 | 67 | 0.767742 |
89ed5857649315854f4f1a4da4d30b8de25ebaa3
| 7,776 |
py
|
Python
|
Co-Simulation/Sumo/sumo-1.7.0/tools/output/generateMeanDataDefinitions.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 4 |
2020-11-13T02:35:56.000Z
|
2021-03-29T20:15:54.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/output/generateMeanDataDefinitions.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 9 |
2020-12-09T02:12:39.000Z
|
2021-02-18T00:15:28.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/output/generateMeanDataDefinitions.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 1 |
2020-11-20T19:31:26.000Z
|
2020-11-20T19:31:26.000Z
|
#!/usr/bin/env python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2008-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file generateMeanDataDefinitions.py
# @author Karol Stosiek
# @author Michael Behrisch
# @date 2011-10-25
from __future__ import absolute_import
import xml.dom.minidom
import logging
import optparse
import sys
def generate_mean_data_xml(detectors_xml,
                           detectors_type,
                           detectors_frequency,
                           detectors_suffix,
                           detectors_output_type):
    """Build mean-data definitions as a new XML document.

    - detectors_xml: detector definitions parsed with xml.dom.minidom.
    - detectors_type: one of the supported detector kinds: 'e1', 'e2', 'e3'.
    - detectors_frequency: an empty string or a positive integer (seconds).
    - detectors_suffix: appended to each detector id (followed by '.xml')
      to form that detector's aggregated-results file name.
    - detectors_output_type: tag name of the generated elements
      (e.g. 'edgeData' or 'laneData').

    Returns the new document, rooted at an 'additional' element that holds
    one child per detector found in detectors_xml.
    """
    impl = xml.dom.minidom.getDOMImplementation()
    document = impl.createDocument(None, 'additional', None)
    root = document.documentElement
    source_tag = detectors_type + "Detector"
    for detector_node in detectors_xml.getElementsByTagName(source_tag):
        ident = detector_node.getAttribute('id')
        entry = document.createElement(detectors_output_type)
        entry.setAttribute("id", ident)
        entry.setAttribute("freq", str(detectors_frequency))
        entry.setAttribute("file", ident + detectors_suffix + ".xml")
        root.appendChild(entry)
    return document
if __name__ == "__main__":
    # pylint: disable-msg=C0103
    # Each helper below validates one parsed command-line option and
    # terminates the program (after printing the usage text) on bad input.
    def get_detector_file(provided_options):
        """ Returns validated detector file name located in
        provided_options. Exits, if the provided
        detector file is invalid (None or empty). """
        if (provided_options.detector_file is None or
                provided_options.detector_file == ""):
            logging.fatal("Invalid input file. \n" +
                          option_parser.format_help())
            exit()
        return xml.dom.minidom.parse(provided_options.detector_file)
    def get_detector_type(provided_options):
        """ Returns validated detector type located in provided_options.
        Checks if the detector type is one of e1, e2 or e3. """
        if provided_options.detector_type not in ('e1', 'e2', 'e3'):
            logging.fatal("Invalid detector type.\n" +
                          option_parser.format_help())
            exit()
        return provided_options.detector_type
    def get_detector_frequency(provided_options):
        """ Returns validated detector frequency located in provided_options.
        Validated frequency is either an empty string or is a positive
        integer. """
        if provided_options.frequency != "":
            try:
                frequency = int(provided_options.frequency)
                if frequency < 0:
                    raise ValueError
                return frequency
            except ValueError:
                logging.fatal("Invalid time range length specified.\n" +
                              option_parser.format_help())
                exit()
        return ""
    def get_detector_suffix(provided_options):
        """ Returns detector suffix located in provided_options. """
        return provided_options.output_suffix
    def get_detector_output_type(provided_options):
        """If provided_options indicated that edge-based traffic should be
        created, then returns \"edgeData\"; returns \"laneData\" otherwise.
        """
        if provided_options.edge_based_dump:
            return "edgeData"
        else:
            return "laneData"
    logging.basicConfig()
    option_parser = optparse.OptionParser()
    option_parser.add_option("-d", "--detector-file",
                             help="Input detector FILE",
                             dest="detector_file",
                             type="string")
    option_parser.add_option("-t", "--detector-type",
                             help="Type of detectors defined in the input. "
                                  "Allowed values: e1, e2, e3. Mandatory.",
                             dest="detector_type",
                             type="string")
    option_parser.add_option("-f", "--frequency",
                             help="The aggregation period the values the "
                                  "detector collects shall be summed up. "
                                  "If not given, the whole time interval "
                                  "from begin to end is aggregated, which is "
                                  "the default. If specified, must be a "
                                  "positive integer (seconds) representing "
                                  "time range length.",
                             dest="frequency",
                             type="string",
                             default="")
    option_parser.add_option("-l", "--lane-based-dump",
                             help="Generate lane based dump instead of "
                                  "edge-based dump.",
                             dest="edge_based_dump",
                             action="store_false")
    option_parser.add_option("-e", "--edge-based-dump",
                             help="Generate edge-based dump instead of "
                                  "lane-based dump. This is the default.",
                             dest="edge_based_dump",
                             action="store_true",
                             default=True)
    option_parser.add_option("-p", "--output-suffix",
                             help="Suffix to append to aggregated detector "
                                  "output. For each detector, the detector's "
                                  "aggregated results file with have the name "
                                  "build from the detector's ID and this "
                                  "suffix, with '.xml' extension. Defaults "
                                  "to -results-aggregated.",
                             dest="output_suffix",
                             default="-results-aggregated")
    option_parser.add_option("-o", "--output",
                             help="Output to write the mean data definition "
                                  "to. Defaults to stdout.",
                             dest="output",
                             type="string")
    (options, args) = option_parser.parse_args()
    # Write the generated definitions to the requested file, or stdout.
    output = sys.stdout
    if options.output is not None:
        output = open(options.output, "w")
    output.write(
        generate_mean_data_xml(
            get_detector_file(options),
            get_detector_type(options),
            get_detector_frequency(options),
            get_detector_suffix(options),
            get_detector_output_type(options)).toprettyxml())
    # NOTE(review): this also closes sys.stdout when no --output was given —
    # confirm that is intended.
    output.close()
| 43.2 | 79 | 0.566615 |
14739674824a84726ddf0a80ac0466e61377c774
| 3,816 |
py
|
Python
|
tools/legacy/veh-parser/readVeh.py
|
gifted-nguvu/darkstar-dts-converter
|
aa17a751a9f3361ca9bbb400ee4c9516908d1297
|
[
"MIT"
] | 2 |
2020-03-18T18:23:27.000Z
|
2020-08-02T15:59:16.000Z
|
tools/legacy/veh-parser/readVeh.py
|
gifted-nguvu/darkstar-dts-converter
|
aa17a751a9f3361ca9bbb400ee4c9516908d1297
|
[
"MIT"
] | 5 |
2019-07-07T16:47:47.000Z
|
2020-08-10T16:20:00.000Z
|
tools/legacy/veh-parser/readVeh.py
|
gifted-nguvu/darkstar-dts-converter
|
aa17a751a9f3361ca9bbb400ee4c9516908d1297
|
[
"MIT"
] | 1 |
2022-02-16T14:59:12.000Z
|
2022-02-16T14:59:12.000Z
|
import sys
import glob
import json
# Expand every command-line argument as a glob pattern and collect all
# matching vehicle file names.
importFilenames = []
for importFilename in sys.argv[1:]:
    files = glob.glob(importFilename)
    importFilenames.extend(files)
def readDataFile(filename):
    """Read a JSON data file and return the parsed object."""
    with open(filename, "r") as handle:
        return json.load(handle)
# Static layout/offset constants plus the component data tables shipped
# alongside the script.
constants = readDataFile("vehFileConstants.json")
vehicleData = readDataFile("datVehicle.json")
engineData = readDataFile("datEngine.json")
reactorData = readDataFile("datReactor.json")
mountData = readDataFile("datIntMounts.json")
shieldData = readDataFile("datShield.json")
armorData = readDataFile("datArmor.json")
def generateRawIds(collection, collectionIdName, componentInfo):
    """Assign a (group id, component id) pair to every item in collection.

    The component id advances by componentInfo["idDelta"] within a group;
    once componentInfo["groupMaxCount"] items have been emitted, the group
    id is incremented and the component id restarts at
    componentInfo["componentIdStart"].

    Returns a list of dicts with keys "datId" (the item's id attribute),
    "fileIds" (the assigned pair) and "item" (the original entry).
    """
    group = componentInfo["groupIdStart"]
    component = componentInfo["componentIdStart"]
    annotated = []
    for entry in collection:
        annotated.append({
            "datId": entry[collectionIdName],
            "fileIds": (group, component),
            "item": entry
        })
        if len(annotated) % componentInfo["groupMaxCount"]:
            component += componentInfo["idDelta"]
        else:
            group += 1
            component = componentInfo["componentIdStart"]
    return annotated
# Pre-compute the (group id, component id) pairs for every component table.
engines = generateRawIds(engineData["engines"], "engineId", constants["engine"])
reactors = generateRawIds(reactorData["reactors"], "reactorId", constants["reactor"])
computers = generateRawIds(filter(lambda x: x["componentType"] == "computer", mountData["internalMounts"]), "internalMountId", constants["computer"])
shields = generateRawIds(shieldData["shields"], "shieldId", constants["shield"])
armors = generateRawIds(armorData["armors"], "armorId", constants["armor"])
# Placeholder used when a vehicle references a component that cannot be
# resolved; its display name renders as "N/A".
defaultComponent = {
    "item": {
        "displayInfo": {
            "longName": {
                "text": {
                    "en": "N/A"
                }
            }
        }
    }
}
def findComponent(rawVehicle, componentInfo, collection, fallback):
    """Return the first collection entry whose "fileIds" pair matches the
    two raw bytes at componentInfo["idIndex"], or fallback if none does."""
    index = componentInfo["idIndex"]
    wanted = (rawVehicle[index], rawVehicle[index + 1])
    for candidate in collection:
        if candidate["fileIds"] == wanted:
            return candidate
    return fallback
def getComponentName(component):
    """Return the English long display name of a component entry."""
    display = component["item"]["displayInfo"]
    return display["longName"]["text"]["en"]
# Resolve each raw vehicle file into human-readable component names.
for importFilename in importFilenames:
    with open(importFilename, "rb") as rawVehicleFile:
        rawVehicle = rawVehicleFile.read()
        # Byte offsets of the individual component id pairs.
        engineId = constants["engine"]["idIndex"]
        reactorId = constants["reactor"]["idIndex"]
        computerId = constants["computer"]["idIndex"]
        shieldId = constants["shield"]["idIndex"]
        armorId = constants["armor"]["idIndex"]
        rawVehicleId = rawVehicle[constants["vehicle"]["idIndex"]]
        rawEngineId = rawVehicle[engineId]
        # Vehicle ids in the data table are offset by 140 from the raw byte.
        matchingInfo = next(filter(lambda x: x["vehicleId"] == rawVehicleId - 140, vehicleData["vehicles"]), None)
        if matchingInfo is not None:
            # Look up every component, falling back to the "N/A" placeholder.
            matchingEngine = findComponent(rawVehicle, constants["engine"], engines, defaultComponent)
            matchingReactor = findComponent(rawVehicle, constants["reactor"], reactors, defaultComponent)
            matchingComputer = findComponent(rawVehicle, constants["computer"], computers, defaultComponent)
            matchingShield = findComponent(rawVehicle, constants["shield"], shields, defaultComponent)
            matchingArmor = findComponent(rawVehicle, constants["armor"], armors, defaultComponent)
            engineName = getComponentName(matchingEngine)
            reactorName = getComponentName(matchingReactor)
            computerName = getComponentName(matchingComputer)
            shieldName = getComponentName(matchingShield)
            armorName = getComponentName(matchingArmor)
            print(importFilename, ": ", ", ".join([matchingInfo["identityTag"]["text"]["en"],engineName, reactorName, computerName, shieldName, armorName]))
        else:
            # Unknown vehicle: just dump the raw ids for debugging.
            print(importFilename, rawVehicleId, rawEngineId)
| 39.75 | 152 | 0.684748 |
1ad1c27a5e910ed8a34cb4341209c4d60c4682c3
| 651 |
py
|
Python
|
Tutorials/10 Days of Statistics/Day 7/pearson_correlation1.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | 1 |
2021-02-22T17:37:45.000Z
|
2021-02-22T17:37:45.000Z
|
Tutorials/10 Days of Statistics/Day 7/pearson_correlation1.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
Tutorials/10 Days of Statistics/Day 7/pearson_correlation1.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
#!/bin/python3
from math import sqrt
def mean(arr, n):
    """Arithmetic mean of the n values in arr."""
    return sum(arr) / n
def std(arr, mean, n):
    """Population standard deviation of the first n values of arr around
    the supplied mean."""
    total = sum((value - mean) ** 2 for value in arr[:n])
    return sqrt(total / n)
def pearson(arr1, arr2, n):
    """Pearson correlation coefficient of the first n paired values of
    arr1 and arr2 (population covariance over the product of the
    population standard deviations)."""
    mu1 = mean(arr1, n)
    mu2 = mean(arr2, n)
    sigma1 = std(arr1, mu1, n)
    sigma2 = std(arr2, mu2, n)
    cov = sum((arr1[i] - mu1) * (arr2[i] - mu2) for i in range(n)) / n
    return cov / (sigma1 * sigma2)
# Read the sample size and two whitespace-separated float arrays from
# stdin, then print their Pearson correlation coefficient.
n = int(input())
arr1 = [float(arr_i) for arr_i in input().strip().split(' ')]
arr2 = [float(arr_i) for arr_i in input().strip().split(' ')]
print(pearson(arr1, arr2, n))
| 19.727273 | 61 | 0.542243 |
214fbe3c866c71321edd80d1949933cb22634790
| 250 |
py
|
Python
|
progressbar.py
|
ligang945/pyMisc
|
3107c80f7f53ffc797b289ec73d1ef4db80f0b63
|
[
"MIT"
] | null | null | null |
progressbar.py
|
ligang945/pyMisc
|
3107c80f7f53ffc797b289ec73d1ef4db80f0b63
|
[
"MIT"
] | null | null | null |
progressbar.py
|
ligang945/pyMisc
|
3107c80f7f53ffc797b289ec73d1ef4db80f0b63
|
[
"MIT"
] | null | null | null |
from __future__ import division
import sys,time
# Growing bar of '#' characters; starts with one so the bar is never empty.
j = '#'
if __name__ == '__main__':
    # 60 steps of 0.5 s each; '\r' rewinds the cursor so the same console
    # line is redrawn with the growing bar and the percentage.
    for i in range(1,61):
        j += '#'
        sys.stdout.write(str(int((i/60)*100))+'% ||'+j+'->'+"\r")
        sys.stdout.flush()
        time.sleep(0.5)
    # NOTE(review): bare `print` is a no-op expression on Python 3; under
    # Python 2 it printed the final newline — confirm the target version.
    print
| 22.727273 | 66 | 0.532 |
b4e9cedcd2ad1fb55f6d4de8d7fcdc29fff40159
| 1,141 |
py
|
Python
|
src/compgen2/testdata/manipulator.py
|
CorrelAid/compgen-ii-cgv
|
810a044d6bbe1ce058a359115e3e5fc71a358549
|
[
"MIT"
] | 1 |
2022-02-02T12:41:06.000Z
|
2022-02-02T12:41:06.000Z
|
src/compgen2/testdata/manipulator.py
|
CorrelAid/compgen-ii-cgv
|
810a044d6bbe1ce058a359115e3e5fc71a358549
|
[
"MIT"
] | null | null | null |
src/compgen2/testdata/manipulator.py
|
CorrelAid/compgen-ii-cgv
|
810a044d6bbe1ce058a359115e3e5fc71a358549
|
[
"MIT"
] | null | null | null |
from typing import Callable
class Manipulator:
    """Bundle of a manipulation callable, the unit it operates on
    ("char" or "word") and the probability with which it is applied."""
    def __init__(self, m: Callable, type: str, chance: float) -> None:
        # The callable that performs the actual manipulation.
        self.m = m
        # Both setters validate their input and raise ValueError on bad data.
        self.type = self.set_type(type)
        self.chance = self.set_chance(chance)
    def set_type(self, type: str) -> str:
        """Read in a type and check on its correctness

        Args:
            type (str): Can be any of "char", "word". Other values will produce a ValueError

        Returns:
            type (str): If valid, it passes the input value.
        """
        if type in ("char", "word"):
            return type
        raise ValueError("Invalid type provided.")
    def set_chance(self, chance: float) -> float:
        """Read in a probability and check on its correctness

        Args:
            chance (float): Should be a float between 0. and 1. However, any float can be provided

        Returns:
            chance (float): If valid, it passes the input value.
        """
        # isinstance is the idiomatic type check (was `type(chance) == float`);
        # ints and bools still raise, preserving the float-only contract.
        if isinstance(chance, float):
            return chance
        raise ValueError("Invalid value for chance provided. Must be of type float.")
| 35.65625 | 96 | 0.576687 |
6ca41e7773404eb58e84e98c2f492425736ec014
| 13,759 |
py
|
Python
|
Co-Simulation/Sumo/sumo-1.7.0/tools/sumolib/visualization/helpers.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 4 |
2020-11-13T02:35:56.000Z
|
2021-03-29T20:15:54.000Z
|
sumolib/visualization/helpers.py
|
KhalidHALBA-GR-NIST/UCEFSUMOV2V
|
4eb7ac761303a171718a10e4131184746bded9fd
|
[
"MIT"
] | 9 |
2020-12-09T02:12:39.000Z
|
2021-02-18T00:15:28.000Z
|
sumolib/visualization/helpers.py
|
KhalidHALBA-GR-NIST/UCEFSUMOV2V
|
4eb7ac761303a171718a10e4131184746bded9fd
|
[
"MIT"
] | 1 |
2020-11-20T19:31:26.000Z
|
2020-11-20T19:31:26.000Z
|
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2013-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file helpers.py
# @author Daniel Krajzewicz
# @author Laura Bieker
# @author Michael Behrisch
# @date 2013-11-11
from __future__ import absolute_import
from __future__ import print_function
import os
import gc
import sys
import matplotlib
if 'matplotlib.backends' not in sys.modules:
if 'TEXTTEST_SANDBOX' in os.environ or (os.name == 'posix' and 'DISPLAY' not in os.environ):
matplotlib.use('Agg')
from pylab import arange, close, cm, get_cmap, figure, legend, log, plt, savefig, show, title # noqa
from pylab import xlabel, xlim, xticks, ylabel, ylim, yticks # noqa
from matplotlib.ticker import FuncFormatter as ff # noqa
from matplotlib.collections import LineCollection # noqa
# http://datadebrief.blogspot.de/2010/10/plotting-sunrise-sunset-times-in-python.html
def m2hm1(x, i):
    """Tick-formatter callback: render x (seconds) as hh:mm.

    The unused `i` parameter is the tick index matplotlib passes in.
    """
    hours = int(x / 3600)
    minutes = int(x % 3600 / 60)
    return '%02d:%02d' % (hours, minutes)
def m2hm2(x, i):
    """Tick-formatter callback: render x (seconds) as hh:mm:ss.

    The unused `i` parameter is the tick index matplotlib passes in.
    """
    hours = int(x / 3600)
    minutes = int(x % 3600 / 60)
    seconds = int(x % 60)
    return '%02d:%02d:%02d' % (hours, minutes, seconds)
def addPlotOptions(optParser):
    """Register the generic plot-appearance options (colors, limits, ticks,
    time formatting, grids, labels, title, subplot margins, figure size and
    legend handling) on the given optparse parser."""
    optParser.add_option("--colors", dest="colors",
                         default=None, help="Defines the colors to use")
    optParser.add_option("--colormap", dest="colormap",
                         default="nipy_spectral", help="Defines the colormap to use")
    optParser.add_option("-l", "--labels", dest="labels",
                         default=None, help="Defines the labels to use")
    optParser.add_option("--xlim", dest="xlim",
                         default=None, help="Defines x-limits of the figure <XMIN>,<XMAX>")
    optParser.add_option("--ylim", dest="ylim",
                         default=None, help="Defines y-limits of the figure <YMIN>,<YMAX>")
    optParser.add_option("--xticks", dest="xticks",
                         default=None, help="Set x-axis ticks <XMIN>,<XMAX>,<XSTEP>,<XSIZE> or <XSIZE>")
    optParser.add_option("--yticks", dest="yticks",
                         default=None, help="Set y-axis ticks <YMIN>,<YMAX>,<YSTEP>,<YSIZE> or <YSIZE>")
    optParser.add_option("--xtime1", dest="xtime1", action="store_true",
                         default=False, help="Use a time formatter for x-ticks (hh:mm)")
    optParser.add_option("--ytime1", dest="ytime1", action="store_true",
                         default=False, help="Use a time formatter for y-ticks (hh:mm)")
    optParser.add_option("--xtime2", dest="xtime2", action="store_true",
                         default=False, help="Use a time formatter for x-ticks (hh:mm:ss)")
    optParser.add_option("--ytime2", dest="ytime2", action="store_true",
                         default=False, help="Use a time formatter for y-ticks (hh:mm:ss)")
    optParser.add_option("--xgrid", dest="xgrid", action="store_true",
                         default=False, help="Enable grid on x-axis")
    optParser.add_option("--ygrid", dest="ygrid", action="store_true",
                         default=False, help="Enable grid on y-axis")
    optParser.add_option("--xticksorientation", dest="xticksorientation",
                         type="float", default=None, help="Set the orientation of the x-axis ticks")
    optParser.add_option("--yticksorientation", dest="yticksorientation",
                         type="float", default=None, help="Set the orientation of the x-axis ticks")
    optParser.add_option("--xlabel", dest="xlabel",
                         default=None, help="Set the x-axis label")
    optParser.add_option("--ylabel", dest="ylabel",
                         default=None, help="Set the y-axis label")
    optParser.add_option("--xlabelsize", dest="xlabelsize",
                         type="int", default=16, help="Set the size of the x-axis label")
    optParser.add_option("--ylabelsize", dest="ylabelsize",
                         type="int", default=16, help="Set the size of the x-axis label")
    optParser.add_option("--title", dest="title",
                         default=None, help="Set the title")
    optParser.add_option("--titlesize", dest="titlesize",
                         type="int", default=16, help="Set the title size")
    optParser.add_option("--adjust", dest="adjust",
                         default=None, help="Adjust the subplots <LEFT>,<BOTTOM> or <LEFT>,<BOTTOM>,<RIGHT>,<TOP>")
    optParser.add_option("-s", "--size", dest="size",
                         default=False, help="Defines the figure size <X>,<Y>")
    optParser.add_option("--no-legend", dest="nolegend", action="store_true",
                         default=False, help="Disables the legend")
    optParser.add_option("--legend-position", dest="legendposition",
                         default=None, help="Sets the legend position")
def addInteractionOptions(optParser):
    """Register the output/interaction options (where to save the figure
    and whether to suppress the interactive window) on the parser."""
    optParser.add_option("-o", "--output", dest="output", metavar="FILE",
                         default=None, help="Comma separated list of filename(s) the figure shall be written to")
    optParser.add_option("-b", "--blind", dest="blind", action="store_true",
                         default=False, help="If set, the figure will not be shown")
def addNetOptions(optParser):
    """Register the network-drawing defaults (edge width and color used
    when no per-edge value is supplied) on the parser."""
    optParser.add_option("-w", "--default-width", dest="defaultWidth",
                         type="float", default=.1, help="Defines the default edge width")
    optParser.add_option("-c", "--default-color", dest="defaultColor",
                         default='k', help="Defines the default edge color")
def applyPlotOptions(fig, ax, options):
    """Apply the generic options parsed by addPlotOptions (limits, ticks,
    time formatters, grids, labels, title, subplot margins) to the given
    figure and axes. Exits the program on malformed tick/adjust values."""
    if options.xlim:
        xlim(float(options.xlim.split(",")[0]), float(
            options.xlim.split(",")[1]))
    if options.yticksorientation:
        # NOTE(review): this sets the y-axis tick direction from
        # options.xticksorientation — looks like an x/y copy-paste mix-up;
        # confirm before changing.
        ax.tick_params(
            axis='y', which='major', tickdir=options.xticksorientation)
    if options.xticks:
        vals = options.xticks.split(",")
        if len(vals) == 1:
            # Single value: only the tick label size.
            ax.tick_params(axis='x', which='major', labelsize=float(vals[0]))
        elif len(vals) == 4:
            # Four values: min, max, step and label size.
            xticks(
                arange(float(vals[0]), float(vals[1]), float(vals[2])), size=float(vals[3]))
        else:
            print(
                "Error: ticks must be given as one float (<SIZE>) or four floats (<MIN>,<MAX>,<STEP>,<SIZE>)")
            sys.exit()
    if options.xtime1:
        ax.xaxis.set_major_formatter(ff(m2hm1))
    if options.xtime2:
        ax.xaxis.set_major_formatter(ff(m2hm2))
    if options.xgrid:
        ax.xaxis.grid(True)
    if options.xlabel:
        xlabel(options.xlabel, size=options.xlabelsize)
    if options.xticksorientation:
        labels = ax.get_xticklabels()
        for label in labels:
            label.set_rotation(options.xticksorientation)
    if options.ylim:
        ylim(float(options.ylim.split(",")[0]), float(
            options.ylim.split(",")[1]))
    if options.yticks:
        vals = options.yticks.split(",")
        if len(vals) == 1:
            ax.tick_params(axis='y', which='major', labelsize=float(vals[0]))
        elif len(vals) == 4:
            yticks(
                arange(float(vals[0]), float(vals[1]), float(vals[2])), size=float(vals[3]))
        else:
            print(
                "Error: ticks must be given as one float (<SIZE>) or four floats (<MIN>,<MAX>,<STEP>,<SIZE>)")
            sys.exit()
    if options.ytime1:
        ax.yaxis.set_major_formatter(ff(m2hm1))
    if options.ytime2:
        ax.yaxis.set_major_formatter(ff(m2hm2))
    if options.ygrid:
        ax.yaxis.grid(True)
    if options.ylabel:
        ylabel(options.ylabel, size=options.ylabelsize)
    if options.yticksorientation:
        labels = ax.get_yticklabels()
        for label in labels:
            label.set_rotation(options.yticksorientation)
    if options.title:
        title(options.title, size=options.titlesize)
    if options.adjust:
        vals = options.adjust.split(",")
        if len(vals) == 2:
            fig.subplots_adjust(left=float(vals[0]), bottom=float(vals[1]))
        elif len(vals) == 4:
            fig.subplots_adjust(left=float(vals[0]), bottom=float(
                vals[1]), right=float(vals[2]), top=float(vals[3]))
        else:
            print(
                "Error: adjust must be given as two floats (<LEFT>,<BOTTOM>) or four floats " +
                "(<LEFT>,<BOTTOM>,<RIGHT>,<TOP>)")
            sys.exit()
def plotNet(net, colors, widths, options):
    """Draw the network's edges as a LineCollection on the current axes.

    colors/widths map edge ids to per-edge values; edges missing from a
    map fall back to options.defaultColor / options.defaultWidth.
    """
    shapes = []
    c = []
    w = []
    for e in net._edges:
        shapes.append(e.getShape())
        if e._id in colors:
            c.append(colors[str(e._id)])
        else:
            c.append(options.defaultColor)
        if e._id in widths:
            w.append(widths[str(e._id)])
        else:
            w.append(options.defaultWidth)
    line_segments = LineCollection(shapes, linewidths=w, colors=c)
    ax = plt.gca()
    ax.add_collection(line_segments)
    # Add a 10% margin around the network and rescale the view to fit.
    ax.set_xmargin(0.1)
    ax.set_ymargin(0.1)
    ax.autoscale_view(True, True, True)
def getColor(options, i, a):
    """Return the color for data set i out of a total of a sets.

    Explicit --colors entries win; otherwise the color is sampled from the
    configured colormap. A colormap string starting with '#' is parsed as
    a custom definition and registered under the name "CUSTOM".
    """
    if options.colors:
        v = options.colors.split(",")
        if i >= len(v):
            print("Error: not enough colors given")
            sys.exit(1)
        return v[i]
    if options.colormap[0] == '#':
        # Custom map: register once, then reuse by name on later calls.
        colormap = parseColorMap(options.colormap[1:])
        cm.register_cmap(name="CUSTOM", cmap=colormap)
        options.colormap = "CUSTOM"
    colormap = get_cmap(options.colormap)
    # cm = options.colormap# get_cmap(options.colormap)
    cNorm = matplotlib.colors.Normalize(vmin=0, vmax=a)
    scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=colormap)
    return scalarMap.to_rgba(i)
def getLabel(f, i, options):
    """Return the label for data set i: the i-th entry of the comma
    separated --labels option if given, otherwise the file name f."""
    if not options.labels:
        return f
    return options.labels.split(",")[i]
def openFigure(options):
    """Create a new figure honoring the --size option (given as "<X>,<Y>"
    inches) and add a single subplot. Returns the (figure, axes) pair."""
    if options.size:
        x = float(options.size.split(",")[0])
        y = float(options.size.split(",")[1])
        fig = figure(figsize=(x, y))
    else:
        fig = figure()
    ax = fig.add_subplot(111)
    return fig, ax
def closeFigure(fig, ax, options, haveLabels=True, optOut=None):
    """Finalize a figure: legend, generic plot options, save and/or show,
    then close it.

    optOut overrides options.output; both may name several comma separated
    files. The figure is shown unless --blind was given, and is always
    cleared and closed afterwards.
    """
    if haveLabels and not options.nolegend:
        if options.legendposition:
            legend(loc=options.legendposition)
        else:
            legend()
    applyPlotOptions(fig, ax, options)
    if options.output or optOut is not None:
        n = options.output
        if optOut is not None:
            n = optOut
        for o in n.split(","):
            savefig(o)
    if not options.blind:
        show()
    fig.clf()
    close()
    # Free the figure's memory eagerly; batch runs create many figures.
    gc.collect()
def logNormalise(values, maxValue):
    """Log-scale the dict's values in place, then rescale them to span
    [0, 1].

    If maxValue is falsy, the largest value in the dict is used instead.
    Entries equal to 0 are left untouched by the log step.

    NOTE: a running min/max of exactly 0 is treated as "unset" by the
    falsy checks below — this faithfully preserves the original behaviour.
    """
    if not maxValue:
        for key in values:
            if not maxValue or maxValue < values[key]:
                maxValue = values[key]
    emin = None
    emax = None
    for key in values:
        if values[key] != 0:
            values[key] = log(values[key]) / log(maxValue)
        current = values[key]
        if not emin or emin > current:
            emin = current
        if not emax or emax < current:
            emax = current
    span = emax - emin
    for key in values:
        values[key] = (values[key] - emin) / span
def linNormalise(values, minColorValue, maxColorValue):
    """Linearly rescale the dict's values in place so that minColorValue
    maps to 0 and maxColorValue maps to 1."""
    span = maxColorValue - minColorValue
    for key in values:
        values[key] = (values[key] - minColorValue) / span
def toHex(val):
    """Convert a value in [0, 255] to its two-digit lowercase hex string."""
    digits = "0123456789abcdef"
    high = int(val / 16)
    low = int(val - high * 16)
    return digits[high] + digits[low]
def toFloat(val):
    """Convert a two-digit lowercase hex string back to a float in
    [0, 255]."""
    digits = "0123456789abcdef"
    high = digits.find(val[0])
    low = digits.find(val[1])
    return float(high * 16 + low)
def toColor(val, colormap):
    """Converts the given value (0-1) into a color definition parseable by matplotlib"""
    # colormap is a list of (threshold, (r, g, b)) pairs sorted by
    # threshold; interpolate each channel linearly between the two entries
    # surrounding val.
    for i in range(0, len(colormap) - 1):
        if colormap[i + 1][0] > val:
            scale = (val - colormap[i][0]) / \
                (colormap[i + 1][0] - colormap[i][0])
            r = colormap[i][1][0] + \
                (colormap[i + 1][1][0] - colormap[i][1][0]) * scale
            g = colormap[i][1][1] + \
                (colormap[i + 1][1][1] - colormap[i][1][1]) * scale
            b = colormap[i][1][2] + \
                (colormap[i + 1][1][2] - colormap[i][1][2]) * scale
            return "#" + toHex(r) + toHex(g) + toHex(b)
    # Value at or beyond the last threshold: clamp to the final color.
    return "#" + toHex(colormap[-1][1][0]) + toHex(colormap[-1][1][1]) + toHex(colormap[-1][1][2])
def parseColorMap(mapDef):
    """Parse a "value:#rrggbb,..." definition into a matplotlib colormap.

    Each comma separated entry maps a position in [0, 1] to a hex color;
    the channel break points feed a LinearSegmentedColormap named "CUSTOM".
    """
    ret = {"red": [], "green": [], "blue": []}
    defs = mapDef.split(",")
    for d in defs:
        (value, color) = d.split(":")
        value = float(value)
        # Split the "#rrggbb" string into its channel byte pairs.
        r = color[1:3]
        g = color[3:5]
        b = color[5:7]
        # ret.append( (float(value), ( toFloat(r), toFloat(g), toFloat(b) ) ) )
        ret["red"].append((value, toFloat(r) / 255., toFloat(r) / 255.))
        ret["green"].append((value, toFloat(g) / 255., toFloat(g) / 255.))
        ret["blue"].append((value, toFloat(b) / 255., toFloat(b) / 255.))
        # ret.append( (value, color) )
    colormap = matplotlib.colors.LinearSegmentedColormap("CUSTOM", ret, 1024)
    return colormap
| 40.587021 | 115 | 0.590232 |
9f07f56d60d263da7864b5cdddf5c4c4f37d96d9
| 1,482 |
py
|
Python
|
python/en/_packages/scipy/scipy-scipy.signal.lfilter-example_01.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_packages/scipy/scipy-scipy.signal.lfilter-example_01.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_packages/scipy/scipy-scipy.signal.lfilter-example_01.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
"""
scipy-scipy.signal.lfilter-example_01.py
This is an example code for scipy.signal.lfilter
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html
"""
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
t = np.linspace(-1,1,201)
x = ( np.sin( 2*np.pi*0.75*t*(1-t) + 2.1 )
+ 0.1 * np.sin( 2*np.pi*1.25*t + 1 )
+ 0.18* np.cos( 2*np.pi*3.85*t) )
# Add small Gaussian noise to x
xn = x + np.random.randn( len(t) ) * 0.08
# Create an order 3 lowpass butterworth filter
b, a = signal.butter(3,0.05)
print( "b = ", b )
print( "a = ", a )
# Apply the filter to xn.
# Use lfilter_zi to choose the initial condition of the filter
zi = signal.lfilter_zi(b,a)
print( "xn[0] = ", xn[0] )
z,_ = signal.lfilter(b,a,xn,zi=zi*xn[0])
# Apply the filter again, to have a result filtered at an order the same as filtfilt
z2,_ = signal.lfilter(b,a,z,zi=zi*z[0])
# Use filtfilt to apply the filter
y = signal.filtfilt(b,a,xn)
# Plot the original signal and the various filtered versions
plt.figure
plt.subplot(221)
plt.plot(t,x,'k-')
plt.grid(True)
#plt.plot(t,xn,'b')
plt.plot(t,xn,'b',alpha=0.75)
plt.legend(('x','xn_0.75'))
plt.subplot(222)
plt.plot(t,x,'k-')
plt.plot(t,z,'r--')
plt.grid(True)
plt.title('lfilter once to xn')
plt.subplot(223)
plt.plot(t,x,'k-')
plt.plot(t,z2)
plt.grid(True)
plt.title('lfilter twice to xn')
plt.subplot(224)
plt.plot(t,x,'k-')
plt.plot(t,y)
plt.grid(True)
plt.title('filtfilt to xn')
| 23.15625 | 84 | 0.667341 |
9f3abccb50ecbea2d6830f0cd555c4ebe804ed3c
| 362 |
py
|
Python
|
Algorithms/Implementation/sherlock_and_squares.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/sherlock_and_squares.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/sherlock_and_squares.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from math import sqrt
# Precompute all perfect squares up to at least 10^9; the +100 safety
# margin is far larger than needed (+2 would already suffice).
squares = [i ** 2 for i in range(1, int(sqrt(10 ** 9)) + 100)]
# For each query line "A B", count the precomputed squares inside [A, B].
for _ in range(int(input().strip())):
    cnt = 0
    A, B = list(map(int, input().split()))
    for s in squares:
        if s >= A and s <= B:
            cnt += 1
        # Squares are increasing, so stop scanning once we pass B.
        if s > B:
            break
    print(cnt)
| 24.133333 | 62 | 0.51105 |
9819bac180513db9b09a39cbcfa679b0004af63a
| 1,169 |
py
|
Python
|
challenges/crossingSum/python3/crossingSum.py
|
jimmynguyen/codefights
|
f4924fcffdb4ff14930618bb1a781e4e02e9aa09
|
[
"MIT"
] | 5 |
2020-05-21T03:02:34.000Z
|
2021-09-06T04:24:26.000Z
|
challenges/crossingSum/python3/crossingSum.py
|
jimmynguyen/codefights
|
f4924fcffdb4ff14930618bb1a781e4e02e9aa09
|
[
"MIT"
] | 6 |
2019-04-24T03:39:26.000Z
|
2019-05-03T02:10:59.000Z
|
challenges/crossingSum/python3/crossingSum.py
|
jimmynguyen/codefights
|
f4924fcffdb4ff14930618bb1a781e4e02e9aa09
|
[
"MIT"
] | 1 |
2021-09-06T04:24:27.000Z
|
2021-09-06T04:24:27.000Z
|
def crossingSum(matrix, a, b):
    """Return the sum of row ``a`` plus column ``b``, counting the
    crossing cell ``matrix[a][b]`` only once."""
    total = sum(matrix[a])
    for row_index, row in enumerate(matrix):
        if row_index != a:
            total += row[b]
    return total
if __name__ == '__main__':
    # Fixture triples: matrix, crossing row index, crossing column index.
    input0 = [[[1,1,1,1], [2,2,2,2], [3,3,3,3]], [[1,1], [1,1]], [[1,1], [3,3], [1,1], [2,2]], [[100]], [[1,2], [3,4]], [[1,2,3,4]], [[1,2,3,4,5], [1,2,2,2,2], [1,2,2,2,2], [1,2,2,2,2], [1,2,2,2,2], [1,2,2,2,2], [1,2,2,2,2]]]
    input1 = [1, 0, 3, 0, 1, 0, 1]
    input2 = [3, 0, 0, 0, 1, 3, 1]
    expectedOutput = [12, 3, 9, 100, 9, 10, 21]
    # Sanity-check that the fixtures line up before running the cases.
    assert len(input0) == len(expectedOutput), '# input0 = {}, # expectedOutput = {}'.format(len(input0), len(expectedOutput))
    assert len(input1) == len(expectedOutput), '# input1 = {}, # expectedOutput = {}'.format(len(input1), len(expectedOutput))
    assert len(input2) == len(expectedOutput), '# input2 = {}, # expectedOutput = {}'.format(len(input2), len(expectedOutput))
    for matrix, row, col, expected in zip(input0, input1, input2, expectedOutput):
        actual = crossingSum(matrix, row, col)
        assert actual == expected, 'crossingSum({}, {}, {}) returned {}, but expected {}'.format(matrix, row, col, actual, expected)
    print('PASSES {} out of {} tests'.format(len(expectedOutput), len(expectedOutput)))
| 77.933333 | 222 | 0.59367 |
2cc1bb32e6f28ad30310ccc2fe1f5b5bf9b1460e
| 2,704 |
py
|
Python
|
easyp2p/ui/credentials_window.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 4 |
2019-07-18T10:58:28.000Z
|
2021-11-18T16:57:45.000Z
|
easyp2p/ui/credentials_window.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 1 |
2019-07-05T09:21:47.000Z
|
2019-07-05T09:21:47.000Z
|
easyp2p/ui/credentials_window.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 2 |
2019-07-05T08:56:34.000Z
|
2020-06-09T10:03:42.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Niko Sandschneider
"""Module implementing CredentialsWindow."""
from typing import Optional
from PyQt5.QtCore import pyqtSlot, QCoreApplication
from PyQt5.QtWidgets import QDialog, QMessageBox
from easyp2p.ui.Ui_credentials_window import Ui_CredentialsWindow
# Module-level shorthand for Qt's translation function; every call below
# uses the 'CredentialsWindow' translation context.
_translate = QCoreApplication.translate
class CredentialsWindow(QDialog, Ui_CredentialsWindow):
    """Dialog that collects P2P platform login credentials from the user.

    After the dialog closes, ``username``/``password`` hold the entered
    values (or ``None``) and ``save_in_keyring`` tells the caller whether
    the credentials should be stored in the system keyring.
    """

    def __init__(
            self, platform: str, keyring_exists: bool,
            save_in_keyring: bool = False) -> None:
        """
        Constructor of CredentialsWindow.

        Args:
            platform: Name of the P2P platform.
            keyring_exists: True if a keyring is available, False if not.

        Keyword Args:
            save_in_keyring: If True the save_in_keyring checkbox will be
                checked and disabled.
        """
        super().__init__()
        self.setupUi(self)
        self.platform = platform
        self.username: Optional[str] = None
        self.password: Optional[str] = None
        self.save_in_keyring = False
        prompt = _translate(
            'CredentialsWindow',
            f'Please enter username and password for {platform}:')
        self.label_platform.setText(prompt)
        checkbox = self.check_box_save_in_keyring
        if not keyring_exists:
            # Without a keyring there is nothing to save into.
            checkbox.setEnabled(False)
        elif save_in_keyring:
            # Caller forces keyring storage: show it checked but locked.
            checkbox.setChecked(True)
            checkbox.setEnabled(False)
            self.save_in_keyring = True

    @pyqtSlot()
    def on_button_box_accepted(self):
        """
        Make sure credentials were entered if user clicks on OK.
        """
        username = self.line_edit_username.text()
        password = self.line_edit_password.text()
        if username and password:
            self.username = username
            self.password = password
            self.save_in_keyring = self.check_box_save_in_keyring.isChecked()
            self.accept()
        else:
            # Keep the dialog open until both fields are filled in.
            QMessageBox.warning(
                self,
                _translate('CredentialsWindow', 'Fields are not filled'),
                _translate(
                    'CredentialsWindow',
                    'Please fill in fields for username and password!'))

    @pyqtSlot()
    def on_button_box_rejected(self):
        """
        Make sure save_in_keyring is False if user clicks Cancel.
        """
        self.save_in_keyring = False
        self.reject()

    def warn_user(self, header, msg):
        """Display a warning message box titled *header* showing *msg*."""
        QMessageBox.warning(self, header, msg)
| 32.97561 | 73 | 0.628328 |
394aca5543ae5400d7bce2655d11e4328aac1532
| 16,562 |
py
|
Python
|
src/onegov/swissvotes/models/policy_area.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/swissvotes/models/policy_area.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/swissvotes/models/policy_area.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from cached_property import cached_property
from decimal import Decimal
from onegov.swissvotes import _
class PolicyArea(object):
    """ Helper class for handling of descriptors.
    There are three levels of descriptors, each new level refining the
    given category.
    Policy areas are internally represented as a string value containing the
    descriptor of all three levels separated by a dot, e.g. "1.12.121".
    Policy areas are stored in the dataset as float, with the pre-decimal part
    refering to the first level category and the decimal part to the category
    of the given level. For example:
    Level 1 descriptor "1": 1
    Level 2 descriptor "1.12": 1.12
    Level 3 descriptor "1.12.121": 1.121
    NOTE(review): the dataset examples above do not match the code below --
    ``descriptor_decimal`` yields Decimal('1.2') for "1.12" and
    Decimal('1.21') for "1.12.121", and only that form round-trips through
    the ``Decimal`` constructor path. Confirm against the real dataset.
    """
    def __init__(self, value, level=None):
        """ Creates a new policy descriptor out of the given value.
        The given value might be a string (such as "1.12" or "1.12.121"), a
        list (such as [1, 12] or [1, 12, 121]) or a float together with a level
        (such as 1.12/2 or 1.121/3).
        """
        if isinstance(value, str):
            self.value = value
        elif isinstance(value, list):
            self.value = '.'.join([str(x) for x in value])
        elif isinstance(value, Decimal):
            assert level is not None
            # Truncating value * 10**x at each level recovers the
            # descriptors, e.g. Decimal('1.21') with level 3 gives
            # "1" / "12" / "121" -> "1.12.121".
            self.value = '.'.join(
                str(int(value * 10 ** x)) for x in range(level)
            )
        else:
            raise NotImplementedError()
    def __repr__(self):
        return self.value
    def __eq__(self, other):
        # NOTE(review): assumes ``other`` has a ``value`` attribute
        # (AttributeError otherwise); defining __eq__ without __hash__
        # also makes instances unhashable in Python 3.
        return self.value == other.value
    @cached_property
    def level(self):
        # Number of descriptor levels, derived from the dot separators.
        return self.value.count('.') + 1
    @cached_property
    def descriptor(self):
        """ Returns the highest descriptor, e.g. 121 if "1.12.121". """
        return int(self.value.split('.')[-1])
    @cached_property
    def descriptor_path(self):
        """ Returns all descriptors, e.g [1, 12, 121] if "1.12.121". """
        return [int(part) for part in self.value.split('.')]
    @cached_property
    def descriptor_decimal(self):
        """ Returns the descriptor as Decimal for the dataset, e.g.
        Decimal('1.21') if "1.12.121" (121 / 10**2).
        """
        return Decimal(self.descriptor) / (10 ** (self.level - 1))
    @cached_property
    def label(self):
        """ Returns a translatable label of the highest descriptor, e.g.
        "Bundesverfassung" if "1.12.121".
        """
        return self.label_path[-1]
    @cached_property
    def label_path(self):
        """ Returns translatable labels for all descriptor levels, e.g.
        ["Staatsordnung", "Politisches System", "Bundesverfassung"] if
        "1.12.121".
        """
        result = []
        # Walk the definition tree one level at a time.
        lookup = PolicyAreaDefinition.all()
        for part in self.descriptor_path:
            lookup = lookup.get(part)
            if not lookup:
                # Unknown descriptor: fall back to the numeric form and
                # stop descending.
                result.append(str(self.descriptor_decimal))
                break
            result.append(lookup.label or str(self.descriptor))
        return result
    def html(self, request):
        # Render the translated breadcrumb ("level1 > level2 > ...") as a
        # plain <span>.
        title = ' > '.join([
            request.translate(part) for part in self.label_path
        ])
        return f'<span>{title}</span>'
class PolicyAreaDefinition(object):
    """ Helper class for all the policy areas and their translatable labels.
    Example: Get the label of the policy area "1.12.121":
    PolicyAreaDefinition.all().get(1).get(12).get(121).label
    """
    def __init__(self, path=None, label=None, children=None):
        # path: descriptors from the root down to this node, e.g. [1, 12, 121].
        self.path = path or []
        # NOTE(review): ``decimal`` is initialized but never set anywhere in
        # this class -- looks like dead code; confirm before removing.
        self.decimal = None
        # The node's own descriptor is the last path element (None for root).
        self.value = path[-1] if self.path else None
        self.label = label
        self.children = children or []
        # Maps a child's descriptor to its position in ``children`` so
        # get() can look children up in constant time.
        self.index = {
            child.value: index for index, child in enumerate(self.children)
        }
    def get(self, key):
        """ Returns the child with the given value. """
        # Implicitly returns None when no child has this descriptor.
        if key in self.index:
            return self.children[self.index[key]]
    @staticmethod
    def all():
        """ Returns the tree of all policy areas. """
        # NOTE(review): the whole tree is rebuilt on every call (it is
        # invoked from PolicyArea.label_path); caching could avoid that.
        return PolicyAreaDefinition(children=[
            PolicyAreaDefinition([1], _("d-1-1"), [
                PolicyAreaDefinition([1, 11], _("d-2-11")),
                PolicyAreaDefinition([1, 12], _("d-2-12"), [
                    PolicyAreaDefinition([1, 12, 121], _("d-3-121")),
                    PolicyAreaDefinition([1, 12, 122], _("d-3-122")),
                    PolicyAreaDefinition([1, 12, 123], _("d-3-123")),
                    PolicyAreaDefinition([1, 12, 124], _("d-3-124")),
                ]),
                PolicyAreaDefinition([1, 13], _("d-2-13"), [
                    PolicyAreaDefinition([1, 13, 131], _("d-3-131")),
                    PolicyAreaDefinition([1, 13, 132], _("d-3-132")),
                    PolicyAreaDefinition([1, 13, 133], _("d-3-133")),
                    PolicyAreaDefinition([1, 13, 134], _("d-3-134")),
                ]),
                PolicyAreaDefinition([1, 14], _("d-2-14"), [
                    PolicyAreaDefinition([1, 14, 141], _("d-3-141")),
                    PolicyAreaDefinition([1, 14, 142], _("d-3-142")),
                    PolicyAreaDefinition([1, 14, 143], _("d-3-143")),
                ]),
                PolicyAreaDefinition([1, 15], _("d-2-15"), [
                    PolicyAreaDefinition([1, 15, 151], _("d-3-151")),
                    PolicyAreaDefinition([1, 15, 152], _("d-3-152")),
                    PolicyAreaDefinition([1, 15, 153], _("d-3-153")),
                ]),
                PolicyAreaDefinition([1, 16], _("d-2-16"), [
                    PolicyAreaDefinition([1, 16, 161], _("d-3-161")),
                    PolicyAreaDefinition([1, 16, 162], _("d-3-162")),
                    PolicyAreaDefinition([1, 16, 163], _("d-3-163")),
                    PolicyAreaDefinition([1, 16, 164], _("d-3-164")),
                    PolicyAreaDefinition([1, 16, 165], _("d-3-165")),
                    PolicyAreaDefinition([1, 16, 166], _("d-3-166")),
                ])
            ]),
            PolicyAreaDefinition([2], _("d-1-2"), [
                PolicyAreaDefinition([2, 21], _("d-2-21"), [
                    PolicyAreaDefinition([2, 21, 211], _("d-3-211")),
                    PolicyAreaDefinition([2, 21, 212], _("d-3-212")),
                    PolicyAreaDefinition([2, 21, 213], _("d-3-213")),
                ]),
                PolicyAreaDefinition([2, 22], _("d-2-22"), [
                    PolicyAreaDefinition([2, 22, 221], _("d-3-221")),
                    PolicyAreaDefinition([2, 22, 222], _("d-3-222")),
                    PolicyAreaDefinition([2, 22, 223], _("d-3-223")),
                    PolicyAreaDefinition([2, 22, 224], _("d-3-224")),
                ]),
                PolicyAreaDefinition([2, 23], _("d-2-23"), [
                    PolicyAreaDefinition([2, 23, 231], _("d-3-231")),
                    PolicyAreaDefinition([2, 23, 232], _("d-3-232")),
                ]),
                PolicyAreaDefinition([2, 24], _("d-2-24")),
                PolicyAreaDefinition([2, 25], _("d-2-25")),
                PolicyAreaDefinition([2, 26], _("d-2-26"), [
                    PolicyAreaDefinition([2, 26, 261], _("d-3-261")),
                    PolicyAreaDefinition([2, 26, 262], _("d-3-262")),
                ]),
                PolicyAreaDefinition([2, 27], _("d-2-27")),
                PolicyAreaDefinition([2, 28], _("d-2-28")),
            ]),
            PolicyAreaDefinition([3], _("d-1-3"), [
                PolicyAreaDefinition([3, 31], _("d-2-31"), [
                    PolicyAreaDefinition([3, 31, 311], _("d-3-311")),
                    PolicyAreaDefinition([3, 31, 312], _("d-3-312")),
                    PolicyAreaDefinition([3, 31, 313], _("d-3-313")),
                ]),
                PolicyAreaDefinition([3, 32], _("d-2-32"), [
                    PolicyAreaDefinition([3, 32, 321], _("d-3-321")),
                    PolicyAreaDefinition([3, 32, 322], _("d-3-322")),
                    PolicyAreaDefinition([3, 32, 323], _("d-3-323")),
                    PolicyAreaDefinition([3, 32, 324], _("d-3-324")),
                    PolicyAreaDefinition([3, 32, 325], _("d-3-325")),
                    PolicyAreaDefinition([3, 32, 326], _("d-3-326")),
                    PolicyAreaDefinition([3, 32, 327], _("d-3-327")),
                    PolicyAreaDefinition([3, 32, 328], _("d-3-328")),
                ]),
                PolicyAreaDefinition([3, 33], _("d-2-33")),
            ]),
            PolicyAreaDefinition([4], _("d-1-4"), [
                PolicyAreaDefinition([4, 41], _("d-2-41"), [
                    PolicyAreaDefinition([4, 41, 411], _("d-3-411")),
                    PolicyAreaDefinition([4, 41, 412], _("d-3-412")),
                    PolicyAreaDefinition([4, 41, 413], _("d-3-413")),
                    PolicyAreaDefinition([4, 41, 414], _("d-3-414")),
                    PolicyAreaDefinition([4, 41, 415], _("d-3-415")),
                    PolicyAreaDefinition([4, 41, 416], _("d-3-416")),
                ]),
                PolicyAreaDefinition([4, 42], _("d-2-42"), [
                    PolicyAreaDefinition([4, 42, 421], _("d-3-421")),
                    PolicyAreaDefinition([4, 42, 422], _("d-3-422")),
                    PolicyAreaDefinition([4, 42, 423], _("d-3-423")),
                    PolicyAreaDefinition([4, 42, 424], _("d-3-424")),
                ]),
                PolicyAreaDefinition([4, 43], _("d-2-43"), [
                    PolicyAreaDefinition([4, 43, 431], _("d-3-431")),
                    PolicyAreaDefinition([4, 43, 432], _("d-3-432")),
                ]),
                PolicyAreaDefinition([4, 44], _("d-2-44"), [
                    PolicyAreaDefinition([4, 44, 441], _("d-3-441")),
                    PolicyAreaDefinition([4, 44, 442], _("d-3-442")),
                    PolicyAreaDefinition([4, 44, 443], _("d-3-443")),
                ]),
            ]),
            PolicyAreaDefinition([5], _("d-1-5"), [
                PolicyAreaDefinition([5, 51], _("d-2-51")),
                PolicyAreaDefinition([5, 52], _("d-2-52")),
                PolicyAreaDefinition([5, 53], _("d-2-53")),
                PolicyAreaDefinition([5, 54], _("d-2-54")),
                PolicyAreaDefinition([5, 55], _("d-2-55")),
            ]),
            PolicyAreaDefinition([6], _("d-1-6"), [
                PolicyAreaDefinition([6, 61], _("d-2-61"), [
                    PolicyAreaDefinition([6, 61, 611], _("d-3-611")),
                    PolicyAreaDefinition([6, 61, 612], _("d-3-612")),
                    PolicyAreaDefinition([6, 61, 613], _("d-3-613")),
                    PolicyAreaDefinition([6, 61, 614], _("d-3-614")),
                ]),
                PolicyAreaDefinition([6, 62], _("d-2-62")),
                PolicyAreaDefinition([6, 63], _("d-2-63")),
                PolicyAreaDefinition([6, 64], _("d-2-64")),
            ]),
            PolicyAreaDefinition([7], _("d-1-7"), [
                PolicyAreaDefinition([7, 71], _("d-2-71")),
                PolicyAreaDefinition([7, 72], _("d-2-72")),
                PolicyAreaDefinition([7, 73], _("d-2-73")),
                PolicyAreaDefinition([7, 74], _("d-2-74")),
                PolicyAreaDefinition([7, 75], _("d-2-75")),
            ]),
            PolicyAreaDefinition([8], _("d-1-8"), [
                PolicyAreaDefinition([8, 81], _("d-2-81"), [
                    PolicyAreaDefinition([8, 81, 811], _("d-3-811")),
                    PolicyAreaDefinition([8, 81, 812], _("d-3-812")),
                ]),
                PolicyAreaDefinition([8, 82], _("d-2-82"), [
                    PolicyAreaDefinition([8, 82, 821], _("d-3-821")),
                    PolicyAreaDefinition([8, 82, 822], _("d-3-822")),
                ]),
                PolicyAreaDefinition([8, 83], _("d-2-83"), [
                    PolicyAreaDefinition([8, 83, 831], _("d-3-831")),
                    PolicyAreaDefinition([8, 83, 832], _("d-3-832")),
                ]),
                PolicyAreaDefinition([8, 84], _("d-2-84")),
                PolicyAreaDefinition([8, 85], _("d-2-85")),
                PolicyAreaDefinition([8, 86], _("d-2-86")),
                PolicyAreaDefinition([8, 87], _("d-2-87")),
            ]),
            PolicyAreaDefinition([9], _("d-1-9"), [
                PolicyAreaDefinition([9, 91], _("d-2-91"), [
                    PolicyAreaDefinition([9, 91, 911], _("d-3-911")),
                    PolicyAreaDefinition([9, 91, 912], _("d-3-912")),
                ]),
                PolicyAreaDefinition([9, 92], _("d-2-92"), [
                    PolicyAreaDefinition([9, 92, 921], _("d-3-921")),
                    PolicyAreaDefinition([9, 92, 922], _("d-3-922")),
                ]),
                PolicyAreaDefinition([9, 93], _("d-2-93"), [
                    PolicyAreaDefinition([9, 93, 931], _("d-3-931")),
                    PolicyAreaDefinition([9, 93, 932], _("d-3-932")),
                    PolicyAreaDefinition([9, 93, 933], _("d-3-933")),
                    PolicyAreaDefinition([9, 93, 934], _("d-3-934")),
                    PolicyAreaDefinition([9, 93, 935], _("d-3-935")),
                    PolicyAreaDefinition([9, 93, 936], _("d-3-936")),
                    PolicyAreaDefinition([9, 93, 937], _("d-3-937")),
                    PolicyAreaDefinition([9, 93, 938], _("d-3-938")),
                ]),
            ]),
            PolicyAreaDefinition([10], _("d-1-10"), [
                PolicyAreaDefinition([10, 101], _("d-2-101"), [
                    PolicyAreaDefinition([10, 101, 1011], _("d-3-1011")),
                    PolicyAreaDefinition([10, 101, 1012], _("d-3-1012")),
                    PolicyAreaDefinition([10, 101, 1013], _("d-3-1013")),
                    PolicyAreaDefinition([10, 101, 1014], _("d-3-1014")),
                    PolicyAreaDefinition([10, 101, 1015], _("d-3-1015")),
                ]),
                PolicyAreaDefinition([10, 102], _("d-2-102"), [
                    PolicyAreaDefinition([10, 102, 1021], _("d-3-1021")),
                    PolicyAreaDefinition([10, 102, 1022], _("d-3-1022")),
                    PolicyAreaDefinition([10, 102, 1023], _("d-3-1023")),
                    PolicyAreaDefinition([10, 102, 1024], _("d-3-1024")),
                    PolicyAreaDefinition([10, 102, 1025], _("d-3-1025")),
                    PolicyAreaDefinition([10, 102, 1026], _("d-3-1026")),
                    PolicyAreaDefinition([10, 102, 1027], _("d-3-1027")),
                    PolicyAreaDefinition([10, 102, 1028], _("d-3-1028")),
                ]),
                PolicyAreaDefinition([10, 103], _("d-2-103"), [
                    PolicyAreaDefinition([10, 103, 1031], _("d-3-1031")),
                    PolicyAreaDefinition([10, 103, 1032], _("d-3-1032")),
                    PolicyAreaDefinition([10, 103, 1033], _("d-3-1033")),
                    PolicyAreaDefinition([10, 103, 1034], _("d-3-1034")),
                    PolicyAreaDefinition([10, 103, 1035], _("d-3-1035")),
                    PolicyAreaDefinition([10, 103, 1036], _("d-3-1036")),
                    PolicyAreaDefinition([10, 103, 1037], _("d-3-1037")),
                    PolicyAreaDefinition([10, 103, 1038], _("d-3-1038")),
                ]),
            ]),
            PolicyAreaDefinition([11], _("d-1-11"), [
                PolicyAreaDefinition([11, 111], _("d-2-111")),
                PolicyAreaDefinition([11, 112], _("d-2-112")),
                PolicyAreaDefinition([11, 113], _("d-2-113")),
                PolicyAreaDefinition([11, 114], _("d-2-114"), [
                    PolicyAreaDefinition([11, 114, 1141], _("d-3-1141")),
                    PolicyAreaDefinition([11, 114, 1142], _("d-3-1142")),
                ]),
                PolicyAreaDefinition([11, 115], _("d-2-115")),
            ]),
            PolicyAreaDefinition([12], _("d-1-12"), [
                PolicyAreaDefinition([12, 121], _("d-2-121")),
                PolicyAreaDefinition([12, 122], _("d-2-122")),
                PolicyAreaDefinition([12, 123], _("d-2-123")),
                PolicyAreaDefinition([12, 124], _("d-2-124")),
                PolicyAreaDefinition([12, 125], _("d-2-125"), [
                    PolicyAreaDefinition([12, 125, 1251], _("d-3-1251")),
                    PolicyAreaDefinition([12, 125, 1252], _("d-3-1252")),
                    PolicyAreaDefinition([12, 125, 1253], _("d-3-1253")),
                    PolicyAreaDefinition([12, 125, 1254], _("d-3-1254")),
                ]),
            ])
        ])
| 46.522472 | 79 | 0.47923 |
808a193ff68efa61f7aef67ad45ad7d43013f767
| 15,832 |
py
|
Python
|
webserver/webserver.py
|
tarent/osm_service
|
462f3d74d3ed16d307b65da6f24dcea5a982aacf
|
[
"Apache-2.0"
] | null | null | null |
webserver/webserver.py
|
tarent/osm_service
|
462f3d74d3ed16d307b65da6f24dcea5a982aacf
|
[
"Apache-2.0"
] | null | null | null |
webserver/webserver.py
|
tarent/osm_service
|
462f3d74d3ed16d307b65da6f24dcea5a982aacf
|
[
"Apache-2.0"
] | null | null | null |
# Fix Cannot import name 'cached_property': https://stackoverflow.com/a/60157748/3593881
# NOTE: this patch must run BEFORE flask_restplus is imported, which is why
# it sits above the other imports.
import werkzeug
werkzeug.cached_property = werkzeug.utils.cached_property
from flask import Flask
from flask.json import jsonify
from osm_service import OsmService
from flask_restplus import Api, Resource, reqparse
# Application configuration comes from the file named by the SETTINGS_FILE
# environment variable (database connection settings among others).
app = Flask(__name__)
app.config.from_envvar('SETTINGS_FILE')
with app.app_context():
    # Single shared service object used by all resources below to run the
    # OSM database queries.
    osm = OsmService(app.config["DATABASE_USER"], app.config["DATABASE_PASSWORD"], app.config["DATABASE_HOST"], app.config["DATABASE_PORT"], app.config["DATABASE_NAME"])
api = Api(app, version='1.0', title='OSM Service API',
          description='Documentation for the OSM Service API.')
# All endpoints live under the /relative namespace.
ns = api.namespace('relative', description='Operations for getting data relative to a given point')
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>')
class FullReport(Resource):
    # Aggregates every single-category endpoint below into one response.
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns the full report: Landuse, Parking, Chemists, Convenience Stores, Supermarkets, Malls, Schools, Kindergartens, Hospitals, Doctors, Railway Stations, Tram Stations, Bus Stations within a radius around a point described by the given latitude and longitude."""
        try:
            return {
                "input": {
                    "center": {
                        "lat": latitude,
                        "lon": longitude
                    },
                    "radius": radius
                },
                "result": {
                    "relative_type_of_area": osm.getLanduse(latitude, longitude, radius),
                    "malls": osm.getMalls(latitude, longitude, radius),
                    "chemists": osm.getChemists(latitude, longitude, radius),
                    "convenience": osm.getConvenience(latitude, longitude, radius),
                    "supermarkets": osm.getSupermarket(latitude, longitude, radius),
                    "parks": osm.getParks(latitude, longitude, radius),
                    "parking": osm.getParking(latitude, longitude, radius),
                    "schools": osm.getSchools(latitude, longitude, radius),
                    "kindergartens": osm.getKindergarten(latitude, longitude, radius),
                    "hospitals": osm.getHospitals(latitude, longitude, radius),
                    "doctors": osm.getDoctors(latitude, longitude, radius),
                    "railway_stations": osm.getRailwayStations(latitude, longitude, radius),
                    "tram_stations": osm.getTramStations(latitude, longitude, radius),
                    "bus_stations": osm.getBusStations(latitude, longitude, radius),
                }
            }, 200
        except Exception as e:
            # Fix: the original bare ``except:`` also caught SystemExit and
            # KeyboardInterrupt and discarded the error silently. Log and
            # answer 500, consistent with the sibling resources.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/malls')
class MallReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/malls -> OsmService.getMalls
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns the Malls within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getMalls(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/chemists')
class ChemistReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/chemists -> OsmService.getChemists
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns the Chemists within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getChemists(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/convenience')
class ConvenienceReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/convenience -> OsmService.getConvenience
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns the Convenience Stores within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getConvenience(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/supermarkets')
class SupermarketReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/supermarkets -> OsmService.getSupermarket
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns the Supermarkets within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getSupermarket(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/landuse')
class LanduseReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/landuse -> OsmService.getLanduse
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns the Landuse within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getLanduse(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/parking')
class ParkingReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/parking -> OsmService.getParking
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns car parks within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getParking(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/parks')
class ParkReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/parks -> OsmService.getParks
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns parks within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getParks(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/schools')
class SchoolReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/schools -> OsmService.getSchools
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns the Schools within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getSchools(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/kindergarten')
class KindergartenReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/kindergarten -> OsmService.getKindergarten
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns the Kindergartens within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getKindergarten(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/hospitals')
class HospitalReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/hospitals -> OsmService.getHospitals
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns the Hospitals within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getHospitals(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/doctors')
class DoctorReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/doctors -> OsmService.getDoctors
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns the Doctors within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getDoctors(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/railway')
class RailwayStationReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/railway -> OsmService.getRailwayStations
    # Fix: the @api.doc decorator was duplicated verbatim; one copy suffices
    # for the Swagger documentation.
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns the Railway Stations within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getRailwayStations(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/tram')
class TramStationReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/tram -> OsmService.getTramStations
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns the Tram Stations within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getTramStations(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
@ns.route('/<float:latitude>,<float:longitude>/<int:radius>/bus')
class BusStationReport(Resource):
    # GET /relative/<lat>,<lon>/<radius>/bus -> OsmService.getBusStations
    @api.doc(responses={200: 'OK', 500: 'Internal Server Error'},
             params={'latitude': 'Specify the latitude associated with the point.',
                     'longitude': 'Specify the longitude associated with the point.',
                     'radius': 'Specify the radius (meters) covering the circular region of interest around the point '
                               '(coordinate) described by the latitude and longitude.'})
    def get(self, latitude, longitude, radius):
        """Returns the Bus Stations within a radius around a point described by the given latitude and longitude."""
        try:
            return osm.getBusStations(latitude, longitude, radius), 200
        except Exception as e:
            # Query failures are logged and mapped to an empty 500 response.
            print(e)
            return "", 500
| 54.405498 | 275 | 0.620958 |
aff5d1f9651ddccc73299c416733c510aa6ca2e6
| 3,610 |
py
|
Python
|
Buchstabenraetsel_Summen_mini.py
|
UlrichBerntien/Uebungen-Python
|
67e6d885dd2319e999979410448364ec4d59dc51
|
[
"Apache-2.0"
] | null | null | null |
Buchstabenraetsel_Summen_mini.py
|
UlrichBerntien/Uebungen-Python
|
67e6d885dd2319e999979410448364ec4d59dc51
|
[
"Apache-2.0"
] | null | null | null |
Buchstabenraetsel_Summen_mini.py
|
UlrichBerntien/Uebungen-Python
|
67e6d885dd2319e999979410448364ec4d59dc51
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Buchstabenrätsel Summen
https://www.programmieraufgaben.ch/aufgabe/buchstabenraetsel-summen/mtt2cuwo
"""
# Programmieraufgabe:
#
# Schreiben Sie ein Programm, das Symbolrätsel der folgenden Art (Summen)
# löst:
# aab + bbc = dde.
# Dabei bedeuten gleiche Buchstaben auch immer gleiche Ziffern und
# verschiedene Buchstaben bedeuten auch verschiedene Ziffern.
#
# Programmidee:
#
# Sucht nach einer Lösung für die Buchtabenrätsel Summe. Es werden nicht alle
# möglichen Lösungen gesucht, die Suche wird beim ersten Erfolg abgebrochen.
# Gegeben ist eine Summe von Zahlen und das Ergebnis, dabei sind Ziffern durch
# Buchstaben ersetzt. Jeder Buchstabe steht für eine andere Ziffer. Sind in
# der Gleichung Ziffern enthalten, dann kann kein Buchstabe eine dieser Ziffern
# sein. In den Summen und im Ergebnis können auch Ziffern vorgegeben sein.
#
# Verwendet wird ein Backtracking-Algorithmus. Ziel ist ein kurzes Programm mit
# dem Verzicht auf einen optimierten Lösungsalgorithmus.
#
# Autor, Erstellung:
# Ulrich Berntien, 2018-10-30
#
# Sprache:
# Python 3.6.6
from typing import *
def save_eval(equation: str) -> bool:
    """Evaluate an equation while trapping syntax errors.

    Warning: the code is executed without any validation (eval on the
    given string).

    :param equation: Equation in Python syntax.
    :return: True if the equation evaluates to True, otherwise False.
    """
    result = False
    try:
        # A SyntaxError occurs e.g. for candidate numbers with leading
        # zeros, which Python 3 rejects as decimal literals.
        result = eval(equation)
    except SyntaxError:
        pass
    return result
def solve(equation: str, letters: Set[str], digits: List[str]) -> str:
    """
    Backtracking algorithm, recursive implementation.

    :param equation: the partially substituted equation.
    :param letters: letters still to be assigned.
    :param digits: digits still available for assignment.
    :return: the solved equation, or an empty string if unsolvable.
    """
    if not letters and save_eval(equation):
        # A solution has been found.
        return equation
    if not letters or not digits:
        # No letters or no digits left to try.
        return ""
    # Try every remaining digit for one of the remaining letters.
    remaining_letters = letters.copy()
    letter = remaining_letters.pop()
    for digit in digits:
        remaining_digits = digits.copy()
        remaining_digits.remove(digit)
        result = solve(equation.replace(letter, digit),
                       remaining_letters, remaining_digits)
        if result:
            # This branch produced a solution.
            return result
    # Bug fix: the original fell through and implicitly returned None
    # here; the declared contract (-> str) is an empty string when no
    # digit assignment for this letter leads to a solution.
    return ""
def solve_letter_sum(letter_sum: str) -> str:
    """
    Search for one solution of the letter-puzzle sum.

    :param letter_sum: the puzzle, e.g. "aab + bbc = dde".
    :return: a solution, or an empty string when none was found.
    """
    # Convert the equation into Python syntax for the check evaluations.
    # Note: no validation of the input, to keep the program minimal.
    python_equation = letter_sum.replace("=", "==")
    used_letters = {c for c in python_equation if c.isalpha()}
    free_digits = [d for d in "0123456789" if d not in python_equation]
    return solve(python_equation, used_letters, free_digits)
# Check of the solver function
if __name__ == '__main__':
    # A mix of solvable and unsolvable puzzles, with and without
    # literal digits already present in the equation.
    testcases = ("aab + bbc = dde",
                 "SEND + MORE = MONEY",
                 "abc+111=468",
                 "abc+111=dab",
                 "abc+def+ghi=acfe",
                 "12ab+cdef=dg12",
                 "abc+abc=268",
                 "a+b=8")
    for test in testcases:
        print("Aufgabe:", test)
        print(". . . .:", solve_letter_sum(test))
| 34.056604 | 84 | 0.660111 |
b339ac12f7b339ab07d3b4da541129d4ffe4ee0c
| 393 |
py
|
Python
|
BIZa/2014/Tskipu_a_k/task_2_28.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
BIZa/2014/Tskipu_a_k/task_2_28.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
BIZa/2014/Tskipu_a_k/task_2_28.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Задача 2. Вариант 28.
#Напишите программу, которая будет выводить на экран наиболее понравившееся вам высказывание, автором которого является Эпикур. Не забудьте о том, что автор должен быть упомянут на отдельной строке.
# Цкипуришвили Александр
# 25.05.2016
print("Каждый уходит из жизни так, словно только что вошел.")
print("\n\t\t\t\t\t Эпикур")
input("нажимте Enter для выхода")
| 43.666667 | 199 | 0.765903 |
2fbcad032b3dc344d842421b5220e7166d6668f4
| 2,127 |
py
|
Python
|
results/xlights/city-backend/xlight/views.py
|
Rebstorm/dthack17
|
41cea4804fefe6a397114056c43d17c54b8e979d
|
[
"MIT"
] | null | null | null |
results/xlights/city-backend/xlight/views.py
|
Rebstorm/dthack17
|
41cea4804fefe6a397114056c43d17c54b8e979d
|
[
"MIT"
] | null | null | null |
results/xlights/city-backend/xlight/views.py
|
Rebstorm/dthack17
|
41cea4804fefe6a397114056c43d17c54b8e979d
|
[
"MIT"
] | null | null | null |
"""Django default module for view generation"""
from django.shortcuts import redirect, render
from django.http import HttpResponse, Http404
import json
from xlight.xlight_core import XLightHandler
from xlight.models import TrafficLight, ApiStatus
from re import sub
from django.core import serializers
def cleanup_url_path(url_path):
    """Normalize a URL path: empty/None becomes '/', runs of slashes
    collapse to a single one, and a trailing 'index.html' is removed."""
    path = url_path or '/'
    path = sub('[/]+', '/', path)
    return sub('index.html$', '', path)
class ViewHandler:
    """Instances of this class handle incoming GET requests and serve
    the appropriate HTTP responses for traffic-light state queries."""

    # Backend that produces the traffic-light state for a beacon id.
    xlight_handler = None

    def __init__(self):
        """Create the handler with a fresh XLightHandler backend."""
        self.xlight_handler = XLightHandler()

    def get(self, request):
        """Serve a GET request for the state of one traffic light.

        Redirects to the canonical URL when needed, then answers either
        with the raw status value (when ?raw= is given) or with a JSON
        payload containing the light and an ApiStatus record.
        """
        if not request:
            raise TypeError
        if 'favicon' in request.path:
            raise Http404('no favi here')
        canonical = cleanup_url_path(request.path)
        if request.path != canonical:
            return redirect(canonical)
        if not request.path.endswith('/'):
            request.path = request.path + '/'
        beacon_id = request.GET.get('beaconid')
        raw_mode = request.GET.get('raw')
        light = self.xlight_handler.get_xlight_state(beacon_id)
        status = ApiStatus(http_code=200, error_message="")
        if not light:
            if raw_mode:
                return HttpResponse('-1')
            status = ApiStatus(
                http_code=404, error_message="xlight not registered")
            light = TrafficLight()
        payload = serializers.serialize("json", [light, status])
        if raw_mode:
            return HttpResponse(light.current_status)
        return HttpResponse(
            payload, content_type="application/json")
| 30.826087 | 75 | 0.621533 |
6429607cbc5c38f9ec87bca03a26ad4ca5164497
| 10,935 |
py
|
Python
|
scripts/extraction/postgisS2Extract.py
|
CsabaWirnhardt/cbm
|
1822addd72881057af34ac6a7c2a1f02ea511225
|
[
"BSD-3-Clause"
] | 17 |
2021-01-18T07:27:01.000Z
|
2022-03-10T12:26:21.000Z
|
scripts/extraction/postgisS2Extract.py
|
CsabaWirnhardt/cbm
|
1822addd72881057af34ac6a7c2a1f02ea511225
|
[
"BSD-3-Clause"
] | 4 |
2021-04-29T11:20:44.000Z
|
2021-12-06T10:19:17.000Z
|
scripts/extraction/postgisS2Extract.py
|
CsabaWirnhardt/cbm
|
1822addd72881057af34ac6a7c2a1f02ea511225
|
[
"BSD-3-Clause"
] | 47 |
2021-01-21T08:25:22.000Z
|
2022-03-21T14:28:42.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Guido Lemoine
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
# Version :

""" postgisS2Extract_ext.py:
A routine to extract zonal statistics from imagery in S3 object storage.
Assumes postgis data base use for scene metadata, features to extract and result storage.
Essential part of DIAS functionality for CAP Checks by Monitoring
Author: Guido Lemoine, European Commission, Joint Research Centre
License: see git repository
Version 1.1 - 2019-06-27
Revisions in 1.1:
- Externalize configuration parameters to postgisS2Extract_ext.json
- Get parcel_table srid and image srid dynamically
- Exit if no records found for processing
- Resolve missing data accounting
- Housekeeping
Revisions in 1.2 - 2020-12-11 Konstantinos Anastasakis:
- Code cleanup (flake8)
"""

import time
import sys
import os
import io
import json
import psycopg2
import psycopg2.extras
import rasterio
import pandas as pd
from rasterstats import zonal_stats
import download_with_boto3 as dwb

# Wall-clock start, reported in the final timing summary.
start = time.time()

# Rev 1.1. configuration parsing from json
with open('s3_config.json', 'r') as f:
    s3config = json.load(f)
s3config = s3config['s3']
with open('db_config_s2.json', 'r') as f:
    dbconfig = json.load(f)
dbconfig = dbconfig['database']

# Input data base is postgis
connString = "host={} dbname={} user={} port={} password={}".format(
    dbconfig['connection']['host'], dbconfig['connection']['dbname'],
    dbconfig['connection']['dbuser'], dbconfig['connection']['port'],
    dbconfig['connection']['dbpasswd'])
# print(connString)

# Two connections to the same database: inconn for reads, outconn for
# writing results.
inconn = psycopg2.connect(connString)
if not inconn:
    print("No in connection established")
    sys.exit(1)
outconn = psycopg2.connect(connString)
if not outconn:
    print("No out connection established")
    sys.exit(1)

incurs = inconn.cursor()

# Rev 1.1: look up the parcel table's SRID dynamically from
# geometry_columns instead of hard-coding it.
srid = -1
sridSql = "select srid from geometry_columns where f_table_name = '{}';"
try:
    incurs.execute(sridSql.format(dbconfig['tables']['parcel_table']))
    result = incurs.fetchone()
    if not result:
        print("{} does not exist or is not a spatial table")
    else:
        srid = result[0]
except (Exception, psycopg2.DatabaseError) as error:
    print(error)
    inconn.close()
    sys.exit(1)

print("Parcel srid = ", srid)
# Get the first image record that is not yet processed
imagesql = """
    SELECT id, reference, obstime from dias_catalogue, {}
    WHERE footprint && wkb_geometry and {} = '{}'
    And obstime between '{}' and '{}'
    And status ='ingested' and card='s2' ORDER by obstime asc limit 1
    """
updateSql = """
    UPDATE dias_catalogue set status='{}'
    WHERE id = {} and status = '{}'"""

# The status flip 'ingested' -> 'inprogress' claims the scene; the WHERE
# clause on the old status makes the claim fail if another worker got
# there first (optimistic locking).
try:
    incurs.execute(imagesql.format(
        dbconfig['tables']['aoi_table'],
        dbconfig['args']['aoi_field'], dbconfig['args']['name'],
        dbconfig['args']['startdate'], dbconfig['args']['enddate']))
    result = incurs.fetchone()
    if not result:
        print("No images with status 'ingested' found")
        inconn.close()
        sys.exit(1)
    else:
        oid = result[0]
        reference = result[1]
        obstime = result[2]
        # Fails if this record is changed in the meantime
        incurs.execute(updateSql.format('inprogress', oid, 'ingested'))
        inconn.commit()
except (Exception, psycopg2.DatabaseError) as error:
    print(error)
    inconn.close()
    sys.exit(1)

# Count parcels (area > 3000 m2) intersecting the scene footprint and a
# 1 km buffer around the AOI; extra positional format() args are ignored.
parcelcountsql = """
    SELECT count(es.ogc_fid)
    FROM {} es, dias_catalogue dias, {} aoi
    WHERE es.wkb_geometry && st_transform(dias.footprint, {})
    And es.wkb_geometry && st_transform(st_buffer(aoi.wkb_geometry::geography,
    1000)::geometry, {})
    And st_area(es.wkb_geometry) > 3000.0
    And aoi.{} = '{}' and dias.id = {}
    """
incurs.execute(parcelcountsql.format(
    dbconfig['tables']['parcel_table'],
    dbconfig['tables']['aoi_table'], srid, srid,
    dbconfig['args']['aoi_field'], dbconfig['args']['name'],
    oid, dbconfig['tables']['results_table'], oid))
nrecs = incurs.fetchone()

# If no parcels inside, we can stop
if nrecs[0] == 0:
    print("Image {} contains no parcels (FATAL)".format(reference))
    incurs.execute(updateSql.format('no_parcels', oid, 'inprogress'))
    inconn.commit()
    incurs.close()
    inconn.close()
    sys.exit(1)

# Copy input data from S3 to local disk
# SOBLOO
# rootpath = '{}/L1C'.format(reference.split('_')[0])
# CREODIAS
# The CREODIAS bucket layout is used; SOBLOO variants are kept commented.
rootpath = 'Sentinel-2/MSI/L2A'
obstime = reference.split('_')[2][0:8]
obs_path = "{}/{}/{}".format(obstime[0:4], obstime[4:6], obstime[6:8])
mgrs_tile = reference.split('_')[5]
full_tstamp = reference.split('_')[2]

# There was an issue with the manifest.safe sometime during 2018, and we need
# to check the GRANULE directory to understand where image data is located
# CREODIAS
s3path = "{}/{}/{}/GRANULE/".format(rootpath, obs_path, reference)
# SOBLOO
# s3path = "{}/{}/{}.SAFE/GRANULE/".format(rootpath, reference, reference.replace('MSIL1C', 'MSIL2A'))
flist = dwb.listFileFromS3(s3path)
# print(flist)

if not flist:
    print("Resource {} not available in S3 storage (FATAL)".format(s3path))
    incurs.execute(updateSql.format('S2_nopath', oid, 'inprogress'))
    inconn.commit()
    incurs.close()
    inconn.close()
    sys.exit(1)
# We want 3 image files only, e.g. to create NDVI and have some idea about local image quality
# SOBLOO does not produce 10 m L2A bands and only B8A (not B08)!
s3subdir = flist[1].replace(s3path, '').split('/')[0]

print(s3path)
print(flist[1])
print(s3subdir)

# Bands fetched: red (B04) and NIR (B08) at 10 m, plus the 20 m scene
# classification layer (SCL).
selection = {'B4': '{}/{}_{}_{}_{}.jp2'.format('R10m', mgrs_tile, full_tstamp, 'B04', '10m'),
             'B8': '{}/{}_{}_{}_{}.jp2'.format('R10m', mgrs_tile, full_tstamp, 'B08', '10m'),
             'SC': '{}/{}_{}_{}_{}.jp2'.format('R20m', mgrs_tile, full_tstamp, 'SCL', '20m')
             }

file_set = {}

for k in selection.keys():
    s = selection.get(k)
    fpath = "data/{}".format(s.split('/')[-1])
    alt_s = s.replace('0m/', '0m/L2A_')
    if dwb.getFileFromS3('{}{}/IMG_DATA/{}'.format(s3path, s3subdir, s), fpath) == 1:
        print("Image {} found in bucket".format(s))
        file_set[k] = fpath
    elif dwb.getFileFromS3('{}{}/IMG_DATA/{}'.format(s3path, s3subdir, alt_s), fpath) == 1:
        # LEVEL2AP has another naming convention.
        print("Image {} found in bucket".format(alt_s))
        file_set[k] = fpath
    else:
        print("Neither Image {} nor {} found in bucket".format(s, alt_s))
        incurs.execute(updateSql.format(
            '{} notfound'.format(k), oid, 'inprogress'))
        inconn.commit()
        incurs.close()
        inconn.close()
        sys.exit(1)

# Get the parcel polygon in this image' footprint
print(file_set)

# Parcels are reprojected to the tile's UTM zone (EPSG:326xx, northern
# hemisphere codes) so they align with the raster data.
outsrid = int('326{}'.format(mgrs_tile[1:3]))

incurs.close()

outconn = psycopg2.connect(connString)
if not outconn:
    print("No out connection established")
    sys.exit(1)

# Open a named cursor
# (server-side cursor, so the parcel rows can be fetched in batches)
incurs = inconn.cursor(name='fetch_image_coverage',
                       cursor_factory=psycopg2.extras.DictCursor)
parcelsql = """
    SELECT es.ogc_fid, ST_AsGeoJSON(st_transform(es.wkb_geometry, {}))::json
    FROM {} es, dias_catalogue dias, {} aoi
    WHERE es.wkb_geometry && st_transform(dias.footprint, {})
    And es.wkb_geometry && st_transform(st_buffer(aoi.wkb_geometry::geography,
    1000)::geometry, {})
    And st_area(es.wkb_geometry) > 3000.0
    And aoi.{} = '{}' and dias.id = {}
    -- and es.ogc_fid not in (select distinct pid from {} where obsid = {})
    """
incurs.execute(parcelsql.format(
    outsrid, dbconfig['tables']['parcel_table'],
    dbconfig['tables']['aoi_table'], srid, srid,
    dbconfig['args']['aoi_field'], dbconfig['args']['name'],
    oid, dbconfig['tables']['results_table'], oid))

sqlload = time.time() - start
print("Images loaded and {} features selected from database in {} seconds".format(
    nrecs[0], sqlload))

# Per-band bookkeeping: row counts, geotransforms and pixel arrays.
nrows = {}
for k in file_set.keys():
    nrows[k] = 0

affine = {}
array = {}

bands = file_set.keys()

for b in bands:
    with rasterio.open(file_set.get(b)) as src:
        affine[b] = src.transform
        array[b] = src.read(1)
# Stream parcels in batches of 2000; for each batch compute zonal
# statistics per band and bulk-COPY them into the results table.
while True:
    rowset = incurs.fetchmany(size=2000)

    if not rowset:
        break

    features = {"type": "FeatureCollection",
                "features": [{"type": "feature", "geometry": f[1],
                              "properties": {"pid": int(f[0])}} for f in rowset]}

    for b in bands:
        zs = zonal_stats(
            features, array[b], affine=affine[b],
            stats=["count", "mean", "std", "min", "max",
                   "percentile_25", "percentile_50", "percentile_75"],
            prefix="", nodata=0, geojson_out=True)

        df = pd.DataFrame(zs)
        df = pd.DataFrame.from_dict(df.properties.to_dict(), orient='index')

        df['obsid'] = oid
        df['band'] = b

        df.rename(index=str, columns={
            "percentile_25": "p25", "percentile_50": "p50",
            "percentile_75": "p75"}, inplace=True)

        nrows[b] = nrows[b] + len(df)
        # df is the dataframe
        if len(df) > 0:
            df.dropna(inplace=True)
            if len(df.values) > 0:
                df_columns = list(df)
                s_buf = io.StringIO()
                # COPY FROM a CSV buffer is much faster than row inserts.
                df.to_csv(s_buf, header=False, index=False, sep=',')
                s_buf.seek(0)
                outcurs = outconn.cursor()
                # print(tuple(df_columns))
                try:
                    #psycopg2.extras.execute_batch(outcurs, insert_stmt, df.values)
                    outcurs.copy_from(
                        s_buf, dbconfig['tables']['results_table'],
                        columns=tuple(df_columns), sep=',')
                    outconn.commit()
                except psycopg2.IntegrityError:
                    # Duplicate keys (re-run): skip this batch silently.
                    print("insert statement {} contains duplicate index")
                # except Error as e:
                #    print(e)
                finally:
                    outcurs.close()
            else:
                print("No valid data in block {}".format(nrows[b]))

outconn.close()

incurs.close()

# Housekeeping: mark the scene as 'extracted' and remove the files that
# were downloaded from S3.
incurs = inconn.cursor()

try:
    incurs.execute(updateSql.format('extracted', oid, 'inprogress'))
    inconn.commit()
except (Exception, psycopg2.DatabaseError) as error:
    print(error)
    inconn.close()

if outconn:
    outconn.close()

incurs.close()
inconn.close()

if os.path.exists(fpath):
    print("Removing {}".format(fpath))
    os.remove(fpath)

for f in file_set.keys():
    if os.path.exists(file_set.get(f)):
        print("Removing {}".format(file_set.get(f)))
        os.remove(file_set.get(f))

print("Total time required for {} features and {} bands: {} seconds".format(
    nrows.get('B8'), len(bands), time.time() - start))
| 30.630252 | 102 | 0.621948 |
ff4b7248e63cbc0bcfc0cf9fae33b3943466ff79
| 2,207 |
py
|
Python
|
haas_lib_bundles/python/libraries/uln2003/uln2003.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
haas_lib_bundles/python/libraries/uln2003/uln2003.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
haas_lib_bundles/python/libraries/uln2003/uln2003.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (C) 2015-2021 Alibaba Group Holding Limited
MicroPython's driver for ULN2003
Author: HaaS
Date: 2022/03/15
"""
from driver import GPIO
from utime import sleep_ms
from micropython import const
import math
class ULN2003(object):
    """
    Driver for a 4-phase stepper motor behind a ULN2003 chip.

    The motor is driven in full-step (wave drive) mode: exactly one of
    the four phase lines (A, A', B, B') is energised at a time.
    """

    # One full drive cycle (phase A, A', B, B'); iterated in reverse
    # for counter-clockwise rotation.
    _STEP_SEQUENCE = ((1, 0, 0, 0),
                      (0, 1, 0, 0),
                      (0, 0, 1, 0),
                      (0, 0, 0, 1))

    def __init__(self, a, a_, b, b_):
        """
        :param a:  GPIO driving phase A
        :param a_: GPIO driving phase A'
        :param b:  GPIO driving phase B
        :param b_: GPIO driving phase B'
        :raises ValueError: if any argument is not a GPIO object
        """
        # Validate all pins before touching any state (the original
        # repeated this check four times verbatim).
        for pin in (a, a_, b, b_):
            if not isinstance(pin, GPIO):
                raise ValueError("parameter is not an GPIO object")
        # make ULN2003's internal objects point to the gpio lines
        self._a = a
        self._a_ = a_
        self._b = b
        self._b_ = b_

    def _write_phases(self, a, a_, b, b_):
        """Write one on/off pattern to the four phase lines."""
        self._a.write(a)
        self._a_.write(a_)
        self._b.write(b)
        self._b_.write(b_)

    def motorCw(self, speed=4):
        """
        Advance one full 4-phase cycle clockwise.

        :param speed: delay in milliseconds between phase changes.
        """
        for pattern in self._STEP_SEQUENCE:
            self._write_phases(*pattern)
            sleep_ms(speed)

    def motorCcw(self, speed=4):
        """
        Advance one full 4-phase cycle counter-clockwise.

        :param speed: delay in milliseconds between phase changes.
        """
        for pattern in reversed(self._STEP_SEQUENCE):
            self._write_phases(*pattern)
            sleep_ms(speed)

    def motorStop(self):
        """De-energise all four phases, releasing the motor."""
        self._write_phases(0, 0, 0, 0)
| 23.231579 | 63 | 0.550974 |
440dcb7179b2021d1efe85cb1c9db8f0e166b4af
| 1,935 |
py
|
Python
|
src/test/tests/databases/lines.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226 |
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/test/tests/databases/lines.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100 |
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/test/tests/databases/lines.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84 |
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: lines.py
#
# Tests: mesh - 2D lines (unstructured), 3D lines (unstructured)
# plots - mesh
#
# Programmer: Alister Maguire
# Date: Tue Mar 17 08:50:32 PDT 2020
#
# Modifications:
#
# Mark C. Miller, Mon Jan 11 10:32:17 PST 2021
# Replace AssertEqual() with TestValueEQ()
# ----------------------------------------------------------------------------
def TestMeshPlot():
    """Verify that .lines files are read with the correct dimensionality
    (checked through SpatialExtents) and render correctly as Mesh plots."""
    #
    # First, let's make sure that 3d lines are read appropriately.
    #
    v = GetView3D()
    v.viewNormal = (0.9, 0.35, -0.88)
    SetView3D(v)
    OpenDatabase(data_path("lines_test_data/spring.lines"))
    AddPlot("Mesh", "Lines", 1, 1)
    DrawPlots()
    Query("SpatialExtents")
    # Check dimensionality.
    # A 3D extents query yields 6 values (min/max for each axis).
    ext_len = len(GetQueryOutputValue())
    TestValueEQ("Verifying 3D lines", ext_len, 6)
    # Check the rendering.
    Test("mesh_plot_00")
    DeleteAllPlots()
    CloseDatabase(data_path("lines_test_data/spring.lines"))
    #
    # Next, let's check 2d lines.
    #
    OpenDatabase(data_path("lines_test_data/2d.lines"))
    AddPlot("Mesh", "Lines", 1, 1)
    DrawPlots()
    Query("SpatialExtents")
    # Check dimensionality.
    # A 2D extents query yields 4 values.
    ext_len = len(GetQueryOutputValue())
    TestValueEQ("Verifying 2D lines", ext_len, 4)
    # Check the rendering.
    Test("mesh_plot_01")
    DeleteAllPlots()
    CloseDatabase(data_path("lines_test_data/2d.lines"))
    #
    # This test makes sure that consecutive points are only
    # removed from one line at a time.
    #
    OpenDatabase(data_path("lines_test_data/consecutive.lines"))
    AddPlot("Mesh", "Lines", 1, 1)
    DrawPlots()
    # Check the rendering.
    Test("mesh_plot_02")
    DeleteAllPlots()
    CloseDatabase(data_path("lines_test_data/consecutive.lines"))
def main():
    """Run this test case's checks and leave the VisIt test harness."""
    TestMeshPlot()
    Exit()

main()
| 24.493671 | 78 | 0.591214 |
9260876f2bc37b3b3bb06faa32efc9c91bfddb75
| 122 |
py
|
Python
|
pyhton/modules/internal-module/src/main/python/main.py
|
NovaOrdis/playground
|
6fe076b5d246ac5b492ab8d5de04eef43d797b62
|
[
"Apache-2.0"
] | 5 |
2016-11-16T02:18:45.000Z
|
2018-03-14T19:57:11.000Z
|
pyhton/modules/internal-module/src/main/python/main.py
|
NovaOrdis/playground
|
6fe076b5d246ac5b492ab8d5de04eef43d797b62
|
[
"Apache-2.0"
] | null | null | null |
pyhton/modules/internal-module/src/main/python/main.py
|
NovaOrdis/playground
|
6fe076b5d246ac5b492ab8d5de04eef43d797b62
|
[
"Apache-2.0"
] | 10 |
2016-06-06T18:28:13.000Z
|
2018-07-01T18:20:06.000Z
|
import os
import sys

# Make the adjacent "my_module" directory importable; `import my_module`
# then resolves to a my_module module *inside* that directory.
sys.path.append(os.path.dirname(__file__) + "/my_module")

import my_module

my_module.some_function()
| 20.333333 | 57 | 0.795082 |
2bcdff62c172ffd9dd109f55ecd5b831b173b082
| 337 |
py
|
Python
|
app/models.py
|
Eggolt/Webdashboard
|
440e7cda510d4adf559572636611cbecf1b0ffae
|
[
"MIT"
] | null | null | null |
app/models.py
|
Eggolt/Webdashboard
|
440e7cda510d4adf559572636611cbecf1b0ffae
|
[
"MIT"
] | null | null | null |
app/models.py
|
Eggolt/Webdashboard
|
440e7cda510d4adf559572636611cbecf1b0ffae
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, Float, String, Integer, Numeric
from .database import Base
class salary(Base):
    """ORM model for the "salary" table: one row per player with the
    player's team, field position and salary."""
    __tablename__ = "salary"

    id = Column(Integer, primary_key=True, index=True)  # surrogate key
    player = Column(String, unique=True, index=True)  # player name (unique)
    fieldposition = Column(String)
    team = Column(String)
    # NOTE(review): attribute shadows the class name; presumably the
    # salary amount — confirm against the loading code.
    salary= Column(Numeric)
92163257ab9f28ab64b7b2298398dc41f2c32b4d
| 1,463 |
py
|
Python
|
S4CTF/2021/misc/tom/tom.py
|
mystickev/ctf-archives
|
89e99a5cd5fb6b2923cad3fe1948d3ff78649b4e
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
S4CTF/2021/misc/tom/tom.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
S4CTF/2021/misc/tom/tom.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-12-19T11:06:24.000Z
|
2021-12-19T11:06:24.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import random, sys, os, signal, string, re
import inspect
from flag import flag
import primefac
def tom(n):
    """Return (c, factors): the smallest non-negative c of parity
    opposite to n such that n+c or n-c factors into exactly two primes,
    together with that list of two prime factors."""
    c = 1 - (n % 2)
    while True:
        # Check the value above n first, then below, as the original did.
        for candidate in (n + c, n - c):
            factors = list(primefac.primefac(candidate))
            if len(factors) == 2:
                return c, factors
        c += 2
def die(*args):
    """Print *args* (space separated, via pr) and terminate the process."""
    pr(*args)
    quit()
def pr(*args):
    """Print the space-joined string forms of *args* on one line and
    flush stdout immediately (needed for interactive socket services)."""
    line = " ".join(str(arg) for arg in args)
    sys.stdout.write(line + "\n")
    sys.stdout.flush()
def sc():
    """Read one line from stdin, stripped of surrounding whitespace."""
    return sys.stdin.readline().strip()
def main():
    """Interactive challenge loop: in each round the player must send an
    n in (11**c, 11**(c+1)) whose tom(n)[0] equals a random target r;
    after `step` correct rounds the flag is revealed."""
    step = 40
    c, border = 4, "+"
    pr(border*72)
    pr(border, " hi power programmers and coders! Your mission is to find a special ", border)
    pr(border, " number with desired property that we named it Tom. Now review the ", border)
    pr(border, " source code and get the flag! ", border)
    pr(border*72)
    while c <= step:
        # Target range grows with the level counter c.
        r = random.randint(1, 20 + (c - 5))
        pr("| Send an integer `n' greater than", 11**c, "and less than", 11**(c+1), "such tom(n) =", r)
        ans = sc()
        try:
            ans = int(ans)
            if ans > 11**c and ans < 11**(c+1):
                if tom(ans)[0] == r:
                    c += 1
                    if c == step:
                        die("| Congrats, you got the flag:", flag)
                    else:
                        pr("| good job, try the next level :)")
                else:
                    # Debug-style dump of the failed check before exit
                    # (Python 2: prints the tuple (tom(ans), r)).
                    print(tom(ans), r)
                    die("| Your answer is not correct!", tom(ans)[0] == r)
            else:
                die("Quiting ...")
        except:
            # Any error (e.g. non-integer input) ends the session.
            die("Bye :P")

if __name__ == '__main__':
    main()
| 22.859375 | 97 | 0.561859 |
a65bcea6bd5e28400b4206fbdf0ae99572ee8bc2
| 561 |
py
|
Python
|
20-hs-redez-sem/groups/02-unionDir/filesystem-redez-client/utils/json.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 8 |
2020-03-17T21:12:18.000Z
|
2021-12-12T15:55:54.000Z
|
20-hs-redez-sem/groups/02-unionDir/filesystem-redez-client/utils/json.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 2 |
2021-07-19T06:18:43.000Z
|
2022-02-10T12:17:58.000Z
|
20-hs-redez-sem/groups/02-unionDir/filesystem-redez-client/utils/json.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 25 |
2020-03-20T09:32:45.000Z
|
2021-07-18T18:12:59.000Z
|
import pathlib
import ntpath
import hash_
def _file_to_json(syspath):
systemname = str(ntpath.basename(syspath))
print(systemname)
data = []
jsondata = {}
pathlist = pathlib.Path(syspath).glob('**/*.*')
for path in pathlist:
dict = {}
dict['name'] = str(ntpath.basename(path))
dict['fullpath'] = systemname + str(path).replace(syspath, "").replace(str(ntpath.basename(path)), "")
dict['hash'] = hash_.get_hash(path)
data.append(dict)
jsondata["{}".format(syspath)] = data
return jsondata
| 29.526316 | 110 | 0.622103 |
f37253245fe7a4af17c67be30ad2ee70e7ee32ee
| 1,392 |
py
|
Python
|
elements/python/8/12/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | 11 |
2019-02-08T06:54:34.000Z
|
2021-08-07T18:57:39.000Z
|
elements/python/8/12/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | 1 |
2019-05-21T08:14:10.000Z
|
2019-05-21T08:14:10.000Z
|
elements/python/8/12/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | null | null | null |
class Node(object):
    # Singly linked list node: payload `x` plus `next` pointer.
    def __init__(self, x, nxt):
        """Create a node holding *x* whose successor is *nxt* (Node or None)."""
        self.x = x
        self.next = nxt
def splitmod(n, k):
    """
    Split linked list *n* into k lists, where list i receives the nodes
    at positions congruent to i (mod k).  Nodes are rewired in place.
    Return the heads and the tails of the k lists.
    """
    heads = [None] * k
    tails = [None] * k
    index = 0
    node = n
    while node is not None:
        successor = node.next
        if heads[index] is None:
            heads[index] = node
        if tails[index] is not None:
            tails[index].next = node
        tails[index] = node
        node.next = None
        index = (index + 1) % k
        node = successor
    return heads, tails
def evenoddmerge(n):
    """
    Rearrange list *n* in place so the even-position nodes come first,
    followed by the odd-position nodes; return the new head.
    """
    heads, tails = splitmod(n, 2)
    even_tail = tails[0]
    even_tail.next = heads[1]
    return heads[0]
def to_list(n):
    """
    Return the payloads of linked list *n* as a plain Python list.
    """
    values = []
    node = n
    while node is not None:
        values.append(node.x)
        node = node.next
    return values
def from_list(L):
    """
    Build a linked list from the given sequence and return its head
    (None for an empty sequence).
    """
    n = None
    # Iterate back to front so each node can point at the one built before.
    # range() replaces the Python-2-only xrange(): it behaves identically
    # here on Python 2 and keeps the module importable on Python 3.
    for i in range(len(L)-1, -1, -1):
        n = Node(x=L[i], nxt=n)
    return n
def display(n):
    """
    Print a textual view of the linked list, e.g. "0 -> 1 -> 2".
    """
    # print() as a function with a single argument works on both
    # Python 2 and Python 3; the original `print expr` statement is a
    # SyntaxError on Python 3 and broke the whole module there.
    print(' -> '.join(map(str, to_list(n))))
def test():
    """Smoke test: print a 0..10 list before and after even/odd merging."""
    n = from_list(range(11))
    display(n)
    evenoddmerge(n)
    display(n)

def main():
    """Entry point: run the smoke test."""
    test()

if __name__ == '__main__':
    main()
| 18.315789 | 77 | 0.529454 |
45ed0151a6bf3bb087533fc775f6977a8ab11e68
| 11,303 |
py
|
Python
|
src/test/tests/hybrid/timelock.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226 |
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/test/tests/hybrid/timelock.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100 |
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/test/tests/hybrid/timelock.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84 |
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: timelock.py
#
# Tests: mesh - 2D, 3D, curvilinear, single domain
# plots - FilledBoundary, Pseudocolor
# databases - PDB, Silo
#
# Purpose: This test case tests out locking windows in time with multiple
# databases in multiple windows. It makes sure that we get the
# right database correlations and the right time sliders.
#
# Programmer: Brad Whitlock
# Date: Thu Mar 17 09:58:35 PDT 2005
#
# Modifications:
# Brad Whitlock, Wed Mar 23 09:23:53 PDT 2005
# I made it truncate the window information so the scalable rendering flag
# is not included. This way, I don't need mode-specific baselines. I also
# added 2 new test cases to check the window information because one of
# the baseline images in test case 2 was incorrect. Finally, I added code
# in the CleanSlate function to make sure that time locking is off. That
# was what caused the incorrect test case image.
#
# Mark C. Miller, Wed Jan 20 07:37:11 PST 2010
# Added ability to swtich between Silo's HDF5 and PDB data.
#
# Eric Brugger, Fri Jul 30 13:53:48 PDT 2010
# I increased the number of lines of information it prints to 18.
#
# ----------------------------------------------------------------------------
import os
#
# Look at the first few lines of the string representation of the
# WindowInformation to see the list of time sliders, etc.
#
def TestWindowInformation(testname):
    """Record the current window information as a text test result under
    *testname*, truncated to its first 18 lines (so the scalable
    rendering flag is excluded); shorter output is padded with blank
    lines to exactly 18."""
    info_lines = str(GetWindowInformation()).split("\n")
    kept = info_lines[:18]
    truncated = "\n".join(kept) + "\n"
    truncated += "\n" * (18 - len(kept))
    TestText(testname, truncated)
#
# Tests that the database correlations look a certain way.
#
def TestCorrelations(testname):
    """Record a text test under *testname* containing the string form of
    every database correlation, one per correlation, each followed by a
    blank line."""
    text = ""
    for name in GetDatabaseCorrelationNames():
        text += str(GetDatabaseCorrelation(name)) + "\n"
    TestText(testname, text)
def CleanSlate():
    """Reset VisIt to a known state: a single window, no plots, no
    user-made correlations, no open sources, and time locking off."""
    # Delete all but the first window.
    windows = list(GetGlobalAttributes().windows)
    windows.sort()
    for win in windows[1:]:
        SetActiveWindow(win)
        DeleteWindow()

    # Delete all of the plots.
    DeleteAllPlots()

    # Delete all of the database correlations:
    # (correlations named after a source are built-in, keep those).
    sources = GetGlobalAttributes().sources
    cL = GetDatabaseCorrelationNames()
    for name in cL:
        if name not in sources:
            DeleteDatabaseCorrelation(name)

    # Close all of the sources.
    for src in sources:
        CloseDatabase(src)

    # Make sure clone window on first reference is off.
    SetCloneWindowOnFirstRef(0)

    # Make sure that window 1 is not locked in time!
    if GetWindowInformation().lockTime == 1:
        ToggleLockTime()
#
# Returns whether all files in the list are in the current directory.
#
def FilesPresent(files):
    """Return whether all of *files* exist in the current directory.

    *files* may be a tuple/list of names (returns a bool) or a single
    file name (returns 1 or 0).
    """
    current = os.listdir(".")
    if type(files) in (tuple, list):
        found = sum(1 for f in files if f in current)
        return found == len(files)
    # A single file name was given instead of a sequence.
    return 1 if files in current else 0
#
# Waits for all files in the list to be present in the current directory.
#
def WaitForFilesToBePresent(files):
    """Block until every file in *files* exists in the current directory,
    polling once per second.
    NOTE(review): sleep() appears to be the VisIt CLI builtin — confirm."""
    while(FilesPresent(files) == 0): sleep(1)
#
# Remove all .visit files from the current directory.
#
def RemoveAllVisItFiles():
    """Delete every .silo and .visit file in the current directory,
    ignoring any errors raised during removal."""
    for name in os.listdir("."):
        if name.endswith(".silo") or name.endswith(".visit"):
            try:
                os.unlink(name)
            except:
                # Ignore any exceptions
                pass
#
# Set a better view for wave.
#
def SetWaveDatabaseView():
    """Apply a hand-tuned 3D view that frames the wave database nicely."""
    v0 = View3DAttributes()
    v0.viewNormal = (-0.735926, 0.562657, 0.376604)
    v0.focus = (5, 0.753448, 2.5)
    v0.viewUp = (0.454745, 0.822858, -0.340752)
    v0.viewAngle = 30
    v0.parallelScale = 5.6398
    v0.nearPlane = -11.2796
    v0.farPlane = 11.2796
    v0.imagePan = (0.0589778, 0.0898255)
    v0.imageZoom = 1.32552
    v0.perspective = 1
    v0.eyeAngle = 2
    SetView3D(v0)
#
# Set the active window and also set the window's background color so it's
# easy to tell which window we're looking at.
#
def GotoWindow(win):
    """Activate window *win* and give it a distinctive background color
    (red-ish for 1, green-ish for 2, blue-ish otherwise) so screenshots
    show at a glance which window is being viewed."""
    SetActiveWindow(win)
    a = GetAnnotationAttributes()
    backgrounds = {1: (255,200,200,255), 2: (200,255,200,255)}
    a.backgroundColor = backgrounds.get(win, (200,200,255,255))
    SetAnnotationAttributes(a)
#
# Test that we get an active time slider when a correlation is modified
# as a result of locking the window in time.
#
def test1(testindex):
    """Lock three windows in time and verify an active time slider
    appears when the most-suitable database correlation is altered.

    Copies wave.visit into the working directory under three names so
    each window can reference a distinct database.  Returns the next
    free test-image index (testindex + 14).
    """
    TestSection("Make sure we get a time slider when locking a window "
                "causes the most suitable correlation to be altered.")
    SetWindowLayout(4)
    # Turn on "CloneWindowOnFirstRef"
    SetCloneWindowOnFirstRef(1)
    # Copy wave.visit to this directory a few times.
    f = open(silo_data_path("wave.visit") , "rt")
    lines = f.readlines()
    f.close()
    f0 = open("wave.visit","wt")
    f1 = open("wave1.visit","wt")
    f2 = open("wave2.visit","wt")
    for line in lines:
        f0.write(silo_data_path(line))
        f1.write(silo_data_path(line))
        f2.write(silo_data_path(line))
    f0.close()
    f1.close()
    f2.close()
    GotoWindow(1)
    OpenDatabase("wave.visit")
    AddPlot("Pseudocolor", "pressure")
    DrawPlots()
    SetWaveDatabaseView()
    Test("timelock_%02d" % testindex)
    TestWindowInformation("timelock_%02d" % (testindex+1))
    # Go to the next window. The plot should be copied. Replace the database
    # before we draw the plots so we'll be using a different database.
    GotoWindow(2)
    ReplaceDatabase("wave1.visit")
    DrawPlots()
    Test("timelock_%02d" % (testindex+2))
    TestWindowInformation("timelock_%02d" % (testindex+3))
    # Go to the next window. The plot should be copied. Replace the database
    # before we draw the plots so we'll be using a different database.
    GotoWindow(3)
    ReplaceDatabase("wave2.visit")
    DrawPlots()
    Test("timelock_%02d" % (testindex+4))
    TestWindowInformation("timelock_%02d" % (testindex+5))
    # Lock window 1 and 2. This should result in a database correlation.
    GotoWindow(1)
    ToggleLockTime()
    GotoWindow(2)
    ToggleLockTime()
    TestCorrelations("timelock_%02d" % (testindex+6))
    # Lock window 3 in time now also. This should result in the new database
    # correlation being modified to accomodate window 3's database.
    GotoWindow(3)
    ToggleLockTime()
    TestCorrelations("timelock_%02d" % (testindex+7))
    # Change time states and make sure all windows look the same.
    SetTimeSliderState(36)
    Test("timelock_%02d" % (testindex+8))
    TestWindowInformation("timelock_%02d" % (testindex+9))
    GotoWindow(2)
    Test("timelock_%02d" % (testindex+10))
    TestWindowInformation("timelock_%02d" % (testindex+11))
    GotoWindow(1)
    Test("timelock_%02d" % (testindex+12))
    TestWindowInformation("timelock_%02d" % (testindex+13))
    # Get ready for the next test.
    CleanSlate()
    RemoveAllVisItFiles()
    return testindex + 14
#
# Test that time locking works for multiple windows. What we're really
# testing is that the database is copied to the newly referenced window
# even though we have "CloneWindowOnFirstRef" set to off. Defect '6053.
#
def test2(testindex):
    """Verify that the time slider updates both windows when two windows
    are time-locked, even with CloneWindowOnFirstRef turned off.

    Regression test for defect '6053 (the database must still be copied
    to the newly referenced window).  Returns testindex + 7.
    """
    TestSection("Test that the time slider works when time locking multiple windows")
    # Save the incoming annotation attributes so they can be restored
    # at the end of the test.
    a = GetAnnotationAttributes()
    b = GetAnnotationAttributes()
    SetAnnotationAttributes(b)
    SetCloneWindowOnFirstRef(0)
    OpenDatabase(data_path("pdb_test_data/dbA00.pdb"))
    AddPlot("FilledBoundary", "material(mesh)")
    DrawPlots()
    Test("timelock_%02d" % testindex)
    SetWindowLayout(2)
    GotoWindow(2)
    SetAnnotationAttributes(b)
    TestWindowInformation("timelock_%02d" % (testindex+1))
    AddPlot("Pseudocolor", "mesh/nummm")
    DrawPlots()
    Test("timelock_%02d" % (testindex+2))
    # Turn on time locking in both windows.
    GotoWindow(1)
    ToggleLockTime()
    GotoWindow(2)
    ToggleLockTime()
    # See if both windows updated when we changed the time in window 2.
    SetTimeSliderState(5)
    Test("timelock_%02d" % (testindex+3))
    TestWindowInformation("timelock_%02d" % (testindex+4))
    GotoWindow(1)
    ResetView()
    Test("timelock_%02d" % (testindex+5))
    TestWindowInformation("timelock_%02d" % (testindex+6))
    # Get ready for the next test.
    CleanSlate()
    SetAnnotationAttributes(a)
    return testindex + 7
#
# Make sure that replacing into a time-locked window updates the database
# correlation.
#
def test3(testindex):
    """Verify that replacing a database in a time-locked window updates
    the active database correlation.

    Replaces wave.visit with wave_tv.visit at a later time state and
    checks that the other locked windows follow.  Returns testindex + 12.
    """
    TestSection("Make sure replacing into a time-locked window updates "
                "the database correlation.")
    SetWindowLayout(4)
    # Turn on "CloneWindowOnFirstRef"
    SetCloneWindowOnFirstRef(1)
    dbs = (silo_data_path("wave.visit") ,
           silo_data_path("wave_tv.visit") )
    OpenDatabase(dbs[0])
    AddPlot("Pseudocolor", "pressure")
    DrawPlots()
    SetWaveDatabaseView()
    Test("timelock_%02d" % testindex)
    GotoWindow(2)
    DeleteAllPlots()
    AddPlot("FilledBoundary", "Material")
    DrawPlots()
    Test("timelock_%02d" % (testindex+1))
    ToggleLockTime()
    TestWindowInformation("timelock_%02d" % (testindex+2))
    GotoWindow(1)
    ToggleLockTime()
    TestWindowInformation("timelock_%02d" % (testindex+3))
    # Go to window 3 and make sure that certain fields were copied.
    # Window 3 should already be locked in time and it should contain
    # a Pseudocolor plot.
    GotoWindow(3)
    TestWindowInformation("timelock_%02d" % (testindex+4))
    # Replace the database with wave_tv so we can make sure that VisIt can
    # create correlations when it needs to during a replace. Note that we
    # also replace with a later time state. This should cause the time states
    # for the other windows to be updated.
    ReplaceDatabase(dbs[1], 36)
    TestCorrelations("timelock_%02d" % (testindex+5))
    DrawPlots()
    Test("timelock_%02d" % (testindex+6))
    # Test that we change change to the transient variable.
    ChangeActivePlotsVar("transient")
    Test("timelock_%02d" % (testindex+7))
    # Make sure that the time state changed in windows 1,2.
    GotoWindow(2)
    Test("timelock_%02d" % (testindex+8))
    TestWindowInformation("timelock_%02d" % (testindex+9))
    GotoWindow(1)
    Test("timelock_%02d" % (testindex+10))
    TestWindowInformation("timelock_%02d" % (testindex+11))
    # Get ready for the next test.
    CleanSlate()
    return testindex + 12
#
# Run the tests
#
try:
    # Each test function returns the next free image index so the
    # "timelock_NN" names stay unique across the whole suite.
    testindex = 0
    testindex = test1(testindex)
    testindex = test2(testindex)
    testindex = test3(testindex)
except:
    # On any failure, remove the .visit/.silo files copied into the
    # working directory before letting the error propagate.
    RemoveAllVisItFiles()
    raise
Exit()
| 30.384409 | 85 | 0.654959 |
34674a397f3c619050184b148bc8cbc48ab91d72
| 891 |
py
|
Python
|
books/PythonAutomate/webscrap/automate_2048.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonAutomate/webscrap/automate_2048.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonAutomate/webscrap/automate_2048.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
"""automate_2048.py
2048 게임 웹 페이지로 접속 해
게임 종료될 때까지 실행"""
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from time import time
url = "https://gabrielecirulli.github.io/2048/"
driver = webdriver.Chrome()
driver.get(url)
wait = WebDriverWait(driver, 10)
# redirect 될 때까지 wait
wait.until(EC.url_changes(url))
# game-container 생성될때까지 wait
wait.until(lambda x: x.find_element_by_class_name("game-container"))
html_elem = driver.find_element_by_tag_name('html')
while True:
html_elem.send_keys(Keys.RIGHT)
html_elem.send_keys(Keys.DOWN)
html_elem.send_keys(Keys.LEFT)
html_elem.send_keys(Keys.UP)
try:
driver.find_element_by_css_selector('.game-over')
except Exception:
pass
else:
break
| 25.457143 | 68 | 0.754209 |
34807d22c299dce3817b3a3411675e1fdb07443c
| 4,428 |
py
|
Python
|
test/test_npu/test_pin_memory.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_pin_memory.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_pin_memory.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import time
import torch
import torch.npu
import threading
from contextlib import contextmanager
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, PY3
# True when an Ascend NPU backend is available in this torch build.
TEST_NPU = torch.npu.is_available()
# True when at least two NPU devices are present (multi-device tests).
TEST_MULTINPU = TEST_NPU and torch.npu.device_count() >= 2
class TestPinMemory(unittest.TestCase):
    """Tests for pinned (page-locked) host memory with the NPU backend."""

    def setUp(self) -> None:
        # before one test
        pass

    def tearDown(self) -> None:
        # after one test
        pass

    @classmethod
    def setUpClass(cls) -> None:
        # before all test
        pass

    @classmethod
    def tearDownClass(cls) -> None:
        # after all test
        pass

    #@unittest.skipIf(PYTORCH_CUDA_MEMCHECK, "is_pinned uses failure to detect pointer property")
    def test_pin_memory(self):
        """pin_memory() copies data to a new buffer (different pointer,
        same values); pinning an already pinned tensor is a no-op."""
        x = torch.randn(3, 5)
        self.assertFalse(x.is_pinned())
        if not torch.npu.is_available():
            self.assertRaises(RuntimeError, lambda: x.pin_memory())
        else:
            pinned = x.pin_memory()
            print("x:", x )
            print("pinned:", pinned)
            #self.assertTrue(pinned.is_pinned())
            self.assertEqual(pinned.numpy().all(), x.numpy().all())
            self.assertNotEqual(pinned.data_ptr(), x.data_ptr())
            # test that pin_memory on already pinned tensor has no effect
            self.assertIs(pinned.numpy().all(), pinned.pin_memory().numpy().all())
            #self.assertEqual(pinned.data_ptr(), pinned.pin_memory().data_ptr())

    def test_noncontiguous_pinned_memory(self):
        """Pinning a non-contiguous (transposed) tensor preserves values."""
        # See issue #3266
        x = torch.arange(0, 10).view((2, 5))
        self.assertEqual(x.t().tolist(), x.t().pin_memory().tolist())
        self.assertFalse((x.t().numpy()-x.t().pin_memory().numpy()).all())

    def test_caching_pinned_memory(self):
        """The pinned-memory allocator re-uses freed blocks, but must not
        re-use a block while an async copy may still be reading it."""
        #cycles_per_ms = get_cycles_per_ms()
        # check that allocations are re-used after deletion
        t = torch.FloatTensor([1]).pin_memory()
        ptr = t.data_ptr()
        del t
        t = torch.FloatTensor([1]).pin_memory()
        self.assertEqual(t.data_ptr(), ptr, 'allocation not reused')
        # check that the allocation is not re-used if it's in-use by a copy
        npu_tensor = torch.npu.FloatTensor([0])
        #torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
        time.sleep(5)
        npu_tensor.copy_(t, non_blocking=True)
        del t
        t = torch.FloatTensor([1]).pin_memory()
        self.assertNotEqual(t.data_ptr(), ptr, 'allocation re-used too soon')
        self.assertEqual(npu_tensor.tolist(), [1])
        #self.assertEqual(list(npu_tensor), [1])

    @unittest.skipIf(not TEST_MULTINPU, "only one NPU detected")
    def test_caching_pinned_memory_multi_npu(self):
        # checks that the events preventing pinned memory from being re-used
        # too early are recorded on the correct GPU
        #cycles_per_ms = get_cycles_per_ms()
        t = torch.FloatTensor([1]).pin_memory()
        ptr = t.data_ptr()
        npu_tensor0 = torch.npu.FloatTensor([0], device=0)
        npu_tensor1 = torch.npu.FloatTensor([0], device=1)
        with torch.npu.device(1):
            #torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
            time.sleep(5)
            npu_tensor1.copy_(t, non_blocking=True)
        del t
        t = torch.FloatTensor([2]).pin_memory()
        self.assertNotEqual(t.data_ptr(), ptr, 'allocation re-used too soon')
        with torch.npu.device(0):
            npu_tensor0.copy_(t, non_blocking=True)
        self.assertEqual(npu_tensor1[0].item(), 1)
        self.assertEqual(npu_tensor0[0].item(), 2)

    def test_empty_shared(self):
        """share_memory_() on an empty tensor must not fail."""
        t = torch.Tensor()
        t.share_memory_()
if __name__ == "__main__":
unittest.main()
| 35.142857 | 97 | 0.647245 |
942fae3ed46dbae0cca3dfd85152d354955d194f
| 1,454 |
py
|
Python
|
tests/test_zeitreihe.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
tests/test_zeitreihe.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
tests/test_zeitreihe.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
import pytest # type:ignore[import]
from bo4e.bo.zeitreihe import Zeitreihe, ZeitreiheSchema
from bo4e.enum.medium import Medium
from bo4e.enum.mengeneinheit import Mengeneinheit
from bo4e.enum.messart import Messart
from bo4e.enum.messgroesse import Messgroesse
from bo4e.enum.wertermittlungsverfahren import Wertermittlungsverfahren
from tests.serialization_helper import assert_serialization_roundtrip # type:ignore[import]
from tests.test_zeitreihenwert import example_zeitreihenwert # type:ignore[import]
class TestZeitreihe:
    """Tests for the Zeitreihe (time series) business object."""

    @pytest.mark.parametrize(
        "zeitreihe",
        [
            pytest.param(
                Zeitreihe(
                    bezeichnung="Foo",
                    beschreibung="Bar",
                    version="0.0.1",
                    messgroesse=Messgroesse.BLINDLEISTUNG,
                    messart=Messart.MAXIMALWERT,
                    medium=Medium.STROM,
                    einheit=Mengeneinheit.KVARH,
                    wertherkunft=Wertermittlungsverfahren.MESSUNG,
                    werte=[example_zeitreihenwert],
                )
            ),
        ],
    )
    def test_serialization_roundtrip(self, zeitreihe: Zeitreihe):
        """A fully populated Zeitreihe must survive serialize/deserialize."""
        assert_serialization_roundtrip(zeitreihe, ZeitreiheSchema())

    def test_missing_required_attribute(self):
        """Constructing a Zeitreihe with no arguments must fail because
        six attributes are required."""
        with pytest.raises(TypeError) as excinfo:
            _ = Zeitreihe()
        assert "missing 6 required" in str(excinfo.value)
| 36.35 | 92 | 0.652682 |
170352a340119d775fa03a5f227af62c44c28ed0
| 1,281 |
py
|
Python
|
Curso_Python/Secao3-Python-Intermediario-Programacao-Procedural/83criando_lendo_escrevendo_apagando_arquivos/main.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao3-Python-Intermediario-Programacao-Procedural/83criando_lendo_escrevendo_apagando_arquivos/main.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao3-Python-Intermediario-Programacao-Procedural/83criando_lendo_escrevendo_apagando_arquivos/main.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
# https://docs.python.org/3/library/functions.html#open
# Note: 'w+' creates/truncates the file for reading and writing
# Note: 'a+' appends to the end of the file (reading also allowed)
# Note: 'r' opens the file for reading only
# --- Example 1: manual open/close ---
# file = open('abc.txt', 'w+')
# file.write('linha 1\n')
# file.write('linha 2\n')
# file.write('linha 3\n')
#
# file.seek(0, 0) # rewind so read() sees everything
# print('Lendo Linhas')
# print(file.read())
# print('#' * 20)
#
# file.seek(0, 0)
# print(file.readline(), end='')
# print(file.readline(), end='')
# print(file.readline(), end='')
#
# file.seek(0, 0)
#
# print('#' * 20)
# for linha in file.readlines():
#     print(linha, end='')
# file.close()
# --- Example 2: try/finally guarantees the file is closed ---
# try:
#     file = open('abc.txt', 'w+')
#     file.write('Linha')
#     file.seek(0, 0)
#     print(file.read())
# finally:
#     file.close()
# --- Example 3: the preferred way, using a context manager ---
# with open('abc.txt', 'w+') as file:
#     file.write('linha 1 ')
#     file.write('linha 2 ')
#     file.write('linha 3')
#     file.seek(0)
#     print(file.read())
# --- Example 4: read-only ---
# with open('abc.txt', 'r') as file:  # 'r' only reads the file
#     print(file.read())
# --- Live example: append mode ---
with open('abc.txt', 'a+') as file:  # 'a+' appends without erasing the file
    file.write(' Outra linha')
    file.seek(0)
    print(file.read())
| 22.086207 | 72 | 0.587822 |
ca0dc592add60105317749c5655981e83a4511b6
| 417 |
py
|
Python
|
src/onegov/core/orm/types/lowercase_text_type.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/core/orm/types/lowercase_text_type.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/core/orm/types/lowercase_text_type.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from sqlalchemy.types import TypeDecorator, TEXT
from sqlalchemy_utils.operators import CaseInsensitiveComparator
class LowercaseText(TypeDecorator):
    """ Text column that forces all text to be lowercase. """

    impl = TEXT

    # Bug fix: this was misspelled "omparator_factory", which set a
    # meaningless attribute and left SQLAlchemy's default comparator in
    # place, so case-insensitive comparisons were never enabled.
    comparator_factory = CaseInsensitiveComparator

    def process_bind_param(self, value, dialect):
        """Lowercase *value* before it is bound to a statement.

        ``None`` is passed through unchanged so NULLs keep working.
        """
        if value is not None:
            return value.lower()

        return value
| 26.0625 | 64 | 0.726619 |
047910e708b43a859cabff80f6ec49bffed4791e
| 1,830 |
py
|
Python
|
src/balldetection/SimpleBall.py
|
florianletsch/kinect-juggling
|
f320cc0b55adf65d338d25986a03106a7e3f46ef
|
[
"Unlicense",
"MIT"
] | 7 |
2015-11-27T09:53:32.000Z
|
2021-01-13T17:35:54.000Z
|
src/balldetection/SimpleBall.py
|
florianletsch/kinect-juggling
|
f320cc0b55adf65d338d25986a03106a7e3f46ef
|
[
"Unlicense",
"MIT"
] | null | null | null |
src/balldetection/SimpleBall.py
|
florianletsch/kinect-juggling
|
f320cc0b55adf65d338d25986a03106a7e3f46ef
|
[
"Unlicense",
"MIT"
] | null | null | null |
# tmp
import cv
import numpy as np
from src.Util import getcolour
from src.balldetection.Ball import SimpleBall
class SimpleBallFilter(object):
    """Tracks three juggling balls across frames by matching each frame's
    detections to persistent SimpleBall objects."""

    def __init__(self):
        # Persistent SimpleBall instances; created on the first frame
        # that reports exactly three detections.
        self.balls = []

    def filter(self, rgb, depth, ball_list, args=None):
        """Update the tracked balls from this frame's detections.

        ``ball_list`` is a list of dicts with at least 'position' and
        'radius' keys (entries gain a 'used' flag as a side effect).
        Returns ``(rgb, depth, tracked_balls)``; rgb/depth pass through
        unmodified.

        Note: ``args`` is unused; its default was a mutable ``{}`` and
        is now ``None`` (behavior unchanged).
        """
        # first call?
        if not len(self.balls):
            if len(ball_list) == 3:
                for ball in ball_list:
                    self.balls.append(SimpleBall(ball['position'], radius=ball['radius']))
        else:  # find the right ball to update
            # remember which balls have been updated in this frame
            updated = dict(zip(self.balls, [False for _ in self.balls]))
            # update all balls with "close" positions
            for new_ball in ball_list:
                new_ball['used'] = False
                for ball in self.balls:
                    if not updated[ball] and ball.isClose(new_ball):
                        new_ball['used'] = True
                        ball.updatePosition(new_ball['position'])
                        updated[ball] = True
            # now update the balls that were not "close" using the closest
            # of the remaining positions
            non_updated_balls = [b for b in self.balls if not updated[b]]
            non_used_positions = [p for p in ball_list if not p['used']]
            for ball in non_updated_balls:
                if len(non_used_positions) == 0:
                    return rgb, depth, self.balls
                # min() replaces sorted(...)[0]: same first-minimum result,
                # without sorting the whole candidate list.
                pos = min(non_used_positions,
                          key=lambda p: ball.distance(p['position'], ball.futurePosition(True)))
                ball.updatePosition(pos['position'])
                non_used_positions.remove(pos)
            # reset ball updated status
            for ball in self.balls:
                updated[ball] = False
        return rgb, depth, self.balls
| 37.346939 | 122 | 0.571585 |
edb44e83bc75b8818d94bce1a245c1303278ba56
| 476 |
py
|
Python
|
Zh3r0/2021/crypto/import_numpy_as_MT/challenge.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
Zh3r0/2021/crypto/import_numpy_as_MT/challenge.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
Zh3r0/2021/crypto/import_numpy_as_MT/challenge.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
import os
from numpy import random
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
from secret import flag
def rand_32():
    """Return a uniformly random unsigned 32-bit integer from OS entropy."""
    raw = os.urandom(4)
    return int.from_bytes(raw, byteorder='big')
# Pad the flag to the AES block size before encrypting.
flag = pad(flag,16)
for _ in range(2):
    # hate to do it twice, but i dont want people bruteforcing it
    # Re-seed numpy's Mersenne Twister from 32 bits of OS entropy, then
    # derive the IV and key from its output stream.
    random.seed(rand_32())
    iv,key = random.bytes(16), random.bytes(16)
    cipher = AES.new(key,iv=iv,mode=AES.MODE_CBC)
    # Prepend the IV so the layer can be decrypted later.
    flag = iv+cipher.encrypt(flag)
print(flag.hex())
| 21.636364 | 65 | 0.703782 |
6123671b3f9a94a4d6659a1a60dd1b40794dc194
| 451 |
py
|
Python
|
python/asyncio/async_http.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/asyncio/async_http.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/asyncio/async_http.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
import httpx
# Traditional python
def do_stuff():
    """Fetch example.com synchronously and return the decoded JSON body."""
    resp = httpx.get("https://www.example.com/")
    return resp.json()
# With Async
async def do_stuff_async():
    """Fetch example.com asynchronously and return the decoded JSON body.

    Bug fix: the original returned ``resp.json`` (the bound method)
    instead of calling it, so callers received a method object rather
    than parsed data; this now mirrors the synchronous ``do_stuff``.
    """
    async with httpx.AsyncClient() as client:
        resp = await client.get("https://www.example.com")
        return resp.json()
if __name__ == "__main__":
print("Start Async")
print(do_stuff_async())
print("Start Non Async")
print(do_stuff())
| 20.5 | 59 | 0.620843 |
612cf541a873e0ea5aadee712f785ede8e9af48f
| 141 |
py
|
Python
|
3.Python/aromalsanthosh.py
|
tresa2002/Learn-Coding
|
2ad4c8948f77c421a97b58adc00a59bb85c37de0
|
[
"MIT"
] | 8 |
2020-10-01T14:15:53.000Z
|
2021-10-03T06:10:03.000Z
|
3.Python/aromalsanthosh.py
|
tresa2002/Learn-Coding
|
2ad4c8948f77c421a97b58adc00a59bb85c37de0
|
[
"MIT"
] | null | null | null |
3.Python/aromalsanthosh.py
|
tresa2002/Learn-Coding
|
2ad4c8948f77c421a97b58adc00a59bb85c37de0
|
[
"MIT"
] | 29 |
2020-10-02T14:24:53.000Z
|
2020-10-12T10:52:31.000Z
|
# Personal details printed by this introduction script.
name = "Aromal S"
age = 20
username = "aromalsanthosh"

print ('Hello!')
print("Name: {}\nAge: {}\nUsername: {}".format(name, age, username))
| 28.2 | 68 | 0.64539 |
fcd6135249e75ebce7f7a72f26fd53083cc883f6
| 289 |
py
|
Python
|
Yakisizwe/views.py
|
NAL0/nalbt
|
c411ead60fac8923e960e67f4bbad5c7aeffc614
|
[
"MIT"
] | null | null | null |
Yakisizwe/views.py
|
NAL0/nalbt
|
c411ead60fac8923e960e67f4bbad5c7aeffc614
|
[
"MIT"
] | null | null | null |
Yakisizwe/views.py
|
NAL0/nalbt
|
c411ead60fac8923e960e67f4bbad5c7aeffc614
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the Yakisizwe initiative page."""
    return render(request, 'Yakisizwe/initiative.html')
def index2(request):
    """Render the Yakisizwe dashboard page."""
    return render(request, 'Yakisizwe/dash.html')
def index3(request):
    """Render the Yakisizwe education page."""
    return render(request, 'Yakisizwe/Education.html')
| 22.230769 | 55 | 0.747405 |
bd86333e3c64ad680c6b914c23acf242c815fa73
| 5,327 |
py
|
Python
|
PlaidCTF/2021/crypto/Fake_Medallion/carnival.py
|
mystickev/ctf-archives
|
89e99a5cd5fb6b2923cad3fe1948d3ff78649b4e
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
PlaidCTF/2021/crypto/Fake_Medallion/carnival.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
PlaidCTF/2021/crypto/Fake_Medallion/carnival.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-12-19T11:06:24.000Z
|
2021-12-19T11:06:24.000Z
|
from bank import Bank
import random
from os import urandom
FLAG = "PCTF{REDACTED}"
# Game of Razzle
class RazzleGame:
    """A rigged dice game: eight dice are re-rolled until their total
    lands inside [12, 42], so the reported result is always a loss."""

    def __init__(self):
        self.welcome_message = (
            "Welcome to our custom game of razzle! It takes one "
            "medallion for each game. You roll 8 dies and take the "
            "sum of the values rolled. If that sum is less than 12 or "
            "greater than 42, you get $2000! "
            "If you lose, you lose your medallion."
        )

    def play(self):
        # Well, razzle is supposed to be a scam: keep rolling until the
        # dice come up inside the losing band, then report that loss.
        while True:
            rolls = [random.randint(1, 6) for _ in range(8)]
            if 12 <= sum(rolls) <= 42:
                return (False, rolls)
# Carnival, where you use medallions as tokens of entry for our games.
class Carnival:
    """Carnival where medallions (stored in a Bank) are tokens of entry.

    Tracks the remote peer, the user's cash balance, and hands out the
    flag once the balance exceeds $15213.
    """

    def __init__(self, peername):
        self.bank = Bank()
        self.secret = FLAG
        # Starting balance; below the $15213 flag threshold.
        self.user_money = 1024
        self.peername = peername

    def menu(self):
        """Return the welcome banner as raw bytes."""
        # NOTE(review): file handle is never closed explicitly.
        return open('welcome.txt', 'rb').read()

    def help(self):
        """Return the help text as a string."""
        return open('help.txt', 'r').read()

    # Call out the robber's IP address
    def contact_police(self):
        """Report the peer that presented a fake medallion."""
        peer = self.peername[0] + ":" + str(self.peername[1])
        return {'error':
                f"{peer} is trying to rob our carnival with " +
                "fake medallions."}

    # Playing razzle
    def play_razzle(self, med_id):
        """Spend medallion *med_id* on one round of razzle."""
        legit = self.bank.verify_medallion(med_id)
        if not legit:
            return self.contact_police()
        else:
            # Of course, you can't just use our services for free
            razzle = RazzleGame()
            win, res = razzle.play()
            if win:
                self.user_money += 2000
            return {
                'msg': razzle.welcome_message,
                'rolls': res,
                'win': win
            }

    # Clients can buy our carnival's medallion for $1000. If you already
    # have a medallion, please spend it before buying a new one.
    def money_for_medallion(self):
        """Buy a newly issued medallion for $1000."""
        if self.user_money < 1000:
            return {'error': "insufficient funds"}
        self.user_money -= 1000
        med_id = self.bank.new_medallion()
        return {'msg': f"Your new medallion {med_id} now stored at our bank."}

    # Clients can redeem their medallions for $999. The one dollar
    # difference is our competitive handling fee.
    def medallion_for_money(self, med_id):
        """Redeem medallion *med_id* for $999."""
        # Please also destroy the medallion in the process
        legit = self.bank.verify_medallion(med_id)
        if not legit:
            return self.contact_police()
        else:
            # Of course, you can't just use our services for free
            self.user_money += 999
            return {'msg': "Here you go. "}

    # Clients can refresh the system, void all previously
    # owned medallions, and gain a new medallion, for $1. Clients
    # must prove that they previously own at least 1 medallion, though.
    def medallion_for_medallion(self, med_id):
        """Swap medallion *med_id* for a freshly issued one ($1 fee)."""
        if self.user_money < 1:
            return {'error': "insufficient funds"}
        self.user_money -= 1
        # Please also destroy the medallion in the process
        legit = self.bank.verify_medallion(med_id)
        if not legit:
            return self.contact_police()
        else:
            old_medallion = self.bank.get_medallion(med_id)
            self.bank.refresh_bank()
            new_id = self.bank.new_medallion()
            return {'msg': f"New medallion {new_id} created. " +
                    "Your old one was " +
                    old_medallion +
                    ". That one is now invalid."}

    # Our carnival bank offers free-of-charge computers for
    # each bit in the medallion. This is not necessary for
    # ordinary clients of the carnival.
    def play_with_medallion(self, data):
        """Forward a per-bit operation request to the bank."""
        return self.bank.operate_on_medallion(data)

    # Script for interacting with the user
    def interact(self, data):
        """Dispatch one request dict to a handler and return the response."""
        if 'option' not in data:
            return {'error': 'no option selected'}
        if data['option'] == 'help':
            res = {'help': self.help()}
        elif data['option'] == 'money_for_med':
            res = self.money_for_medallion()
        elif data['option'] == 'med_for_money':
            if 'med_id' not in data:
                return {'error': 'incomplete data'}
            res = self.medallion_for_money(int(data['med_id']))
        elif data['option'] == 'med_for_med':
            if 'med_id' not in data:
                return {'error': 'incomplete data'}
            res = self.medallion_for_medallion(int(data['med_id']))
        elif data['option'] == 'play_razzle':
            # NOTE(review): unlike the other medallion options, 'med_id'
            # presence is not checked here before use.
            res = self.play_razzle(int(data['med_id']))
        elif data['option'] == 'op_on_med':
            res = self.play_with_medallion(data)
        else:
            return {'error': 'unrecognized option'}
        if 'error' in res:
            return res
        if self.user_money > 15213:
            res['flag'] = ("We shan't begin to fathom how you " +
                "cheated at our raffle game. To attempt to appease "
                f"you, here is a flag: {self.secret}")
        res['curr_money'] = self.user_money
        return res
| 36.486301 | 78 | 0.56805 |
00f0dc5f82345fa2b5ac9b64dc1efa74948b2162
| 226 |
py
|
Python
|
webinterface/tests/unit_tests/test_model_ScheduleGroup.py
|
monoclecat/Cleaning-Schedule-generator
|
b12fa8a6f834a89b805bf062a0df45279a7a8796
|
[
"MIT"
] | 2 |
2021-11-28T23:04:00.000Z
|
2022-01-13T19:47:45.000Z
|
webinterface/tests/unit_tests/test_model_ScheduleGroup.py
|
monoclecat/Cleaning-Schedule-generator
|
b12fa8a6f834a89b805bf062a0df45279a7a8796
|
[
"MIT"
] | 25 |
2020-03-29T14:40:46.000Z
|
2021-09-22T17:37:15.000Z
|
webinterface/tests/unit_tests/test_model_ScheduleGroup.py
|
monoclecat/cleaning-schedule-management-system
|
b12fa8a6f834a89b805bf062a0df45279a7a8796
|
[
"MIT"
] | 1 |
2020-07-04T11:42:17.000Z
|
2020-07-04T11:42:17.000Z
|
from django.test import TestCase
from webinterface.models import *
class ScheduleGroupTest(TestCase):
    """Unit tests for the ScheduleGroup model."""

    def test__str(self):
        # The string form of a group is simply its name.
        schedule_group = ScheduleGroup(name="test")
        self.assertEqual(str(schedule_group), schedule_group.name)
| 25.111111 | 53 | 0.725664 |
978f22e375467aa43164a00cf6d86426f674e41a
| 217 |
py
|
Python
|
Programming Languages/Python/Theory/100_Python_Challenges/Section_3_List/55. highest number in a list.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 101 |
2021-12-20T11:57:11.000Z
|
2022-03-23T09:49:13.000Z
|
Programming Languages/Python/Theory/100_Python_Challenges/Section_3_List/55. highest number in a list.py
|
Sid-1164/Resources
|
3987dcaeddc8825f9bc79609ff26094282b8ece1
|
[
"MIT"
] | 4 |
2022-01-12T11:55:56.000Z
|
2022-02-12T04:53:33.000Z
|
Programming Languages/Python/Theory/100_Python_Challenges/Section_3_List/55. highest number in a list.py
|
Sid-1164/Resources
|
3987dcaeddc8825f9bc79609ff26094282b8ece1
|
[
"MIT"
] | 38 |
2022-01-12T11:56:16.000Z
|
2022-03-23T10:07:52.000Z
|
"""
Write a Python function that returns the highest number in a list.
input_list = [9,6,45,67,12]
Expected output = 67
"""
def high_num(input_list):
    """Return the highest number in *input_list*.

    Improvement over the original: the caller's list is no longer
    sorted in place (a surprising side effect), and the maximum is
    found in O(n) instead of O(n log n).

    Raises IndexError on an empty list, matching the original behavior.
    """
    if not input_list:
        # Preserve the original's exception type for empty input
        # (sort-then-index raised IndexError).
        raise IndexError("list index out of range")
    return max(input_list)
| 14.466667 | 66 | 0.645161 |
c111cde960b3d308a8d427089d812347cc30b48c
| 7,297 |
py
|
Python
|
test/test_npu/test_allclose.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_allclose.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_allclose.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
def create_all_one_tensor(item):
    """Build matching all-ones CPU and NPU tensors from a
    (dtype, format, shape) spec tuple; format == -1 means "leave the
    NPU tensor in its default format"."""
    spec_dtype, spec_format, spec_shape = item[0], item[1], item[2]
    ones = np.ones(spec_shape).astype(spec_dtype)
    cpu_tensor = torch.from_numpy(ones)
    npu_tensor = torch.from_numpy(ones).to("npu")
    if spec_format != -1:
        npu_tensor = npu_tensor.npu_format_cast(spec_format)
    return cpu_tensor, npu_tensor
class TestAllclose(TestCase):
def cpu_op_exec(self, input_x, input_y):
output = torch.allclose(input_x, input_y)
return output
def npu_op_exec(self, input_x, input_y):
output = torch.allclose(input_x, input_y)
return output
    def test_allclose_random(self, device):
        """torch.allclose on random tensor pairs must agree between the
        CPU reference path and the NPU path, across dtypes and
        (including broadcastable) shapes."""
        test_cases = [
            [[np.float32, -1, (1, 2)], [np.float32, -1, (1, 2)]],
            [[np.float32, -1, (1234, 2234)], [np.float32, -1, (1234, 2234)]],
            [[np.float32, -1, (321, 421, 521)], [np.float32, -1, (421, 521)]],
            [[np.float32, -1, (1, 600)], [np.float32, -1, (400, 200, 1)]],
            [[np.float32, -1, (20, 30, 40, 1)], [np.float32, -1, (30, 40, 50)]],
            [[np.float16, -1, (1, 2)], [np.float16, -1, (1, 2)]],
            [[np.float16, -1, (1234, 2234)], [np.float16, -1, (1234, 2234)]],
            [[np.float16, -1, (321, 421, 521)], [np.float16, -1, (421, 521)]],
            [[np.float16, -1, (1, 600)], [np.float16, -1, (400, 200, 1)]],
            [[np.float16, -1, (20, 30, 40, 1)], [np.float16, -1, (30, 40, 50)]],
            [[np.int8, -1, (1, 2)], [np.int8, -1, (1, 2)]],
            [[np.int8, -1, (1234, 2234)], [np.int8, -1, (1234, 2234)]],
            [[np.int8, -1, (321, 421, 521)], [np.int8, -1, (421, 521)]],
            [[np.int8, -1, (1, 600)], [np.int8, -1, (400, 200, 1)]],
            [[np.int8, -1, (20, 30, 40, 1)], [np.int8, -1, (30, 40, 50)]],
            [[np.uint8, -1, (1, 2)], [np.uint8, -1, (1, 2)]],
            [[np.uint8, -1, (1234, 2234)], [np.uint8, -1, (1234, 2234)]],
            [[np.int32, -1, (1, 2)], [np.int32, -1, (1, 2)]],
            [[np.int32, -1, (1234, 2234)], [np.int32, -1, (1234, 2234)]],
            [[np.int32, -1, (321, 421, 521)], [np.int32, -1, (421, 521)]],
            [[np.int32, -1, (1, 600)], [np.int32, -1, (400, 200, 1)]],
            [[np.int32, -1, (20, 30, 40, 1)], [np.int32, -1, (30, 40, 50)]],
        ]
        for item in test_cases:
            cpu_input_x, npu_input_x = create_common_tensor(item[0], 0, 100)
            cpu_input_y, npu_input_y = create_common_tensor(item[1], 0, 100)
            # fp16 inputs are upcast on the CPU side so the reference
            # comparison runs in fp32.
            if cpu_input_x.dtype == torch.float16:
                cpu_input_x = cpu_input_x.to(torch.float32)
            if cpu_input_y.dtype == torch.float16:
                cpu_input_y = cpu_input_y.to(torch.float32)
            cpu_output = np.array(self.cpu_op_exec(cpu_input_x, cpu_input_y))
            npu_output = np.array(self.npu_op_exec(npu_input_x, npu_input_y))
            self.assertRtolEqual(cpu_output, npu_output)
    def test_allclose_x_equal_y(self, device):
        """Check torch.allclose agrees between CPU and NPU when x == y everywhere.

        Same dtype/shape matrix as test_allclose_random, but both inputs are
        all-ones tensors, so allclose should be True wherever shapes broadcast.
        """
        # Each entry: [[dtype, format, shape_x], [dtype, format, shape_y]].
        test_cases = [
            [[np.float32, -1, (1, 2)], [np.float32, -1, (1, 2)]],
            [[np.float32, -1, (1234, 2234)], [np.float32, -1, (1234, 2234)]],
            [[np.float32, -1, (321, 421, 521)], [np.float32, -1, (421, 521)]],
            [[np.float32, -1, (1, 600)], [np.float32, -1, (400, 200, 1)]],
            [[np.float32, -1, (20, 30, 40, 1)], [np.float32, -1, (30, 40, 50)]],
            [[np.float16, -1, (1, 2)], [np.float16, -1, (1, 2)]],
            [[np.float16, -1, (1234, 2234)], [np.float16, -1, (1234, 2234)]],
            [[np.float16, -1, (321, 421, 521)], [np.float16, -1, (421, 521)]],
            [[np.float16, -1, (1, 600)], [np.float16, -1, (400, 200, 1)]],
            [[np.float16, -1, (20, 30, 40, 1)], [np.float16, -1, (30, 40, 50)]],
            [[np.int8, -1, (1, 2)], [np.int8, -1, (1, 2)]],
            [[np.int8, -1, (1234, 2234)], [np.int8, -1, (1234, 2234)]],
            [[np.int8, -1, (321, 421, 521)], [np.int8, -1, (421, 521)]],
            [[np.int8, -1, (1, 600)], [np.int8, -1, (400, 200, 1)]],
            [[np.int8, -1, (20, 30, 40, 1)], [np.int8, -1, (30, 40, 50)]],
            [[np.uint8, -1, (1, 2)], [np.uint8, -1, (1, 2)]],
            [[np.uint8, -1, (1234, 2234)], [np.uint8, -1, (1234, 2234)]],
            [[np.int32, -1, (1, 2)], [np.int32, -1, (1, 2)]],
            [[np.int32, -1, (1234, 2234)], [np.int32, -1, (1234, 2234)]],
            [[np.int32, -1, (321, 421, 521)], [np.int32, -1, (421, 521)]],
            [[np.int32, -1, (1, 600)], [np.int32, -1, (400, 200, 1)]],
            [[np.int32, -1, (20, 30, 40, 1)], [np.int32, -1, (30, 40, 50)]],
        ]
        for item in test_cases:
            # Project helper returning a (cpu, npu) pair of all-one tensors.
            cpu_input_x, npu_input_x = create_all_one_tensor(item[0])
            cpu_input_y, npu_input_y = create_all_one_tensor(item[1])
            if cpu_input_x.dtype == torch.float16:
                cpu_input_x = cpu_input_x.to(torch.float32)
            if cpu_input_y.dtype == torch.float16:
                cpu_input_y = cpu_input_y.to(torch.float32)
            cpu_output = np.array(self.cpu_op_exec(cpu_input_x, cpu_input_y))
            npu_output = np.array(self.npu_op_exec(npu_input_x, npu_input_y))
            self.assertRtolEqual(cpu_output, npu_output)
def test_allclose_scalar_1(self, device):
input_x = np.array([1e-08]).astype(np.float32)
input_y = np.array([1e-09]).astype(np.float32)
cpu_input_x = torch.from_numpy(input_x)
npu_input_x = torch.from_numpy(input_x).to("npu")
cpu_input_y = torch.from_numpy(input_y)
npu_input_y = torch.from_numpy(input_y).to("npu")
cpu_output = np.array(self.cpu_op_exec(cpu_input_x, cpu_input_y))
npu_output = np.array(self.npu_op_exec(npu_input_x, npu_input_y))
self.assertRtolEqual(cpu_output, npu_output)
def test_allclose_scalar_2(self, device):
input_x = np.array([1e-07]).astype(np.float32)
input_y = np.array([1e-08]).astype(np.float32)
cpu_input_x = torch.from_numpy(input_x)
npu_input_x = torch.from_numpy(input_x).to("npu")
cpu_input_y = torch.from_numpy(input_y)
npu_input_y = torch.from_numpy(input_y).to("npu")
cpu_output = np.array(self.cpu_op_exec(cpu_input_x, cpu_input_y))
npu_output = np.array(self.npu_op_exec(npu_input_x, npu_input_y))
self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestAllclose, globals(), except_for='cpu')
if __name__ == "__main__":
    # NOTE(review): the NPU device id is hard-coded — confirm "npu:3" is
    # intentional rather than an environment-dependent default.
    torch.npu.set_device("npu:3")
    run_tests()
| 48.324503 | 80 | 0.560641 |
c16e8cd8f2aa7de4c3a6b9bb3388a318ef32adbb
| 781 |
py
|
Python
|
source/pkgsrc/lang/python27/patches/patch-Lib_distutils_command_install.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/lang/python27/patches/patch-Lib_distutils_command_install.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/lang/python27/patches/patch-Lib_distutils_command_install.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-Lib_distutils_command_install.py,v 1.1 2018/06/17 19:21:21 adam Exp $
Add a knob (environment variable) for disabling installation of egg metadata
in extensions until we have infrastructure in place for dealing with it.
--- Lib/distutils/command/install.py.orig 2014-12-10 15:59:34.000000000 +0000
+++ Lib/distutils/command/install.py
@@ -666,7 +666,8 @@ class install (Command):
('install_headers', has_headers),
('install_scripts', has_scripts),
('install_data', has_data),
- ('install_egg_info', lambda self:True),
]
+ if not os.environ.has_key('PKGSRC_PYTHON_NO_EGG'):
+ sub_commands += [('install_egg_info', lambda self:True),]
# class install
| 43.388889 | 84 | 0.636364 |
e7858ec610bd27d93a6af538fd260e0a362086c2
| 5,936 |
py
|
Python
|
Co-Simulation/Sumo/sumo-1.7.0/tools/game/DRT/randomRides.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 4 |
2020-11-13T02:35:56.000Z
|
2021-03-29T20:15:54.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/game/DRT/randomRides.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 9 |
2020-12-09T02:12:39.000Z
|
2021-02-18T00:15:28.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/game/DRT/randomRides.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 1 |
2020-11-20T19:31:26.000Z
|
2020-11-20T19:31:26.000Z
|
#!/usr/bin/env python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2010-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file randomRides.py
# @author Jakob Erdmann
# @date 2019-02-24
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import random
import optparse
if 'SUMO_HOME' in os.environ:
sys.path.append(os.path.join(os.environ['SUMO_HOME'], 'tools'))
import sumolib # noqa
def get_options(args=None):
    """Parse command-line options for the random-ride generator.

    Exits with status 1 when mandatory options (-a, -o) are missing, when the
    period is non-positive, or when --poi-output is requested without a net
    file (needed to place the POIs).
    """
    optParser = optparse.OptionParser()
    optParser.add_option("-n", "--net-file", dest="netfile",
                         help="define the net file")
    optParser.add_option("-a", "--additional-files", dest="additional",
                         help="define additional files for loading busStops (mandatory)")
    optParser.add_option("-o", "--output-file", dest="outfile",
                         help="define the output trip filename")
    optParser.add_option("--poi-output", dest="poiout",
                         help="define the output file for busStop pois")
    optParser.add_option("--prefix", dest="tripprefix",
                         default="", help="prefix for the trip ids")
    optParser.add_option("-t", "--trip-attributes", dest="tripattrs",
                         default="", help="additional trip attributes. When generating pedestrians, attributes for " +
                         "<person> and <walk> are supported.")
    optParser.add_option("-b", "--begin", type="float", default=0, help="begin time")
    optParser.add_option("-e", "--end", type="float", default=3600, help="end time (default 3600)")
    optParser.add_option("--poi-offset", dest="poiOffset", type="float",
                         default=12, help="offset of stop-poi from the lane in m")
    optParser.add_option("--initial-duration", dest="duration", type="int", default=5, help="inital stop duration in s")
    optParser.add_option("-p", "--period", type="float", default=1,
                         help="Generate vehicles with equidistant departure times and period=FLOAT (default 1.0).")
    optParser.add_option("-s", "--seed", type="int", help="random seed")
    optParser.add_option("--min-distance", type="float", dest="min_distance",
                         default=0.0, help="require start and end edges for each trip to be at least <FLOAT> m apart")
    optParser.add_option("--max-distance", type="float", dest="max_distance",
                         default=None, help="require start and end edges for each trip to be at most <FLOAT> m " +
                         "apart (default 0 which disables any checks)")
    optParser.add_option("-v", "--verbose", action="store_true",
                         default=False, help="tell me what you are doing")
    (options, args) = optParser.parse_args(args=args)
    if not options.additional or not options.outfile:
        optParser.print_help()
        sys.exit(1)
    if options.period <= 0:
        print("Error: Period must be positive", file=sys.stderr)
        sys.exit(1)
    if options.poiout is not None and options.netfile is None:
        print("Error: poi-output requires a net-file", file=sys.stderr)
        sys.exit(1)
    return options
def main(options):
    """Generate random person trips (stop + ride) between busStops.

    Reads busStops from the additional file, optionally writes one POI per
    stop (colored per stop), then emits one <person> per period until the
    end time, riding between two distinct random stops.
    """
    # Bug fix: "if options.seed:" silently ignored an explicit seed of 0;
    # test against None so every user-supplied seed takes effect.
    if options.seed is not None:
        random.seed(options.seed)
    busStops = [bs.id for bs in sumolib.xml.parse_fast(options.additional, 'busStop', ['id'])]
    stopColors = {}
    if options.poiout:
        colorgen = sumolib.miscutils.Colorgen(('distinct', 'distinct', 'distinct'))
        net = sumolib.net.readNet(options.netfile)
        with open(options.poiout, 'w') as outf:
            outf.write('<additional>\n')
            for bs in sumolib.xml.parse(options.additional, 'busStop'):
                laneShape = net.getLane(bs.lane).getShape()
                sideShape = sumolib.geomhelper.move2side(laneShape, options.poiOffset)
                # Place the POI at the middle of the stop, shifted off the lane.
                offset = (float(bs.startPos) + float(bs.endPos)) / 2
                x, y = sumolib.geomhelper.positionAtShapeOffset(sideShape, offset)
                stopColors[bs.id] = colorgen()
                outf.write(' <poi id="%s" x="%s" y="%s" color="%s" type="%s"/>\n' % (
                    bs.id, x, y, stopColors[bs.id], bs.attr_name))
            outf.write('</additional>\n')
    if len(busStops) < 2:
        print("Error: At least two busStops are required", file=sys.stderr)
        sys.exit(1)
    depart = options.begin
    idx = 0
    with open(options.outfile, 'w') as outf:
        outf.write('<routes>\n')
        while depart < options.end:
            bsFrom = random.choice(busStops)
            bsTo = random.choice(busStops)
            # Re-draw until origin and destination differ.
            while bsTo == bsFrom:
                bsTo = random.choice(busStops)
            color = ""
            if options.poiout:
                color = ' color="%s"' % stopColors[bsTo]
            outf.write(' <person id="%s%s" depart="%s"%s>\n' % (
                options.tripprefix, idx, depart, color))
            outf.write(' <stop busStop="%s" duration="%s"/>\n' % (bsFrom, options.duration))
            outf.write(' <ride busStop="%s" lines="ANY"/>\n' % (bsTo))
            outf.write(' </person>\n')
            depart += options.period
            idx += 1
        outf.write('</routes>\n')
if __name__ == "__main__":
    # Bug fix: main() returns None, so the old "if not main(...)" guard made
    # every run exit with status 1, even successful ones.
    main(get_options())
| 47.111111 | 120 | 0.612702 |
823f87d4e66e931a5dab0c15a9910e75d69e85ff
| 38 |
py
|
Python
|
src/d2py/analysis/numbers.py
|
xinetzone/d2py
|
657362a0451921ef5a7b05b4a8378f7379063cdf
|
[
"Apache-2.0"
] | 3 |
2022-03-09T14:08:42.000Z
|
2022-03-10T04:17:17.000Z
|
src/d2py/analysis/numbers.py
|
xinetzone/d2py
|
657362a0451921ef5a7b05b4a8378f7379063cdf
|
[
"Apache-2.0"
] | 3 |
2021-11-07T13:11:26.000Z
|
2022-03-19T03:28:48.000Z
|
src/d2py/analysis/numbers.py
|
xinetzone/d2py
|
657362a0451921ef5a7b05b4a8378f7379063cdf
|
[
"Apache-2.0"
] | 1 |
2022-03-15T14:18:32.000Z
|
2022-03-15T14:18:32.000Z
|
'''Numeric abstract base classes.
'''
| 19 | 34 | 0.710526 |
68ed763b6c707d4b1c728d96c9755770fa8b98f7
| 482 |
py
|
Python
|
Curso_Python/Secao4-Python-introducao-a-programacao-orientada-a-objetos-POO/094_metodos_de_classes/main.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao4-Python-introducao-a-programacao-orientada-a-objetos-POO/094_metodos_de_classes/main.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao4-Python-introducao-a-programacao-orientada-a-objetos-POO/094_metodos_de_classes/main.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
class Pessoa:
    """A person with a name and an age."""
    ano_atual = 2019  # reference year used for all age arithmetic
    def __init__(self, nome, idade):
        self.nome = nome
        self.idade = idade
    def get_ano_nascimento(self):
        """Print the birth year derived from the current age."""
        print(self.ano_atual - self.idade)
    @classmethod
    def por_ano_nascimento(cls, nome, ano_nascimento):
        """Alternate constructor: build a Pessoa from a birth year."""
        return cls(nome, cls.ano_atual - ano_nascimento)
# Demo: build a Pessoa directly and exercise its methods.
p1 = Pessoa('Pedro', 23)
print(p1)
print(p1.nome, p1.idade)
p1.get_ano_nascimento()
| 20.956522 | 55 | 0.655602 |
2e7878e2f1e5215ba3150b0395451ae92c57e179
| 823 |
py
|
Python
|
sakf/app/sakf/home.py
|
spdir/sakf
|
9a07c5f90765201a42d524dc6d4554f4ccd3c750
|
[
"Apache-2.0"
] | null | null | null |
sakf/app/sakf/home.py
|
spdir/sakf
|
9a07c5f90765201a42d524dc6d4554f4ccd3c750
|
[
"Apache-2.0"
] | null | null | null |
sakf/app/sakf/home.py
|
spdir/sakf
|
9a07c5f90765201a42d524dc6d4554f4ccd3c750
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# HOME Handler
import tornado.web
from sakf.app.sakf import base
class IndexHandler(base.BaseHandlers):
    # Renders the index page for an authenticated, URL-authorized user.
    @tornado.web.authenticated
    @base.auth_url
    def get(self):
        return self.render('sakf/index.html', username=self.session['username'])
    def post(self):
        # POST delegates to GET; NOTE(review): the result is not returned
        # here, unlike HomeHandler.post — presumably harmless, but confirm.
        self.get()
class HomeHandler(base.BaseHandlers):
    # Renders the home page; POST behaves exactly like GET.
    @tornado.web.authenticated
    @base.auth_url
    def get(self):
        return self.render('sakf/home.html')
    def post(self):
        return self.get()
class HomeSettingHandler(base.BaseHandlers):
    # Renders the settings page; POST delegates to GET.
    @tornado.web.authenticated
    @base.auth_url
    def get(self, *args, **kwargs):
        return self.render('sakf/setting.html')
    def post(self, *args, **kwargs):
        self.get()
class TestHandler(base.BaseHandlers):
    # Test page; deliberately unauthenticated (no decorators) — confirm intended.
    def get(self, *args, **kwargs):
        self.render('sakf/test.html')
| 18.704545 | 76 | 0.692588 |
d8f190583d14a2ba69d60f46176ab3bebcda949f
| 5,026 |
py
|
Python
|
x2paddle/project_convertor/pytorch/api_mapper/utils.py
|
usertianqin/X2Paddle
|
b554a8094ca3e255ef4bd2e80337222a35625133
|
[
"Apache-2.0"
] | 559 |
2019-01-14T06:01:55.000Z
|
2022-03-31T02:52:43.000Z
|
x2paddle/project_convertor/pytorch/api_mapper/utils.py
|
usertianqin/X2Paddle
|
b554a8094ca3e255ef4bd2e80337222a35625133
|
[
"Apache-2.0"
] | 353 |
2019-05-07T13:20:03.000Z
|
2022-03-31T05:30:12.000Z
|
x2paddle/project_convertor/pytorch/api_mapper/utils.py
|
usertianqin/X2Paddle
|
b554a8094ca3e255ef4bd2e80337222a35625133
|
[
"Apache-2.0"
] | 241 |
2018-12-25T02:13:51.000Z
|
2022-03-27T23:21:43.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
def api_args2kwargs(pytorch_api_name, args, first_same_attr_count):
    """Convert trailing positional args of a PyTorch API call to keyword args.

    Args:
        pytorch_api_name (str): dotted name of the PyTorch/torchvision API.
        args (list): positional argument list of the call.
        first_same_attr_count (int): number of leading parameters that are
            identical between PyTorch and Paddle; those stay positional and
            only args beyond that index are returned as keyword args.

    Returns:
        dict: parameter-name -> argument for args[first_same_attr_count:].
    """
    def get_default_args(obj):
        # Builtins have no inspectable signature, so parse the parameter
        # names out of the first line of __doc__ ("name(a, b=1, ...) -> ...").
        if inspect.isbuiltin(obj):
            demp_str = obj.__doc__.split("->")[0].strip()[:-1]
            demp_str = demp_str.split("(")[-1]
            demp_str_seg = demp_str.split(",")
            default_args = list()
            for seg in demp_str_seg:
                # Drop "*"/"**" markers and default values; keep bare names.
                seg = seg.strip().replace("*", "")
                if seg == "":
                    continue
                if "=" in seg:
                    seg = seg.split("=")[0]
                default_args.append(seg)
            return default_args
        else:
            signature = inspect.signature(obj)
            return [k for k, v in signature.parameters.items()]
    # Resolve the dotted name to the actual callable, starting from the
    # appropriate root module.
    if pytorch_api_name.startswith("torchvision"):
        import torchvision
        obj = torchvision
    else:
        import torch
        obj = torch
    for i, part in enumerate(pytorch_api_name.split(".")):
        if i == 0:
            continue
        obj = getattr(obj, part)
    default_args = get_default_args(obj)
    new_kwargs = dict()
    for i, default_k in enumerate(default_args):
        if i >= first_same_attr_count and i < len(args):
            new_kwargs[default_k] = args[i]
    return new_kwargs
def rename_key(kwargs, old_key, new_key):
    """Move the value stored under *old_key* to *new_key*, if present."""
    if old_key in kwargs:
        kwargs[new_key] = kwargs.pop(old_key)
def delete_key(kwargs, old_key):
    """Remove *old_key* from *kwargs* when it exists (no-op otherwise)."""
    kwargs.pop(old_key, None)
def generate_api_code(func_name, args, kwargs):
    """Render a call expression string: ``func_name(arg, ..., key=value, ...)``.

    Non-string entries in *args* are stringified in place (preserving the
    original side effect on the caller's list).

    Bug fix: the original emitted a dangling ", " when args were present but
    kwargs were empty (e.g. ``f(x, )``); empty sections are now omitted.
    """
    for i, arg in enumerate(args):
        if not isinstance(args[i], str):
            args[i] = str(args[i])
    parts = []
    if args:
        parts.append(", ".join(args))
    if kwargs:
        parts.append(", ".join("{}={}".format(k, v) for k, v in kwargs.items()))
    return "{}({})".format(func_name, ", ".join(parts))
class Mapper(object):
    """Base helper that maps one PyTorch API call onto its Paddle equivalent.

    Subclasses adjust arguments via check_attrs/process_attrs/delete_attrs;
    run() produces the final Paddle call expression.
    """
    def __init__(self,
                 func_name,
                 pytorch_api_name,
                 args,
                 kwargs,
                 target_name=None):
        self.func_name = func_name
        self.pytorch_api_name = pytorch_api_name
        self.args = args
        self.kwargs = kwargs
        self.target_name = target_name
    def process_attrs(self):
        """Update/rename arguments (hook for subclasses)."""
        pass
    def delete_attrs(self):
        """Delete unsupported arguments (hook for subclasses)."""
        pass
    def check_attrs(self):
        """Validate argument values (hook for subclasses)."""
        pass
    def rename_func_name(self, torch2paddle_func_name=None):
        """If the call uses *args/**kwargs, swap in the x2paddle wrapper name.

        Bug fix: the original condition was ``A and B or C``; operator
        precedence made it fire (and set func_name to None) whenever the last
        positional argument started with "**" even when no replacement name
        was given. Parenthesized here as ``A and (B or C)``.
        """
        if torch2paddle_func_name is not None and (
                (len(self.args) > 0 and isinstance(self.args[0], str) and self.args[0].startswith("*")) or
                (len(self.args) > 1 and isinstance(self.args[-1], str) and self.args[-1].startswith("**"))):
            self.func_name = torch2paddle_func_name
            return True
        return False
    def convert_to_paddle(self):
        """Apply the attr hooks, then render the Paddle call expression."""
        self.check_attrs()
        self.process_attrs()
        self.delete_attrs()
        return [], generate_api_code(self.func_name, self.args, self.kwargs), []
    def convert_args2kwargs(self, first_same_attr_count=0):
        """Convert positional args beyond *first_same_attr_count* to kwargs."""
        if len(self.args) > first_same_attr_count:
            new_kwargs = api_args2kwargs(self.pytorch_api_name, self.args,
                                         first_same_attr_count)
            self.kwargs.update(new_kwargs)
            self.args = self.args[:first_same_attr_count]
    def run(self, torch2paddle_func_name=None):
        """Emit the converted call; bypass attr hooks when *args/**kwargs force
        the x2paddle wrapper API."""
        if self.rename_func_name(torch2paddle_func_name):
            return [], generate_api_code(self.func_name, self.args,
                                         self.kwargs), []
        else:
            return self.convert_to_paddle()
| 32.012739 | 108 | 0.586152 |
41f3308c589ac65783a7b54b4b0ee3560b56b769
| 196 |
py
|
Python
|
repo/script.tv.show.next.aired/default.py
|
dbiesecke/dbiesecke.github.io
|
5894473591f078fd22d1cb33794c5e656ae9b8dd
|
[
"MIT"
] | 1 |
2017-11-26T18:18:46.000Z
|
2017-11-26T18:18:46.000Z
|
repo/script.tv.show.next.aired/default.py
|
dbiesecke/dbiesecke.github.io
|
5894473591f078fd22d1cb33794c5e656ae9b8dd
|
[
"MIT"
] | null | null | null |
repo/script.tv.show.next.aired/default.py
|
dbiesecke/dbiesecke.github.io
|
5894473591f078fd22d1cb33794c5e656ae9b8dd
|
[
"MIT"
] | 3 |
2019-09-30T19:52:05.000Z
|
2020-04-12T21:20:56.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
    script.tv.show.next.aired
    TV Show - Next Aired
    Main script entry point
'''
from resources.lib.main_module import MainModule
# NOTE(review): instantiating MainModule is the whole entry point — the work
# presumably happens in its __init__; confirm in resources/lib/main_module.
MainModule()
| 17.818182 | 48 | 0.668367 |
5ccec55fe2c8f0f2538f3c818a05e112c96a2ccb
| 370 |
py
|
Python
|
interactions/commands/ping.py
|
AuxiliumCDNG/MET-Bot
|
7e00d878c92ad4f68b0b2920235019e14c9ea2fa
|
[
"MIT"
] | null | null | null |
interactions/commands/ping.py
|
AuxiliumCDNG/MET-Bot
|
7e00d878c92ad4f68b0b2920235019e14c9ea2fa
|
[
"MIT"
] | null | null | null |
interactions/commands/ping.py
|
AuxiliumCDNG/MET-Bot
|
7e00d878c92ad4f68b0b2920235019e14c9ea2fa
|
[
"MIT"
] | null | null | null |
import time
import requests
from interactions.create import headers
def run(req, res_url=None, **kwargs):
    """Handle the ping command by PATCHing the deferred interaction response.

    NOTE(review): time.sleep(3) blocks the worker — presumably an artificial
    delay before editing the deferred response; confirm it is needed.
    res_url is assumed to be the interaction-response edit URL; if it is
    None, requests.patch will fail — confirm callers always pass it.
    """
    time.sleep(3)
    # Local name shadows the json module (not imported here, so harmless).
    json = {
        "content": "Meine CPU bedankt sich...und, achja:\nPong :ping_pong:",
        "embeds": [],
        "allowed_mentions": []
    }
    r = requests.patch(res_url, json=json, headers=headers)
    return
| 19.473684 | 80 | 0.589189 |
7a6adfde51eadb2584aead3fed9d735b0b65046f
| 3,602 |
py
|
Python
|
app/t1health_app/static/fusioncharts-suite-xt/integrations/django/samples/fusioncharts/samples/rendering_column_line_area_combi_using_json_example.py
|
siyaochen/Tier1Health
|
536591a7534bbb3fb27fe889bfed9de152ec1864
|
[
"MIT"
] | 30 |
2018-04-01T09:08:40.000Z
|
2022-01-23T07:30:07.000Z
|
app/t1health_app/static/fusioncharts-suite-xt/integrations/django/samples/fusioncharts/samples/rendering_column_line_area_combi_using_json_example.py
|
siyaochen/Tier1Health
|
536591a7534bbb3fb27fe889bfed9de152ec1864
|
[
"MIT"
] | 14 |
2018-07-17T08:33:35.000Z
|
2021-09-29T17:26:15.000Z
|
asset/integrations/django/samples/fusioncharts/samples/rendering_column_line_area_combi_using_json_example.py
|
Piusshungu/catherine-junior-school
|
5356f4ff5a5c8383849d32e22a60d638c35b1a48
|
[
"MIT"
] | 17 |
2016-05-19T13:16:34.000Z
|
2021-04-30T14:38:42.000Z
|
from django.shortcuts import render
from django.http import HttpResponse
# Include the `fusioncharts.py` file which has required functions to embed the charts in html page
from ..fusioncharts import FusionCharts
# Loading Data from a Static JSON String
# It is a example to show a MsCombi 3D chart where data is passed as JSON string format.
# The `chart` method is defined to load chart data from an JSON string.
def chart(request):
    """Render a FusionCharts MsCombi3D chart whose data is an inline JSON string.

    The JSON combines three series over the same country categories: columns
    (2016 actual raise), a line (2017 projected raise) and an area (inflation).
    """
    # Create an object for the mscombi3d chart using the FusionCharts class constructor
    mscombi3dChart = FusionCharts("mscombi3d", "ex3", "100%", 400, "chart-1", "json",
        # The data is passed as a string in the `dataSource` as parameter.
        """{
         "chart": {
            "caption": "Salary Hikes by Country",
            "subCaption": "2016 - 2017",
            "numberSuffix": "%",
            "rotatelabels": "1",
            "theme": "fusion"
        },
        "categories": [{
            "category": [{
                "label": "Australia"
            }, {
                "label": "New-Zealand"
            }, {
                "label": "India"
            }, {
                "label": "China"
            }, {
                "label": "Myanmar"
            }, {
                "label": "Bangladesh"
            }, {
                "label": "Thailand"
            }, {
                "label": "South Korea"
            }, {
                "label": "Hong Kong"
            }, {
                "label": "Singapore"
            }, {
                "label": "Taiwan"
            }, {
                "label": "Vietnam"
            }]
        }],
        "dataset": [{
            "seriesName": "2016 Actual Salary Increase",
            "plotToolText" : "Salaries increased by <b>$dataValue</b> in 2016",
            "data": [{
                "value": "3"
            }, {
                "value": "3"
            }, {
                "value": "10"
            }, {
                "value": "7"
            }, {
                "value": "7.4"
            }, {
                "value": "10"
            }, {
                "value": "5.4"
            }, {
                "value": "4.5"
            }, {
                "value": "4.1"
            }, {
                "value": "4"
            }, {
                "value": "3.7"
            }, {
                "value": "9.3"
            }]
        }, {
            "seriesName": "2017 Projected Salary Increase",
            "plotToolText" : "Salaries expected to increase by <b>$dataValue</b> in 2017",
            "renderAs": "line",
            "data": [{
                "value": "3"
            }, {
                "value": "2.8"
            }, {
                "value": "10"
            }, {
                "value": "6.9"
            }, {
                "value": "6.7"
            }, {
                "value": "9.4"
            }, {
                "value": "5.5"
            }, {
                "value": "5"
            }, {
                "value": "4"
            }, {
                "value": "4"
            }, {
                "value": "4.5"
            }, {
                "value": "9.8"
            }]
        }, {
            "seriesName": "Inflation rate",
            "plotToolText" : "$dataValue projected inflation",
            "renderAs": "area",
            "showAnchors":"0",
            "data": [{
                "value": "1.6"
            }, {
                "value": "0.6"
            }, {
                "value": "5.6"
            }, {
                "value": "2.3"
            }, {
                "value": "7"
            }, {
                "value": "5.6"
            }, {
                "value": "0.2"
            }, {
                "value": "1"
            }, {
                "value": "2.6"
            }, {
                "value": "0"
            }, {
                "value": "1.1"
            }, {
                "value": "2.4"
            }]
        }]
    }""")
    # returning complete JavaScript and HTML code, which is used to generate chart in the browsers.
    return render(request, 'index.html', {'output' : mscombi3dChart.render(), 'chartTitle': 'Multiseries Combination 3D Chart'})
| 25.188811 | 129 | 0.41116 |
8f137644f2bda329ed7716f7c2439204022bee4b
| 418 |
py
|
Python
|
tests/monad_test.py
|
suned/pfun
|
46c460646487abfef897bd9627891f6cf7870774
|
[
"MIT"
] | 126 |
2019-09-16T15:28:20.000Z
|
2022-03-20T10:57:53.000Z
|
tests/monad_test.py
|
suned/pfun
|
46c460646487abfef897bd9627891f6cf7870774
|
[
"MIT"
] | 54 |
2019-09-30T08:44:01.000Z
|
2022-03-20T11:10:00.000Z
|
tests/monad_test.py
|
suned/pfun
|
46c460646487abfef897bd9627891f6cf7870774
|
[
"MIT"
] | 11 |
2020-01-02T08:32:46.000Z
|
2022-03-20T11:10:24.000Z
|
from abc import ABC, abstractmethod
from .functor_test import FunctorTest
class MonadTest(FunctorTest, ABC):
    """Abstract test mixin requiring subclasses to verify the three monad laws
    (left identity, right identity, associativity) on top of the functor laws."""
    @abstractmethod
    def test_right_identity_law(self, *args):
        raise NotImplementedError()
    @abstractmethod
    def test_left_identity_law(self, *args):
        raise NotImplementedError()
    @abstractmethod
    def test_associativity_law(self, *args):
        raise NotImplementedError()
| 23.222222 | 45 | 0.729665 |
56dc4737a7abbdee85ba033e37651ac54b6e2283
| 1,284 |
py
|
Python
|
app/fichas/models.py
|
fxavier/abt-epts
|
021a8140db32afba106a7a9e122b98452d88c225
|
[
"MIT"
] | null | null | null |
app/fichas/models.py
|
fxavier/abt-epts
|
021a8140db32afba106a7a9e122b98452d88c225
|
[
"MIT"
] | null | null | null |
app/fichas/models.py
|
fxavier/abt-epts
|
021a8140db32afba106a7a9e122b98452d88c225
|
[
"MIT"
] | null | null | null |
from django.db import models
from core.models import Provincia, Distrito, UnidadeSanitaria
from patients.models import Paciente
class SituacaoFamilia(models.Model):
    # Family-member record: relationship, age and HIV care status.
    nome = models.CharField(max_length=150)
    parentesco = models.CharField(max_length=100)
    idade = models.IntegerField()
    teste_hiv = models.CharField(max_length=100)
    cuidados_hiv = models.CharField(max_length=100)
    em_ccr = models.CharField(max_length=100)
    nid = models.CharField(max_length=100)
    def __str__(self):
        return self.nome
class CuidadosHiv(models.Model):
    # HIV care history: diagnosis and pre-ART start dates plus origin facility.
    # NOTE(review): DateTimeField is used for what look like calendar dates —
    # confirm DateField was not intended. No __str__ defined, unlike
    # SituacaoFamilia.
    teste_hiv_pos = models.CharField(max_length=100)
    data = models.DateTimeField()
    local = models.CharField(max_length=100)
    diagnostico_pr_criancas = models.CharField(max_length=100)
    data_diagnostico = models.DateTimeField()
    data_inicio_pre_tarv = models.DateTimeField()
    unidade_sanitaria_inicio = models.CharField(max_length=100)
    sector = models.CharField(max_length=100)
    transferido_de = models.CharField(max_length=100)
class FichaResumo(models.Model):
    # Summary sheet: links a patient to the health facility where it was opened.
    data_abertura = models.DateTimeField()
    unidade_sanitaria = models.ForeignKey(UnidadeSanitaria, on_delete=models.CASCADE)
    paciente = models.ForeignKey(Paciente, on_delete=models.CASCADE)
| 35.666667 | 85 | 0.750779 |
a457d313062987a2ec24c479f0b65d77195dcc28
| 1,158 |
py
|
Python
|
ai-api-docker/main.py
|
Zeno-Paukner/cellarius
|
904b88c6dc33cf4ec2f6d70d3e1acf175b11967a
|
[
"Unlicense"
] | 1 |
2021-12-06T20:29:28.000Z
|
2021-12-06T20:29:28.000Z
|
ai-api-docker/main.py
|
Zeno-Paukner/cellarius
|
904b88c6dc33cf4ec2f6d70d3e1acf175b11967a
|
[
"Unlicense"
] | null | null | null |
ai-api-docker/main.py
|
Zeno-Paukner/cellarius
|
904b88c6dc33cf4ec2f6d70d3e1acf175b11967a
|
[
"Unlicense"
] | null | null | null |
from pydantic import BaseModel
from fastapi import FastAPI
import uvicorn
import time
from transformers import pipeline
app = FastAPI()
# Loaded at import time: downloading/loading the 1.3B-parameter GPT-Neo model
# makes startup slow and memory-heavy.
generator = pipeline('text-generation', model='EleutherAI/gpt-neo-1.3B')
class Input_GPT_Neo_1_3B(BaseModel):
    # Request schema for /input: prompt plus GPT-Neo generation parameters.
    prompt: str
    max_length: int
    do_sample: bool
    temperature: float
# Response schema: generated text plus status and timing diagnostics.
class Output(BaseModel):
    output: str
    status: str
    # NOTE(review): "error_massage" is a typo for "error_message", but it is
    # part of the JSON wire format — renaming would break API consumers.
    error_massage: str
    loading_time_seconds: float
@app.post("/input")
async def input_text(input: Input_GPT_Neo_1_3B):
    """Generate text with GPT-Neo using the parameters from the request body.

    Returns an Output with the generated text and the wall-clock time spent.
    """
    start_time = time.time()
    print(input.prompt)
    print(input.max_length)
    print(input.do_sample)
    print(input.temperature)
    print("Loading ...")
    # Bug fix: the request's generation parameters were printed but ignored —
    # the call was hard-coded to max_length=50, do_sample=True, temperature=0.9.
    res = generator(str(input.prompt), max_length=input.max_length,
                    do_sample=input.do_sample, temperature=input.temperature)
    return Output(output=res[0]['generated_text'], status="OK", error_massage="",
                  loading_time_seconds=(time.time() - start_time))
@app.get("/")
async def root():
    """Liveness endpoint: returns a static greeting."""
    return {"message": "Hello World"}
#uvicorn.run(app, host="0.0.0.0", port=8000, root_path="/cellarius/import-emails")
# Starts serving at import time; root_path matches the reverse-proxy prefix.
uvicorn.run(app, host="0.0.0.0", port=8080, root_path="/cellarius/ai")
| 26.318182 | 130 | 0.710708 |
2d1f9034077a8a3d87604cfd7fe4cc8ac3816495
| 719 |
pyde
|
Python
|
sketches/randomwalkturtle1/randomwalkturtle1.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 4 |
2018-06-03T02:11:46.000Z
|
2021-08-18T19:55:15.000Z
|
sketches/randomwalkturtle1/randomwalkturtle1.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | null | null | null |
sketches/randomwalkturtle1/randomwalkturtle1.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 3 |
2019-12-23T19:12:51.000Z
|
2021-04-30T14:00:31.000Z
|
add_library('Turtle')
from random import randint
# Shared state: the walking turtles and the stroke-colour palette they cycle.
turtles = []
colors = [color(18, 184, 116), color(200, 23, 223), color(95, 145, 40),
          color(8, 124, 127)]
def setup():
    # Processing entry point: canvas, title, background and four wrapping turtles.
    size(400, 400)
    this.surface.setTitle(u"Random Walk mit der Schildkröte")
    background(232, 226, 7)
    strokeWeight(2)
    for _ in range(4):
        t = Turtle(this)
        t.setWrapAround(True)  # turtles re-enter on the opposite edge
        turtles.append(t)
def draw():
    # Each frame: every turtle takes a random step (1-5 px) then turns by a
    # random angle (0-360°), drawing with its own palette colour.
    i = 0
    for t in turtles:
        stroke(colors[i % len(colors)])
        distance = randint(1, 5)
        t.forward(distance)
        angle = randint(0, 360)
        t.right(angle)
        i += 1
    # Stop the sketch after 5000 frames.
    if frameCount >= 5000:
        print("I did it, Babe!")
        noLoop()
| 22.46875 | 71 | 0.550765 |
247f8f9d05998f7566d2f457e5fac1935fa2b3f7
| 572 |
py
|
Python
|
Chapter2_Python/11-MatplotlibIntro.py
|
olebause/TensorFlow2
|
70fcb7c85c7ead0dc4f88ffa35be5f2eb93e618e
|
[
"MIT"
] | 2 |
2021-02-10T19:50:27.000Z
|
2021-12-30T06:15:55.000Z
|
Chapter3_Libraries/MatplotlibIntro.py
|
franneck94/UdemyPythonIntro
|
4895a91a04eedce7d59b61bf12e5aa209fe60f85
|
[
"MIT"
] | 1 |
2020-12-21T15:29:20.000Z
|
2022-01-15T12:06:09.000Z
|
Chapter3_Libraries/MatplotlibIntro.py
|
franneck94/UdemyPythonIntro
|
4895a91a04eedce7d59b61bf12e5aa209fe60f85
|
[
"MIT"
] | 4 |
2020-11-08T17:07:53.000Z
|
2022-02-07T06:40:55.000Z
|
# Demo: compare two students' grades per course, first as lines, then scatter.
import matplotlib.pyplot as plt
grades_jan = [56, 64, 78, 100]
grades_ben = [86, 94, 98, 90]
# Plot
plt.plot(range(len(grades_jan)), grades_jan, color="blue")
plt.plot(range(len(grades_ben)), grades_ben, color="red")
plt.legend(["Jan", "Ben"])
plt.xlabel("Course")
plt.ylabel("Grade in %")
plt.title("Jan vs. Ben")
plt.show()
# Scatter
plt.scatter(range(len(grades_jan)), grades_jan, color="blue")
plt.scatter(range(len(grades_ben)), grades_ben, color="red")
plt.legend(["Jan", "Ben"])
plt.xlabel("Course")
plt.ylabel("Grade in %")
plt.title("Jan vs. Ben")
plt.show()
| 23.833333 | 61 | 0.687063 |
24ec72b7f2bf1b8f4a8bb6cdd9c0e844cb435c37
| 315 |
py
|
Python
|
Licence 1/I11/TP5/ex2.py
|
axelcoezard/licence
|
1ed409c4572dea080169171beb7e8571159ba071
|
[
"MIT"
] | 8 |
2020-11-26T20:45:12.000Z
|
2021-11-29T15:46:22.000Z
|
Licence 1/I11/TP5/ex2.py
|
axelcoezard/licence
|
1ed409c4572dea080169171beb7e8571159ba071
|
[
"MIT"
] | null | null | null |
Licence 1/I11/TP5/ex2.py
|
axelcoezard/licence
|
1ed409c4572dea080169171beb7e8571159ba071
|
[
"MIT"
] | 6 |
2020-10-23T15:29:24.000Z
|
2021-05-05T19:10:45.000Z
|
def som_div_propres(n):
    """Return the sum of the proper divisors of n (divisors strictly below n)."""
    return sum(d for d in range(1, n) if n % d == 0)
def est_presque_parfait(n):
    """n is "almost perfect" when its proper divisors sum to n - 1."""
    somme = som_div_propres(n)
    return somme == n - 1
def affiche_presque_parfait(k):
    """Print every almost-perfect number below 2**k."""
    for n in range(2 ** k):
        if est_presque_parfait(n):
            print(n)
affiche_presque_parfait(10)
| 21 | 43 | 0.634921 |
8ea5b0fbe4dd165fd40c70460b51c5b568521a84
| 186 |
py
|
Python
|
Pythonjunior2020/Woche2/Aufgabe_2_5_1.py
|
Zeyecx/HPI-Potsdam
|
ed45ca471cee204dde74dd2c3efae3877ee71036
|
[
"MIT"
] | null | null | null |
Pythonjunior2020/Woche2/Aufgabe_2_5_1.py
|
Zeyecx/HPI-Potsdam
|
ed45ca471cee204dde74dd2c3efae3877ee71036
|
[
"MIT"
] | null | null | null |
Pythonjunior2020/Woche2/Aufgabe_2_5_1.py
|
Zeyecx/HPI-Potsdam
|
ed45ca471cee204dde74dd2c3efae3877ee71036
|
[
"MIT"
] | null | null | null |
# 2.5.1, Woche 2, Block 5, Aufgabe 1
# List of lock colours
schloss = ["rot","grün"]
# Append an element
schloss += ["gelb"]
# Print every element — iterate the list directly instead of the
# non-idiomatic "for i in range(len(schloss)): print(schloss[i])".
for farbe in schloss:
    print(farbe)
| 16.909091 | 36 | 0.650538 |
8ea5c8e8f535ec5973a0285d63daa71ba1e102a4
| 1,092 |
py
|
Python
|
Scholien/migrations/0006_auto_20170531_1640.py
|
wmles/scholarium.at
|
d2356b3e475df772382e035ddcb839fc7dae4305
|
[
"MIT"
] | 1 |
2017-07-24T10:19:36.000Z
|
2017-07-24T10:19:36.000Z
|
Scholien/migrations/0006_auto_20170531_1640.py
|
wmles/scholarium.at
|
d2356b3e475df772382e035ddcb839fc7dae4305
|
[
"MIT"
] | 9 |
2017-07-26T14:16:08.000Z
|
2022-03-11T23:14:40.000Z
|
Scholien/migrations/0006_auto_20170531_1640.py
|
wmles/scholarium.at
|
d2356b3e475df772382e035ddcb839fc7dae4305
|
[
"MIT"
] | 1 |
2017-03-07T12:38:23.000Z
|
2017-03-07T12:38:23.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-05-31 14:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Scholien', '0005_auto_20170510_1631'),
]
operations = [
migrations.RemoveField(
model_name='buechlein',
name='anzahl_epub',
),
migrations.RemoveField(
model_name='buechlein',
name='anzahl_mobi',
),
migrations.RemoveField(
model_name='buechlein',
name='anzahl_pdf',
),
migrations.AddField(
model_name='buechlein',
name='ob_epub',
field=models.BooleanField(default=0),
),
migrations.AddField(
model_name='buechlein',
name='ob_mobi',
field=models.BooleanField(default=0),
),
migrations.AddField(
model_name='buechlein',
name='ob_pdf',
field=models.BooleanField(default=0),
),
]
| 25.395349 | 49 | 0.550366 |
8ee2839b3858c88d6b5571491615519ce7e54556
| 1,222 |
py
|
Python
|
constant.py
|
teastares/or_lab
|
c8fb5c22d31c1e2b93381397202be7b71a3fc796
|
[
"MIT"
] | 1 |
2021-01-18T09:11:59.000Z
|
2021-01-18T09:11:59.000Z
|
constant.py
|
teastares/or_lab
|
c8fb5c22d31c1e2b93381397202be7b71a3fc796
|
[
"MIT"
] | null | null | null |
constant.py
|
teastares/or_lab
|
c8fb5c22d31c1e2b93381397202be7b71a3fc796
|
[
"MIT"
] | null | null | null |
"""
the Constant Variables.
"""
class ConstantSet(object):
"""
The class of constant number.
It doesn't follow the Pascal format since its speciality.
"""
class ConstError(TypeError):
pass
class ConstCaseError(ConstError):
pass
def __setattr__(self, key, value):
if key in self.__dict__:
raise self.ConstError("Can't change const.{0}".format(key))
if not key.isupper():
raise self.ConstCaseError("Const name {0} is not all uppercase".format(key))
self.__dict__[key] = value
const = ConstantSet()
# the category of decision variables
const.CAT_BINARY = "Binary"
const.CAT_CONTINUOUS = "Continuous"
const.CAT_INTEGER = "Integer"
# sense for a constrain
const.SENSE_LEQ = "<="
const.SENSE_EQ = "="
const.SENSE_GEQ = ">="
# sense for a model
const.SENSE_MAX = "Max"
const.SENSE_MIN = "Min"
# the lower and upper bound type of a variable
const.BOUND_TWO_OPEN = 0
const.BOUND_LEFT_OPEN = 1
const.BOUND_RIGHT_OPEN = 2
const.BOUND_TWO_CLOSED = 3
# the status of the model
const.STATUS_UNSOLVED = "Unsolved"
const.STATUS_OPTIMAL = "Optimal"
const.STATUS_NO_SOLUTION = "No feasible solution"
const.STATUS_UNBOUNDED = "Unbounded"
| 23.5 | 88 | 0.693944 |
d92c43b124b654105e5c30116385fb5180587500
| 1,294 |
py
|
Python
|
python/fastcampus/chapter06_04_03.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/fastcampus/chapter06_04_03.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/fastcampus/chapter06_04_03.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
"""파이썬 심화
Asyncio
비동기 I/O Coroutine 작업
- Generator -> 반복적인 객체 Return(yield)
- 즉, 실행 stop -> 다른 작업으로 위임 -> stop 지점부터 재실행 원리
- Non-Blocking 비동기 처리에 적합
"""
# BlockIO -> Thread 사용
# 쓰레드 개수 및 GIL 문제 염두, 공유 메모리 문제 해결
import asyncio
import timeit
import threading
from concurrent.futures.thread import ThreadPoolExecutor
from urllib.request import urlopen
urls = [
"http://daum.net",
"https://google.com",
"https://tistory.com",
"https://github.com",
"https://gmarket.co.kr",
]
async def fetch(url, executor):
print("Thread Name:", threading.current_thread().getName(), "start", url)
res = await loop.run_in_executor(executor, urlopen, url)
print("Thread Name:", threading.current_thread().getName(), "done", url)
return res.read()[0:5]
async def main():
# 쓰레드 풀 생성
executor = ThreadPoolExecutor(max_workers=10)
# asyncio.ensutre_future
futures = [asyncio.ensure_future(fetch(url, executor)) for url in urls]
rst = await asyncio.gather(*futures)
print("Result: ", rst)
if __name__ == "__main__":
start = timeit.default_timer()
# 루프 생성
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
# 완료시간 - 시작시간
duration = timeit.default_timer() - start
# 총 실행 시간
print("Total Time", duration)
| 22.701754 | 77 | 0.665379 |
5c23bf0522785a7c20eb1cd41f71bac55abb8625
| 567 |
py
|
Python
|
04_SQL/commerce/auctions/migrations/0010_auto_20201103_2318.py
|
DaviNakamuraCardoso/Harvard-CS50-Web-Programming
|
afec745eede41f7b294c3ee6ebaff9ac042e5e4c
|
[
"MIT"
] | null | null | null |
04_SQL/commerce/auctions/migrations/0010_auto_20201103_2318.py
|
DaviNakamuraCardoso/Harvard-CS50-Web-Programming
|
afec745eede41f7b294c3ee6ebaff9ac042e5e4c
|
[
"MIT"
] | null | null | null |
04_SQL/commerce/auctions/migrations/0010_auto_20201103_2318.py
|
DaviNakamuraCardoso/Harvard-CS50-Web-Programming
|
afec745eede41f7b294c3ee6ebaff9ac042e5e4c
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-11-03 23:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auctions', '0009_auto_20201103_2037'),
]
operations = [
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(default='', max_length=64),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(default='', max_length=64),
),
]
| 23.625 | 62 | 0.57672 |
5c4496e65947e19d10d9f92b8489cfec3b3c0f0c
| 249 |
py
|
Python
|
angstrom/2019/crypto/Lattice_ZKP/otp.py
|
mystickev/ctf-archives
|
89e99a5cd5fb6b2923cad3fe1948d3ff78649b4e
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
angstrom/2019/crypto/Random_ZKP/otp.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
angstrom/2019/crypto/Random_ZKP/otp.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-12-19T11:06:24.000Z
|
2021-12-19T11:06:24.000Z
|
import numpy as np
from Crypto.Hash import SHAKE256
from Crypto.Util.strxor import strxor
def encrypt(s, flag):
raw = bytes(np.mod(s, 256).tolist())
shake = SHAKE256.new()
shake.update(raw)
pad = shake.read(len(flag))
return strxor(flag, pad)
| 22.636364 | 37 | 0.726908 |
c606ff56a0a592a1d066c60eef9aa11c98015fbb
| 464 |
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch07_recursion_advanced/ex02_edit_distance_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch07_recursion_advanced/ex02_edit_distance_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch07_recursion_advanced/ex02_edit_distance_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
import pytest
from ch07_recursion_advanced.solutions.ex02_edit_distance import edit_distance
@pytest.mark.parametrize("value1, value2, expected",
[("Micha", "Michael", 2),
("Ananas", "Banane", 3)])
def test_edit_distance(value1, value2, expected):
result = edit_distance(value1, value2)
assert result == expected
| 27.294118 | 78 | 0.672414 |
d68420461ad915da1b1a5e9de01f825757c53e82
| 4,861 |
py
|
Python
|
qian_gua.py
|
sambabypapapa/CralwerSet
|
a76e0660c42ce7aac20b8d07ccc454b6636a8a2a
|
[
"Apache-2.0"
] | 5 |
2020-08-17T08:37:16.000Z
|
2021-06-07T05:02:05.000Z
|
qian_gua.py
|
sambabypapapa/CralwerSet
|
a76e0660c42ce7aac20b8d07ccc454b6636a8a2a
|
[
"Apache-2.0"
] | null | null | null |
qian_gua.py
|
sambabypapapa/CralwerSet
|
a76e0660c42ce7aac20b8d07ccc454b6636a8a2a
|
[
"Apache-2.0"
] | 1 |
2021-06-07T05:02:10.000Z
|
2021-06-07T05:02:10.000Z
|
import requests
import json
import time
import traceback
import CralwerSet.connect_mysql as connect_mysql
import datetime
class QianGua():
def __init__(self):
self.loginUrl = "http://api.qian-gua.com/login/Login?_="
self.apiUrl = "http://api.qian-gua.com/v1/Note/GetNoteHotList?_="
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36"
}
self.opt = {
'NoteTags': {
'彩妆': [83],
'护肤': [84],
'洗护香氛': [82],
'时尚穿搭': [77],
'美甲': [101],
'美食饮品': [87],
'母婴育儿': [93],
'旅行住宿': [86],
'健身减肥': [88],
'星座情感': [95],
'动漫': [79],
'萌宠动物': [92],
'萌娃': [94],
'影音娱乐': [80],
'情感两性': [89],
'科技数码': [96],
'出行工具': [97],
'婚嫁': [99],
'居家生活': [90],
'教育': [78],
'摄影': [81],
'医疗养生': [85],
'民生资讯': [91],
'游戏应用': [102],
'赛事': [100],
'其他': [76],
},
"BloggerProps": {
"官方号": [8],
"品牌号": [16],
"明星": [32],
"知名KOL": [4],
"头部达人": [64],
"腰部达人": [128],
"初级达人": [256],
"素人": [512],
},
"NoteType": {
"图文笔记": 'normal',
"视频笔记": 'video'
},
"isBusiness": {
'是': True,
'否': False,
},
"FansSexType": {
"女生多数": 2,
"男生多数": 1,
},
"FansGroups": [
"少男少女",
"新手宝妈",
"潮男潮女",
"轻奢白领",
"恋爱女生",
"爱美少女",
"孕妇妈妈",
"专注护肤党",
"爱买彩妆党",
"网红潮人",
"追星族",
"在校大学生",
"潮男潮女",
"恋爱青年",
"时尚潮人",
"乐活一族",
"摄影技术控",
"社交达人",
"健身男女",
"瘦身男女",
"科技生活党",
"备孕待产",
"文艺青年",
"备孕宝妈",
"工薪阶层",
"品质吃货",
"家庭妇女",
"家有萌娃",
"老手宝妈",
"宅男宅女",
"爱家控",
"流行男女",
"学生党",
"运动控",
"游戏宅男",
"医美一族",
"养生大军",
"爱车一族",
"评价吃货",
"萌宠一族",
"两性学习",
"职场新人",
"中学生",
"大学生",
"二次元萌宅",
"备婚男女",
"赛事球迷",
"其他",
],
"SortType": [
1, 2, 3, 4
]
}
pass
def login(self):
temp = str(int(time.time() * 1000))
data = {"tel": "15990048082", "pwd": "bscm666"}
response = requests.post(self.loginUrl + temp, data=data, headers=self.headers)
self.headers['Cookie'] = "User=" + response.cookies._cookies['.qian-gua.com']['/']['User'].value
def getData(self):
print('开始时间', datetime.datetime.now())
for noteTag in self.opt['NoteTags'].keys():
for bloggerProp in self.opt['BloggerProps'].keys():
for noteType in self.opt['NoteType'].keys():
for isBusiness in self.opt['isBusiness'].keys():
for fansSexType in self.opt['FansSexType'].keys():
for fansGroups in self.opt['FansGroups']:
for sortType in self.opt['SortType']:
data = {"SortType": sortType, "pageIndex": 1, "pageSize": 200, "Days": -1,
"StartTime": '2020-04-30', "EndTime": '2020-04-30', "NoteTags": noteTag,
"BloggerProps": bloggerProp, "NoteType": noteType, "isBusiness": isBusiness,
"FansSexType": fansSexType, "FansGroups": [fansGroups]}
response = requests.post(self.apiUrl + str(int(time.time() * 1000)),
headers=self.headers, data=data, verify=False).text
print(response)
print('结束时间', datetime.datetime.now())
if __name__ == '__main__':
qg = QianGua()
qg.login()
qg.getData()
| 31.980263 | 136 | 0.339436 |
06410df1a75b0e7f9b5721681799019949e270ab
| 281 |
py
|
Python
|
exercises/ja/exc_01_03_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/ja/exc_01_03_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/ja/exc_01_03_02.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
# 日本語クラスをインポートし、nlpオブジェクトを作成
from ____ import ____
nlp = ____
# テキストを処理
doc = ____("私はツリーカンガルーとイルカが好きです。")
# 「ツリーカンガルー」のスライスを選択
tree_kangaroos = ____
print(tree_kangaroos.text)
# 「ツリーカンガルーとイルカ」のスライスを選択
tree_kangaroos_and_dolphins = ____
print(tree_kangaroos_and_dolphins.text)
| 17.5625 | 39 | 0.807829 |
232b5c2d0219808c315a2fafc062c6134b188a5a
| 342 |
py
|
Python
|
python/data_sutram/scraper/parallel_loop.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/data_sutram/scraper/parallel_loop.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/data_sutram/scraper/parallel_loop.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
from multiprocessing import Process
a = 0
b = 0
def loop_a():
global a
while 1:
print("{} = {}".format("a",a))
a += 1
def loop_b():
global b
while 1:
print("{} = {}".format("b",b))
b += 1
if __name__ == '__main__':
Process(target=loop_a).start()
Process(target=loop_b).start()
| 19 | 38 | 0.51462 |
88e5743050ca82c2d2fb4a1b2d7ddfe767fead96
| 542 |
py
|
Python
|
tests/onegov/core/test_filters.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/core/test_filters.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/core/test_filters.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from io import StringIO
from onegov.core.filters import JsxFilter
def test_jsx_filter():
filter = JsxFilter()
filter.setup()
input = StringIO((
'var component = Rect.createClass({'
'render: function() { return <div />; }'
'});'
))
output = StringIO()
filter.input(input, output)
output.seek(0)
assert output.read() == (
'"use strict";'
'var component=Rect.createClass({'
'render:function render(){return React.createElement("div",null)}'
'});'
)
| 22.583333 | 74 | 0.577491 |
aecb0d50eb08663ff9151f0fafbe49b7e960d298
| 32,954 |
py
|
Python
|
Agent5_a_0_5_knots_512_d_0_02/No simulation forward kin/transformations.py
|
schigeru/Bachelorarbeit_Code
|
261b2552221f768e7022abc60a4e5a7d2fedbbae
|
[
"MIT"
] | null | null | null |
Agent5_a_0_5_knots_512_d_0_02/No simulation forward kin/transformations.py
|
schigeru/Bachelorarbeit_Code
|
261b2552221f768e7022abc60a4e5a7d2fedbbae
|
[
"MIT"
] | null | null | null |
Agent5_a_0_5_knots_512_d_0_02/No simulation forward kin/transformations.py
|
schigeru/Bachelorarbeit_Code
|
261b2552221f768e7022abc60a4e5a7d2fedbbae
|
[
"MIT"
] | null | null | null |
from __future__ import division
import warnings
import math
import numpy
# Documentation in HTML format can be generated with Epydoc
__docformat__ = "restructuredtext en"
def identity_matrix():
return numpy.identity(4, dtype=numpy.float64)
def translation_matrix(direction):
"""Return matrix to translate by direction vector.
"""
M = numpy.identity(4)
M[:3, 3] = direction[:3]
return M
def translation_from_matrix(matrix):
"""Return translation vector from translation matrix.
"""
return numpy.array(matrix, copy=False)[:3, 3].copy()
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def reflection_from_matrix(matrix):
"""Return mirror plane point and normal vector from reflection matrix.
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
# normal: unit eigenvector corresponding to eigenvalue -1
l, V = numpy.linalg.eig(M[:3, :3])
i = numpy.where(abs(numpy.real(l) + 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
normal = numpy.real(V[:, i[0]]).squeeze()
# point: any unit eigenvector corresponding to eigenvalue 1
l, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return point, normal
def rotation_matrix(angle, direction, point=None):
"""Return matrix to rotate about axis defined by point and direction.
"""
sina = math.sin(angle)
cosa = math.cos(angle)
direction = unit_vector(direction[:3])
# rotation matrix around unit vector
R = numpy.array(((cosa, 0.0, 0.0),
(0.0, cosa, 0.0),
(0.0, 0.0, cosa)), dtype=numpy.float64)
R += numpy.outer(direction, direction) * (1.0 - cosa)
direction *= sina
R += numpy.array((( 0.0, -direction[2], direction[1]),
( direction[2], 0.0, -direction[0]),
(-direction[1], direction[0], 0.0)),
dtype=numpy.float64)
M = numpy.identity(4)
M[:3, :3] = R
if point is not None:
# rotation not around origin
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
M[:3, 3] = point - numpy.dot(R, point)
return M
def rotation_from_matrix(matrix):
"""Return rotation angle and axis from rotation matrix.
"""
R = numpy.array(matrix, dtype=numpy.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
l, W = numpy.linalg.eig(R33.T)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = numpy.real(W[:, i[-1]]).squeeze()
# point: unit eigenvector of R33 corresponding to eigenvalue of 1
l, Q = numpy.linalg.eig(R)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (numpy.trace(R33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
"""
if direction is None:
# uniform scaling
M = numpy.array(((factor, 0.0, 0.0, 0.0),
(0.0, factor, 0.0, 0.0),
(0.0, 0.0, factor, 0.0),
(0.0, 0.0, 0.0, 1.0)), dtype=numpy.float64)
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
# nonuniform scaling
direction = unit_vector(direction[:3])
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
return M
def scale_from_matrix(matrix):
"""Return scaling factor, origin and direction from scaling matrix.
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
factor = numpy.trace(M33) - 2.0
try:
# direction: unit eigenvector corresponding to eigenvalue factor
l, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(l) - factor) < 1e-8)[0][0]
direction = numpy.real(V[:, i]).squeeze()
direction /= vector_norm(direction)
except IndexError:
# uniform scaling
factor = (factor + 2.0) / 3.0
direction = None
# origin: any eigenvector corresponding to eigenvalue 1
l, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
origin = numpy.real(V[:, i[-1]]).squeeze()
origin /= origin[3]
return factor, origin, direction
def projection_matrix(point, normal, direction=None,
perspective=None, pseudo=False):
"""Return matrix to project onto plane defined by point and normal.
"""
M = numpy.identity(4)
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
normal = unit_vector(normal[:3])
if perspective is not None:
# perspective projection
perspective = numpy.array(perspective[:3], dtype=numpy.float64,
copy=False)
M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
M[:3, :3] -= numpy.outer(perspective, normal)
if pseudo:
# preserve relative depth
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
else:
M[:3, 3] = numpy.dot(point, normal) * perspective
M[3, :3] = -normal
M[3, 3] = numpy.dot(perspective, normal)
elif direction is not None:
# parallel projection
direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
scale = numpy.dot(direction, normal)
M[:3, :3] -= numpy.outer(direction, normal) / scale
M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
else:
# orthogonal projection
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * normal
return M
def projection_from_matrix(matrix, pseudo=False):
"""Return projection plane and perspective point from projection matrix.
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
l, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not pseudo and len(i):
# point: any eigenvector corresponding to eigenvalue 1
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
# direction: unit eigenvector corresponding to eigenvalue 0
l, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(l)) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 0")
direction = numpy.real(V[:, i[0]]).squeeze()
direction /= vector_norm(direction)
# normal: unit eigenvector of M33.T corresponding to eigenvalue 0
l, V = numpy.linalg.eig(M33.T)
i = numpy.where(abs(numpy.real(l)) < 1e-8)[0]
if len(i):
# parallel projection
normal = numpy.real(V[:, i[0]]).squeeze()
normal /= vector_norm(normal)
return point, normal, direction, None, False
else:
# orthogonal projection, where normal equals direction vector
return point, direction, None, None, False
else:
# perspective projection
i = numpy.where(abs(numpy.real(l)) > 1e-8)[0]
if not len(i):
raise ValueError(
"no eigenvector not corresponding to eigenvalue 0")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
normal = - M[3, :3]
perspective = M[:3, 3] / numpy.dot(point[:3], normal)
if pseudo:
perspective -= normal
return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
"""Return matrix to obtain normalized device coordinates from frustrum.
"""
if left >= right or bottom >= top or near >= far:
raise ValueError("invalid frustrum")
if perspective:
if near <= _EPS:
raise ValueError("invalid frustrum: near <= 0")
t = 2.0 * near
M = ((-t/(right-left), 0.0, (right+left)/(right-left), 0.0),
(0.0, -t/(top-bottom), (top+bottom)/(top-bottom), 0.0),
(0.0, 0.0, -(far+near)/(far-near), t*far/(far-near)),
(0.0, 0.0, -1.0, 0.0))
else:
M = ((2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)),
(0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)),
(0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)),
(0.0, 0.0, 0.0, 1.0))
return numpy.array(M, dtype=numpy.float64)
def shear_matrix(angle, direction, point, normal):
"""Return matrix to shear by angle along direction vector on shear plane.
"""
normal = unit_vector(normal[:3])
direction = unit_vector(direction[:3])
if abs(numpy.dot(normal, direction)) > 1e-6:
raise ValueError("direction and normal vectors are not orthogonal")
angle = math.tan(angle)
M = numpy.identity(4)
M[:3, :3] += angle * numpy.outer(direction, normal)
M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction
return M
def shear_from_matrix(matrix):
"""Return shear angle, direction and plane from shear matrix.
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
# normal: cross independent eigenvectors corresponding to the eigenvalue 1
l, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-4)[0]
if len(i) < 2:
raise ValueError("No two linear independent eigenvectors found {}".format(l))
V = numpy.real(V[:, i]).squeeze().T
lenorm = -1.0
for i0, i1 in ((0, 1), (0, 2), (1, 2)):
n = numpy.cross(V[i0], V[i1])
l = vector_norm(n)
if l > lenorm:
lenorm = l
normal = n
normal /= lenorm
# direction and angle
direction = numpy.dot(M33 - numpy.identity(3), normal)
angle = vector_norm(direction)
direction /= angle
angle = math.atan(angle)
# point: eigenvector corresponding to eigenvalue 1
l, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return angle, direction, point, normal
def decompose_matrix(matrix):
"""Return sequence of transformations from transformation matrix.
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
if abs(M[3, 3]) < _EPS:
raise ValueError("M[3, 3] is zero")
M /= M[3, 3]
P = M.copy()
P[:, 3] = 0, 0, 0, 1
if not numpy.linalg.det(P):
raise ValueError("Matrix is singular")
scale = numpy.zeros((3, ), dtype=numpy.float64)
shear = [0, 0, 0]
angles = [0, 0, 0]
if any(abs(M[:3, 3]) > _EPS):
perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
M[:, 3] = 0, 0, 0, 1
else:
perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64)
translate = M[3, :3].copy()
M[3, :3] = 0
row = M[:3, :3].copy()
scale[0] = vector_norm(row[0])
row[0] /= scale[0]
shear[0] = numpy.dot(row[0], row[1])
row[1] -= row[0] * shear[0]
scale[1] = vector_norm(row[1])
row[1] /= scale[1]
shear[0] /= scale[1]
shear[1] = numpy.dot(row[0], row[2])
row[2] -= row[0] * shear[1]
shear[2] = numpy.dot(row[1], row[2])
row[2] -= row[1] * shear[2]
scale[2] = vector_norm(row[2])
row[2] /= scale[2]
shear[1:] /= scale[2]
if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
scale *= -1
row *= -1
angles[1] = math.asin(-row[0, 2])
if math.cos(angles[1]):
angles[0] = math.atan2(row[1, 2], row[2, 2])
angles[2] = math.atan2(row[0, 1], row[0, 0])
else:
#angles[0] = math.atan2(row[1, 0], row[1, 1])
angles[0] = math.atan2(-row[2, 1], row[1, 1])
angles[2] = 0.0
return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
perspective=None):
"""Return transformation matrix from sequence of transformations.
"""
M = numpy.identity(4)
if perspective is not None:
P = numpy.identity(4)
P[3, :] = perspective[:4]
M = numpy.dot(M, P)
if translate is not None:
T = numpy.identity(4)
T[:3, 3] = translate[:3]
M = numpy.dot(M, T)
if angles is not None:
R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
M = numpy.dot(M, R)
if shear is not None:
Z = numpy.identity(4)
Z[1, 2] = shear[2]
Z[0, 2] = shear[1]
Z[0, 1] = shear[0]
M = numpy.dot(M, Z)
if scale is not None:
S = numpy.identity(4)
S[0, 0] = scale[0]
S[1, 1] = scale[1]
S[2, 2] = scale[2]
M = numpy.dot(M, S)
M /= M[3, 3]
return M
def orthogonalization_matrix(lengths, angles):
"""Return orthogonalization matrix for crystallographic cell coordinates.
"""
a, b, c = lengths
angles = numpy.radians(angles)
sina, sinb, _ = numpy.sin(angles)
cosa, cosb, cosg = numpy.cos(angles)
co = (cosa * cosb - cosg) / (sina * sinb)
return numpy.array((
( a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0),
(-a*sinb*co, b*sina, 0.0, 0.0),
( a*cosb, b*cosa, c, 0.0),
( 0.0, 0.0, 0.0, 1.0)),
dtype=numpy.float64)
def superimposition_matrix(v0, v1, scaling=False, usesvd=True):
"""Return matrix to transform given vector set into second vector set.
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
if v0.shape != v1.shape or v0.shape[1] < 3:
raise ValueError("Vector sets are of wrong shape or type.")
# move centroids to origin
t0 = numpy.mean(v0, axis=1)
t1 = numpy.mean(v1, axis=1)
v0 = v0 - t0.reshape(3, 1)
v1 = v1 - t1.reshape(3, 1)
if usesvd:
# Singular Value Decomposition of covariance matrix
u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
# rotation matrix from SVD orthonormal bases
R = numpy.dot(u, vh)
if numpy.linalg.det(R) < 0.0:
# R does not constitute right handed system
R -= numpy.outer(u[:, 2], vh[2, :]*2.0)
s[-1] *= -1.0
# homogeneous transformation matrix
M = numpy.identity(4)
M[:3, :3] = R
else:
# compute symmetric matrix N
xx, yy, zz = numpy.sum(v0 * v1, axis=1)
xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
N = ((xx+yy+zz, yz-zy, zx-xz, xy-yx),
(yz-zy, xx-yy-zz, xy+yx, zx+xz),
(zx-xz, xy+yx, -xx+yy-zz, yz+zy),
(xy-yx, zx+xz, yz+zy, -xx-yy+zz))
# quaternion: eigenvector corresponding to most positive eigenvalue
l, V = numpy.linalg.eig(N)
q = V[:, numpy.argmax(l)]
q /= vector_norm(q) # unit quaternion
q = numpy.roll(q, -1) # move w component to end
# homogeneous transformation matrix
M = quaternion_matrix(q)
# scale: ratio of rms deviations from centroid
if scaling:
v0 *= v0
v1 *= v1
M[:3, :3] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
# translation
M[:3, 3] = t1
T = numpy.identity(4)
T[:3, 3] = -t0
M = numpy.dot(M, T)
return M
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci*ck, ci*sk
sc, ss = si*ck, si*sk
M = numpy.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj*si
M[i, k] = sj*ci
M[j, i] = sj*sk
M[j, j] = -cj*ss+cc
M[j, k] = -cj*cs-sc
M[k, i] = -sj*ck
M[k, j] = cj*sc+cs
M[k, k] = cj*cc-ss
else:
M[i, i] = cj*ck
M[i, j] = sj*sc-cs
M[i, k] = sj*cc+ss
M[j, i] = cj*sk
M[j, j] = sj*ss+cc
M[j, k] = sj*cs-sc
M[k, i] = -sj
M[k, j] = cj*si
M[k, k] = cj*ci
return M
def euler_from_matrix(matrix, axes='sxyz'):
"""Return Euler angles from rotation matrix for specified axis sequence.
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
if repetition:
sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
if sy > _EPS:
ax = math.atan2( M[i, j], M[i, k])
ay = math.atan2( sy, M[i, i])
az = math.atan2( M[j, i], -M[k, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2( sy, M[i, i])
az = 0.0
else:
cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
if cy > _EPS:
ax = math.atan2( M[k, j], M[k, k])
ay = math.atan2(-M[k, i], cy)
az = math.atan2( M[j, i], M[i, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2(-M[k, i], cy)
az = 0.0
if parity:
ax, ay, az = -ax, -ay, -az
if frame:
ax, az = az, ax
return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
"""Return Euler angles from quaternion for specified axis sequence.
"""
return euler_from_matrix(quaternion_matrix(quaternion), axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci*ck
cs = ci*sk
sc = si*ck
ss = si*sk
quaternion = numpy.empty((4, ), dtype=numpy.float64)
if repetition:
quaternion[i] = cj*(cs + sc)
quaternion[j] = sj*(cc + ss)
quaternion[k] = sj*(cs - sc)
quaternion[3] = cj*(cc - ss)
else:
quaternion[i] = cj*sc - sj*cs
quaternion[j] = cj*ss + sj*cc
quaternion[k] = cj*cs - sj*sc
quaternion[3] = cj*cc + sj*ss
if parity:
quaternion[j] *= -1
return quaternion
def quaternion_about_axis(angle, axis):
"""Return quaternion for rotation about axis.
"""
quaternion = numpy.zeros((4, ), dtype=numpy.float64)
quaternion[:3] = axis[:3]
qlen = vector_norm(quaternion)
if qlen > _EPS:
quaternion *= math.sin(angle/2.0) / qlen
quaternion[3] = math.cos(angle/2.0)
return quaternion
def quaternion_matrix(quaternion):
"""Return homogeneous rotation matrix from quaternion.
"""
q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True)
nq = numpy.dot(q, q)
if nq < _EPS:
return numpy.identity(4)
q *= math.sqrt(2.0 / nq)
q = numpy.outer(q, q)
return numpy.array((
(1.0-q[1, 1]-q[2, 2], q[0, 1]-q[2, 3], q[0, 2]+q[1, 3], 0.0),
( q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2], q[1, 2]-q[0, 3], 0.0),
( q[0, 2]-q[1, 3], q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], 0.0),
( 0.0, 0.0, 0.0, 1.0)
), dtype=numpy.float64)
def quaternion_from_matrix(matrix):
    """Return quaternion (x, y, z, w) from a 4x4 homogeneous rotation matrix."""
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
    quat = numpy.empty((4, ), dtype=numpy.float64)
    trace = numpy.trace(M)
    if trace > M[3, 3]:
        # Well-conditioned case: the scalar part dominates.
        quat[0] = M[2, 1] - M[1, 2]
        quat[1] = M[0, 2] - M[2, 0]
        quat[2] = M[1, 0] - M[0, 1]
        quat[3] = trace
    else:
        # Pick the largest diagonal element to keep the sqrt well conditioned.
        i, j, k = 0, 1, 2
        if M[1, 1] > M[0, 0]:
            i, j, k = 1, 2, 0
        if M[2, 2] > M[i, i]:
            i, j, k = 2, 0, 1
        trace = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
        quat[i] = trace
        quat[j] = M[i, j] + M[j, i]
        quat[k] = M[k, i] + M[i, k]
        quat[3] = M[k, j] - M[j, k]
    quat *= 0.5 / math.sqrt(trace * M[3, 3])
    return quat
def quaternion_multiply(quaternion1, quaternion0):
    """Return the Hamilton product quaternion1 * quaternion0 (x, y, z, w order)."""
    x0, y0, z0, w0 = quaternion0
    x1, y1, z1, w1 = quaternion1
    x = x1*w0 + y1*z0 - z1*y0 + w1*x0
    y = -x1*z0 + y1*w0 + z1*x0 + w1*y0
    z = x1*y0 - y1*x0 + z1*w0 + w1*z0
    w = -x1*x0 - y1*y0 - z1*z0 + w1*w0
    return numpy.array((x, y, z, w), dtype=numpy.float64)
def quaternion_conjugate(quaternion):
    """Return conjugate of quaternion: vector part negated, scalar part kept."""
    x, y, z, w = quaternion[0], quaternion[1], quaternion[2], quaternion[3]
    return numpy.array((-x, -y, -z, w), dtype=numpy.float64)
def quaternion_inverse(quaternion):
    """Return multiplicative inverse of quaternion (conjugate / squared norm)."""
    norm_sq = numpy.dot(quaternion, quaternion)
    return quaternion_conjugate(quaternion) / norm_sq
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
    """Return spherical linear interpolation between two quaternions.

    fraction : interpolation parameter (0.0 -> quat0, 1.0 -> quat1).
    spin : additional half-revolutions (multiples of pi) added to the arc.
    shortestpath : when True, negate quat1 if needed so interpolation
        follows the shorter of the two great-circle arcs.
    """
    q0 = unit_vector(quat0[:4])
    q1 = unit_vector(quat1[:4])
    if fraction == 0.0:
        return q0
    elif fraction == 1.0:
        return q1
    d = numpy.dot(q0, q1)
    if abs(abs(d) - 1.0) < _EPS:
        # Quaternions are (anti)parallel: nothing to interpolate.
        return q0
    if shortestpath and d < 0.0:
        # invert rotation
        d = -d
        q1 *= -1.0
    angle = math.acos(d) + spin * math.pi
    if abs(angle) < _EPS:
        return q0
    isin = 1.0 / math.sin(angle)
    # Standard slerp weights; q0 is mutated in place and accumulated into.
    q0 *= math.sin((1.0 - fraction) * angle) * isin
    q1 *= math.sin(fraction * angle) * isin
    q0 += q1
    return q0
def random_quaternion(rand=None):
    """Return uniform random unit quaternion (x, y, z, w).

    rand : optional sequence of three floats in [0, 1); drawn from
        numpy.random when omitted.
    """
    if rand is None:
        rand = numpy.random.rand(3)
    else:
        assert len(rand) == 3
    r1 = numpy.sqrt(1.0 - rand[0])
    r2 = numpy.sqrt(rand[0])
    theta1 = math.pi * 2.0 * rand[1]
    theta2 = math.pi * 2.0 * rand[2]
    return numpy.array((numpy.sin(theta1) * r1,
                        numpy.cos(theta1) * r1,
                        numpy.sin(theta2) * r2,
                        numpy.cos(theta2) * r2), dtype=numpy.float64)
def random_rotation_matrix(rand=None):
    """Return uniform random rotation matrix.

    rand : optional sequence of three floats in [0, 1), forwarded to
        random_quaternion; drawn internally when omitted.
    """
    return quaternion_matrix(random_quaternion(rand))
class Arcball(object):
    """Virtual Trackball Control.

    Maps 2-D mouse drags in window coordinates onto rotations of a virtual
    sphere.  Rotation state is kept as quaternions in (x, y, z, w) order.
    """
    def __init__(self, initial=None):
        """Initialize virtual trackball control.

        initial : quaternion or rotation matrix
        """
        self._axis = None          # constraint axis active during a drag
        self._axes = None          # optional list of unit constraint axes
        self._radius = 1.0         # trackball radius in window coordinates
        self._center = [0.0, 0.0]  # trackball center in window coordinates
        self._vdown = numpy.array([0, 0, 1], dtype=numpy.float64)
        self._constrain = False
        if initial is None:
            # Identity rotation.
            self._qdown = numpy.array([0, 0, 0, 1], dtype=numpy.float64)
        else:
            initial = numpy.array(initial, dtype=numpy.float64)
            if initial.shape == (4, 4):
                self._qdown = quaternion_from_matrix(initial)
            elif initial.shape == (4, ):
                initial /= vector_norm(initial)
                self._qdown = initial
            else:
                raise ValueError("initial not a quaternion or matrix.")
        self._qnow = self._qpre = self._qdown
    def place(self, center, radius):
        """Place Arcball, e.g. when window size changes.

        center : sequence[2]
            Window coordinates of trackball center.
        radius : float
            Radius of trackball in window coordinates.
        """
        self._radius = float(radius)
        self._center[0] = center[0]
        self._center[1] = center[1]
    def setaxes(self, *axes):
        """Set axes to constrain rotations."""
        if axes is None:
            self._axes = None
        else:
            self._axes = [unit_vector(axis) for axis in axes]
    def setconstrain(self, constrain):
        """Set state of constrain to axis mode."""
        self._constrain = constrain == True
    def getconstrain(self):
        """Return state of constrain to axis mode."""
        return self._constrain
    def down(self, point):
        """Set initial cursor window coordinates and pick constrain-axis."""
        self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
        self._qdown = self._qpre = self._qnow
        if self._constrain and self._axes is not None:
            # Snap the press point onto the nearest allowed rotation axis.
            self._axis = arcball_nearest_axis(self._vdown, self._axes)
            self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
        else:
            self._axis = None
    def drag(self, point):
        """Update current cursor window coordinates."""
        vnow = arcball_map_to_sphere(point, self._center, self._radius)
        if self._axis is not None:
            vnow = arcball_constrain_to_axis(vnow, self._axis)
        self._qpre = self._qnow
        # Rotation taking _vdown to vnow: axis from the cross product,
        # scalar part from the dot product.
        t = numpy.cross(self._vdown, vnow)
        if numpy.dot(t, t) < _EPS:
            # Degenerate drag (no movement): keep the press rotation.
            self._qnow = self._qdown
        else:
            q = [t[0], t[1], t[2], numpy.dot(self._vdown, vnow)]
            self._qnow = quaternion_multiply(q, self._qdown)
    def next(self, acceleration=0.0):
        """Continue rotation in direction of last drag."""
        # Extrapolate past qnow by slerping with fraction > 1.
        q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
        self._qpre, self._qnow = self._qnow, q
    def matrix(self):
        """Return homogeneous rotation matrix."""
        return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
    """Return unit sphere coordinates from window coordinates."""
    x = (point[0] - center[0]) / radius
    y = (center[1] - point[1]) / radius
    v = numpy.array((x, y, 0.0), dtype=numpy.float64)
    n = v[0]*v[0] + v[1]*v[1]
    if n > 1.0:
        # Cursor is outside the ball: project onto the sphere's silhouette.
        v /= math.sqrt(n)
    else:
        v[2] = math.sqrt(1.0 - n)
    return v
def arcball_constrain_to_axis(point, axis):
    """Return sphere point perpendicular to axis."""
    projected = numpy.array(point, dtype=numpy.float64, copy=True)
    axis_vec = numpy.array(axis, dtype=numpy.float64, copy=True)
    # Remove the component along the axis so the point lies in the
    # constraint plane.
    projected -= axis_vec * numpy.dot(axis_vec, projected)
    length = vector_norm(projected)
    if length <= _EPS:
        # Degenerate: point was (anti)parallel to axis; pick any
        # perpendicular direction on the sphere.
        if axis_vec[2] == 1.0:
            return numpy.array([1, 0, 0], dtype=numpy.float64)
        return unit_vector([-axis_vec[1], axis_vec[0], 0])
    if projected[2] < 0.0:
        # Keep the point on the visible (front) hemisphere.
        projected *= -1.0
    return projected / length
def arcball_nearest_axis(point, axes):
    """Return axis, which arc is nearest to point."""
    point = numpy.array(point, dtype=numpy.float64, copy=False)
    best_axis = None
    best_score = -1.0
    for candidate in axes:
        # Score each axis by how close its constrained point is to `point`.
        score = numpy.dot(arcball_constrain_to_axis(point, candidate), point)
        if score > best_score:
            best_axis = candidate
            best_score = score
    return best_axis
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles; _NEXT_AXIS[i] gives the axis after i
# (with wrap-around, plus one extra entry so index i-parity+1 stays valid)
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
# ('s' prefix = static/extrinsic frame, 'r' prefix = rotating/intrinsic)
_AXES2TUPLE = {
    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
# reverse lookup: encoded tuple -> axes string
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
# helper functions
def vector_norm(data, axis=None, out=None):
    """Return length, i.e. eucledian norm, of ndarray along axis.

    When `out` is given the result is written into it in place and
    nothing is returned; otherwise the norm is returned (a plain float
    for 1-D input, an array otherwise).
    """
    data = numpy.array(data, dtype=numpy.float64, copy=True)
    if out is not None:
        # In-place variant: square, reduce, then sqrt into `out`.
        data *= data
        numpy.sum(data, axis=axis, out=out)
        numpy.sqrt(out, out)
        return
    if data.ndim == 1:
        # Fast path for plain vectors.
        return math.sqrt(numpy.dot(data, data))
    data *= data
    result = numpy.atleast_1d(numpy.sum(data, axis=axis))
    numpy.sqrt(result, result)
    return result
def unit_vector(data, axis=None, out=None):
    """Return ndarray normalized by length, i.e. eucledian norm, along axis.

    With `out` given, normalization is done in place in `out` and nothing
    is returned; otherwise a new normalized array is returned.
    """
    in_place = out is not None
    if in_place:
        if out is not data:
            out[:] = numpy.array(data, copy=False)
        work = out
    else:
        work = numpy.array(data, dtype=numpy.float64, copy=True)
        if work.ndim == 1:
            # Fast path: plain vector normalized via a dot product.
            work /= math.sqrt(numpy.dot(work, work))
            return work
    length = numpy.atleast_1d(numpy.sum(work*work, axis))
    numpy.sqrt(length, length)
    if axis is not None:
        # Reinsert the reduced axis so the division broadcasts correctly.
        length = numpy.expand_dims(length, axis)
    work /= length
    if not in_place:
        return work
def random_vector(size):
    """Return array of random doubles in the half-open interval [0.0, 1.0)."""
    samples = numpy.random.random(size)
    return samples
def inverse_matrix(matrix):
    """Return inverse of square transformation matrix."""
    inverted = numpy.linalg.inv(matrix)
    return inverted
def concatenate_matrices(*matrices):
    """Return concatenation (left-to-right matrix product) of transformation matrices.

    With no arguments the 4x4 identity is returned.
    """
    result = numpy.identity(4)
    for matrix in matrices:
        result = result.dot(matrix)
    return result
def is_same_transform(matrix0, matrix1):
    """Return True if two matrices perform same transformation.

    Both matrices are divided by their [3, 3] element first, so
    homogeneous matrices differing only by a scalar factor compare equal.
    """
    m0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
    m1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
    m0 /= m0[3, 3]
    m1 /= m1[3, 3]
    return numpy.allclose(m0, m1)
def _import_module(module_name, warn=True, prefix='_py_', ignore='_'):
    """Try import all public attributes from module into global namespace.

    Existing globals that would be shadowed are preserved under
    `prefix` + name.  Attributes starting with `ignore` are skipped.
    Returns True on success; returns None (implicitly) when the import
    fails.  NOTE(review): relies on a module-level `import warnings`
    (not visible in this chunk) -- confirm it is present.
    """
    try:
        module = __import__(module_name)
    except ImportError:
        if warn:
            warnings.warn("Failed to import module " + module_name)
    else:
        for attr in dir(module):
            if ignore and attr.startswith(ignore):
                continue
            if prefix:
                if attr in globals():
                    # Keep the existing (pure-Python) implementation
                    # reachable under the prefixed name before overwriting.
                    globals()[prefix + attr] = globals()[attr]
                elif warn:
                    warnings.warn("No Python implementation of " + attr)
            globals()[attr] = getattr(module, attr)
        return True
| 33.02004 | 85 | 0.558233 |
4e24eefa5ed95b15ce1d39b2d842ad8c412059e7
| 2,018 |
py
|
Python
|
language/grammar.py
|
dr-bigfatnoob/quirk
|
f5025d7139adaf06380c429b436ccbf1e7611a16
|
[
"Unlicense"
] | 1 |
2021-03-05T07:44:05.000Z
|
2021-03-05T07:44:05.000Z
|
language/grammar.py
|
dr-bigfatnoob/quirk
|
f5025d7139adaf06380c429b436ccbf1e7611a16
|
[
"Unlicense"
] | 3 |
2017-06-04T03:01:31.000Z
|
2017-08-04T04:04:37.000Z
|
language/grammar.py
|
dr-bigfatnoob/quirk
|
f5025d7139adaf06380c429b436ccbf1e7611a16
|
[
"Unlicense"
] | null | null | null |
# PEG grammar for the modelling language, compiled with parsimonious.
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "bigfatnoob"
from parsimonious.grammar import Grammar
# Rule summary: a program is newline-separated statements; each statement
# is either a '#' comment or an assignment/expression terminated by ';'.
# Assignments cover decision maps (DM), objectives (Max/Min), decisions
# and plain variables; arithmetic uses the add/mul/expo precedence chain.
definition = """
program = (nl* stmt (nl+ stmt)* nl*)
stmt = comment / ((eq / expr ) sc)
comment = (hash space all)
all = ~r".+"
eq = dec_map_eq / obj_eq / dec_eq / var_eq
dec_map_eq = "DM" space+ token space* "=" space* token space* ("," space* token)*
var_eq = var_lhs space* eq_sign space* var_rhs
var_lhs = token space*
var_rhs = operated
obj_eq = obj_lhs space* eq_sign space* obj_rhs
obj_lhs = direction space+ token
obj_rhs = func_call
dec_eq = dec_lhs space* eq_sign space* dec_rhs
dec_lhs = Decision space* named_token
dec_rhs = dec_node space* dec_rhs1*
dec_rhs1 = space* "," space* dec_node
dec_node = named_term / term
named_term = token ":" term
named_token = token ":" token
expr = model / samples
samples = ("Samples" space* number)
model = ("Model" space token)
term = func_call / number_token / bracketed
bracketed = '(' space* operated space* ')'
operated = dec_node operated1*
operated1 = space* operator space* dec_node
func_call = token space* '(' args ')'
args = number_token space* args1*
args1 = ',' space* number_token
decision_args = term space* (',' space* term)*
mul_div = "*" / "/"
add_sub = "+" / "-"
operator = "+" / "-" / "*" / "/" / "|" / "&" / ">" / "<" / ">=" / "<=" / "==" / "!="
Decision = "Decision"
direction = "Max" / "Min"
number_token = add / token
add = mul space* add1?
add1 = add_sub space* add
mul = expo space* mul1?
mul1 = mul_div space* mul
expo = number space* expo1?
expo1 = "^" space* expo
number = bracket_number / float
bracket_number = "(" space* add space* ")"
sign = "+" / "-"
float = sign? int ("." int)?
int = ~r"[0-9]+"
token = ~r"[a-zA-Z0-9_\-]+"
eq_sign = "="
nl = "\\n"
space = ~r"\s+"
hash = "#"
sc = ";"
"""
# Compiled grammar object consumed by the rest of the package.
grammar = Grammar(definition)
| 30.119403 | 86 | 0.619921 |
9df03785a442ab15d416de2b0e5895fbea327702
| 2,122 |
py
|
Python
|
TwoSum/twoSum.py
|
Themishau/Algorithmen
|
f31627f823eb86f8673e72c4998c9029e74097fb
|
[
"MIT"
] | null | null | null |
TwoSum/twoSum.py
|
Themishau/Algorithmen
|
f31627f823eb86f8673e72c4998c9029e74097fb
|
[
"MIT"
] | null | null | null |
TwoSum/twoSum.py
|
Themishau/Algorithmen
|
f31627f823eb86f8673e72c4998c9029e74097fb
|
[
"MIT"
] | null | null | null |
from typing import List
import sys
import argparse
def convertList(l, dtype):
    """Return a new list with every element of `l` converted via `dtype`."""
    return [dtype(item) for item in l]
class Solution:
    """LeetCode-style "Two Sum" solver."""

    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Return indices [i, j] (i < j) of two entries of `nums` whose sum
        equals `target`, or None when no such pair exists.

        Single pass with a value -> first-index map (O(n) time, O(n) space).

        Bug fixed: the original version had a redundant look-ahead reading
        `nums[i + 1]` on every iteration while catching KeyError instead of
        IndexError, so it crashed with an uncaught IndexError on the last
        element whenever no pair existed.  The look-ahead added nothing (the
        next iteration's main check finds the same pair), so it is removed.
        """
        index_seen = {}  # value -> first index at which it appeared
        for i, num in enumerate(nums):
            complement = target - num
            if complement in index_seen:
                return [index_seen[complement], i]
            # Remember only the first occurrence of each value; a later
            # duplicate can still pair with this stored index.
            if num not in index_seen:
                index_seen[num] = i
        return None
def main(input = "input.txt", output = "output.txt"):
    """Read alternating lines of numbers/targets from the input file, solve
    Two Sum for each (nums, target) pair, and write the results.

    Even-numbered lines (0-based) are comma-separated number lists,
    odd-numbered lines the corresponding comma-separated targets.
    NOTE(review): the `input`/`output` parameters are unused (file names
    come from argparse) and `input` shadows the builtin; the output file
    uses the Windows-only "ANSI" encoding -- confirm both are intended.
    """
    parser = argparse.ArgumentParser(description='Alg')
    parser.add_argument('inputfile', help='input.txt')
    parser.add_argument('outputfile', help='output.txt')
    args = parser.parse_args()
    result = []
    input_data = []
    target = []
    solution = Solution()
    i = 0
    j = 0
    with open(args.inputfile, 'r') as txt:
        for line in txt:
            if i % 2:
                # Odd lines: targets for the preceding number list.
                print(line)
                line = line.split(',')
                line = convertList(line, int)
                target.append(line)
            else:
                # Even lines: the list of candidate numbers.
                print(line)
                line = line.split(',')
                line = convertList(line, int)
                input_data.append(line)
            i += 1
    # Solve every target of every line pair against its number list.
    for i in range(0, len(target)):
        for j in range(0, len(target[i])):
            result.append(solution.twoSum(nums=input_data[i], target=target[i][j]))
    with open(args.outputfile, 'w', encoding="ANSI") as txt:
        for line in result:
            print(line)
            txt.writelines(str(line) + "\n")
if __name__ == '__main__':
main()
| 29.472222 | 85 | 0.504241 |
d18b29de6492d80182b53d0fe82db98f6999e9b6
| 184 |
py
|
Python
|
backend/api/admin.py
|
saulhappy/drf
|
5e62da54cdf0f0fead742c891d34e7eacd488a1b
|
[
"MIT"
] | null | null | null |
backend/api/admin.py
|
saulhappy/drf
|
5e62da54cdf0f0fead742c891d34e7eacd488a1b
|
[
"MIT"
] | null | null | null |
backend/api/admin.py
|
saulhappy/drf
|
5e62da54cdf0f0fead742c891d34e7eacd488a1b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from products.models import Product
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    """Django admin configuration for the Product model."""

    # Columns shown in the admin change-list view.
    list_display = ("title", "content", "price")
| 20.444444 | 48 | 0.76087 |
e12c18b64e4a6c614377848beeb9b4ab0026091e
| 2,878 |
py
|
Python
|
udacity course code/01-06-scatterplots.py
|
bluemurder/mlfl
|
b895b2f1d01b0f6418a5bcee2f204dd7916062f0
|
[
"MIT"
] | 1 |
2021-03-22T22:25:54.000Z
|
2021-03-22T22:25:54.000Z
|
udacity course code/01-06-scatterplots.py
|
bluemurder/mlfl
|
b895b2f1d01b0f6418a5bcee2f204dd7916062f0
|
[
"MIT"
] | 6 |
2017-01-16T09:53:21.000Z
|
2017-01-18T12:20:09.000Z
|
udacity course code/01-06-scatterplots.py
|
bluemurder/mlfl
|
b895b2f1d01b0f6418a5bcee2f204dd7916062f0
|
[
"MIT"
] | null | null | null |
"""Scatterplots."""
import pandas as pd
import matplotlib.pyplot as plt
from util import get_data, plot_data, compute_daily_returns
import numpy as np
def test_run():
    """Plot daily-return scatterplots of SPY vs XOM and SPY vs GLD, fit a
    line to each (printing beta/alpha), and print the correlation matrix.

    NOTE(review): the bare `print` statements below are Python 2 syntax;
    this function is not valid Python 3 as written.
    """
    # Read data
    dates = pd.date_range('2009-01-01', '2012-12-31') # date range as index
    symbols = ['SPY','XOM','GLD']
    df = get_data(symbols, dates) # get data for each symbol
    plot_data(df)
    # Compute daily returns
    daily_returns = compute_daily_returns(df)
    plot_data(daily_returns, title = "Daily returns", ylabel = "Daily returns")
    # Scatterplots SPY versus XOM
    daily_returns.plot(kind = 'scatter', x = 'SPY', y = 'XOM')
    # Degree-1 polyfit returns (slope=beta, intercept=alpha).
    beta_XOM, alpha_XOM = np.polyfit(daily_returns['SPY'], daily_returns['XOM'], 1)
    print "beta_XOM=", beta_XOM
    print "alpha_XOM=", alpha_XOM
    plt.plot(daily_returns['SPY'], beta_XOM * daily_returns['SPY'] + alpha_XOM, '-', color = 'r')
    plt.show()
    # Scatterplots SPY versus GLD
    daily_returns.plot(kind = 'scatter', x = 'SPY', y = 'GLD')
    beta_GLD, alpha_GLD = np.polyfit(daily_returns['SPY'], daily_returns['GLD'], 1)
    print "beta_GLD=", beta_GLD
    print "alpha_GLD=", alpha_GLD
    plt.plot(daily_returns['SPY'], beta_GLD * daily_returns['SPY'] + alpha_GLD, '-', color = 'r')
    plt.show()
    # Comment: beta_XOM is fairly higher than beta_GLD, so XOM is more reactive
    # to the market than GLD.
    # On the other hand, alpha values denote how well the products perform
    # with respect to SPY. In this case, alpha_XOM is negative, and alpha_GLD is
    # positive. This means that GLD performs better.
    # Calculate correlation coefficient
    print daily_returns.corr(method = 'pearson')
# As you have seen in this lesson, the distribution of daily returns for
# stocks and the market look very similar to a Gaussian.
# This property persists when we look at weekly, monthly, and annual returns
# as well.
# If they were really Gaussian we'd say the returns were normally distributed.
# In many cases in financial research we assume the returns are normally distributed.
# But this can be dangerous because it ignores kurtosis or the probability
# in the tails.
# In the early 2000s investment banks built bonds based on mortgages.
# They assumed that the distribution of returns for these mortgages was
# normally distributed.
# On thet basis they were able to show that these bonds had a very low probability of default.
# But they made two mistakes. First, they assumed that the return of each
# of these mortgages was independent; and two that this return would be
# normally distributed.
# Both of these assumptions proved to be wrong, as massive number of omeowners
# defaulted on their mortgages.
# It was these defaults that precipitated the great recession of 2008.
#
if __name__ == "__main__":
test_run()
| 42.955224 | 98 | 0.701529 |
01792e44928053b7f0ed07eeff896423bc547e6e
| 347 |
py
|
Python
|
Licence 1/I11/TP4/ex2.2.py
|
axelcoezard/licence
|
1ed409c4572dea080169171beb7e8571159ba071
|
[
"MIT"
] | 8 |
2020-11-26T20:45:12.000Z
|
2021-11-29T15:46:22.000Z
|
Licence 1/I11/TP4/ex2.2.py
|
axelcoezard/licence
|
1ed409c4572dea080169171beb7e8571159ba071
|
[
"MIT"
] | null | null | null |
Licence 1/I11/TP4/ex2.2.py
|
axelcoezard/licence
|
1ed409c4572dea080169171beb7e8571159ba071
|
[
"MIT"
] | 6 |
2020-10-23T15:29:24.000Z
|
2021-05-05T19:10:45.000Z
|
# Read numbers from the user until 0 is entered; track every entered
# number, count how many were integer-valued, and sum those integers.
# NOTE(review): the terminating 0 is also appended to `liste` (though not
# counted as an integer) -- confirm that is intended.
liste = []
entiers = 0
somme = 0
isZero = False
while not isZero:
    number = float(input("Entrez un nombre: "))
    if number == 0: isZero = True
    elif number - int(number) == 0:
        # Integer-valued input: count it and add it to the running sum.
        entiers += 1
        somme += number
    liste += [number]
print("Liste:", liste)
print("Nombre d'entiers:", entiers)
print("Somme des entiers:", somme)
| 20.411765 | 47 | 0.602305 |
112d8d59da9ccdb144fce94170d0b8bd550ddc45
| 379 |
py
|
Python
|
qmk_firmware/lib/python/qmk/cli/fileformat.py
|
DanTupi/personal_setup
|
911b4951e4d8b78d6ea8ca335229e2e970fda871
|
[
"MIT"
] | 2 |
2021-04-16T23:29:01.000Z
|
2021-04-17T02:26:22.000Z
|
qmk_firmware/lib/python/qmk/cli/fileformat.py
|
DanTupi/personal_setup
|
911b4951e4d8b78d6ea8ca335229e2e970fda871
|
[
"MIT"
] | null | null | null |
qmk_firmware/lib/python/qmk/cli/fileformat.py
|
DanTupi/personal_setup
|
911b4951e4d8b78d6ea8ca335229e2e970fda871
|
[
"MIT"
] | null | null | null |
"""Format files according to QMK's style.
"""
from milc import cli
import subprocess
@cli.subcommand("Format files according to QMK's style.", hidden=True)
def fileformat(cli):
    """Run several general formatting commands.

    Currently only converts line endings of all git-tracked files to Unix
    (LF) via dos2unix; stdout is suppressed and the subprocess's return
    code is passed back so the CLI can propagate failures.
    """
    dos2unix = subprocess.run(['bash', '-c', 'git ls-files -z | xargs -0 dos2unix'], stdout=subprocess.DEVNULL)
    return dos2unix.returncode
| 27.071429 | 111 | 0.704485 |
3a9514c401ce62b23cfb5117230ce8e7945f0385
| 1,304 |
py
|
Python
|
DiceCTF/2021/crypto/garbled/obtain_flag.py
|
mystickev/ctf-archives
|
89e99a5cd5fb6b2923cad3fe1948d3ff78649b4e
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
DiceCTF/2021/crypto/garbled/obtain_flag.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
DiceCTF/2021/crypto/garbled/obtain_flag.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-12-19T11:06:24.000Z
|
2021-12-19T11:06:24.000Z
|
"""
once you've found the input labels which make the circuit return `true`,
then concatenate them together, hash them,
and xor with the provided string to obtain the flag
"""
import hashlib
import json
from yao import evaluate_circuit
from public_data import g_tables
from private_data import keys, flag
def xor(A, B):
    """Return the bytewise XOR of A and B, truncated to the shorter input."""
    out = bytearray()
    for left, right in zip(A, B):
        out.append(left ^ right)
    return bytes(out)
##########################################################
# Load the garbled circuit description shipped with the challenge.
circuit_filename = "circuit.json"
with open(circuit_filename) as json_file:
    circuit = json.load(json_file)
# ?????????????????
# NOTE: the `?????????????????` placeholders below are intentional -- the
# solver must substitute the four recovered input labels; as written this
# file is not valid Python.
inputs = {
    1: ?????????????????,
    2: ?????????????????,
    3: ?????????????????,
    4: ?????????????????
}
evaluation = evaluate_circuit(circuit, g_tables, inputs)
# circuit should return `true`
for i in circuit['outputs']:
    assert evaluation[i] == keys[i][1]
##########################################################
# Derive the flag: SHA-512 the colon-joined labels, XOR with the fixed
# ciphertext, and check it against the known flag.
msg = "{}:{}:{}:{}".format(inputs[1], inputs[2], inputs[3], inputs[4])
msg = msg.encode('ascii')
m = hashlib.sha512()
m.update(msg)
m.digest()
xor_flag = b'\x90),u\x1b\x1dE:\xa8q\x91}&\xc7\x90\xbb\xce]\xf5\x17\x89\xd7\xfa\x07\x86\x83\xfa\x9b^\xcb\xd77\x00W\xca\xceXD7'
print( xor(m.digest(), xor_flag) )
assert xor(m.digest(), xor_flag) == flag
| 22.101695 | 126 | 0.546012 |
3a985cb982d3c57f856a6fb9785c7e10acebeeb9
| 712 |
py
|
Python
|
year_3/comppi_0/publication/views.py
|
honchardev/KPI
|
f8425681857c02a67127ffb05c0af0563a8473e1
|
[
"MIT"
] | null | null | null |
year_3/comppi_0/publication/views.py
|
honchardev/KPI
|
f8425681857c02a67127ffb05c0af0563a8473e1
|
[
"MIT"
] | 21 |
2020-03-24T16:26:04.000Z
|
2022-02-18T15:56:16.000Z
|
year_3/comppi_0/publication/views.py
|
honchardev/KPI
|
f8425681857c02a67127ffb05c0af0563a8473e1
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
import publication.scheduling
import tgbot.botsetup
def indexpublications(request):
    """Render the publications index page."""
    return render(request, 'publicationindex.html')
def publishtext(request):
    """Send the POSTed publication text to the bot's current Telegram chat,
    then redirect back to the publications index.

    NOTE(review): when 'publication_text' is missing, the literal string
    'no text' is published -- confirm that fallback is intended.
    """
    text = request.POST.get('publication_text', 'no text')
    parse_mode = request.POST.get('parse_mode', '')
    # Chat id taken from the module-level bot singleton.
    cid = tgbot.botsetup.TGBotInstance.chat.chatinst.id
    tgbot.botsetup.sendMsg(cid, text, parse_mode)
    return redirect('indexpublications')
def addevent(request):
    """Placeholder handler for scheduling a posting event.

    NOTE(review): the scheduling logic below is commented out, so this
    view currently just redirects back to the index without doing anything.
    """
    return redirect('indexpublications')
    # text = request.POST.get('publication_text', 'no text')
    # time = request.POST.get('event_trime', 'no text')
    # publication.scheduling.addPostingEvent(time, [text])
| 30.956522 | 60 | 0.73736 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.