max_stars_repo_path
stringlengths 4
182
| max_stars_repo_name
stringlengths 6
116
| max_stars_count
int64 0
191k
| id
stringlengths 7
7
| content
stringlengths 100
10k
| size
int64 100
10k
|
---|---|---|---|---|---|
front/tests/test_identity_write_output.py
|
sylvainbonnot/front
| 0 |
2169739
|
# TODO: needs to be headless and minimum dependency
# from seleniumbase import BaseCase
# import time
#
#
#
# class ComponentsTest(BaseCase):
# def test_basic(self):
#
# # open the app and take a screenshot
# self.open("http://localhost:8501")
#
# time.sleep(5)
# self.check_window(name="first_test", level=2)
#
#
# self.assert_text("Identity")
| 390 |
LeetCode/18-4Sum/4Sum.py
|
hscspring/TheAlgorithms-Python
| 10 |
2169696
|
class Solution(object):
    """4Sum solved by reduction: fix one element, run 3Sum on the remainder."""

    def fourSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        if len(nums) < 4:
            return []
        if len(nums) == 4 and sum(nums) == target:
            return [nums]
        candidates = []
        for idx, pivot in enumerate(nums):
            remaining = nums[:idx] + nums[idx + 1:]
            # threeSum (module-level helper) finds triples summing to target - pivot.
            for triple in threeSum(remaining, target - pivot):
                triple.append(pivot)
                if sum(triple) == target:
                    candidates.append(sorted(triple))
        # Sort so duplicates are adjacent, then keep only the first of each run.
        deduped = []
        for quad in sorted(candidates):
            if not deduped or deduped[-1] != quad:
                deduped.append(quad)
        return deduped
def threeSum(nums, target):
    """Return all unique triples from nums that sum to target.

    Classic sort + two-pointer sweep: anchor the smallest element, then walk
    a low/high pointer pair over the rest. Duplicate anchors and duplicate
    pointer values are skipped so each triple appears once.
    """
    ordered = sorted(nums)
    n = len(ordered)
    triples = []
    for first in range(n - 2):
        # Skip repeated anchor values to avoid duplicate triples.
        if first > 0 and ordered[first] == ordered[first - 1]:
            continue
        lo, hi = first + 1, n - 1
        while lo < hi:
            total = ordered[first] + ordered[lo] + ordered[hi]
            if total < target:
                lo += 1
            elif total > target:
                hi -= 1
            else:
                triples.append([ordered[first], ordered[lo], ordered[hi]])
                # Step both pointers past any runs of equal values.
                while lo < hi and ordered[lo] == ordered[lo + 1]:
                    lo += 1
                while lo < hi and ordered[hi] == ordered[hi - 1]:
                    hi -= 1
                lo += 1
                hi -= 1
    return triples
class Solution(object):
    """4Sum with two fixed indices plus a two-pointer sweep over the tail."""

    def fourSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        ordered = sorted(nums)
        n = len(ordered)
        found = []
        for a in range(n - 3):
            for b in range(a + 1, n - 2):
                # Same (weak) duplicate skip as the original; the final
                # dedup pass below guarantees unique output regardless.
                if a > 0 and ordered[a] == ordered[a - 1] and ordered[b] == ordered[b - 1]:
                    continue
                lo, hi = b + 1, n - 1
                while lo < hi:
                    total = ordered[a] + ordered[b] + ordered[lo] + ordered[hi]
                    if total < target:
                        lo += 1
                    elif total > target:
                        hi -= 1
                    else:
                        found.append([ordered[a], ordered[b], ordered[lo], ordered[hi]])
                        while lo < hi and ordered[lo] == ordered[lo + 1]:
                            lo += 1
                        while lo < hi and ordered[hi] == ordered[hi - 1]:
                            hi -= 1
                        lo += 1
                        hi -= 1
        # Sort candidates and drop adjacent duplicates.
        result = []
        previous = None
        for quad in sorted(found):
            if quad != previous:
                result.append(quad)
                previous = quad
        return result
| 2,686 |
FSM/state_machine.py
|
SC2-ND-bot/The-PPPP-Bot
| 0 |
2168838
|
class StateMachine:
    """Minimal finite-state machine driven one step at a time.

    Handlers are callables keyed by state name; each handler receives the
    game object and returns the name of the next state.
    """

    def __init__(self):
        self.handlers = {}        # state name -> handler callable
        self.startState = None    # set via set_start()
        self.endStates = []       # names of terminal states
        self.currentState = None  # lazily initialised on first run_step()

    def add_state(self, name, handler, endState=False):
        """Register *handler* for state *name*; mark the state terminal if endState."""
        self.handlers[name] = handler
        if endState:
            self.endStates.append(name)

    def set_start(self, name):
        """Choose the state entered on the first run_step()."""
        self.startState = name

    def run_step(self, gameObject):
        """Execute one transition by calling the current state's handler.

        Raises:
            KeyError: if no start state was set before stepping.
            Exception: if no end state was registered (machine can't terminate).
        """
        if self.currentState is None:
            self.currentState = self.startState
        try:
            handler = self.handlers[self.currentState]
        except KeyError:
            # FIX: was a bare `except:` which also swallowed SystemExit /
            # KeyboardInterrupt and any unrelated error; catch only the
            # missing-state lookup and drop the noisy chained context.
            raise KeyError("must call .set_start() before .run_step()") from None
        if not self.endStates:
            raise Exception("at least one state must be an end_state")
        # Perform the state action; the handler returns the next state name.
        newState = handler(gameObject)
        if newState in self.endStates:
            # NOTE: currentState is intentionally NOT advanced into an end
            # state (original behaviour preserved).
            print('reached end state for unit')
        else:
            self.currentState = newState
| 979 |
main.py
|
dm1tr/wxPython-project
| 0 |
2169431
|
import wx
import glob
import eyed3
class EditDialog(wx.Dialog):
    """Modal dialog for editing the ID3 tags (title/artist/album) of one MP3."""

    def __init__(self, mp3):
        # Window title embeds the track's current title (Russian UI string).
        title = f'Редактирование {mp3.tag.title}'
        super().__init__(parent=None, title=title)
        self.mp3 = mp3
        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        # One labelled text field per editable tag.
        self.title = wx.TextCtrl(
            self, value=self.mp3.tag.title
        )
        self.add_widgets('Название', self.title)
        self.artist = wx.TextCtrl(self, value=self.mp3.tag.artist)
        self.add_widgets('Артист', self.artist)
        self.album = wx.TextCtrl(self, value=self.mp3.tag.album)
        self.add_widgets('Альбом', self.album)
        btn_sizer = wx.BoxSizer()
        save_btn = wx.Button(self, label='Сохранить')
        save_btn.Bind(wx.EVT_BUTTON, self.on_save)
        btn_sizer.Add(save_btn, 0, wx.ALL, 5)
        # wx.ID_CANCEL makes the second button close the dialog automatically.
        btn_sizer.Add(wx.Button(
            self, label='Отменить', id=wx.ID_CANCEL), 0, wx.ALL, 5)
        self.main_sizer.Add(btn_sizer, 0, wx.CENTER)
        self.SetSizer(self.main_sizer)

    def add_widgets(self, label_text, text_ctrl):
        """Add one horizontal row: fixed-width label + expanding text field."""
        row_sizer = wx.BoxSizer(wx.HORIZONTAL)
        label = wx.StaticText(self, label=label_text, size=(50, -1))
        row_sizer.Add(label, 0, wx.ALL, 5)
        row_sizer.Add(text_ctrl, 1, wx.ALL | wx.EXPAND, 5)
        self.main_sizer.Add(row_sizer, 0, wx.EXPAND)

    def on_save(self, event):
        """Write the edited values back into the file's ID3 tag and close."""
        self.mp3.tag.artist = self.artist.GetValue()
        self.mp3.tag.album = self.album.GetValue()
        self.mp3.tag.title = self.title.GetValue()
        self.mp3.tag.save()
        self.Close()
class Mp3Panel(wx.Panel):
    """Panel listing the MP3s of the chosen folder with an edit button."""

    def __init__(self, parent):
        super().__init__(parent)
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        # Maps list-row index -> eyed3 mp3 object shown in that row.
        self.row_obj_dict = {}
        self.list_ctrl = wx.ListCtrl(
            self, size=(-1, 100),
            style=wx.LC_REPORT | wx.BORDER_SUNKEN
        )
        self.list_ctrl.InsertColumn(0, 'Название', width=200)
        self.list_ctrl.InsertColumn(1, 'Артист', width=140)
        self.list_ctrl.InsertColumn(2, 'Альбом', width=140)
        main_sizer.Add(self.list_ctrl, 0, wx.ALL | wx.EXPAND, 5)
        edit_button = wx.Button(self, label='Редактировать')
        edit_button.Bind(wx.EVT_BUTTON, self.on_edit)
        main_sizer.Add(edit_button, 0, wx.ALL | wx.CENTER, 5)
        self.SetSizer(main_sizer)

    def on_edit(self, event):
        """Open the edit dialog for the focused row, then refresh the listing."""
        selection = self.list_ctrl.GetFocusedItem()
        # GetFocusedItem() returns -1 when no row is focused.
        if selection >= 0:
            mp3 = self.row_obj_dict[selection]
            dialog = EditDialog(mp3)
            dialog.ShowModal()
            self.update_mp3_listing(self.current_folder_path)
            dialog.Destroy()

    def update_mp3_listing(self, folder_path):
        """Re-scan *folder_path* for .mp3 files and rebuild the list control."""
        self.current_folder_path = folder_path
        self.list_ctrl.ClearAll()
        # ClearAll() drops the columns too, so re-create them.
        self.list_ctrl.InsertColumn(0, 'Название', width=200)
        self.list_ctrl.InsertColumn(1, 'Артист', width=140)
        self.list_ctrl.InsertColumn(2, 'Альбом', width=140)
        mp3s = glob.glob(folder_path + '/*.mp3')
        mp3_objects = []
        for index, mp3 in enumerate(mp3s):
            mp3_object = eyed3.load(mp3)
            # NOTE(review): eyed3.load may return an object with tag=None for
            # untagged files, which would raise AttributeError here — confirm
            # inputs always carry ID3 tags.
            self.list_ctrl.InsertItem(index, mp3_object.tag.title)
            self.list_ctrl.SetItem(index, 1, mp3_object.tag.artist)
            self.list_ctrl.SetItem(index, 2, mp3_object.tag.album)
            mp3_objects.append(mp3_object)
            self.row_obj_dict[index] = mp3_object
class Mp3Frame(wx.Frame):
    """Top-level window: the MP3 panel plus a File menu for choosing a folder."""

    def __init__(self):
        super().__init__(parent=None, title='Редактор тегов песен.')
        self.panel = Mp3Panel(self)
        self.create_menu()
        # Show the frame.
        self.Show()

    def create_menu(self):
        """Build the menu bar with a single 'choose directory' entry."""
        menu_bar = wx.MenuBar()
        file_menu = wx.Menu()
        open_folder_menu_item = file_menu.Append(
            wx.ID_ANY, 'Выбрать директорию',
            'Открыть папку с треками'
        )
        menu_bar.Append(file_menu, '&File')
        self.Bind(
            event=wx.EVT_MENU,
            handler=self.on_open_folder,
            source=open_folder_menu_item
        )
        self.SetMenuBar(menu_bar)

    def on_open_folder(self, event):
        """Ask the user for a directory and load its MP3s into the panel."""
        title = 'Выберите директорию'
        dialog = wx.DirDialog(self, title, style=wx.DD_DEFAULT_STYLE)
        if dialog.ShowModal() == wx.ID_OK:
            self.panel.update_mp3_listing(dialog.GetPath())
        dialog.Destroy()
if __name__ == '__main__':
    # Initialise the wx application.
    app = wx.App()
    # Our own frame class built on the library.
    frame = Mp3Frame()
    # Enter the event loop.
    app.MainLoop()
| 4,583 |
main/entities/projectile.py
|
ouriquegustavo/fightyourstreamer
| 0 |
2169796
|
from main.entities.entity import Entity
import pygame
class Projectile(Entity):
    """Small square bullet that flies at constant velocity until it hits something."""

    @property
    def xi(self):
        """Left edge of the bounding box."""
        return self.x - self.dx / 2

    @property
    def xf(self):
        """Right edge of the bounding box."""
        return self.x + self.dx / 2

    @property
    def yi(self):
        """Top edge of the bounding box."""
        return self.y - self.dy / 2

    @property
    def yf(self):
        """Bottom edge of the bounding box."""
        return self.y + self.dy / 2

    def __init__(self, game, gid, x, y, vx, vy):
        super().__init__(game, gid)
        self.kind = 'projectile'
        self.x = x
        self.y = y
        self.dx = 5
        self.dy = 5
        self.vx = vx
        self.vy = vy
        self.zorder = 0
        # Collide with entity groups 3 and 1.
        self.collision_mask.add(3)
        self.collision_mask.add(1)
        self.colour = (255, 0, 155, 255)
        self.sprite = pygame.Surface((self.dx, self.dy), flags=pygame.SRCALPHA)
        self.sprite.fill(self.colour)
        self.is_updating = True
        self.is_drawing = True

    def draw(self):
        """Blit the sprite at screen coordinates derived from the camera."""
        screen_x = self.x - self.dx / 2 - self.game.camera.x + self.game.display.w / 2
        screen_y = self.y - self.dy / 2 - self.game.camera.y + self.game.display.h / 2
        self.game.display.blit(self.sprite, (screen_x, screen_y))

    def update(self):
        """Advance one tick; on a collision, despawn and damage warriors."""
        self.x += self.vx
        self.y += self.vy
        hits = self.check_for_collision()
        # Pick the collision ranked first by min_dr (descending), if any.
        closest = hits and sorted(hits.values(), key=self.min_dr, reverse=True)[0]
        if closest:
            self.should_delete = True
            target = self.game.entity_manager.entities[closest['gid']]
            if target.kind == 'warrior':
                target.hp -= 35
| 1,584 |
SuperGlue/train.py
|
fun-math/SLAM_with_ML
| 1 |
2169493
|
from SuperGlue import *
from Loss import *

# Let TensorFlow grow GPU memory on demand instead of reserving it all upfront.
# (tf is presumably re-exported by the star imports above — TODO confirm.)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
# train_ds=Dataset(batch_size=16).tf_data()
# valid_ds=Dataset(split='val/',batch_size=16).tf_data()
#instantiate the model and run model.fit
model=SuperGlue()
# model.load_weights('/media/ironwolf/students/amit/SLAM_with_ML/weights/hfnet_new.h5')
model.custom_compile(tf.keras.optimizers.RMSprop(),Loss())
# Synthetic smoke-test batch: two samples with 9 and 10 keypoints respectively,
# 256-dim descriptors, per-keypoint scores and a 320x240 image shape.
y=tf.random.uniform((15,3),0,2,tf.int32)
x={
    'kpts0' : tf.random.uniform(shape=(2,9,2)),
    'desc0' : tf.random.uniform(shape=(2,256,9)),
    'scores0' : tf.random.uniform((2,9),0,1),
    'shape0' : tf.constant([320,240],tf.float32,(1,2)),
    'kpts1' : tf.random.uniform(shape=(2,10,2)),
    'desc1' : tf.random.uniform(shape=(2,256,10)),
    'scores1' : tf.random.uniform((2,10),0,1),
    'shape1' : tf.constant([320,240],tf.float32,(1,2)),
}
# loss=Loss()
# print(loss(y_true,model(x)))
# print(model.train_step([x, y]))
#Checked
# Fit on the synthetic batch repeated three times (smoke test of the pipeline).
model.assign_data(train_ds=[(x,y),(x,y),(x,y)])
model.custom_fit(valid_freq=1000,step_init = 0)
| 1,126 |
tianshu_serving/http_server.py
|
Gouzhong1223/Dubhe
| 1 |
2167941
|
"""
Copyright 2020 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import config as configs
from fastapi import FastAPI, File, UploadFile
from utils import file_utils
import uvicorn
import threading
from logger import Logger
from typing import List
from service.inference_service_manager import InferenceServiceManager
from response import Response
# FastAPI application serving model inference over HTTP.
app = FastAPI(version='1.0', title='Zhejiang Lab TS_Serving inference Automation',
              description="<b>API for performing oneflow、tensorflow、pytorch inference</b></br></br>")
# For standalone deployments CORS can be handled here; otherwise handle it in
# nginx or the API gateway.
# app.add_middleware(
#     CORSMiddleware,
#     allow_origins=["*"],  # origins allowed to make cross-origin requests
#     allow_credentials=True,
#     allow_methods=["*"],  # HTTP methods allowed cross-origin (get, post, put, ...)
#     allow_headers=["*"])  # headers allowed cross-origin (e.g. to identify callers)
# Parse command-line configuration (model name, host, port, ...).
parser = configs.get_parser()
args = parser.parse_args()
configs.print_args(args)
# Build and initialise the model-serving backend once at startup.
inference_service = InferenceServiceManager(args)
inference_service.init()
log = Logger().logger
@app.get("/")
def read_root():
    """Health-check endpoint: confirms the service is up."""
    return {"message": "ok"}
@app.post("/image_inference")
async def inference(images_path: List[str] = None):
    """Run inference on images referenced by remote paths/URLs.

    The images are downloaded to local storage, then handed to the configured
    model for inference.
    """
    if not images_path:
        # Guard: the default of None would otherwise crash the loop below (500).
        return Response(success=False, data="", error="images_path is required")
    # BUG FIX: the original wrapped this call as
    # threading.Thread(target=file_utils.download_image(images_path)), which
    # *called* the function immediately and never started the thread — the
    # download always ran synchronously and the Thread was a dead object.
    # Keep the (working) synchronous download without the misleading wrapper.
    file_utils.download_image(images_path)
    images = list()
    for image in images_path:
        data = {"data_name": image.split("/")[-1], "data_path": image}
        images.append(data)
    try:
        data = inference_service.inference(args.model_name, images)
        return Response(success=True, data=data)
    except Exception as e:
        return Response(success=False, data=str(e), error="inference fail")
@app.post("/inference")
async def inference(files: List[UploadFile] = File(...)):
    """Run inference on files uploaded in the request body."""
    log.info("===============> http inference start <===============")
    try:
        data_list = file_utils.upload_data(files)  # save uploaded files locally
    except Exception as e:
        # BUG FIX: log.error("msg", e) passed the exception as a %-format
        # argument with no placeholder, breaking log formatting.
        # log.exception records the message plus the full traceback.
        log.exception("upload data failed")
        return Response(success=False, data=str(e), error="upload data failed")
    try:
        result = inference_service.inference(args.model_name, data_list)
        log.info("===============> http inference success <===============")
        return Response(success=True, data=result)
    except Exception as e:
        log.exception("inference fail")
        return Response(success=False, data=str(e), error="inference fail")
if __name__ == '__main__':
uvicorn.run(app, host=args.host, port=args.port)
| 2,951 |
yay_db.py
|
Tanjirou13r/Yay_BtCl_DB
| 0 |
2168232
|
# -*- coding: utf-8 -*-
import os, time, pickle, sys, logging, re, configparser, mysql.connector, random
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
#------------------------------------------------------------------------------------------#
# Account credentials and runtime settings for the scraping session.
alive = True
# Declare configparser and read the ini file.
config_ini = configparser.ConfigParser()
config_ini.read('config.ini', encoding='utf-8')
email1 = config_ini.get('Account', 'email1')
password1 = config_ini.get('Account', 'password1')
os.makedirs("cache/"+email1, exist_ok=True)
host = config_ini.get('Mysql', 'host')
port = config_ini.get('Mysql', 'port')
user = config_ini.get('Mysql', 'user')
password = config_ini.get('Mysql', 'password')
database = config_ini.get('Mysql', 'database')
# Create the MySQL connection.
conn = mysql.connector.connect(
    host = host,
    port = port,
    user = user,
    password = password,
    database = database
)
conn.ping(reconnect=True)
cur = conn.cursor(buffered=True)
#------------------------------------------------------------------------------------------#
# logging module configuration.
# Handler that writes log records to the console.
stream_log = logging.StreamHandler()
stream_log.setLevel(logging.INFO)
stream_log.setFormatter(logging.Formatter('[%(levelname)s](%(lineno)s):%(message)s'))
# Handler that writes log records to a file.
file_log = logging.FileHandler(filename='logger.log')
file_log.setLevel(logging.INFO)
file_log.setFormatter(logging.Formatter('[%(asctime)s][%(levelname)s](%(filename)s:%(lineno)s):%(message)s'))
# Fetch the root logger via getLogger() and attach both handlers.
logging.getLogger().addHandler(stream_log)
logging.getLogger().addHandler(file_log)
# The root logger level must be the lowest of all handler levels,
# otherwise records never reach the child handlers.
logging.getLogger().setLevel(logging.DEBUG)
#------------------------------------------------------------------------------------------#
# Launch the browser.
options = Options()
options.add_argument('--headless')  # enable headless mode
options.add_argument('--no-sandbox')  # run outside the sandbox (disables isolation)
options.add_argument('--disable-gpu')  # disable GPU features
options.add_argument('--window-size=1280,1024')  # adjust the window size
driver1 = webdriver.Chrome(options=options)
# Check connectivity to the Yay! server.
try:
    logging.info("Browser1 Connection check...")
    driver1.get('https://yay.space/timeline/following')
    WebDriverWait(driver1, 5).until(EC.presence_of_all_elements_located)
    logging.info("Browser1 Connected successfully...")
except:
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
    logging.error("Browser Connection timed out...!!")
    sys.exit()
#----------------------------------------------------------------------------------------------------#
###アカウントログイン###
def login():
    """Log the main account into yay.space, reusing saved cookies when possible.

    First tries to restore a previous session from the pickled cookie jar;
    on failure falls back to a fresh form login and saves the new cookies.
    Calls sys.exit() on connection timeouts.
    """
    # --- Main account ---
    try:
        # Check login status: try restoring the previous session from cookies.
        logging.info("Check MainAccount login status...")
        # FIX: use a context manager so the cookie file handle is closed.
        with open("cache/" + email1 + "/cookies.pkl", "rb") as f:
            cookies = pickle.load(f)
        for cookie in cookies:
            driver1.add_cookie(cookie)
        driver1.refresh()
        WebDriverWait(driver1, 5).until(EC.presence_of_all_elements_located)
        driver1.find_element_by_class_name('Header__profile__a')
        logging.info("Logged in to Account from saved information...")
    except Exception:
        # Not logged in (missing/stale cookies): perform a fresh form login.
        try:
            logging.info("Browser1 Move page...")
            driver1.get('https://yay.space/login')
            WebDriverWait(driver1, 5).until(EC.presence_of_all_elements_located)
        except Exception:
            # FIX: narrowed from a bare except so Ctrl+C still propagates.
            logging.error("Browser1 Connection timed out...!!")
            sys.exit()
        # Fill in the credentials and click the login button.
        logging.info("Browser1 Start login...")
        driver1.find_element_by_name('email').send_keys(email1)
        # BUG FIX: the placeholder "<PASSWORD>" was invalid syntax; the
        # credential read from config.ini is password1 (paired with email1).
        driver1.find_element_by_name('password').send_keys(password1)
        driver1.find_element_by_class_name('Button.Button--less-rounded.Button--icon-login').click()
        # Wait (up to ~5s) for the post-login redirect.
        for _ in range(50):
            if driver1.current_url == "https://yay.space/timeline/following":
                break
            else:
                time.sleep(0.1)
        else:
            logging.error("Browser1 Connection timed out...!!")
            sys.exit()
        # Persist the session cookies for the next run.
        with open("cache/" + email1 + "/cookies.pkl", "wb") as f:
            pickle.dump(driver1.get_cookies(), f)
        logging.info("Browser1_1 Login completed...")
#----------------------------------------------------------------------------------------------------#
# Status of the logged-in user, scraped from the page header.
my_id = driver1.find_element_by_class_name('Header__profile__a').get_attribute("href")
my_name = driver1.find_element_by_class_name('Nickname__span').text
try:
    # The VIP avatar class is only present for VIP accounts.
    driver1.find_element_by_class_name('ImageLoader.Avatar.Avatar--vip')
    d_vip = "Enable"
except:
    d_vip = "Disable"
logging.info("Login Status\n< Main Account >\nUSERID:["+my_id.replace("https://yay.space", "") + "] NAME:["+my_name + "] VIP:"+d_vip + "\n")
#----------------------------------------------------------------------------------------------------#
def main():
    """Endlessly scrape the most recently active user and upsert into MySQL."""
    while alive:
        # Go to the "recently logged in" users search page.
        driver1.get("https://yay.space/users/search")
        WebDriverWait(driver1, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="main"]/div/div[2]/div/div[1]/div[3]/div[2]/div[1]')))
        # Open the profile of the topmost recently-logged-in user.
        driver1.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[3]/div[2]/div[1]/a[2]/div').click()
        WebDriverWait(driver1, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="main"]/div/div[2]/div/div[1]/div[1]')))
        userid = (driver1.current_url).replace("https://yay.space/user/", "")
        name = driver1.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[2]/h3/div/div/span').text
        # Reject names containing characters outside the allowed set.
        name_p = re.compile('^[あ-ん\u30A1-\u30F4a-zA-Z0-9\u4E00-\u9FD0]+')
        if not name_p.fullmatch(name):
            name = "unkown"  # NOTE: existing DB rows already use this spelling
        profile_icon = driver1.find_element_by_class_name('User__profile__image__wrapper')
        try:
            icon = profile_icon.find_element_by_class_name('ImageLoader.User__profile__image').get_attribute("data-url")
            icon = icon.replace("https://", "")
        except Exception:
            icon = ""
        profile_cover = driver1.find_element_by_class_name('User__wallpaper__wrapper')
        try:
            cover = profile_cover.find_element_by_class_name('ImageLoader.User__wallpaper.User__wallpaper--female').get_attribute("data-url")
            cover = cover.replace("https://", "")
        except Exception:
            cover = ""
        posts = driver1.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[2]/dl/div[1]/a/dd').text
        posts = int(posts.replace(",", ""))
        letters = driver1.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[2]/dl/div[2]/a/dd').text
        letters = int(letters.replace(",", ""))
        circles = driver1.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[2]/dl/div[3]/a/dd').text
        circles = int(circles.replace(",", ""))
        follower = driver1.find_element_by_xpath('//*[@id="main"]/div/div[2]/div/div[1]/div[1]/div/div[2]/dl/div[4]/a/dd').text
        follower = int(follower.replace(",", ""))
        # Duplicate check for the scraped user.
        # BUG FIX: queries previously interpolated scraped values directly
        # into the SQL string with %, which is injectable and breaks on
        # quoting; use driver-side parameter binding instead.
        cur.execute('select * from users where userid = %s', (userid,))
        if cur.rowcount == 0:  # not present yet
            # Find an unused random surrogate id.
            while True:
                id = random.randint(1000000, 9999999)
                cur.execute('select * from users where id = %s', (id,))
                if cur.rowcount == 0:
                    break
            # Insert the new user (fall back to a safe name on encoding errors).
            try:
                cur.execute('INSERT INTO users VALUES (%s, %s, %s, %s, %s)', (id, userid, name, icon, cover))
            except Exception:
                cur.execute('INSERT INTO users VALUES (%s, %s, %s, %s, %s)', (id, userid, "unkown", icon, cover))
            cur.execute('INSERT INTO profiles (id, posts, letters, circles, follower) VALUES (%s, %s, %s, %s, %s)', (id, posts, letters, circles, follower))
        else:  # already present: refresh the mutable fields
            cur.execute('UPDATE users set name=%s, icon=%s, cover=%s where userid=%s', (name, icon, cover, userid))
            cur.execute('SELECT id from users where userid=%s', (userid,))
            id = cur.fetchall()
            #cur.execute('UPDATE profiles set posts=%s, letters=%s, circles=%s, follower=%s where id=%s', (posts, letters, circles, follower, id))
        conn.commit()
        print("< DateBase >\nUserID:"+userid + " Name:"+name + "\r\033[2A")
#----------------------------------------------------------------------------------------------------#
if __name__ == "__main__":
try:
login()
main()
except KeyboardInterrupt:
alive = False
time.sleep(5)
#Ctrl+Cによるプログラム強制終了によるブラウザ強制終了対策
#ドライバーを終了させる
logging.warning("KeyboardInterruptをキャッチしたため、ブラウザを強制終了します")
driver1.quit()
cur.close()
conn.close()
except:
import traceback
traceback.print_exc()
driver1.quit()
cur.close()
conn.close()
| 9,446 |
main.py
|
soerenetler/neural_question_generation
| 0 |
2169887
|
import argparse
import pickle as pkl
import numpy as np
import tensorflow as tf
from absl import app, flags, logging
import model
import params
from utils import remove_eos, write_result, loss_function
from bleu_score import BleuScore
FLAGS = flags.FLAGS
# Flag names are globally defined! So in general, we need to be
# careful to pick names that are unlikely to be used by other libraries.
# If there is a conflict, we'll get an error at import time.
# Command-line interface: run mode, data paths and output locations.
flags.DEFINE_enum('mode', 'train', ['train', 'eval', 'pred'], 'train, eval')
flags.DEFINE_string('train_sentence', '', 'path to the training sentence.')
flags.DEFINE_string('train_question', '', 'path to the training question.')
flags.DEFINE_string('eval_sentence', '', 'path to the evaluation sentence.')
flags.DEFINE_string('eval_question', '', 'path to the evaluation question.')
flags.DEFINE_string('test_sentence', '', 'path to the test sentence.')
flags.DEFINE_string('dic_dir', '', 'path to the dictionary')
flags.DEFINE_string('model_dir', '', 'path to save the model')
flags.DEFINE_string('pred_dir', '', 'path to save the predictions')
flags.DEFINE_string('params', '', 'parameter setting')
flags.DEFINE_integer('num_epochs', 10, 'training epoch size', lower_bound=0)
def main(unused):
    """Train, evaluate, or predict with the QG model depending on FLAGS.mode."""
    # Load the named hyperparameter set from params.py.
    model_params = getattr(params, FLAGS.params)()
    # Build and compile the question-generation model.
    q_generation = model.QG(model_params)
    q_generation.compile(optimizer=tf.keras.optimizers.Adam(), loss=loss_function, run_eagerly=True,
                         metrics=[BleuScore()])
    # Training dataset
    # NOTE(review): train/eval arrays are loaded in every mode; in 'pred' mode
    # the empty-path defaults would make np.load fail — confirm callers always
    # pass all paths, or gate these loads on FLAGS.mode.
    train_sentence = np.load(FLAGS.train_sentence)  # train data
    train_question = np.load(FLAGS.train_question)  # train labels
    TRAIN_BUFFER_SIZE = len(train_sentence)
    train_input_data = tf.data.Dataset.from_tensor_slices((train_sentence, train_question)).shuffle(
        TRAIN_BUFFER_SIZE).batch(model_params['batch_size'], drop_remainder=True)
    # Evaluation dataset
    eval_sentence = np.load(FLAGS.eval_sentence)
    eval_question = np.load(FLAGS.eval_question)
    # BUG FIX: the eval shuffle buffer was sized with len(train_sentence)
    # (copy-paste), mis-sizing the buffer for the eval split.
    EVAL_BUFFER_SIZE = len(eval_sentence)
    eval_input_data = tf.data.Dataset.from_tensor_slices((eval_sentence, eval_question)).shuffle(
        EVAL_BUFFER_SIZE).batch(model_params['batch_size'], drop_remainder=True)
    # Train and evaluate.
    if FLAGS.mode == 'train':
        example_input_batch, example_target_batch = next(iter(train_input_data))
        print("Shape train_input_data: ", example_input_batch.shape, example_target_batch.shape)
        q_generation.fit(train_input_data,
                         epochs=FLAGS.num_epochs,
                         validation_data=eval_input_data)
        q_generation.summary()
    elif FLAGS.mode == 'eval':
        q_generation.evaluate(eval_input_data)
        # exp_nn.evaluate(delay_secs=0)
    else:  # 'pred'
        # Load the test data.
        test_sentence = np.load(FLAGS.test_sentence)
        # Prediction input pipeline.
        test_input_data = tf.data.Dataset.from_tensor_slices(
            {'enc_inputs': test_sentence}).batch(model_params['batch_size'], drop_remainder=True)
        # Run prediction and write the generated questions to file.
        predict_results = q_generation.predict(test_input_data)
        write_result(predict_results, FLAGS.dic_dir, FLAGS.pred_dir)


if __name__ == '__main__':
    app.run(main)
| 3,364 |
examples/python/example_multiple_references_2.py
|
France-ioi/codecast
| 27 |
2169743
|
# Codecast teaching example: demonstrates that lists are passed by reference,
# so mutations inside a function are visible through every alias.
a = [0, 1]
b = [2, 3]
c = [a, b]  # c aliases a and b: c[0] is a, c[1] is b


def test(c):
    # The redundant stores to v are intentional: the codecast stepper uses
    # them to visualise local-variable changes during execution.
    v = "test1"
    c[0][0] = 42  # mutates the shared inner list, i.e. a[0]
    v = "test"


def test2(a, c):
    v = "test1"
    a[0] = 45  # a is the same object as c[0]
    v = "test"


test(c)
# 42 42
print(a[0])
print(c[0][0])
test2(a, c)
# 45 45
print(a[0])
print(c[0][0])
| 249 |
tests/test_message_definition.py
|
NHSDigital/Booking-and-Referral-FHIR-API
| 3 |
2168148
|
import pytest
import requests
from .configuration import config
from assertpy import assert_that
from .example_loader import load_example
class TestMessageDefinition:
    """API tests for the /MessageDefinition endpoint (BaRS FHIR API)."""

    @pytest.mark.message_definition
    @pytest.mark.integration
    @pytest.mark.sandbox
    def test_get_message_definition(self, get_token_client_credentials):
        """GET returns 200 with the canned success payload."""
        # Given
        token = get_token_client_credentials["access_token"]
        expected_status_code = 200
        expected_body = load_example("message_definition/GET-success.json")
        # When
        response = requests.get(
            url=f"{config.BASE_URL}/{config.BASE_PATH}/MessageDefinition",
            headers={
                "Authorization": f"Bearer {token}",
                "NHSD-Service": "NHS0001",
            },
        )
        # Then
        assert_that(expected_status_code).is_equal_to(response.status_code)
        assert_that(expected_body).is_equal_to(response.json())

    @pytest.mark.message_definition
    @pytest.mark.integration
    @pytest.mark.sandbox
    def test_message_definition_method_not_allowed(self, get_token_client_credentials):
        """POST to a GET-only resource returns 405 with the standard error body."""
        # Given
        token = get_token_client_credentials["access_token"]
        expected_status_code = 405
        expected_body = load_example("method-not-allowed.json")
        # When
        response = requests.post(
            url=f"{config.BASE_URL}/{config.BASE_PATH}/MessageDefinition",
            headers={
                "Authorization": f"Bearer {token}",
                "NHSD-Service": "NHS0001",
            },
        )
        # Then
        assert_that(expected_status_code).is_equal_to(response.status_code)
        assert_that(expected_body).is_equal_to(response.json())
| 1,720 |
cookbookApp/list_recipes/migrations/0001_initial.py
|
ryan-tjitro/cookbookApp
| 0 |
2162751
|
# Generated by Django 2.2.17 on 2020-11-13 01:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Employee, Recipe (per-user) and Ingredient (per-recipe)."""

    initial = True

    dependencies = [
        # Recipe.user points at the configured auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Employee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150)),
                ('position', models.CharField(max_length=150)),
                ('office', models.CharField(max_length=150)),
                ('age', models.PositiveIntegerField()),
                ('start_date', models.DateField()),
                ('salary', models.PositiveIntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Recipe',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('instructions', models.CharField(max_length=10000)),
                ('time', models.IntegerField()),
                ('title', models.CharField(max_length=50)),
                ('recipe_yield', models.CharField(max_length=50)),
                ('original_url', models.CharField(max_length=200)),
                # Deleting a user cascades to their recipes.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ingredient_info', models.CharField(max_length=100)),
                # Deleting a recipe cascades to its ingredients.
                ('recipe', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='list_recipes.Recipe')),
            ],
        ),
    ]
| 1,983 |
src/infi/storagemodel/unix/veritas_multipath.py
|
Infinidat/infi.storagemodel
| 6 |
2170000
|
from infi.storagemodel.unix.utils import execute_command_safe
from infi.storagemodel.base import multipath, gevent_wrapper
from infi.pyutils.lazy import cached_method
from contextlib import contextmanager
from munch import Munch
from logging import getLogger
logger = getLogger(__name__)
def is_veritas_multipathing_installed():
    """Return True when Veritas DMP multipathing appears to be present.

    Checks, in order: the VMPATH environment override, the presence of the
    vxdmpadm binary under /usr/sbin, and the MOCK_VERITAS test hook.
    """
    from os import path, environ
    veritas_executables = ('vxdmpadm',)
    # FIX: wrap environ.get in bool() so callers always receive a real
    # boolean rather than the raw environment-variable string.
    return bool(environ.get("VMPATH")) or \
        any(path.exists(path.join(path.sep, "usr", "sbin", basename)) for basename in veritas_executables) or \
        bool(environ.get("MOCK_VERITAS"))
class VeritasMultipathEntry(Munch):
    """One DMP multipath device: its name, member paths and vendor/product IDs."""

    def __init__(self, dmp_name, paths, vendor_id, product_id):
        self.paths = paths          # list of VeritasSinglePathEntry
        self.dmp_name = dmp_name    # dmpdev name, e.g. from `vxdmpadm list dmpnode`
        self.vendor_id = vendor_id
        self.product_id = product_id
class VeritasSinglePathEntry(Munch):
    """One physical path of a multipath device."""

    def __init__(self, sd_device_name, ctlr, state, wwn):
        self.sd_device_name = sd_device_name  # OS device name, e.g. sdX
        self.ctlr = ctlr                      # controller the path goes through
        self.state = state                    # e.g. enabled/disabled
        self.wwn = wwn                        # array port WWN
class VeritasMultipathClient(object):
    """Thin wrapper around `vxdmpadm list dmpnode` output parsing."""

    def get_list_of_multipath_devices(self):
        """Run vxdmpadm and return a list of VeritasMultipathEntry objects."""
        multipaths = []
        multipath_dicts = self.parse_paths_list(self.read_paths_list())
        for multi in multipath_dicts:
            paths = [VeritasSinglePathEntry(p['name'], p['ctlr'], p['state'], p['aportWWN']) for p in multi['paths']]
            multipaths.append(VeritasMultipathEntry(multi['dmpdev'], paths, multi['vid'], multi['pid']))
        return multipaths

    def read_paths_list(self):
        """Return raw `vxdmpadm list dmpnode` output, or "" when DMP is absent."""
        return execute_command_safe("vxdmpadm list dmpnode") if is_veritas_multipathing_installed() else ""

    def parse_paths_list(self, paths_list_output):
        """Parse the per-dmpnode blocks of *paths_list_output* into dicts.

        Each dict carries the dmpnode attributes (dmpdev, vid, pid, ...) plus
        a 'paths' key holding the parsed per-path dicts.
        """
        from re import compile, MULTILINE, DOTALL
        MULTIPATH_PATTERN = r"^dmpdev\s*=\s*(?P<dmpdev>\w+)\n" + \
            r"^state\s*=\s*(?P<state>\w+)\n" + \
            r"^enclosure\s*=\s*(?P<enclosure>\w+)\n" + \
            r"^cab-sno\s*=\s*(?P<cab_sno>\w+)\n" + \
            r"^asl\s*=\s*(?P<asl>[\w\.]+)\n" + \
            r"^vid\s*=\s*(?P<vid>\w+)\n" + \
            r"^pid\s*=\s*(?P<pid>[\w ]+)\n" + \
            r"^array-name\s*=\s*(?P<array_name>\w+)\n" + \
            r"^array-type\s*=\s*(?P<array_type>[\w/]+)\n" + \
            r"^iopolicy\s*=\s*(?P<iopolicy>\w+)\n" + \
            r"^avid\s*=\s*(?P<avid>[-\w]+)\n" + \
            r"^lun-sno\s*=\s*(?P<lun_sno>\w*)\n" + \
            r"^udid\s*=\s*(?P<udid>[\w%\.-]+)\n" + \
            r"^dev-attr\s*=\s*(?P<dev_attr>[ \-\w]+)\n" + \
            r"(^lun_type\s*=\s*(?P<lun_type>[-\w]+)\n)?" + \
            r"(^scsi3_vpd\s*=\s*(?P<scsi3_vpd>[-\w\:]+)\n)?" + \
            r"(^raid_type\s*=\s*(?P<raid_type>\w+)\n)?" + \
            r"(^replicated\s*=\s*(?P<replicated>\w+)\n)?" + \
            r"(^num_paths\s*=\s*(?P<num_paths>\w+)\n)?" + \
            r"^###path\s*=[\s\w]+\n" + \
            r"(?P<paths>(?:^path\s*=\s*[\w -\:\(\)\@\/\,]+\n)*)"
        pattern = compile(MULTIPATH_PATTERN, MULTILINE | DOTALL)
        matches = []
        for match in pattern.finditer(paths_list_output):
            logger.debug("multipath found: %s", match.groupdict())
            # FIX: the original rebuilt the dict with the no-op expression
            # `value if value is not None else value` (both branches
            # identical); a plain copy of groupdict() is equivalent.
            multipath_dict = dict(match.groupdict())
            self.parse_paths_in_multipath_dict(multipath_dict)
            matches.append(multipath_dict)
        return matches

    def parse_paths_in_multipath_dict(self, multipath_dict):
        """Replace multipath_dict['paths'] (raw text) with a list of path dicts."""
        from re import compile, MULTILINE, DOTALL
        PATH_PATTERN = r"^path\s*=\s*" + \
            r"(?P<name>[\w]+)\s*" + \
            r"(?P<state>[\w\(\)]+)\s*" + \
            r"(?P<type>[\w-]+)\s*" + \
            r"(?P<transport>[\w]+)\s*" + \
            r"(?P<ctlr>[\w]+)\s*" + \
            r"(?P<hwpath>[\w\/\@\,]+)\s*" + \
            r"(?P<aportID>[\w-]+)\s*" + \
            r"(?P<aportWWN>[\w:]+)\s*" + \
            r"(?P<attr>[\w-]+)\s*"
        pattern = compile(PATH_PATTERN, MULTILINE | DOTALL)
        matches = []
        for match in pattern.finditer(multipath_dict['paths']):
            logger.debug("paths found: %s", match.groupdict())
            # Same no-op conditional removed here as in parse_paths_list.
            matches.append(dict(match.groupdict()))
        multipath_dict['paths'] = matches
| 4,697 |
Exercicio3.1.py
|
Sedaca/Curso-Phyton
| 0 |
2169165
|
# Read a value and report its type plus several str classification predicates.
info = input('Digite algo: ')
print('O tipo primitivo de',info,'é',type(info))
print('Só tem espaços?')
print(info.isspace())
print('É composta por números?')
print(info.isnumeric())
print('É composta por letras?')
print(info.isalpha())
print('Está Capitalizada?')
# NOTE(review): isupper() tests ALL-UPPERCASE, not capitalisation — the label
# suggests istitle() may have been intended; confirm before changing.
print(info.isupper())
| 287 |
vaetc/evaluation/metrics/intervention/factorvae.py
|
ganmodokix/vaetc
| 0 |
2168506
|
import random
import numpy as np
from numpy.core.fromnumeric import argmin, shape
from tqdm import tqdm
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import train_test_split
from vaetc.utils import debug_print
def make_dataset(z: np.ndarray, t: np.ndarray, size: int = 5000, t_threshold: float = 0.5) -> "tuple[np.ndarray, np.ndarray]":
    """Build the FactorVAE-metric classification dataset.

    For each of *size* samples: pick a usable factor, fix its binarised
    value, draw 100 representation vectors consistent with that value, and
    record the index of the latent dimension with minimal variance together
    with the factor index.

    Returns:
        (argminstd, y): two int arrays of shape (size,).
        (BUG FIX: the annotation previously claimed a single np.ndarray.)

    Raises:
        ValueError: if no factor has both binary classes populated.
    """
    data_size, z_dim = z.shape
    data_size, t_dim = t.shape
    debug_print("Creating dataset...")
    # Rescale each latent dimension to unit std; EPS guards against /0 for
    # collapsed (zero-variance) dimensions.
    EPS = 1e-4
    z = z / np.maximum(EPS, np.std(z, axis=0, keepdims=True))
    argminstd = []
    y = []
    # A factor is usable only if both binarised classes have >= 2 samples.
    available_factors = []
    binary = t > t_threshold
    for k in range(t_dim):
        mask = binary[:, k]
        n1 = np.count_nonzero(mask)
        n0 = data_size - n1
        if min(n1, n0) >= 2:
            available_factors.append(k)
    if not available_factors:
        # BUG FIX: random.choice([]) below would raise an opaque IndexError.
        raise ValueError("no factor has both binary classes populated; cannot build dataset")
    for i in tqdm(range(size)):
        factor = random.choice(available_factors)
        value = random.choice([True, False])
        mask = binary[:, factor] == value
        mask_indices = np.nonzero(mask)[0]
        # Sample 100 points (with replacement) sharing the fixed factor value.
        indices = np.random.choice(mask_indices, size=(100, ))
        s = z[indices]
        argminstd.append(np.argmin(np.var(s, axis=0)))
        y.append(factor)
    return np.array(argminstd), np.array(y)
def factorvae_metric(z: np.ndarray, t: np.ndarray, random_state=42) -> float:
    """FactorVAE disentanglement metric: accuracy of a majority-vote classifier
    that predicts the fixed factor from the latent dim with minimal variance.

    Returns NaN when there are no factors to evaluate.
    """
    data_size, z_dim = z.shape
    data_size, t_dim = t.shape
    # No factors -> the metric is undefined.
    if t_dim == 0:
        return float("nan")
    argminstd, y = make_dataset(z, t)
    argminstd_train, argminstd_test, y_train, y_test = train_test_split(argminstd, y, test_size=0.2, random_state=random_state)
    test_size = argminstd_test.shape[0]
    # 2-D histogram over (latent index, factor index) pairs of the train split.
    v, *v_ranges = np.histogramdd(np.stack([argminstd_train, y_train], axis=1), bins=(np.arange(z_dim+1)-0.5, np.arange(t_dim+1)-0.5))
    # For each latent dimension, the most frequently co-occurring factor.
    majorities = np.argmax(v, axis=1) # (L, )
    y_pred = np.zeros(shape=(test_size, ), dtype=int)
    for j in range(z_dim):
        y_pred[argminstd_test == j] = majorities[j]
    acc = np.count_nonzero(y_pred == y_test) / test_size
    return float(acc)
| 2,071 |
observer/subject.py
|
rlelito/DesignPatterns
| 0 |
2170035
|
from abc import ABC
from abc import abstractmethod
from observer import Observer
class Subject(ABC):
    """Observer-pattern subject: manages observers and notifies them of state changes."""

    # Current state; None until a concrete subject assigns one
    # (annotation widened to match the None default).
    _subject_state: "int | None" = None

    @abstractmethod
    def attach(self, o: Observer) -> None:
        """Register observer *o* for future notifications."""
        pass

    @abstractmethod
    def detach(self, o: Observer) -> None:
        """Unregister observer *o*."""
        pass

    @abstractmethod
    def notify(self) -> None:
        """Notify all attached observers."""
        pass

    @property
    def subject_state(self) -> "int | None":
        """Read-only access to the subject's current state."""
        return self._subject_state
| 438 |
examples/notebooks/solutions/cartpole_fxfu.py
|
paLeziart/crocoddyl
| 1 |
2167757
|
# Evaluate the numerical-differentiation model at state x and control u,
# then print the resulting derivatives.
cartpoleData = cartpoleND.createData()
cartpoleND.calcDiff(cartpoleData, x, u)
print(cartpoleData.Fx)  # presumably d(dynamics)/d(state) — crocoddyl convention
print(cartpoleData.Fu)  # presumably d(dynamics)/d(control)
| 125 |
inclass/l12/balancedlosses(l12).py
|
ngtrunghuan/50.021-ArtificialIntelligence
| 0 |
2169199
|
import torchvision.models as models
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch
import numpy as np
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
currDir = os.path.dirname(os.path.realpath(__file__))
device = torch.device('cpu')
class Dataset(torch.utils.data.Dataset):
    """Whitespace-separated numeric dataset; the last column is a binary label."""

    def __init__(self, fileName, isTraining = True, transform = None):
        """Load every row of *fileName* into a float ndarray and count classes."""
        self.isTraining = isTraining
        self.transform = transform
        self.data = []
        # Bug fix: the file handle was opened but never closed; the context
        # manager releases it even if a line fails to parse.
        with open(fileName) as file:
            for line in tqdm(file.readlines()):
                self.data.append(list(map(float, line.split())))
        self.data = np.array(self.data).astype(float)
        self.countPositives = int(np.sum(self.data[:, -1]))
        self.countNegatives = int(len(self) - self.countPositives)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Returns (features, label); `transform` is stored but not applied here.
        item = self.data[index]
        return (item[:-1], item[-1])

    def plot(self):
        """Scatter-plot the first two feature columns, colored by class."""
        ones = []
        zeroes = []
        data = list(self.data)
        for item in data:
            if item[-1] == 0:
                zeroes.append(item)
            else:
                ones.append(item)
        zeroes = np.matrix(zeroes)
        ones = np.matrix(ones)
        plt.plot(ones[:, 0], ones[:, 1], 'rs', zeroes[:, 0], zeroes[:, 1], 'bo')
        plt.show()

    def applyBalanceSampling(self, alpha = 0.5):
        """Duplicate minority-class rows until the classes are roughly balanced.

        NOTE(review): *alpha* is accepted but currently unused; kept for
        interface compatibility.
        """
        originalLength = len(self)
        if self.countPositives < self.countNegatives:
            toDuplicate = 1
            numDuplicates = int(self.countNegatives / self.countPositives - 1)
        else:
            toDuplicate = 0
            numDuplicates = int(self.countPositives / self.countNegatives - 1)
        data = []
        for i in tqdm(range(originalLength)):
            label = self.data[i][-1]
            if label == toDuplicate:
                for j in range(numDuplicates):
                    # append on numpy flattens the array, making weird errors.
                    # Hence here we use much simpler list appending.
                    data.append(list(self.data[i]))
            data.append(list(self.data[i]))
        self.data = np.array(data).astype(float)
        self.countPositives = int(np.sum(self.data[:, -1]))
        self.countNegatives = int(len(self) - self.countPositives)

    def getClassCount(self, label):
        """Return the number of samples with the given label (0 or 1)."""
        if label == 0:
            return self.countNegatives
        if label == 1:
            return self.countPositives
        return 0
### MODEL PARAMETERS ###
class SimpleModelWithRelu(torch.nn.Module):
    """A single linear layer (2 inputs -> 1 output) followed by a ReLU."""

    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(2, 1)

    def forward(self, x):
        logits = self.fc(x)
        return F.relu(logits)
class SimpleModel(torch.nn.Module):
    """A bare linear layer (2 inputs -> 1 logit); pair with BCEWithLogitsLoss."""

    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(2, 1)

    def forward(self, x):
        return self.fc(x)
# Global training objects shared by run() below.
model = SimpleModel()
criterion = torch.nn.BCEWithLogitsLoss()  # sigmoid + BCE in one, numerically stable
learningRate = 0.1
batchSize = 128
# Bug fix: the optimiser hard-coded lr=0.1 instead of using learningRate,
# so editing the constant above had no effect.
optimiser = torch.optim.SGD(model.parameters(), lr = learningRate)
numEpoch = 10
def run(withBalancing = False):
    """Train the global model on samplestr.txt, evaluating on sampleste.txt each epoch.

    Note: model/optimiser are module-level globals, so state persists across
    successive calls to run().
    """
    trainDataset = Dataset(fileName = currDir + "/samplestr.txt",
                           isTraining = True)
    testDataset = Dataset(fileName = currDir + "/sampleste.txt",
                          isTraining = False)
    if withBalancing:
        print("\nRUN WITH BALANCING")
        trainDataset.applyBalanceSampling()
    else:
        print("\nRUN WITHOUT BALANCING")
    trainLoader = torch.utils.data.DataLoader(dataset = trainDataset,
                                              batch_size = batchSize,
                                              shuffle = False)
    testLoader = torch.utils.data.DataLoader(dataset = testDataset,
                                             batch_size = batchSize,
                                             shuffle = False)
    print("\nNumber of training samples =", len(trainDataset))
    for epoch in range(numEpoch):
        print("\nEpoch {}".format(epoch))
        model.train()
        for i, (samples, labels) in enumerate(trainLoader):
            # Forward pass
            outputs = model(samples.float())
            loss = criterion(outputs, labels.view(len(labels), 1).float())
            # Backward and optimize.
            # Bug fix: gradients were never reset, so they accumulated across
            # batches and epochs; zero them before every backward pass.
            optimiser.zero_grad()
            loss.backward()
            optimiser.step()

        model.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
        with torch.no_grad():
            correct = 0
            correctOnes = 0
            correctZeroes = 0
            total = 0
            for samples, labels in testLoader:
                labels = labels.view((len(labels), 1))
                outputs = model(samples.float())
                # Raw logits: > 0 corresponds to probability > 0.5.
                predicted = (outputs.data > 0).long()
                total += labels.size(0)
                correct += (predicted == labels.long()).sum().item()
                correctMask = predicted == labels.long()
                predictedOnes = predicted == torch.ones(predicted.size(), dtype = torch.long)
                correctOnes += (correctMask * predictedOnes).sum().item()
                predictedZeroes = predicted == torch.zeros(predicted.size(), dtype = torch.long)
                correctZeroes += (correctMask * predictedZeroes).sum().item()
            print('Test Accuracy of the model on the {} test images: {} %'.format(len(testDataset), 100 * correct / total))
            print("True Positives Rate = {}%".format(correctOnes / testDataset.getClassCount(1) * 100))
            print("True Negatives Rate = {}%".format(correctZeroes / testDataset.getClassCount(0) * 100))
def plotData(trainData = True):
    """Scatter-plot either the training set (samplestr.txt) or the test set (sampleste.txt)."""
    if trainData:
        fileName = currDir + "/samplestr.txt"
    else:
        fileName = currDir + "/sampleste.txt"
    dataset = Dataset(fileName = fileName, isTraining = trainData)
    dataset.plot()
#--------------------------#
# USE THIS TO RUN THE CODE #
#--------------------------#
# Note: both runs share the same global model/optimiser, so the weights
# learned in the unbalanced run carry over into the balanced run.
run(withBalancing = False)
run(withBalancing = True)
plotData(trainData = False)
#--------------------------#
# USE THIS TO RUN THE CODE #
#--------------------------#
| 5,610 |
modelsProject/modelsApp/models.py
|
cs-fullstack-2019-spring/django-models-cw-ChelsGreg
| 0 |
2169507
|
from django.db import models
# Create your models here.
# Dog Class with: name, breed, color, gender
class Dog(models.Model):
    """A dog record: name, breed, color and gender (all free-text fields)."""
    name = models.CharField(max_length=100)
    breed = models.CharField(max_length=100)
    color = models.CharField(max_length=100)
    gender = models.CharField(max_length=100)
# Account Model with: username, realname, accountnumber, balance
class Account(models.Model):
    """A bank-account record: user/real names, account number and balance."""
    userName = models.CharField(max_length=20)
    realName = models.CharField(max_length=100)
    accountNumber = models.IntegerField()
    # 19 significant digits, 10 of which are after the decimal point.
    balance = models.DecimalField(decimal_places=10, max_digits=19)
| 612 |
examples/data/read_web_data.py
|
leilin-research/Time-series-prediction
| 552 |
2169670
|
# -*- coding: utf-8 -*-
# @author: <NAME>, <EMAIL>
# @date: 2020-01
# This script will show an example using Kaggle data: https://www.kaggle.com/c/web-traffic-time-series-forecasting
import os
import numpy as np
import pandas as pd
import tensorflow as tf
seed = 2021
np.random.seed(seed=seed)
def log_transform(x, sequence_mean):
    """Shifted log transform: log(1 + x) minus the given log-space mean."""
    logged = np.log1p(x)
    return logged - sequence_mean
def sequence_mean(x, effective_length):
    """Mean of *x* against an explicit effective length (so padding is excluded)."""
    total = np.sum(x)
    return total / effective_length
class WebDataReader(object):
    """Reads the preprocessed Kaggle web-traffic numpy arrays and yields
    (encoder_feature, decoder_feature, decoder_target, is_nan_decoder_target)
    examples for one of the 'train' / 'val' / 'test' splits."""

    def __init__(self, data_dir, mode, train_test_ratio=0.9):
        # Column order matters: entries of self.data are indexed positionally below.
        data_cols = [
            'data',  # n_example * n_days
            'is_nan',
            'page_id',
            'project',
            'access',
            'agent',
            'test_data',
            'test_is_nan']
        self.data = [np.load(os.path.join(data_dir, '{}.npy'.format(i))) for i in data_cols]
        self.n_examples = self.data[0].shape[0]
        self.mode = mode
        if mode == 'test':
            self.idx = range(self.n_examples)
        elif mode == 'train':
            train_idx = np.random.choice(range(self.n_examples), int(train_test_ratio * self.n_examples),
                                         replace=False)  # set p if not equal weighted sample
            self.idx = train_idx
        elif mode == 'val':
            # Relies on the module-level np.random.seed so the same train split
            # is drawn here; its complement becomes the validation split.
            train_idx = np.random.choice(range(self.n_examples), int(train_test_ratio * self.n_examples),
                                         replace=False)  # must set fixed seed Todo: still need to check if leaks happened
            val_idx = np.setdiff1d(range(self.n_examples), train_idx)
            self.idx = val_idx
        else:
            raise ValueError('only train,test or val is valid mode')

    def __len__(self):
        # NOTE(review): returns the full example count, not len(self.idx),
        # even for the train/val subsets — confirm this is intended.
        return self.n_examples

    def __getitem__(self, idx):
        """Return the preprocessed features for example *idx*."""
        x = [dat[idx] for dat in self.data]
        return self.preprocess(x)

    def iter(self):
        """Generator over this split's examples (used by tf.data.Dataset.from_generator)."""
        for i in self.idx:
            yield self[i]

    def preprocess(self, x):
        # process the saved numpy to features, it's remommended for newbie like me to write the prepeocess here with numpy
        # otherwise, you can also write it in Tensorflow graph mode while tf.data.Dataset.map
        '''
        output: encoder_feature: [sequence_length, n_feature]
                decoder_feature: [predict_sequence_length, decoder_n_feature]
        '''
        data, nan_data, project, access, agent = x[0], x[1], x[3], x[4], x[5]
        max_encode_length = 530
        num_decode_steps = 64

        # Encoder input: a (randomly truncated in train/val) prefix of the series.
        x_encode = np.zeros(max_encode_length)  # x_encode: sequence_length
        is_nan_encode = np.zeros(max_encode_length)
        rand_len = np.random.randint(max_encode_length - 365 + 1, max_encode_length + 1)
        x_encode_len = max_encode_length if self.mode == 'test' else rand_len
        x_encode[:x_encode_len] = data[:x_encode_len]
        log_x_encode_mean = sequence_mean(x_encode, x_encode_len)
        log_x_encode = log_transform(x_encode, log_x_encode_mean)
        is_nan_encode[:x_encode_len] = nan_data[:x_encode_len]

        # One-hot encodings of the categorical page attributes.
        project_onehot = np.zeros(9)
        np.put(project_onehot, project, 1)
        access_onehot = np.zeros(3)
        np.put(access_onehot, access, 1)
        agent_onehot = np.zeros(2)
        np.put(agent_onehot, agent, 1)

        encoder_feature = np.concatenate([  # each item shape: [encode_steps, n_sub_feature]
            np.expand_dims(is_nan_encode, 1),
            np.expand_dims(np.equal(x_encode, 0.).astype(float), 1),
            np.tile(np.expand_dims(log_x_encode_mean, 0), [max_encode_length, 1]),
            np.tile(np.expand_dims(project_onehot, 0), [max_encode_length, 1]),
            np.tile(np.expand_dims(access_onehot, 0), [max_encode_length, 1]),
            np.tile(np.expand_dims(agent_onehot, 0), [max_encode_length, 1])
        ], axis=1)

        # decode feature
        decoder_feature = np.concatenate([  # each item shape: [decode_steps, n_sub_feature]
            np.eye(num_decode_steps),
            np.tile(np.expand_dims(log_x_encode_mean, 0), [num_decode_steps, 1]),
            np.tile(np.expand_dims(project_onehot, 0), [num_decode_steps, 1]),
            np.tile(np.expand_dims(access_onehot, 0), [num_decode_steps, 1]),
            np.tile(np.expand_dims(agent_onehot, 0), [num_decode_steps, 1])
        ], axis=1)

        # Decoder target: the window following the encoder prefix (zeros in test mode).
        decoder_target = np.zeros(num_decode_steps)
        is_nan_decoder_target = np.zeros(num_decode_steps)
        if not self.mode == 'test':
            decoder_target = data[x_encode_len:x_encode_len + num_decode_steps]
            is_nan_decoder_target = nan_data[x_encode_len:x_encode_len + num_decode_steps]

        output = encoder_feature, decoder_feature, decoder_target, is_nan_decoder_target
        return output  # encoder_feature, decoder_feature, decoder_targets
class DataLoader(object):
    """Callable factory wrapping WebDataReader in a batched, prefetched tf.data pipeline."""

    def __init__(self, ):
        pass

    def __call__(self, data_dir, mode, batch_size):
        reader = WebDataReader(data_dir, mode)
        pipeline = tf.data.Dataset.from_generator(
            reader.iter,
            output_types=(tf.float32, tf.float32, tf.float32, tf.float32))
        return pipeline.batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
if __name__ == '__main__':
    # train_data_reader=DataReader(data_dir='../data/processed',mode='train')
    # train_data_reader[0]
    # val_data_reader=DataReader(data_dir='../data/processed',mode='val')
    # print(len(val_data_reader.idx))
    # test_data_reader=DataReader(data_dir='../data/processed',mode='test')
    # print(len(test_data_reader.idx))
    # Smoke test: build the pipeline and print the shapes of one batch.
    data_loader = DataLoader()(data_dir='../data/processed', mode='train', batch_size=2)
    for encoder_feature, decoder_feature, decoder_target, is_nan_decoder_target in data_loader.take(1):
        print(encoder_feature.shape)
        print(decoder_feature.shape)
        print(decoder_target.shape)
        print(is_nan_decoder_target.shape)
| 6,026 |
miller-rabin3.py
|
GudniNatan/square_sum
| 0 |
2169229
|
import random, timeit
# Interactive parameters: the candidate number to test and the number of
# Miller-Rabin rounds (witnesses) to run.
n = int(input("test number: "))
k = int(input("accuracy level: "))
def MillerRabin(n, k=35):
    """Probabilistic Miller-Rabin primality test.

    Returns True if *n* is (probably) prime, False if composite. With *k*
    random witnesses the false-positive probability is at most 4**-k.

    Bug fixes vs. the original:
      * `d /= 2` produced a float, losing precision for large n; now `//=`.
      * `d` was destroyed by the first witness's exponentiation loop, so
        every later round computed a**0 == 1 and was skipped; pow(a, d, n)
        leaves d intact.
      * Small inputs (n < 4) crashed in random.randint; they are now handled.
    """
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0:
        return False

    # Write n - 1 = d * 2**r with d odd.
    r = 0
    d = n - 1
    while d % 2 == 0:
        d //= 2
        r += 1

    for _ in range(k):
        a = random.randint(2, n - 2)
        x = pow(a, d, n)  # built-in modular exponentiation
        if x == 1 or x == n - 1:
            continue
        for _ in range(r - 1):
            x = x * x % n
            if x == n - 1:
                break
        else:
            # No square reached n - 1: a witnesses that n is composite.
            return False
    return True
# Bug fix: timeit.timeit() with no arguments benchmarks an empty statement;
# the two calls were unrelated measurements, so end - start was meaningless
# (often negative). timeit.default_timer() is the proper wall clock here.
start = timeit.default_timer()
print(MillerRabin(n, k))
end = timeit.default_timer()
print(end - start)
| 755 |
rockpaperscissors.py
|
justCodeThings/rockpaperscissors
| 0 |
2167149
|
from sklearn.linear_model import LinearRegression
from pyautogui import pymsgbox
# Accumulated training history: each input is the player's last three moves
# (numeric codes) and each output the move that would have beaten them.
train_input = []
train_output = []
ai_victory = 0
user_victory = 0
rounds = 1
# Move <-> numeric-code mappings used by the regression model.
valid_input = {"Rock" : 1, "Paper" : 2, "Scissors" : 3}
valid_output = {1 : "Rock", 2 : "Paper", 3 : "Scissors"}
# victory_dict[move] is the move that beats `move`.
victory_dict = {"Rock" : "Paper", "Paper" : "Scissors", "Scissors" : "Rock"}
# Seeded 3-move history plus the "correct" counter for the last round.
last_response = "Rock"
secondTo_last_response = "Rock"
thirdTo_last_response = "Rock"
correct_aiResponse = "Paper"
predictor = LinearRegression(n_jobs=-1)
def run():
    """Play one round: prompt the player, predict with the model, score, retrain."""
    global last_response
    global secondTo_last_response
    global correct_aiResponse
    global thirdTo_last_response
    global user_victory
    global ai_victory
    global rounds
    # Get player's input
    user_response = pymsgbox.confirm(text="Rock Paper Scissors?", title="Rock Paper Scissors", buttons=("Rock", "Paper", "Scissors"))
    # Place player's input into last response (shift the 3-move history window)
    thirdTo_last_response = secondTo_last_response
    secondTo_last_response = last_response
    last_response = user_response
    # Get ai's prediction off of last 3 moves
    ai_responseRaw = logic(ConvertToInput(last_response), ConvertToInput(secondTo_last_response), ConvertToInput(thirdTo_last_response))
    ai_response = ConvertToOutput(ai_responseRaw)
    # Increment score based on who won
    victory_counter(user_response, ai_response)
    pymsgbox.alert(text="AI says: "+ai_response+"\n\nRound: "+str(rounds)+"\n\nScore:\nAI: "+str(ai_victory)+" Player: "+str(user_victory), title="Rock Paper Scissors")
    # Check ai's answer against the correct one
    correct_aiResponse = correct(user_response)
    # Train the model off of the last three moves
    train_input.append([ConvertToInput(last_response), ConvertToInput(secondTo_last_response), ConvertToInput(thirdTo_last_response)])
    train_output.append(ConvertToInput(correct_aiResponse))
    train_model(train_input, train_output)
    # Output round report to console
    print(f"\nround report: \nround: {rounds}\nai wins: {ai_victory}, player wins: {user_victory}\nlast response: {last_response}\nSecond to last response: {secondTo_last_response}\nThird to last response: {thirdTo_last_response}\nML training input (1 = Rock, 2 = Paper, 3 = Scissors): {train_input}\nML training output: {train_output}")
    # Winning cards: ends the game when either side reaches the threshold
    winning_cards(rounds, ai_victory, user_victory)
    rounds += 1
def train_model(x, y):
    """Refit the global regression model on the full move history."""
    global predictor
    predictor.fit(X=x, y=y)
def logic(data1, data2, data3):
    """Predict the counter-move code from the player's last three move codes."""
    global predictor
    history = [[data1, data2, data3]]
    return predictor.predict(X=history)
def ConvertToInput(data):
    """Map a move name ('Rock'/'Paper'/'Scissors') to its numeric code."""
    global valid_input
    key = str(data)
    return valid_input[key]
def ConvertToOutput(data):
    """Map a numeric prediction back to a move name, defaulting to 'Rock'.

    The model's raw prediction is a float, so it is truncated with int();
    out-of-range codes raise KeyError and non-numeric input TypeError/ValueError.
    """
    global valid_output
    try:
        return valid_output[int(data)]
    # Bug fix: narrowed the blanket `except Exception` to the failures this
    # lookup can actually produce, and dropped the stale "line 77" reference.
    except (KeyError, ValueError, TypeError) as e:
        print(f"ConvertToOutput: {data!r} is not a valid move code. Exception caught: {e}")
        return valid_output[1]
def correct(user):
    """Return the move that beats the player's move."""
    global victory_dict
    winning_move = victory_dict[user]
    return winning_move
def victory_counter(user, ai):
    """Update the global win tallies for one round; ties change nothing."""
    global victory_dict
    global ai_victory
    global user_victory
    if user == ai:
        return
    # victory_dict[user] is the move that beats the player; if the AI played
    # exactly that move it wins, otherwise the player does.
    if victory_dict[user] == ai:
        ai_victory += 1
    else:
        user_victory += 1
def winning_cards(rounds, ai_victory, user_victory):
    """End the game with a message box once either side reaches 7 wins."""
    score = 7
    header = ("Rounds " + str(rounds) + "\nYour score: " + str(user_victory)
              + "\nAI's score: " + str(ai_victory) + "\n")
    if user_victory == score and ai_victory <= score:
        pymsgbox.alert(text=header + "You outsmarted a machine designed to predict your every move with pinpoint accuracy. Color me impressed.", title="You Win!")
        exit()
    elif ai_victory == score and user_victory < score:
        pymsgbox.alert(text=header + "You lost. But don't feel bad. This machine is designed to learn to predict your every move with pinpoint accuracy. You hardly ever stood a chance.", title="You Lose!")
        exit()
# Seed the model with one initial (history -> counter-move) example so that
# predict() can be called on the very first round.
train_input.append([ConvertToInput(last_response), ConvertToInput(secondTo_last_response), ConvertToInput(thirdTo_last_response)])
train_output.append(ConvertToInput(correct_aiResponse))
train_model(train_input, train_output)
# Main loop: play rounds until winning_cards() exits or an error occurs.
while True:
    try:
        run()
    except Exception as e:
        print(f"\n{e}\nApplication terminating...")
        exit()
| 4,360 |
setup.py
|
carlmjohnson/django-js-values
| 0 |
2169196
|
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory.

    Bug fix: the file handle was never closed; the context manager releases
    it, and an explicit UTF-8 encoding makes the read platform-independent.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding="utf-8") as f:
        return f.read()
# Package metadata for django-js-values.
setup(
    name='django-js-values',
    version='0.0.1',
    author='<NAME>',
    url='https://github.com/carlmjohnson/django-js-values',
    license='MIT',
    description="Django template tag for safely including values in JS",
    long_description=read('README.md'),
    packages=find_packages(),
    include_package_data=True,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        # Bug fix: the classifier claimed BSD while license='MIT' above.
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Framework :: Django',
    ],
)
| 909 |
text/_elisp/frame/bar/tool.py
|
jedhsu/text
| 0 |
2169227
|
"""
Frame Tool Bar
"""
from dataclasses import dataclass
from ._bar import FrameBar
__all__ = [
"FrameToolBar",
]
@dataclass
class FrameToolBar(
    FrameBar,
):
    """Tool bar attached to a frame; currently a plain specialization of FrameBar."""
    pass
| 187 |
day_07/visualisation.py
|
Akarys42/aoc-2021
| 2 |
2169610
|
import matplotlib.pyplot as plt
import sys
import functools
with open("day_07/input.txt") as file:
INPUT = [int(n) for n in file.read().split(",")]
sys.setrecursionlimit(10 ** 6)
@functools.lru_cache(None)
def cost(n: int) -> int:
    """Part-2 fuel cost of moving *n* steps: 1 + 2 + ... + n (n-th triangular number).

    Uses the closed form instead of the original cost(n-1) + n recursion,
    which needed a raised recursion limit and one stack frame per step.
    """
    return n * (n + 1) // 2
# Score every candidate end position for both puzzle parts.
scores_1 = []
scores_2 = []
keys = []  # NOTE(review): collected but never used below
for end in range(min(INPUT), max(INPUT) + 1):
    keys.append(end)
    # Part 1 uses linear fuel cost; part 2 uses the triangular cost().
    scores_1.append(sum(abs(start - end) for start in INPUT))
    scores_2.append(sum(cost(abs(start - end)) for start in INPUT))

plt1 = plt.figure(1)
plt.plot(scores_1)
plt.xlabel("End position")
plt.ylabel("Score")
plt.title("Day 7 part 1")
plt.savefig("images/7-1.png")

plt2 = plt.figure(2)
plt.plot(scores_2)
plt.title("Day 7 part 2")
plt.savefig("images/7-2.png")
| 831 |
src/extract_fasttext_predicate_training_data.py
|
ag-sc/SimpleQA
| 2 |
2169360
|
import sys
from utils import load_questions
LABEL_PREFIX = "__label__"
def main(questions_filepath, output_filepath):
    """Write fastText supervised-training lines, one per question.

    Each output line has the form "__label__<predicate> <question text>".
    """
    questions = load_questions(questions_filepath)
    with open(output_filepath, "w") as fout:
        for question in questions:
            labeled_line = LABEL_PREFIX + question["predicate"] + " " + question["text"]
            fout.write(labeled_line + "\n")
if __name__ == "__main__":
params = sys.argv[1:]
questions_filepath = "../res/valid.txt"
output_filepath = "../res/valid_fasttext_predicates.txt"
main(questions_filepath, output_filepath)
| 751 |
openstack_dashboard/contrib/sahara/content/data_processing/wizard/tests.py
|
timpricecatalyst/horizon
| 1 |
2169320
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from openstack_dashboard.test import helpers as test
# Resolved URLs for the data-processing wizard pages under test.
INDEX_URL = reverse(
    'horizon:project:data_processing.wizard:index')
CLUSTER_GUIDE_URL = reverse(
    'horizon:project:data_processing.wizard:cluster_guide')
CLUSTER_GUIDE_RESET_URL = reverse(
    'horizon:project:data_processing.wizard:reset_cluster_guide',
    kwargs={"reset_cluster_guide": "true"})
JOB_GUIDE_URL = reverse(
    'horizon:project:data_processing.wizard:jobex_guide')
JOB_GUIDE_RESET_URL = reverse(
    'horizon:project:data_processing.wizard:reset_jobex_guide',
    kwargs={"reset_jobex_guide": "true"})
class DataProcessingClusterGuideTests(test.TestCase):
    """Smoke tests for the wizard pages and their reset redirects."""

    def test_index(self):
        """The landing page renders and lists the available guides."""
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(
            res, 'project/data_processing.wizard/wizard.html')
        self.assertContains(res, 'Data Processing Guides')
        self.assertContains(res, 'Cluster Creation Guide')

    def test_cluster_guide(self):
        """The cluster-creation guide page renders with its headings."""
        res = self.client.get(CLUSTER_GUIDE_URL)
        self.assertTemplateUsed(
            res, 'project/data_processing.wizard/cluster_guide.html')
        self.assertContains(res, 'Guided Cluster Creation')
        self.assertContains(res, 'Current choice')

    def test_cluster_guide_reset(self):
        """Resetting the cluster guide redirects back to the guide page."""
        res = self.client.get(CLUSTER_GUIDE_RESET_URL)
        self.assertRedirectsNoFollow(res, CLUSTER_GUIDE_URL)

    def test_jobex_guide(self):
        """The job-execution guide page renders."""
        res = self.client.get(JOB_GUIDE_URL)
        self.assertTemplateUsed(
            res, 'project/data_processing.wizard/jobex_guide.html')
        self.assertContains(res, 'Guided Job Execution')

    def test_jobex_guide_reset(self):
        """Resetting the job guide redirects back to the guide page."""
        res = self.client.get(JOB_GUIDE_RESET_URL)
        self.assertRedirectsNoFollow(res, JOB_GUIDE_URL)
| 2,344 |
WriteOptions.py
|
Wizmann/PyRocks
| 1 |
2169577
|
#coding=utf-8
import ctypes
from librocksdb import *
# ctypes bindings for creating/destroying a native rocksdb_writeoptions_t.
rocksdb_writeoptions_create = librocksdb.rocksdb_writeoptions_create
rocksdb_writeoptions_destroy = librocksdb.rocksdb_writeoptions_destroy
class WriteOptions(object):
    """Owns a native rocksdb write-options handle for the wrapper's lifetime."""
    def __init__(self):
        # Allocate the C-side options object; freed in __del__.
        self.writeoptions_internal = rocksdb_writeoptions_create()
    def __del__(self):
        rocksdb_writeoptions_destroy(self.writeoptions_internal)
    def to_rocksdb_internal(self):
        """Return the raw handle for passing to other librocksdb calls."""
        return self.writeoptions_internal
| 482 |
gui.py
|
mspraggs/IsingModel
| 0 |
2168949
|
import lattice
import pygame

# Initialise pygame and open the 640x640 window for the Ising-model display.
pygame.init()
fpsClock = pygame.time.Clock()  # frame-rate clock (not yet used in this chunk)
screen_size = (640,640)
window = pygame.display.set_mode(screen_size)
pygame.display.set_caption('Ising Model')
screen = pygame.display.get_surface()
| 227 |
functions2.py
|
xlesaux/astr-119-hw-2
| 0 |
2169699
|
import numpy as np
import sys #allows us to read command lines
#define a function that returns a value
def expo(x):
    """Return e raised to the power *x* (numpy exponential)."""
    result = np.exp(x)
    return result
#define a subroutine that does not return a value
def show_expo(n):
    """Print e**i for i = 0 .. n-1, one value per line."""
    for value in range(n):
        print(expo(float(value)))
#define a main function
def main():
    """Print e**i for the first n integers; n comes from argv (default 10)."""
    n = 10  # default when no command-line argument is supplied
    if len(sys.argv) > 1:
        n = int(sys.argv[1])
    print("n is equal to", n)
    show_expo(n)
# Run main() only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 647 |
snippets/jobs/lacdist.py
|
JLLeitschuh/TIPL
| 1 |
2165710
|
# jython script based on TIPL to calculate lacuna distances and volumes
import ch.psi.tomcat.tipl.VirtualAim as VA
import ch.psi.tomcat.tipl.kVoronoiShrink as KV
# Configure TIPL: scratch loading plus 6 worker cores for both classes.
VA.scratchLoading=True
KV.supportedCores=6
VA.supportedCores=6
# Load the lacuna and mask images, run the Voronoi tessellation of the
# lacunae within the mask, then export distance and volume maps.
lacun=VA('lacun.tif')
mask=VA('mask.tif')
vorn=KV(lacun,mask)
vorn.run()
lout=vorn.ExportDistanceAim(lacun)
lout.WriteAim('lacundist.tif')
lout=vorn.ExportVolumesAim(lacun)
lout.WriteAim('lacunvols.tif')
| 429 |
integration_tests/tx_staking.py
|
terra-money/terra.py
| 66 |
2169531
|
import base64
from terra_sdk.client.lcd import LCDClient
from terra_sdk.client.lcd.api.tx import CreateTxOptions
from terra_sdk.core import Coin, Coins
from terra_sdk.core.staking import (
CommissionRates,
Description,
MsgBeginRedelegate,
MsgCreateValidator,
MsgDelegate,
MsgEditValidator,
MsgUndelegate,
)
from terra_sdk.key.mnemonic import MnemonicKey
def main():
    """Exercise staking messages (delegate/redelegate/undelegate) on the pisco-1 testnet."""
    terra = LCDClient(
        url="https://pisco-lcd.terra.dev/",
        chain_id="pisco-1",
    )
    mk1 = MnemonicKey(
        mnemonic="nut praise glare false post crane clinic nothing happy effort loyal point parent few series task base maximum insect glass twice inflict tragic cancel"
    )
    # NOTE(review): mk2 is created but never used below.
    mk2 = MnemonicKey(
        mnemonic="invite tape senior armor tragic punch actor then patrol mother version impact floor begin fitness tool street lava evidence lemon oval width soda actual"
    )
    test1 = terra.wallet(mk1)
    validator1_address = "terravaloper1thuj2a8sgtxr7z3gr39egng3syqqwas4hmvvlg"
    validator2_address = "terravaloper1q33jd4t8788ckkq8u935wtxstjnphcsdne3gud"
    """
    msgCV = MsgCreateValidator(
        description=Description(moniker="testval_1"),
        commission=CommissionRates(rate="0.01", max_rate="0.1", max_change_rate="0.01"),
        min_self_delegation=1,
        delegator_address="terra1x46rqay4d3cssq8gxxvqz8xt6nwlz4td20k38v",
        validator_address="terravalcons1mgp3028ry5wf464r3s6gyptgmngrpnelhkuyvm",
        pubkey=ValConsPubKey(),
        value="10000000uluna"
    )
    tx = test1.create_and_sign_tx(CreateTxOptions(msgs=[msgCV]))
    result = terra.tx.broadcast(tx)
    print(f"RESULT:{result}")
    """
    # msgEV = MsgEditValidator(
    #     validator_address="",
    #     description=Description(moniker="testval_1"),
    #     commission=CommissionRates(rate="0.02", max_rate="0.1", max_change_rate="0.01"),
    #     min_self_delegation=1,
    # )
    # Delegate 100uluna to validator 1.
    msgDel = MsgDelegate(
        validator_address=validator1_address,
        delegator_address=test1.key.acc_address,
        amount="100uluna",
    )
    # Move 10uluna of that delegation from validator 1 to validator 2.
    msgRedel = MsgBeginRedelegate(
        validator_dst_address=validator2_address,
        validator_src_address=validator1_address,
        delegator_address=test1.key.acc_address,
        amount=Coin.parse("10uluna"),
    )
    # Undelegate 20uluna from validator 1.
    msgUndel = MsgUndelegate(
        validator_address=validator1_address,
        delegator_address=test1.key.acc_address,
        amount=Coin.parse("20uluna"),
    )
    """
    tx = test1.create_and_sign_tx(CreateTxOptions(msgs=[msgEV]))
    result = terra.tx.broadcast(tx)
    print(f"RESULT:{result}")
    """
    # Sign and broadcast each message in its own transaction.
    tx = test1.create_and_sign_tx(CreateTxOptions(msgs=[msgDel]))
    result = terra.tx.broadcast(tx)
    print(f"RESULT:{result}")
    tx = test1.create_and_sign_tx(CreateTxOptions(msgs=[msgRedel]))
    result = terra.tx.broadcast(tx)
    print(f"RESULT:{result}")
    tx = test1.create_and_sign_tx(CreateTxOptions(msgs=[msgUndel]))
    result = terra.tx.broadcast(tx)
    print(f"RESULT:{result}")
main()
| 3,037 |
src/composo_py/ioc.py
|
composo/composo-python-plugin
| 0 |
2168376
|
from datetime import datetime
from importlib.metadata import entry_points
from pathlib import Path
import dependency_injector.providers as providers
import dependency_injector.containers as containers
import requests
from appdirs import user_config_dir, user_cache_dir
from composo_py.input import InputInterface
from composo_py.licenses import SPDXLicensesGetter, LicenseService, LicenseServiceCached
from composo_py.plugin import ComposoPythonPlugin, ProjectName
from composo_py.files import AppPy, MainPy, IocPy, SetupPy, SetupCfg, ToxIni, PyProjectToml, ManifestIn
from composo_py.resources import CachedResourceGetter
from composo_py.system import DrySysInterface, RealSysInterface
import logging.config
from composo_py.templates.templates import LiquidTemplateRenderer
def fullname(o):
    """Return the fully-qualified class name of *o*, omitting the 'builtins.' prefix."""
    cls = o.__class__
    mod = cls.__module__
    if mod == 'builtins':
        # avoid outputs like 'builtins.str'
        return cls.__qualname__
    return f"{mod}.{cls.__qualname__}"
# dictConfig logging setup: stdout handlers at DEBUG/INFO plus two named
# loggers; everything else inherits the DEBUG root.
logging_conf = {
    "version": 1,
    "formatters": {
        "simple": {
            "format": '%(message)s'
        },
        "advanced": {
            "format": '%(levelname)-8s at %(pathname)s:%(lineno)d %(message)s'
        }
    },
    "handlers": {
        "debugging": {
            "class": "logging.StreamHandler",
            "level": "DEBUG",
            "formatter": "simple",
            "stream": "ext://sys.stdout",
        },
        "console": {
            "class": "logging.StreamHandler",
            "level": "INFO",
            "formatter": "simple",
            "stream": "ext://sys.stdout",
        }
    },
    "loggers": {
        "simpleExample": {
            "level": "DEBUG",
            "handlers": ["console"],
            "propagate": "no"
        },
        "InputInterface": {
            "level": "INFO",
            "handlers": ["console"],
            "propagate": "no"
        }
    },
    "root": {
        "level": "DEBUG",
        "handlers": ["console"]
    }
}
# Apply the configuration at import time.
logging.config.dictConfig(logging_conf)
# Fallback configuration used to seed the Config container below.
DEFAULT_CONFIG = {
    "conf_dir": user_config_dir("composo"),
    "cache_dir": user_cache_dir("composo"),
    "app": {
        # "flavour": {
        #     # "standalone": True,
        #     # "tool": True,
        #     # "plugin_system",
        #     # "plugin"
        # },
        # Name variants of the generated project.
        "name": {
            "class": "TestApp",
            "package": "test_app",
            "project": "test-app"
        },
    },
    "author": {
        "name": "<NAME>",
        "email": "<EMAIL>",
    },
    "vcs": {
        "git": {
            "github": {
                "name": "Arand"
            }
        }
    },
    # Stringly-typed flag consumed by get_dry_run()/the Selector below.
    "dry_run": "false"
}
# DEFAULT_CONFIG = {
#     "author": "<NAME>",
#     "github_name": "Arand",
#     "email": "<EMAIL>"
# }
class Config(containers.DeclarativeContainer):
    """DI container exposing the application configuration, seeded with DEFAULT_CONFIG."""
    config = providers.Configuration("config")
    config.from_dict(DEFAULT_CONFIG)
class Templates(containers.DeclarativeContainer):
    """DI container providing the template renderer."""
    template_renderer = providers.Factory(LiquidTemplateRenderer)
def generate_project_name(name, project_name_factory):
    """Build a dict of project/package/class name variants using the given factory."""
    naming = project_name_factory(name)
    return {
        "project": naming.project,
        "package": naming.package,
        "class": naming.cls,
    }
class Python(containers.DeclarativeContainer):
    """DI container with the Python-project name helper and file generators."""
    project_name_factory = providers.DelegatedCallable(generate_project_name,
                                                       project_name_factory=providers.DelegatedFactory(ProjectName))

    setup_cfg = providers.Factory(SetupCfg)
    tox_ini = providers.Factory(ToxIni)
    pyproject_toml = providers.Factory(PyProjectToml)
    manifest_in = providers.Factory(ManifestIn)

    # Aggregate lookup: verse("setup_cfg") etc. picks one factory above.
    verse = providers.FactoryAggregate(
        setup_cfg=setup_cfg,
        tox_ini=tox_ini,
        pyproject_toml=pyproject_toml,
        manifest_in=manifest_in
    )
def get_year():
    """Return the current calendar year as an int."""
    now = datetime.now()
    return now.year
def get_dry_run(dry_run: bool):
    """Normalize a truthy flag to the lowercase 'true'/'false' keys used by the Selector."""
    as_text = str(dry_run)
    return as_text.lower()
class System(containers.DeclarativeContainer):
    """DI container for system-facing services: sys/input interfaces, resources, licenses."""
    dry_sys_interface = providers.Factory(DrySysInterface)
    real_sys_interface = providers.Factory(RealSysInterface)
    dry_run_selection = providers.Callable(str)
    # Chooses the real or the dry-run interface from the 'dry_run' config value.
    sys_interface = providers.Selector(providers.Callable(get_dry_run, Config.config.dry_run),
                                       false=real_sys_interface,
                                       true=dry_sys_interface)
    input_interface = providers.Factory(InputInterface,
                                        _input=providers.DelegatedCallable(input),
                                        logger=providers.Callable(logging.getLogger, InputInterface.__name__))
    # HTTP resource getter that caches responses under the configured cache dir.
    resource_getter = providers.Factory(CachedResourceGetter,
                                        get_request=providers.DelegatedCallable(requests.get),
                                        sys_interface=sys_interface,
                                        cache_folder=Config.config.cache_dir,
                                        request_exception_type=requests.exceptions.ConnectionError
                                        )
    year = providers.Callable(get_year)
    # NOTE(review): licenses_getter is wired but not referenced elsewhere in this chunk.
    licenses_getter = providers.Factory(SPDXLicensesGetter,
                                        cache_folder=Config.config.cache_dir,
                                        sys_interface=sys_interface)
    license_service = providers.Factory(LicenseServiceCached,
                                        resource_getter=resource_getter,
                                        input_interface=input_interface)
class Plugin(containers.DeclarativeContainer):
    """DI container assembling the Composo Python plugin from the other containers."""
    plugin = providers.Factory(ComposoPythonPlugin,
                               template_renderer_factory=providers.FactoryDelegate(Templates.template_renderer),
                               project_name_factory=Python.project_name_factory,
                               verse=Python.verse,
                               year=System.year,
                               sys_interface=System.sys_interface,
                               input_interface=System.input_interface,
                               config=Config.config,
                               license_service=System.license_service,
                               resource_getter=System.resource_getter
                               )
| 6,332 |
INDXFind.py
|
williballenthin/INDXParse
| 131 |
2169850
|
#!/usr/bin/python
#
# Simple script that looks for INDX records at 4096 (decimal) byte boundaries on a raw disk.
# It saves the INDX records to a binary output file that can be parsed with INDXparse.py.
# Tested against Windows Server 2003
#
# Copyright 2015, <NAME> <<EMAIL>>
# while at Mandiant <http://www.mandiant.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# Usage banner.  NOTE(review): this whole script is Python 2 only
# (print statements, str-based file reads, .encode("hex")).
if sys.argv[1] == "-h":
    print "\tpython ./INDXfind.py <ewfmount'd drive>"
    print "\tex:\tpython ./INDXfind.py /mnt/ewf/ewf1"
    sys.exit()
f = open(sys.argv[1], 'rb') # ewfmount'd drive expected as first argument on command line
indxBytes = "494e445828000900" # 49 4e 44 58 28 00 09 00 "INDX( header"
offset = 0 # data processed
byteChunk="go" # cheap do-while
recordsFound = 0 # for progress
outFile = open("INDX_records.raw", 'wb') # output file
print "\n\tRunning... progress will output every GigaByte. In testing this was every 15-20 seconds.\n" \
      "\tThe output file is named \"INDX_records.raw\".\n" \
      "\tINDX_records.raw should be parsed with INDXparser.py which can be found at:\thttps://github.com/williballenthin/INDXParse\n"
# Scan the image in fixed 4096-byte steps; INDX records at any other
# alignment are intentionally missed (see header comment).
while byteChunk != "":
    byteChunk = f.read(4096)   # Only searching for cluster aligned (4096 on Windows Server 2003) INDX records... records all appear to be 4096 bytes
    compare = byteChunk[0:8]   # Compare INDX header to first 8 bytes of the byteChunk
    if compare.encode("hex") == indxBytes:
        recordsFound = recordsFound + 1
        outFile.write(byteChunk)   # Write the byteChunk to the output file
    offset = offset + 4096  # Update offset for progress tracking
    # Progress
    if offset % 1073741824 == 0:
        print "Processed: %d GB. INDX records found: %d" % ((offset / 1073741824), recordsFound)
# NOTE(review): the input handle `f` is never closed; harmless for a
# one-shot CLI, but worth a with-block if this is ever made importable.
outFile.close()
| 2,291 |
2981/solution.py
|
bossm0n5t3r/BOJ
| 2 |
2167958
|
import sys
def sol():
    """Read N integers and print every divisor (except 1) of the GCD of
    their pairwise differences, in ascending order."""
    read = sys.stdin.readline
    count = int(read())
    values = sorted(int(read()) for _ in range(count))
    common = values[1] - values[0]
    # Fold the GCD over consecutive differences of the sorted values.
    for left, right in zip(values[1:], values[2:]):
        common = gcd(common, right - left)
    print_gcd(common)
def gcd(x, y):
    """Greatest common divisor by the iterative Euclidean algorithm."""
    while y != 0:
        x, y = y, x % y
    return x
def print_gcd(GCD):
    """Print every divisor of GCD greater than 1, ascending, space separated."""
    small = []
    large = []
    d = 1
    # Collect divisor pairs (d, GCD // d) up to sqrt(GCD).
    while d * d <= GCD:
        if GCD % d == 0:
            small.append(str(d))
            if d * d != GCD:
                large.append(str(GCD // d))
        d += 1
    large.reverse()
    # small[1:] drops the divisor 1, which the problem excludes.
    print((" ".join(small[1:]) + " " + " ".join(large)).strip())
# Entry point for the judge: read stdin, compute, print.
if __name__ == "__main__":
    sol()
| 759 |
rpg_game_map.py
|
yahyaeldarieby/snakerun
| 0 |
2169243
|
# Name: <NAME>
# Class: CS30
# Date: 12/4/2019
# Description: Map and fruits and vegetable layout.
from tabulate import tabulate
# This is the board of the game. Empty strings mean empty cells on the board.
# Column 0 of each row holds the row label rendered by tabulate;
# columns 1-14 are the playable cells.
game_board = [
    ["1", "", "", "", "", "", "", "", "", "", "", "", "", "", ""],
    ["2", "", "", "", "", "", "", "", "", "", "", "", "", "", ""],
    ["3", "", "", "", "", "", "", "", "", "", "", "", "", "", ""],
    ["4", "", "", "", "", "", "", "", "", "", "", "", "", "", ""],
    ["5", "", "", "", "", "", "", "", "", "", "", "", "", "", ""],
]
# This variable is used by tabulate
# to print a header for the board of the game.
column_headers = ["row/col", "1", "2", "3", "4", "5", "6", "7", "8", "9",
                  "10", "11", "12", "13", "14"]
# This function draws the board of the game
def draw_map():
    """Print the game board as a grid with row/column headers."""
    rendered = tabulate(game_board, headers=column_headers, tablefmt="grid")
    print(rendered)
# This function adds fruits and obstacles to the board of the game.
# F is for fruits and O is for obstacles.
def add_fruits_obstacles():
    """Place one fruit ('f') and one obstacle ('o') on each board row."""
    for n in range(1, 6):
        game_board[n - 1][n] = "f"
        game_board[n - 1][n * 2] = "o"
| 1,269 |
exercise207.py
|
vchatchai/python101
| 0 |
2170085
|
# Map a numeric size onto a clothing band:
#   <37 XS, [37,41) S, [41,43) M, [43,46) L, >=46 XL.
size = float(input("Please insert size:"))
if size < 37.0:
    print("XS")
elif size < 41:
    print("S")
elif size < 43:
    print("M")
elif size < 46:
    print("L")
elif size >= 46:
    print("XL")
| 254 |
compose_deploy/_main.py
|
KitB/compose-deploy
| 0 |
2169760
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import subprocess
import sys
from compose import config
from compose.config import environment
import compose_deploy
from compose_deploy import remote
def _search_up(filename, stop_at_git=True):
    """Walk upward from the current directory looking for *filename*.

    Returns the first matching path.  Raises IOError at the filesystem
    root, or (with stop_at_git) at the first directory that contains a
    .git directory, whichever comes first.
    """
    prefix = ''
    while True:
        path = os.path.join(prefix, filename)
        if os.path.isfile(path):
            return path
        elif prefix == '/' or (os.path.isdir(os.path.join(prefix, '.git')) and
                               stop_at_git):
            # insisting that .git is a dir means we will traverse out of git
            # submodules, a behaviour I desire
            raise IOError('{} not found here or any directory above here'
                          .format(filename))
        # First hop turns '' into the absolute parent of the cwd; later
        # hops keep climbing until prefix reaches '/'.
        prefix = os.path.realpath(os.path.join(prefix, '..'))
def get_config(basedir, files):
    """ Returns the config object for the selected docker-compose.yml

    This is an instance of `compose.config.config.Config`.
    """
    env = environment.Environment.from_env_file(basedir)
    details = config.find(basedir, files, env)
    return config.load(details)
def parse_services_arg(config, arg_services):
    """Resolve command-line service names to their service dicts.

    Plain names select services; a ':'-prefixed name excludes one.  With
    no names (or only exclusions) the full service list is the starting
    set.  Unknown names raise ValueError.  Returns a dict mapping
    service name -> service dict.
    """
    all_services = [service['name'] for service in config.services]

    def get_service_dicts(service_names):
        # name -> dict for the selected subset, taken from the config.
        return {service_dict['name']: service_dict
                for service_dict in config.services
                if service_dict['name'] in service_names}

    if not arg_services:
        return get_service_dicts(all_services)
    added = []
    negated = []
    for service in arg_services:
        if service.startswith(':'):
            name = service[1:]
            negated.append(name)
        else:
            name = service
            added.append(name)
        if name not in all_services:
            raise ValueError('Service "{}" not defined'.format(name))
    # Only exclusions given -> start from everything; otherwise start
    # from the explicitly added services.
    services_out = list(added) if added else list(all_services)
    for name in negated:
        # BUGFIX: excluding a service that was not in the current
        # selection used to crash with list.remove's ValueError; treat
        # that case as a no-op instead.
        if name in services_out:
            services_out.remove(name)
    return get_service_dicts(services_out)
def _call(what, *args, **kwargs):
# If they can modify the docker-compose file then they can already gain
# root access without particular difficulty. "shell=True" is fine here.
return subprocess.check_output(what, *args, shell=True, **kwargs)
def _call_output(what, *args, **kwargs):
    """Run *what* through the shell, streaming its output to our stdout;
    returns the command's exit status."""
    return subprocess.call(what, *args, shell=True, stdout=sys.stdout,
                           stderr=subprocess.STDOUT, **kwargs)
def _get_version():
    """Version string used for image tags: output of `git describe --tags HEAD`."""
    return _call('git describe --tags HEAD')
def build(config, services):
    """ Builds images and tags them appropriately.

    Where "appropriately" means with the output of:
        git describe --tags HEAD
    and 'latest' as well (so the "latest" image for each will always be the
    most recently built)
    """
    # Only services with a 'build' section can be built locally.
    buildable = {name: svc for name, svc in services.iteritems()
                 if 'build' in svc}
    _call_output('docker-compose build {}'.format(' '.join(buildable.iterkeys())))
    version = _get_version()
    for name, svc in buildable.iteritems():
        # Tag with proper version, they're already tagged latest from build
        _call('docker tag {image}:latest {image}:{version}'.format(
            image=svc['image'],
            version=version
        )
        )
def push(config, services):
    """ Upload the defined services to their respective repositories.

    So's we can then tell the remote docker host to then pull and run them.
    """
    version = _get_version()
    for name, svc in services.iteritems():
        tags = {'image': svc['image'], 'version': version}
        _call_output('docker push {image}:latest'.format(**tags))
        _call_output('docker push {image}:{version}'.format(**tags))
def buildpush_main(command, args):
    """Shared driver for the `build` and `push` subcommands."""
    compose_file = os.path.abspath(_search_up(args.file[0]))
    basedir = os.path.dirname(compose_file)
    cfg = get_config(basedir, args.file)
    selected = parse_services_arg(cfg, args.services)
    # Dispatch to the handler matching the subcommand name.
    handlers = {'build': build, 'push': push}
    handlers[command](cfg, selected)
def remote_main(args):
    """Driver for the `remote` subcommand."""
    remote.remote(args.server, args.command)
def main():
    """Command-line entry point: `build`, `push` and `remote` subcommands."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--version', '-V', action='version',
        version='%(prog)s {}'.format(compose_deploy.__version__))
    # Options shared by build and push via the parents= mechanism.
    buildpush_parent = argparse.ArgumentParser(add_help=False)
    buildpush_parent.add_argument(
        '--file', '-f', nargs='+',
        default=['docker-compose.yml'],
        help='Same as the -f argument to docker-compose.')
    buildpush_parent.add_argument(
        'services', nargs='*',
        help='Which services to work on, all if empty')
    subparsers = parser.add_subparsers(dest='action')
    build_parser = subparsers.add_parser('build', parents=[buildpush_parent], # noqa
                                         help='Build and tag the images')
    remote_parser = subparsers.add_parser(
        'remote',
        help='Run a shell with a connection to a remote docker server')
    remote_parser.add_argument(
        'server',
        help='The remote docker server to connect to; '
             'Uses openssh underneath so setup ~/.ssh/config appropriately, '
             'needs passwordless login '
             '(e.g. via ssh-agent or passwordless key)')
    remote_parser.add_argument(
        '--command', '-c', default=None,
        help='Command to run in the opened shell (and then immediately exit)')
    push_parser = subparsers.add_parser( # noqa
        'push', parents=[buildpush_parent],
        help='Push the images to their repositories')
    args = parser.parse_args()
    # build/push share one driver; remote has its own.
    if args.action in ['build', 'push']:
        buildpush_main(args.action, args)
    elif args.action in ['remote']:
        remote_main(args)
if __name__ == '__main__':
    main()
| 6,201 |
manage.py
|
BreakUnrealGod/TornadoExercises
| 0 |
2167125
|
import settings
from application import Application
import tornado.ioloop
# Bootstrap: build the Tornado application, bind the configured port, and
# block on the IO loop until the process is stopped.
if __name__ == '__main__':
    app = Application()
    app.listen(settings.options['port'])
    tornado.ioloop.IOLoop.current().start()
| 219 |
src/main.py
|
casualkex/mintwatch
| 5 |
2169834
|
import asyncio
from config.config_loader import Config, load_config
from watcher import WatcherTask
async def run_tasks(config: Config):
    """Spawn one WatcherTask per configured service and wait on all of them."""
    coroutines = [WatcherTask(service_config, config, config.bot).run()
                  for service_config in config.service_configs]
    await asyncio.gather(*coroutines)
def main():
    """Load configuration and run every watcher task to completion."""
    config = load_config()
    # asyncio.run() supersedes the deprecated get_event_loop()/
    # run_until_complete() pattern and guarantees the loop is closed.
    asyncio.run(run_tasks(config))
if __name__ == "__main__":
    main()
| 513 |
run.py
|
HotelsDotCom/hapidoc-web
| 1 |
2169768
|
import logging
import os
from app import app
# Host/port come from the environment; a missing variable raises KeyError
# at import time, making misconfiguration fail fast.
web_host = os.environ['HAPIDOC_WEB_HOST']
# NOTE(review): the port stays a string here — confirm the web framework
# behind `app.run` accepts a non-int port.
web_port = os.environ['HAPIDOC_WEB_PORT']
def start_app():
    # Start the (blocking) development server on the configured interface.
    app.run(host=web_host, port=web_port)
if __name__ == "__main__":
    logs_dir = 'logs'
    log_filename = os.path.join(logs_dir, 'hapidocweb.log')
    # Configure the root logger before the app starts emitting records.
    logging.basicConfig(filename=log_filename, level=logging.INFO)
    start_app()
| 386 |
definitions.py
|
Quiet-Clicking-Sounds/StringCalculator
| 0 |
2170031
|
from pathlib import Path
# Project root = the directory containing this definitions module.
ROOT_DIR = Path(__file__).parent
# CSV of standard wire gauges shipped with the application.
WIRE_TYPE_CSV = ROOT_DIR / "interface/standard_wire_types.csv"
# Cache entries older than this many seconds are considered stale.
CACHE_MAX_AGE_SEC = 100
# Chromatic scale starting at A, with unicode sharp signs.
note_names = ('A', 'A♯', 'B', 'C', 'C♯', 'D', 'D♯', 'E', 'F', 'F♯', 'G', 'G♯')
# File-dialog type filters as (label, glob-pattern) pairs.
file_types = (
    ('json files', '*.json'),
    ('All files', '*.*')
)
| 297 |
lib/tools/subprocess_cub_tools.py
|
galena503/SCR
| 0 |
2169489
|
from lib.tools.tools_data import Tools_data
from lib.tools.tools import Tools
from conf.sub import sub
class Subprocess_sub_tools():
    """Helpers for selecting and launching SCR tasks in a subprocess."""
    # 優先度が高い順にマークなしの物を選ぶ
    # Pick the first unmarked task (pid == 0), highest priority first.
    def choose_task(self, SCRtasks):
        priority2 = None
        for task in SCRtasks:
            if task.priority == 3 and task.pid == 0:  # 優先度高
                return task
            if priority2 is None and task.priority == 2 and task.pid == 0:
                priority2 = task
        # 優先度中 — unless it carries the sentinel tid.
        # BUGFIX: the original referenced the medium-priority candidate
        # before it was guaranteed to exist (NameError when the list held
        # no unmarked priority-2 task).
        if priority2 is not None and priority2.tid != 1000000:
            return priority2
        for task in SCRtasks:
            if task.pid == 0:
                return task  # 優先度低
        # マークの無いタスクが無かったらプロセスを終了する
        exit()
    def mark_check(self, SCRtasks, now_pid):
        """Return the task marked with now_pid, or a sentinel dict if none."""
        for task in SCRtasks:
            if task.pid == now_pid:
                return task
        return {"tid": 1000000}
    def exec_task(self, task):
        """Build the interpreter command line for *task* and run it."""
        import subprocess  # not imported at module scope in this file
        command = []
        # NOTE(review): the original read `conf.interpreter` but no `conf`
        # name is in scope (the module imports `sub` from conf.sub);
        # using `sub.interpreter` here — confirm against the config module.
        comarg = sub.interpreter
        lang = task.details.lang
        command.append(lang)
        if not task.details.ability == "":
            command.append(task.ability + task.details.file_name + comarg[lang].extension)
        elif not task.details.f_ability == "":
            command.append(task.f_ability + task.details.file_name + comarg[lang].extension)
        if not task.details.data_key == "":
            command.append(comarg[lang].data_key)
            command.append(task.details.data_key)
        if not task.details.data_path == "":
            command.append(comarg[lang].data_path)
            command.append(task.details.data_path)
        # BUGFIX: str + list raised TypeError in the original; join for display.
        print('run : ' + ' '.join(command))
        subprocess.run(command)
| 1,621 |
multilstm_tensorpack/tensorpack/train/trainer.py
|
neale/A4C
| 1 |
2170006
|
# -*- coding: UTF-8 -*-
# File: trainer.py
# Author: <NAME> <<EMAIL>>
from .base import Trainer
from ..tfutils import TowerContext
from .input_data import FeedInput
__all__ = ['SimpleTrainer']
class SimpleTrainer(Trainer):
    """ A naive demo trainer which iterates over a DataFlow and feed into the
    graph. It's not efficient compared to QueueInputTrainer or others."""
    def __init__(self, config):
        """
        Args:
            config (TrainConfig): the training config.
        """
        super(SimpleTrainer, self).__init__(config)
        if config.dataflow is not None:
            self._input_method = FeedInput(config.dataflow)
        else:
            # No dataflow given: the config must carry a FeedInput directly.
            self._input_method = config.data
            assert isinstance(self._input_method, FeedInput), type(self._input_method)
    def run_step(self):
        """ Feed data into the graph and run the updates. """
        datapoint = self._input_method.next_feed()
        self.hooked_sess.run(self.train_op,
                             feed_dict=dict(zip(self.inputs, datapoint)))
    def _setup(self):
        self._input_method.setup_training(self)
        model = self.model
        self.inputs = model.get_reused_placehdrs()
        with TowerContext('', is_training=True):
            model.build_graph(self.inputs)
        # Minimize the model cost with the configured optimizer.
        self.train_op = self.config.optimizer.minimize(
            model.get_cost(), name='min_op')
Others/code_festival/code-thanks-festival-2015-open/b.py
|
KATO-Hiro/AtCoder
| 2 |
2169201
|
# -*- coding: utf-8 -*-
def main():
    """Count (and print, sorted) the values paired with c across rows a, b."""
    from itertools import product
    first_row = map(int, input().split())
    second_row = map(int, input().split())
    target = int(input())
    matches = set()
    for x, y in product(first_row, second_row):
        if x == target:
            matches.add(y)
        if y == target:
            matches.add(x)
    print(len(matches))
    for value in sorted(matches):
        print(value)
if __name__ == '__main__':
    main()
| 419 |
src/test/mazeOfBob.py
|
5agado/intro-ai
| 3 |
2169414
|
import os
import random
import pygame
import sys
from util import utils
from numpy import matrix
from bitstring import BitArray
from genetic_algorithm.population import Population
import math
from time import sleep
'''
Simple maze game.
The maze is encoded as a matrix of int
'''
square_l = 30
start = None
end = None
def readMaze(filename):
    """Load the maze matrix and record the global start (5) / end (8) cells."""
    global start, end
    filePath = os.path.join(utils.getResourcesPath(), filename)
    m = matrix(utils.readMatrix(filePath))
    rows, cols = m.shape
    # Scan every cell for the start/end markers.
    for y in range(rows):
        for x in range(cols):
            cell = m.item((y, x))
            if cell == 5:
                start = (y, x)
            elif cell == 8:
                end = (y, x)
    return m
def renderMaze(window, m, path=None):
    """Draw the walls (value 1) and optionally a red path, then flip the display."""
    window.fill((255, 255, 255))
    rows, cols = m.shape
    for y in range(rows):
        for x in range(cols):
            if m.item((y, x)) == 1:
                wall = pygame.Rect(x * square_l, y * square_l, square_l, square_l)
                pygame.draw.rect(window, (0, 0, 0), wall, 0)
    for pos in (path or []):
        cell = pygame.Rect(pos[1] * square_l, pos[0] * square_l, square_l, square_l)
        pygame.draw.rect(window, (255, 0, 0), cell, 0)
    pygame.display.update()
    pygame.time.delay(10)
def isValid(p, m):
    """A position is valid when inside the maze and not a wall (1) or start (5)."""
    rows, cols = m.shape
    inside = 0 <= p[0] < rows and 0 <= p[1] < cols
    return inside and m.item(p) not in (1, 5)
def walkChromo(genes, m):
    """Walk the maze from `start` following 2-bit moves; return (fitness, path).

    The walk stops at the first invalid step; fitness is the inverse
    Manhattan distance from the final position to `end`.
    """
    deltas = {
        '00': (0, 1),   # right
        '01': (0, -1),  # left
        '10': (-1, 0),  # up
        '11': (1, 0),   # down
    }
    pos = start
    path = []
    for move in genes:
        if move.bin not in deltas:
            raise Exception("No such move")
        dy, dx = deltas[move.bin]
        pos = (pos[0] + dy, pos[1] + dx)
        if not isValid(pos, m):
            #return 0, path
            break
        path.append(pos)
    if pos == end:
        dist = 0.0001
    else:
        dist = math.fabs(pos[0] - end[0]) + math.fabs(pos[1] - end[1])
    fitness = (1 / dist)  #- len(path)/150
    return fitness, path
#the genes are encoded as moves (R, L, U, D) in the maze
def initPopulation(p):
    """Seed every chromosome with random 2-bit move genes (R/L/U/D)."""
    codes = ('00', '01', '10', '11')
    for i in range(p.size):
        for j in range(p.chromoSize):
            p.chromos[i].genes[j] = BitArray("0b" + random.choice(codes))
def evolve(p, w, m):
    """Run 200 generations: score every chromosome, render the best path."""
    for _ in range(200):
        for chromo in p.chromos:
            chromo.fitness, _ = walkChromo(chromo.genes, m)
        best = p.getBestIndividuals()[0]
        fitness, bestPath = walkChromo(best.genes, m)
        print(fitness)
        renderMaze(w, m, bestPath)
        p.newGeneration()
def main():
    """Set up pygame, load the maze, and evolve a path with the GA."""
    pygame.init()
    maze = readMaze('mazes/9x15.txt')
    numR, numC = maze.shape
    window = pygame.display.set_mode((numC * square_l, numR * square_l))
    renderMaze(window, maze)
    # Monkey-patch the GA hooks onto Population before constructing it.
    Population.initPopulation = initPopulation
    Population.evolve = evolve
    p = Population(50, 30)
    p.mutation_rate = 0.1
    p.elites_num = 5
    p.initPopulation()
    p.evolve(window, maze)
    print("done")
    # Keep the window alive until the user closes it.
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit(0)
main()
| 3,640 |
SmartImplicitHub/TestDevices/FakeSensor.py
|
destman0/Smart-Implicit-Hub
| 2 |
2169714
|
#!/usr/bin/env python
import logging
import socket
import ifaddr
import sys
from time import sleep
import numpy as np
from pythonosc import udp_client
from zeroconf import ServiceInfo, Zeroconf
from typing import List
__copyright__ = "Copyright 2019, RISE Research Institutes of Sweden"
__author__ = "<NAME> and <NAME>"
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    if len(sys.argv) > 1:
        # Only supported flag: --debug also enables zeroconf's own logging.
        assert sys.argv[1:] == ['--debug']
        logging.getLogger('zeroconf').setLevel(logging.DEBUG)
    # Advertised TXT properties: one fake sensor exposing an OSC address
    # and its value range, encoded as "path:min%max".
    desc = {'sensor1': '/proximity:0%5'}
    # NOTE(review): IP and port are hard-coded; if this host's address is
    # not 192.168.11.158 the service will advertise a stale endpoint.
    info = ServiceInfo(type_="_osc._udp.local.",
                       name="FakeOSCPhone._osc._udp.local.",
                       address=socket.inet_aton("192.168.11.158"),
                       port=5005,
                       weight=0,
                       priority=0,
                       properties=desc,
                       server="FakeOSCPhone.local.")
    zeroconf = Zeroconf()
    print("Registration of a service PythonSensor")
    zeroconf.register_service(info)
    try:
        # Keep the process alive; mDNS answering runs on zeroconf threads.
        while True:
            sleep(0.1)
    except KeyboardInterrupt:
        pass
    finally:
        print("Unregistering...")
        zeroconf.unregister_service(info)
        zeroconf.close()
| 1,269 |
DjangoWordle/WordleColourResult/models.py
|
m-kowalewski/django-wordle
| 0 |
2169354
|
from django.db import models
from django.db import models
from django.utils import timezone
from django.contrib import admin
class WordleResult(models.Model):
    # One submitted Wordle result for a named user.
    user_name = models.CharField(max_length=100)
    result_date = models.DateTimeField('date solved')
    # Per-cell colour choices.
    ANSWERS = (
        ('G', 'Green'),
        ('Y', 'Yellow'),
        ('B', 'Black'),
    )
    # NOTE(review): everything from here to letter_table is class-level
    # scaffolding, not persisted model fields; the lists are shared by all
    # instances and table_try is mutated at class-definition time.
    table_width = 5
    table_height = 6
    table_try = [[0 for x in range(5)] for y in range(6)]
    table_try[3][3] = 1
    #table_field_try = [[models.CharField(max_length=1, choices=ANSWERS, default='B') for x in range(2)] for y in range(3)]
    simple_field = models.CharField(max_length=1, choices=ANSWERS, default='B')
    # NOTE(review): every row of simple_table aliases the same list and the
    # same field object.
    simple_row = []
    for x in range(3):
        simple_row.append(simple_field)
    simple_table = []
    for y in range(2):
        simple_table.append(simple_row)
    # 6 rows x 5 letters as a flat string, rows separated by 'E'.
    letter_table = models.CharField(max_length=35, default='BBBBBEBBBBBEBBBBBEBBBBBEBBBBBEBBBBB')
    table_text = 'there will be a table'
    answer = models.CharField(max_length=1, choices=ANSWERS, default='B')
    def __str__(self):
        return self.user_name
class GraphicTable(models.Model):
    # Placeholder model: one graphic table attached to a WordleResult.
    wordleresult = models.ForeignKey(WordleResult, on_delete=models.CASCADE)
    table = 'there will be a table'
| 1,175 |
icetray/resources/test/conditionalmodule_that_impls_process.py
|
hschwane/offline_production
| 1 |
2169940
|
#!/usr/bin/env python
from I3Tray import I3Tray
from icecube import icetray
from icecube import dataio
import sys
# One shared tray; BottomlessSource emits an endless stream of empty frames.
tray = I3Tray()
tray.AddModule("BottomlessSource", "bs")
def frameloader():
    """Build a tray module that stamps an incrementing 'counter' I3Int into
    every frame passing through."""
    state = {'count': 0}
    def impl(frame):
        frame['counter'] = icetray.I3Int(state['count'])
        state['count'] += 1
    return impl
tray.AddModule(frameloader(), "loader")
#
# Only keep 'tag' if counter is even
#
# NOTE(review): the comment above says "even", but the predicate actually
# strips everything except 'tag' when counter is divisible by 3.
tray.AddModule('Keep', 'k',
               If=lambda f: f['counter'].value % 3 == 0,
               Keys=['tag'])
def framecheck(frame):
    """Any frame still carrying 'counter' must not hold a multiple of 3;
    those frames had everything except 'tag' stripped by the Keep module."""
    print(frame)
    if 'counter' not in frame:
        return
    assert frame['counter'].value % 3 != 0
# Drive 100 frames through the chain; framecheck asserts the Keep semantics.
tray.AddModule(framecheck, 'check')
tray.Execute(100)
| 700 |
dqc/utils/pbc.py
|
Jaikinator/dqc
| 39 |
2168985
|
import torch
import numpy as np
import scipy.special
from typing import List
from dqc.hamilton.intor.lcintwrap import LibcintWrapper
# functions usually used for pbc
# typically helper functions are listed within the same file, but if they are
# used in a multiple files, then it should be put under the dqc.utils folder
def unweighted_coul_ft(gvgrids: torch.Tensor) -> torch.Tensor:
    # Unweighted Fourier transform of the Coulomb kernel: 4*pi/|gv|^2,
    # with the |gv| ~ 0 entries mapped to 0 instead of diverging.
    # gvgrids: (ngv, ndim) -> returns: (ngv,)
    sq_norm = (gvgrids * gvgrids).sum(dim=-1)
    sq_norm[sq_norm < 1e-12] = float("inf")
    return 4 * np.pi / sq_norm
def estimate_ovlp_rcut(precision: float, coeffs: torch.Tensor, alphas: torch.Tensor) -> float:
    # Estimate the real-space cutoff for the lattice sum to reach the given
    # precision, based on the overlap-integral magnitude.  Two fixed-point
    # refinements from a 20-bohr starting radius.
    langmom = 1
    envelope = (coeffs * coeffs + 1e-200) * (2 * langmom + 1) * alphas / precision
    radius = torch.tensor(20.0, dtype=coeffs.dtype, device=coeffs.device)
    for _ in range(2):
        radius = torch.sqrt(
            2.0 * torch.log(envelope * (radius * radius * alphas) ** (langmom + 1) + 1.) / alphas)
    return float(torch.max(radius).detach())
def estimate_g_cutoff(precision: float, coeffs: torch.Tensor, alphas: torch.Tensor) -> float:
    # G-point cutoff estimate on a cubic lattice; adapted from pyscf's
    # _estimate_ke_cutoff:
    # https://github.com/pyscf/pyscf/blob/c9aa2be600d75a97410c3203abf35046af8ca615/pyscf/pbc/gto/cell.py#L498
    langmom = 1
    log_k0 = 3 + torch.log(alphas) / 2
    l2fac2 = scipy.special.factorial2(langmom * 2 + 1)
    log_rest = torch.log(
        precision * l2fac2 ** 2 * (4 * alphas) ** (langmom * 2 + 1)
        / (128 * np.pi ** 4 * coeffs ** 4))
    ecut = 2 * alphas * (log_k0 * (4 * langmom + 3) - log_rest)
    ecut[ecut <= 0] = .5
    # One refinement pass with the kinetic-energy-based wavevector.
    log_k0 = .5 * torch.log(ecut * 2)
    ecut = 2 * alphas * (log_k0 * (4 * langmom + 3) - log_rest)
    ecut[ecut <= 0] = .5
    # KE ~ 1/2 * g^2  =>  g = sqrt(2 * E)
    return (2 * float(torch.max(ecut).detach())) ** 0.5
def get_gcut(precision: float, wrappers: List[LibcintWrapper], reduce: str = "min") -> float:
    # Combine the per-wrapper G-point cutoffs for the FT eval/integration.
    # TODO: using params here can be confusing because wrapper.params
    # returns all parameters (even if it is a subset)
    cutoffs: List[float] = []
    for wrapper in wrappers:
        coeffs, alphas, _ = wrapper.params
        cutoffs.append(estimate_g_cutoff(precision, coeffs, alphas))
    # A single wrapper short-circuits regardless of the reduce mode.
    if len(cutoffs) == 1:
        return cutoffs[0]
    if reduce == "min":
        return min(cutoffs)
    elif reduce == "max":
        return max(cutoffs)
    raise ValueError("Unknown reduce: %s" % reduce)
| 2,896 |
Space_Invaders/classes/Game/Sprites/ImageObject.py
|
Jh123x/Orbital
| 4 |
2169847
|
import pygame
from . import BaseObject
class ImageObject(BaseObject):
    """Base class for on-screen objects that may carry a pygame image.

    Falls back to a bare (width x height) rectangle when no image is given.
    """
    def __init__(self, initial_x: int, initial_y: int, width: int, height: int, image=None, debug: bool = False):
        """Main class for all objects with images"""
        # Call superclass (stores position and the debug flag)
        super().__init__(initial_x, initial_y, debug)
        # Fallback dimensions, used whenever self.image is None
        self.width = width
        self.height = height
        # The image may legitimately be None (placeholder / invisible object)
        self.image = image
        self.rect = None
        # Build the initial bounding rectangle
        self.load_rect()
    def draw(self, screen) -> None:
        """Draw the object onto the screen at its current rect"""
        screen.blit(self.image, self.rect)
    def load_rect(self) -> None:
        """(Re)build the bounding rectangle for the object"""
        if self.image:
            # Rectangle anchored at the image's own rect, sized to the object
            self.rect = pygame.Rect(self.image.get_rect().left, self.image.get_rect().top, self.get_width(),
                                    self.get_height())
        else:
            # No image: derive the rect from position and fallback size
            self.rect = pygame.Rect(self.get_x(), self.get_y(), self.get_width(), self.get_height())
            if self.debug:
                print("No image found")
        # BUGFIX: Rect.inflate() returns a NEW rect and leaves the original
        # untouched; the original code discarded that result, so the rect
        # was never actually grown.  inflate_ip() grows it in place.
        self.rect.inflate_ip(self.get_width() // 2, self.get_height() // 2)
        # Centre the rect on the object's coordinates
        self.rect.center = (self.x, self.y)
    def set_coord(self, position):
        """Set the coordinate of the moving object and rebuild its rect"""
        super().set_coord(position)
        self.load_rect()
    def get_center(self) -> tuple:
        """Get the coordinate of the center of the object"""
        return self.rect.center
    def rotate(self, angle: int) -> None:
        """Rotate the image by `angle` degrees (no-op without an image)"""
        if self.image:
            self.image = pygame.transform.rotate(self.image, angle)
            self.load_rect()
        else:
            if self.debug:
                print("There is no image to rotate")
    def update(self) -> None:
        """Rebuild the rect if the object's coordinates changed"""
        if self.changed:
            self.load_rect()
            self.changed = False
    def scale(self, width: int, height: int) -> None:
        """Scale the image to (width, height) (no-op without an image)"""
        if self.image:
            self.image = pygame.transform.scale(self.image, (width, height))
            self.load_rect()
        else:
            if self.debug:
                print("No image to scale")
    def get_height(self) -> int:
        """Height of the image, or the fallback height when there is none"""
        # FIX: was annotated -> None although an int is returned
        return self.image.get_height() if self.image else self.height
    def get_width(self) -> int:
        """Width of the image, or the fallback width when there is none"""
        # FIX: was annotated -> None although an int is returned
        return self.image.get_width() if self.image else self.width
| 3,406 |
server/src/weaverbird/pipeline/steps/rename.py
|
JeremyJacquemont/weaverbird
| 54 |
2168842
|
from typing import List, Tuple, Union
from pydantic import Field, root_validator
from weaverbird.pipeline.steps.utils.base import BaseStep
from weaverbird.pipeline.steps.utils.render_variables import StepWithVariablesMixin
from weaverbird.pipeline.types import TemplatedVariable
class RenameStep(BaseStep):
    # Discriminator for this pipeline step type.
    name = Field('rename', const=True)
    # Column renames as (old_name, new_name) pairs; JSON payloads use 'toRename'.
    to_rename: List[Tuple[str, str]] = Field(..., alias='toRename')
    @root_validator(pre=True)
    def handle_legacy_syntax(cls, values):
        """Rewrite the legacy single-pair {oldname, newname} payload into
        the list-based to_rename form before field validation runs.

        NOTE(review): this writes the field-name key 'to_rename' while the
        declared alias is 'toRename' — confirm population by field name is
        enabled in BaseStep's model config.
        """
        if 'oldname' in values and 'newname' in values:
            values['to_rename'] = [(values.pop('oldname'), values.pop('newname'))]
        return values
class RenameStepWithVariable(RenameStep, StepWithVariablesMixin):
    # Same shape as RenameStep.to_rename, except the whole value (or any
    # element of a pair) may be a templated variable rendered before execution.
    to_rename: Union[TemplatedVariable, List[Tuple[TemplatedVariable, TemplatedVariable]]] = Field(
        ..., alias='toRename'
    )
| 857 |
appdsa/calculator2.py
|
apetcho/AppliedDSA
| 0 |
2169453
|
"""An implementation of simple arithmetic calculator using a grammar
based on BNF (Backus-Naur-Form) notation.
The grammar is:
expr ::= expr1 "+" expr1 |
expr1 "-" expr1 |
expr1
expr1 ::= expr2 "*" expr2 |
expr2 "/" expr2 |
expr2
expr2 ::= "-" expr3 |
expr3
expr3 ::= expr4 "^" expr2 |
expr4
expr4 ::= <natural> |
"(" expr ")"
natural ::= ( '0' | '1' | '2' | '3' | '4'| '5' | '6' | '7' | '8' | '9' )*
"""
from typing import Union
from .rational import Rational
class Expression:
    """Expression to be evaluated.

    Recursive-descent evaluator over a "{ ... }"-wrapped source string,
    following the module-level BNF grammar.  Errors are recorded as codes
    rather than raised: 1/2 missing braces, 7 division by zero,
    8 invalid exponent, 9/10 parenthesis problems.
    """
    def __init__(self, src):
        self._src = src
        self._length = len(src)
        self._index = 0   # cursor into the source string
        self._error = 0   # first error code seen (0 = none)
        self._value = 0
    def __str__(self) -> str:
        # Source text with all whitespace removed.
        retval = ""
        for ch in self._src:
            if not str.isspace(ch):
                retval += ch
        return retval
    __repr__ = __str__
    def error(self, err: int) -> None:
        # Only the FIRST error code is kept; later ones are ignored.
        if self._error == 0:
            self._error = err
    def error_exists(self) -> bool:
        if self._error == 0:
            return False
        return True
    def noerror(self) -> bool:
        return not self.error_exists()
    def value(self) -> Union[float, int, Rational]:
        # Each call re-runs evaluate() from the current cursor position.
        self.evaluate()
        return self._value
    def evaluate(self) -> None:
        # Overall form must be "{ expr }".
        self._ch = self.next()
        if self._ch == "{":
            self._ch = self.next()
            self._value = self.expr()
            self._ch = self.next()
            if self._ch != "}":
                self.error(2) # missing one "}"
        else:
            self.error(1) # missing one "}"
    def next(self) -> str:
        # Consume and return the next non-space character ('\0' at the end).
        while self._index < self._length:
            token = self._src[self._index]
            self._index += 1
            if str.isspace(token):
                continue
            else:
                return token
        return '\0'
    def next_read(self) -> str:
        # NOTE(review): although used as a lookahead, this CONSUMES the
        # character it returns, and skips one extra character after a space.
        # Likely the "hidden bug" flagged by the TODO at module bottom.
        while self._index < self._length:
            token = self._src[self._index]
            self._index += 1
            if str.isspace(token):
                self._index += 1
            else:
                return token
        return '\0'
    def is_next(self, token) -> bool:
        # Consuming comparison — see the caveat on next_read above.
        if self.next_read() == token:
            return True
        return False
    def expr(self) -> Union[float, int, Rational]:
        """expr ::= expr1 "+" expr1 | expr1 "-" expr1 | expr1 """
        if self.error_exists():
            return 0
        term = self.expr1()
        while self.is_next("+") or self.is_next("-"):
            self._ch = self.next()
            if self._ch == "+":
                self._ch = self.next()
                term += self.expr1()
            else:
                if self._ch == "-":
                    self._ch = self.next()
                    term -= self.expr1()
        return term
    def expr1(self) -> Union[float, int, Rational]:
        """expr1 := expr2 "*" expr2 | expr2 "/" expr2 | expr2

        ':' is accepted as a division synonym alongside '/'.
        """
        if self.error_exists():
            return 0
        term: Rational = self.expr2()
        while self.is_next("*") or self.is_next("/") or self.is_next(":"):
            self._ch = self.next()
            if self._ch == "*":
                self._ch = self.next()
                term *= self.expr2()
            else:
                if self._ch == "/" or self._ch == ":":
                    self._ch = self.next()
                    term /= self.expr2()
                    if not term.valid:
                        self.error(7) # division by zero
        return term
    def expr2(self) -> Union[float, int, Rational]:
        """expr2 := '-' expr3 | expr3 """
        if self.error_exists():
            return 0
        # Fold consecutive unary minus signs into a single negate flag.
        negate = False
        while self._ch == "-":
            negate = not negate
            self._ch = self.next()
        term = self.expr3()
        if negate:
            return -term
        return term
    def expr3(self) -> Union[float, int, Rational]:
        """expr3 ::= expr4 "^" expr2 | expr4

        Exponentiation recurses into expr2, making '^' right-associative.
        """
        if self.error_exists():
            return 0
        term = self.expr4()
        if self.is_next("^"):
            self._ch = self.next()
            self._ch = self.next()
            k: Rational = self.expr2()
            if not k.valid:
                self.error(8) # invalid exponent
            term = term ** k
        return term
    def expr4(self) -> Union[float, int, Rational]:
        """expr4 ::= <natural> | "(" expr ")" """
        if self.error_exists():
            return 0
        if str.isdigit(self._ch):
            term = self.natural()
            # Literals are wrapped into the project's Rational type.
            return Rational(int(term))
        if self._ch == "(":
            self._ch = self.next()
            term = self.expr()
            self._ch = self.next()
            if self._ch == ")":
                return term
            else:
                self.error(10) # missing ")"
        else:
            self.error(9) # missing "("
        return 0
    def natural(self) -> int:
        """natural ::= "(" '0'|'1'|'2'|'3'|'4'|'5'|'6'|'7'|'8'|'9' ")" """
        # NOTE(review): mixes next() and next_read() cursor advances; see
        # the consumption caveat on next_read.
        n = int(self._ch)
        x = self.next_read()
        while str.isdigit(x):
            n = n * 10 + int(x)
            self._ch = self.next()
            x = self.next_read()
        return n
class Application:
    """Tiny console front-end around Expression: canned demos plus a REPL."""
    def __init__(self):
        self._expr = None
    def demos(self):
        """Evaluate a fixed set of example expressions, printing each result."""
        samples = (
            "{ (3 + 4) * 5 ^ (1 + 1) - 7 }",
            "{ 2 ^ 3 ^ 2 ^ 2 }",
            "{ ( 9 + 1 ) * ( 7 + 2 * 5 ) }",
            "{ 3/7 - 2/7 : (5 : 14) }",
        )
        for number, source in enumerate(samples, start=1):
            self._expr = Expression(source)
            print("----------")
            print(f"Example #{number}")
            print("----------")
            print(f"Expression ...... : {self._expr}")
            print(f"Result .......... : {self._expr.value()}")
    def _read_expr(self) -> str:
        print("------------------------------------------------------")
        print("Next calculation (leave empty and press ENTER to quit)")
        print("------------------------------------------------------")
        return input("\x1b[32mcalc2>>\x1b[0m ")
    def _mainloop(self) -> None:
        # Read-eval-print until the user submits an empty line.
        line = self._read_expr()
        while len(line) > 0:
            self._expr = Expression("{" + line + "}")
            print(f"\x1b[31mresult =\x1b[0m {self._expr.value()}")
            line = self._read_expr()
    def __call__(self):
        self._mainloop()
def main():
    """Build the calculator application and enter its REPL."""
    Application()()
# TODO: Check the validity of the code: some hidden bug is in here
| 7,168 |
crdb/tests/test_person.py
|
jsayles/CoworkingDB
| 1 |
2168406
|
from django.test import TestCase
from crdb.models import Person, EmailAddress, SiteType
class PersonTestCase(TestCase):
    """Exercises Person's email management and per-site URL bookkeeping."""
    def setUp(self):
        # One known user; extra fixtures are left disabled.
        Person.objects.create(username="person_one", email="<EMAIL>")
        # Person.objects.create(username="person_two", email="<EMAIL>")
        # Person.objects.create(username="person_three", email="<EMAIL>")
    def test_by_email(self):
        # by_email() must resolve to the same row as a username lookup.
        person_one = Person.objects.by_email("<EMAIL>")
        self.assertIsNotNone(person_one)
        other_one = Person.objects.get(username='person_one')
        self.assertEqual(person_one, other_one)
    def test_add_email(self):
        new_email = "<EMAIL>"
        person_one = Person.objects.get(username='person_one')
        email_count = EmailAddress.objects.filter(person=person_one).count()
        self.assertEqual(email_count, 1)
        # Can not pull user_one with new email
        new_one = Person.objects.by_email(new_email)
        self.assertIsNone(new_one)
        # Add new email
        person_one.add_email(new_email)
        email_count = EmailAddress.objects.filter(person=person_one).count()
        self.assertEqual(email_count, 2)
        # Assume we can get this user by email now
        new_one = Person.objects.by_email(new_email)
        self.assertIsNotNone(new_one)
        self.assertEqual(person_one, new_one)
    def test_add_primary_email(self):
        new_email = "<EMAIL>"
        person_one = Person.objects.get(username='person_one')
        # Add new primary email
        person_one.add_email(new_email, primary=True)
        email_count = EmailAddress.objects.filter(person=person_one).count()
        self.assertEqual(email_count, 2)
        # Assume Person.email is now our new email
        new_one = Person.objects.filter(email=new_email).first()
        self.assertIsNotNone(new_one)
        self.assertEqual(person_one, new_one)
        # Assume our new model is properly set as primary
        email_address = EmailAddress.objects.get(email=new_email)
        self.assertTrue(email_address.is_primary)
    def test_save_url(self):
        person_one = Person.objects.get(username='person_one')
        website_one = "https://github.com/person_one"
        website_two = "https://github.com/p1"
        self.assertEqual(0, person_one.websites.count())
        # Save a Github URL and test it was saved
        person_one.save_url(SiteType.GITHUB, website_one)
        self.assertEqual(1, person_one.websites.count())
        self.assertEqual(website_one, person_one.websites.first().url)
        # Save another Github URL and assume it overwrites the first one
        person_one.save_url(SiteType.GITHUB, website_two)
        self.assertEqual(1, person_one.websites.count())
        self.assertEqual(website_two, person_one.websites.first().url)
| 2,816 |
odoo/base-addons/hr_org_chart/tests/test_employee_deletion.py
|
LucasBorges-Santos/docker-odoo
| 0 |
2169828
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import Form, tagged, TransactionCase
from odoo.exceptions import MissingError
@tagged('post_install')
class TestEmployeeDeletion(TransactionCase):
    """Regression test: editing an employee form must never delete the record."""

    def test_employee_deletion(self):
        # Tests an issue with the form view where the employee could be deleted
        # Two employees, each managing a department; A is additionally its own
        # parent and coach inside department A to reproduce the reported setup.
        employee_a, employee_b = self.env['hr.employee'].create([
            {
                'name': 'A',
            },
            {
                'name': 'B',
            },
        ])
        department_a, department_b = self.env['hr.department'].create([
            {
                'name': 'DEP A',
                'manager_id': employee_a.id,
            },
            {
                'name': 'DEP B',
                'manager_id': employee_b.id,
            },
        ])
        employee_a.write({
            'parent_id': employee_a.id,
            'coach_id': employee_a.id,
            'department_id': department_a.id,
        })
        # Switching the department through the form used to cascade into a
        # record deletion; a MissingError here would signal the bug returned.
        try:
            with Form(employee_a) as form:
                form.department_id = department_b
        except MissingError:
            self.fail('The employee should not have been deleted')
| 1,262 |
bot/conversation/talk/conv_talk.py
|
IgV52/bot
| 0 |
2168457
|
from telegram.ext import ConversationHandler, Filters, MessageHandler
from bot.conversation.talk.dialogue import talk,section,quest,theme,back
from bot.conversation.talk.dialogue import dialogue_dontknow
# Conversation flow for the survey feature: entered on the "Опрос" keyword,
# then walks section -> theme -> quest states; "Назад" steps back one level.
conv_talk = ConversationHandler(
    entry_points=[MessageHandler(Filters.regex('^(Опрос)$'), talk)],
    states={
        # Each text state first checks for the "back" keyword, then treats
        # any other text as the state's answer.
        'section': [MessageHandler(Filters.regex('^(Назад)$'), back),
                    MessageHandler(Filters.text, section)],
        'theme' : [MessageHandler(Filters.regex('^(Назад)$'), back),
                   MessageHandler(Filters.text, theme)],
        'quest': [MessageHandler(Filters.text, quest)]
    },
    # Non-text updates (media / location) get the "don't know" reply.
    fallbacks=[MessageHandler(Filters.video | Filters.photo | Filters.document
                              | Filters.location, dialogue_dontknow)],
    map_to_parent={'select': 'select'})
| 872 |
tests/test_ext.py
|
raphaelrpl/lccs-db
| 6 |
2166127
|
#
# This file is part of Land Cover Classification System Database Model.
# Copyright (C) 2019-2020 INPE.
#
# Land Cover Classification System Database Model is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Unit-test for extension LCCS-DB."""
from lccs_db import LCCSDatabase
def test_ext_creation(app):
    # Instantiating the extension must register itself on the Flask app
    # under the 'lccs-db' key.
    ext = LCCSDatabase(app)
    assert app.extensions['lccs-db'] == ext
| 468 |
vlivepy/__init__.py
|
Jwright707/vlive-py
| 0 |
2169391
|
# -*- coding: utf-8 -*-
from . import api
from . import model
from . import utils
from . import variables as gv
from .api import *
from .model import Video, Upcoming
| 167 |
application/caches/test/test_cache_manager.py
|
andrew749/andrew749.github.io
| 0 |
2168876
|
import unittest
import six
if six.PY2:
from mock import Mock
else:
from unittest.mock import Mock
from application.caches.cache_manager import CacheManager
class TestCacheManager(unittest.TestCase):
    """Exercises CacheManager fan-out across one and two backing caches."""

    def setUp(self):
        # Fresh manager per test so registered caches never leak between cases.
        self.cache_manager = CacheManager()

    def tearDown(self):
        pass

    def test_addElementSingleCache(self):
        # A single backing cache serves the value and receives the lookup.
        backing = Mock()
        backing.get = Mock(return_value="test.value")
        self.cache_manager.add_cache(backing)
        self.cache_manager.add("test.key", "test.value")
        self.assertEqual(self.cache_manager.get("test.key"), "test.value")
        backing.get.assert_called_with("test.key")

    def test_addElementMultiCache(self):
        # First cache misses (returns None); the second cache must be consulted.
        first = Mock()
        second = Mock()
        first.get = Mock(return_value=None)
        second.get = Mock(return_value="test.value")
        for backing in (first, second):
            self.cache_manager.add_cache(backing)
        self.cache_manager.add("test.key", "test.value")
        self.assertEqual(self.cache_manager.get("test.key"), "test.value")
        first.get.assert_called_with("test.key")
        second.get.assert_called_with("test.key")
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 1,088 |
examples/pacman/main.py
|
tailhook/pyzza
| 2 |
2169775
|
from layout import TopLevel, RoundRect, Widget, Layout, State, Rel, Constraint
from string import repr
from flash.display import Shape
from game import Frame, Keys
level = """
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
Xg....................................gX
X.XX.XXXXX.XX.XXX.XXXX.XXX.XX.XXXXX.XX.X
X.XX.XXXXX.XX.XXX.XXXX.XXX.XX.XXXXX.XX.X
X.XX.XXXXX.XX.XXX.XXXX.XXX.XX.XXXXX.XX.X
X.XX.......XX.XXX.XXXX.XXX.XX.......XX.X
X.XX.X.XXX.XX.XXX......XXX.XX.XXX.X.XX.X
X.XX.X.XXX.XX.XXX.XXXX.XXX.XX.XXX.X.XX.X
X.XX.X.XXX.XX.XXX.XXXX.XXX.XX.XXX.X.XX.X
X......................................X
X.XX.X.XXX.XX.XXX.XXXX.XXX.XX.XXX.X.XX.X
X.XX.X.XXX.XX.XXX.XXXX.XXX.XX.XXX.X.XX.X
X.XX.X.XXX.XX.XXX.c....XXX.XX.XXX.X.XX.X
X.XX.......XX.XXX.XXXX.XXX.XX.......XX.X
X.XX.XXXXX.XX.XXX.XXXX.XXX.XX.XXXXX.XX.X
X.XX.XXXXX.XX.XXX.XXXX.XXX.XX.XXXXX.XX.X
X.XX.XXXXX.XX.XXX.XXXX.XXX.XX.XXXXX.XX.X
Xg....................................gX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
"""
class Ghost(Shape):
    # A ghost sprite; fx/fy are its position in field-grid cells.
    # NOTE(review): this file is Pyzza (Python-like language compiled to
    # Flash), not CPython -- Shape/graphics come from the Flash runtime.
    __slots__ = ('fx', 'fy')
    def __init__(self, x, y):
        self.fx = x
        self.fy = y
    def draw(self, width, height):
        # width/height are the pixel dimensions of one grid cell; the path
        # below traces the ghost outline inside that cell.
        g = self.graphics
        g.moveTo(width/4, height/4)
        g.lineTo(width*3/4, height/4)
        g.lineTo(width*3/4, height*3/8)
        g.lineTo(width/2, height*3/8)
        g.lineTo(width/2, height*5/8)
        g.lineTo(width*3/4, height*5/8)
        g.lineTo(width*3/4, height*3/4)
        g.lineTo(width/4, height*3/4)
class Meal(Shape):
    # A collectible dot worth `amount` points, at grid cell (fx, fy).
    __slots__ = ('fx', 'fy', 'amount')
    def __init__(self, x, y, amount=10):
        self.fx = x
        self.fy = y
        self.amount = amount
    def draw(self, width, height):
        # Red circle centered in the cell, radius a quarter of the cell size.
        g = self.graphics
        g.clear()
        g.beginFill(0xFF0000)
        g.drawCircle(width/2, height/2, min(width, height)/4)
        g.endFill()
class Pacman(Shape):
    # The player sprite; moves in pixels in response to arrow keys.
    __slots__ = ('fx', 'fy')
    def __init__(self, x, y):
        self.fx = x
        self.fy = y
    def start(self):
        # Register arrow keys under named actions and subscribe to frame
        # updates so `frame` runs every tick.
        Keys.register(Keys.LEFT, 'pacman_left')
        Keys.register(Keys.RIGHT, 'pacman_right')
        Keys.register(Keys.DOWN, 'pacman_down')
        Keys.register(Keys.UP, 'pacman_up')
        Frame.attach(self.frame)
    def stop(self):
        # Unsubscribe from per-frame updates.
        Frame.detach(self.frame)
    def frame(self, delta):
        # Move 50 px/s in each direction whose key is currently held;
        # `delta` is the elapsed time since the previous frame.
        if Keys.keys.pacman_left:
            self.x -= delta*50
        if Keys.keys.pacman_right:
            self.x += delta*50
        if Keys.keys.pacman_up:
            self.y -= delta*50
        if Keys.keys.pacman_down:
            self.y += delta*50
    def draw(self, width, height):
        # Yellow body with red outline, same path as the ghost shape.
        g = self.graphics
        g.clear()
        g.beginFill(0xFFFF00)
        g.lineStyle(1, 0xFF0000)
        g.moveTo(width/4, height/4)
        g.lineTo(width*3/4, height/4)
        g.lineTo(width*3/4, height*3/8)
        g.lineTo(width/2, height*3/8)
        g.lineTo(width/2, height*5/8)
        g.lineTo(width*3/4, height*5/8)
        g.lineTo(width*3/4, height*3/4)
        g.lineTo(width/4, height*3/4)
        g.endFill()
class Wall:
    # A static wall cell; unlike the sprites above it draws itself into a
    # shared graphics object rather than owning one.
    __slots__ = ('fx', 'fy')
    def __init__(self, x, y):
        self.fx = x
        self.fy = y
    def draw(self, graph, x, y, width, height):
        # Grey filled rectangle at the given pixel position/size.
        graph.beginFill(0x808080)
        graph.drawRect(x, y, width, height)
        graph.endFill()
class Field(Widget):
    # Parses the ASCII level map and owns every game object on it.
    # Map legend: '.' meal, 'g' ghost, 'c' pacman, 'X' wall.
    def __init__(self, data, name, states):
        super().__init__(name, states)
        self.ghosts = []
        self.pacman = None
        self.walls = {}
        self.meals = {}
        # Walls never change, so they are drawn once into a cached bitmap.
        self.wallsprite = Shape()
        self.wallsprite.cacheAsBitmap = True
        self.addChild(self.wallsprite)
        self.field_width = 0
        self.field_height = 0
        y = 0
        for line in values(data.split('\n')):
            x = 0
            for i in range(line.length):
                c = line.charAt(i)
                # Whitespace does not advance the column counter.
                if ' \n\t\r'.indexOf(c) >= 0:
                    continue
                if c == '.':
                    self.meals['p{}_{}'.format(x, y)] = Meal(x, y)
                elif c == 'g':
                    self.ghosts.push(Ghost(x, y))
                elif c == 'c':
                    self.pacman = Pacman(x, y)
                elif c == 'X':
                    self.walls['p{}_{}'.format(x, y)] = Wall(x, y)
                x += 1
            self.field_width = max(x, self.field_width)
            # Only non-empty lines count as rows of the field.
            if x:
                y += 1
        self.field_height = y
        for m in values(self.meals):
            self.addChild(m)
        for g in values(self.ghosts):
            self.addChild(g)
        self.addChild(self.pacman)
    def draw(self, width, height):
        super().draw(width, height)
        # Pixel size of one grid cell for the current widget dimensions.
        w = width/self.field_width
        h = height/self.field_height
        # drawing walls
        wg = self.wallsprite.graphics
        wg.clear()
        for wall in values(self.walls):
            wall.draw(wg, w*wall.fx, h*wall.fy, w, h)
        # drawing other objects
        for m in values(self.meals):
            m.x = w*m.fx
            m.y = h*m.fy
            m.draw(w, h)
        for g in values(self.ghosts):
            g.x = w*g.fx
            g.y = h*g.fy
        p = self.pacman
        p.x = w*p.fx
        p.y = h*p.fy
        self.pacman.draw(w, h)
    def start(self):
        # Begin reacting to keyboard input.
        self.pacman.start()
@package('pacman')
class Main(TopLevel):
    # Application entry point: builds the layout with a single full-size
    # Field and wires up the frame/keyboard subsystems.
    def __init__(self):
        self.layout = Layout([
            Field(level, 'field', {
                'normal': State.parse(
                    'normal:(0,0)-(1,1)'),
                }),
            ])
        super().__init__()
        Frame.start(self, True)
        Keys.start(self.stage)
        self.layout.mapping.field.start()
| 5,622 |
corona-dashboard-server/scripts/user-location.py
|
rohithravin/COVID-19-Dashboard
| 0 |
2170076
|
import mysql.connector
import os
from mysql.connector import errorcode
import pandas as pd
import sys
import config
import json
# Connect using credentials from the local config module.
cnx = mysql.connector.connect(user = config.MYSQL_USERNAME, password = config.MYSQL_PASSWORD,
                              host = config.MYSQL_HOST, allow_local_infile=True,
                              database = config.MYSQL_DB )
cursor = cnx.cursor()

# Exactly one positional argument (the zipcode) is expected.
if len(sys.argv) != 2:
    print("Invalid argument list.", file=sys.stderr)
    exit(1)

try:
    zipcode_input = int(sys.argv[1])
except Exception as err:
    print("Argument not a number.", file=sys.stderr)
    exit(2)

# zipcode_input was validated as an int above, so the interpolation cannot
# inject SQL; a parameterized query would still be preferable.
sql_stm = "SELECT * FROM zipcode WHERE zipcode = {}".format(zipcode_input)

# BUG FIX: err_flag used to be assigned only inside the except branch, so a
# successful query that matched zero rows raised NameError at the final
# check.  Initialize it before iterating; any emitted row clears it.
err_flag = True
try:
    cursor.execute(sql_stm)
except mysql.connector.Error as err:
    print(err.msg, file=sys.stderr)

for (zipcode, primary_city, state, county) in cursor:
    # Classify the county into Northern/Southern California.
    if county.strip() in config.NORCAL_COUNTIES:
        print(json.dumps({'county':county, 'zipcode':zipcode, 'caliSection':'NORCAL'}))
    else:
        print(json.dumps({'county':county, 'zipcode':zipcode, 'caliSection':'SOCAL'}))
    err_flag = False

if err_flag:
    print('Zipcode doesn\'t exist.',file=sys.stderr)
| 1,219 |
dontpad_cli.py
|
YanSoares/TelegramCrawler
| 4 |
2170047
|
import dontpad
import sys
def error():
    # Print usage help and abort with a nonzero status.
    usage_lines = (
        "Incorrect parameters.",
        "Format:\n dontpad [MODE] [URL_EXTENSION] [TEXT]",
        " MODES:\n -r (default)\t\tRead content from URL_EXTENSION.\n -w \t\tWrite TEXT to URL_EXTENSION.",
    )
    for line in usage_lines:
        print(line)
    exit(-1)
argc = len(sys.argv)
if argc < 2:
    error()

mode = sys.argv[1]
if mode in ("-r", "-w"):
    # BUG FIX: previously url_extension was only assigned when argc >= 3,
    # so "dontpad -r" / "dontpad -w" crashed with NameError instead of
    # printing the usage message.
    if argc < 3:
        error()
    url_extension = sys.argv[2]
    if mode == "-r":
        print(dontpad.read(url_extension))
    else:
        # Everything after the URL extension is joined into the text payload.
        text = " ".join(sys.argv[3:])
        dontpad.write(url_extension, text)
else:
    # No mode flag given: treat the first argument as the URL extension
    # and default to read mode.
    print(dontpad.read(sys.argv[1]))
| 710 |
spot-oa/ipython/extensions/spot_webapp.py
|
maduhu/Apache-Spot-Incubator
| 0 |
2169958
|
import sys
from os import path
# Make the spot-oa project root importable so the absolute import below
# resolves when Jupyter loads this extension module directly.
sys.path.append(path.dirname(path.dirname(path.dirname(__file__))))
from api.graphql.webapp import load_jupyter_server_extension
| 162 |
tests/sett/test_yearn_original_wrapper.py
|
shuklaayush/badger-system
| 99 |
2168972
|
from helpers.time_utils import days
import brownie
import pytest
from brownie import *
from helpers.constants import *
from helpers.registry import registry
from tests.conftest import yearnSettTestConfig, badger_single_sett
from collections import namedtuple
@pytest.fixture(scope="module", autouse=True)
def setup(MockToken, AffiliateToken, YearnTokenVault, YearnRegistry):
# Assign accounts
deployer = accounts[0]
affiliate = accounts[1]
manager = accounts[2]
guardian = accounts[3]
randomUser1 = accounts[4]
randomUser2 = accounts[5]
randomUser3 = accounts[7]
yearnGovernance = accounts[6]
namedAccounts = {
"deployer": deployer,
"affiliate": affiliate,
"manager": manager,
"guardian": guardian,
"randomUser1": randomUser1,
"randomUser2": randomUser2,
"randomUser3": randomUser3,
"yearnGovernance": yearnGovernance,
}
# WBTC (mainnet)
mockToken = deployer.deploy(MockToken)
mockToken.initialize(
[randomUser1.address, randomUser2.address, randomUser3.address],
[10e18, 20e18, 10e18],
)
assert mockToken.balanceOf(randomUser1.address) == 10e18
assert mockToken.balanceOf(randomUser2.address) == 20e18
assert mockToken.balanceOf(randomUser3.address) == 10e18
# Yearn underlying vault
vault = deployer.deploy(YearnTokenVault)
vault.initialize(
mockToken.address, deployer.address, AddressZero, "YearnWBTC", "vyWBTC"
)
vault.setDepositLimit(24e18)
# Yearn registry
yearnRegistry = deployer.deploy(YearnRegistry)
yearnRegistry.setGovernance(yearnGovernance)
# Add vault to registry
yearnRegistry.newRelease(vault.address)
yearnRegistry.endorseVault(vault.address)
# Deploy and initialize the wrapper contract (deployer -> affiliate)
wrapper = deployer.deploy(
AffiliateToken,
mockToken.address,
yearnRegistry.address,
"BadgerYearnWBTC",
"bvyWBTC",
)
yield namedtuple("setup", "mockToken vault yearnRegistry wrapper namedAccounts")(
mockToken, vault, yearnRegistry, wrapper, namedAccounts
)
@pytest.fixture(autouse=True)
def isolation(fn_isolation):
    # Requesting brownie's fn_isolation fixture is enough: it snapshots the
    # chain before each test and reverts afterwards.  No body needed.
    pass
# @pytest.mark.skip()
def test_deposit_withdraw_flow(setup):
    """Walk deposits and withdrawals through the affiliate wrapper.

    Checks token balances, vault/wrapper accounting and share bookkeeping
    after each step, including a share transfer between users and a revert
    on withdrawing without shares.
    """
    randomUser1 = setup.namedAccounts["randomUser1"]
    randomUser2 = setup.namedAccounts["randomUser2"]
    randomUser3 = setup.namedAccounts["randomUser3"]
    deployer = setup.namedAccounts["deployer"]
    guardian = setup.namedAccounts["guardian"]
    manager = setup.namedAccounts["manager"]
    # Only meaningful while the wrapper points at the vault deployed in setup.
    if setup.wrapper.bestVault() == setup.vault.address:
        # === Deposit flow === #
        # Approve wrapper as spender of mockToken for users
        setup.mockToken.approve(setup.wrapper.address, 100e18, {"from": randomUser3})
        setup.mockToken.approve(setup.wrapper.address, 100e18, {"from": randomUser2})
        setup.mockToken.approve(setup.wrapper.address, 100e18, {"from": randomUser1})
        # total amount of tokens deposited through wrapper = 0
        assert setup.wrapper.totalVaultBalance(setup.wrapper.address) == 0
        # total supply of wrapper shares = 0
        assert setup.wrapper.totalSupply() == 0
        # = User 2: Has 20 Tokens, deposits 1 = #
        # Random user (from guestlist) deposits 1 Token
        setup.wrapper.deposit(1e18, {"from": randomUser2})
        assert setup.mockToken.balanceOf(randomUser2.address) == 19e18
        assert setup.wrapper.totalVaultBalance(setup.wrapper.address) == 1e18
        # mockToken balance of vault equal to deposited amount
        assert setup.vault.totalAssets() == 1e18
        assert setup.wrapper.totalAssets() == 1e18
        # wrapper shares are minted for depositor and vault shares are 0 for depositor
        assert setup.vault.balanceOf(randomUser2.address) == 0
        assert setup.wrapper.balanceOf(randomUser2.address) == 1e18
        # Test pricePerShare to equal 1
        assert setup.wrapper.pricePerShare() == 1e18
        print("-- 1st User Deposits 1 --")
        print("Wrapper's PPS:", setup.wrapper.pricePerShare())
        print("Vault's PPS:", setup.vault.pricePerShare())
        # = User 1: Has 10 Tokens, deposits 10 = #
        # Another random user (from guestlist) deposits all their Tokens (10)
        setup.wrapper.deposit({"from": randomUser1})
        assert setup.mockToken.balanceOf(randomUser1.address) == 0
        assert setup.wrapper.totalVaultBalance(setup.wrapper.address) == 11e18
        # mockToken balance of vault and wrapper equals to net amount
        assert setup.vault.totalAssets() == 11e18
        assert setup.wrapper.totalAssets() == 11e18
        # wrapper shares are minted for depositor and vault shares are 0 for depositor
        assert setup.vault.balanceOf(randomUser1.address) == 0
        assert setup.wrapper.balanceOf(randomUser1.address) == 10e18
        # Test pricePerShare to equal 1
        assert setup.wrapper.pricePerShare() == 1e18
        print("-- 2nd User Deposits 10 --")
        print("Wrapper's PPS:", setup.wrapper.pricePerShare())
        print("Vault's PPS:", setup.vault.pricePerShare())
        chain.sleep(10000)
        chain.mine(1)
        # === Withdraw flow === #
        # = User 2: Has 19 Tokens, 1 bvyWBTC token, withdraws 0.5 = #
        assert setup.mockToken.balanceOf(randomUser2.address) == 19e18
        setup.wrapper.withdraw(0.5e18, {"from": randomUser2})
        print("-- 1st User withdraws 0.5 --")
        print("Wrapper's PPS:", setup.wrapper.pricePerShare())
        print("Vault's PPS:", setup.vault.pricePerShare())
        assert setup.mockToken.balanceOf(randomUser2.address) == 19.5e18
        assert setup.wrapper.totalVaultBalance(setup.wrapper.address) == 10.5e18
        # mockToken balance of vault equals to net amount
        assert setup.vault.totalAssets() == 10.5e18
        assert setup.wrapper.totalAssets() == 10.5e18
        # wrapper shares are burned for withdrawer and vault shares are still 0 for withdrawer
        assert setup.vault.balanceOf(randomUser2.address) == 0
        assert setup.wrapper.balanceOf(randomUser2.address) == 0.5e18
        # = User 1: Has 0 Tokens, 10 bvyWBTC token, withdraws all = #
        assert setup.mockToken.balanceOf(randomUser1.address) == 0
        setup.wrapper.withdraw({"from": randomUser1})
        print("-- 2nd User withdraws 10 --")
        print("Wrapper's PPS:", setup.wrapper.pricePerShare())
        print("Vault's PPS:", setup.vault.pricePerShare())
        assert setup.mockToken.balanceOf(randomUser1.address) == 10e18
        assert setup.wrapper.totalVaultBalance(setup.wrapper.address) == 0.5e18
        # mockToken balance of vault equals to net amount
        assert setup.vault.totalAssets() == 0.5e18
        assert setup.wrapper.totalAssets() == 0.5e18
        # wrapper shares are burnt for withdrawer and vault shares are still 0 for withdrawer
        assert setup.vault.balanceOf(randomUser1.address) == 0
        assert setup.wrapper.balanceOf(randomUser1.address) == 0
        # = User 3: Has 10 Tokens, 0 bvyWBTC token, withdraws 1 = #
        # Random user attempts to withdraw 1 token
        # Should revert since user has no tokens on vault
        with brownie.reverts():
            setup.wrapper.withdraw(1e18, {"from": randomUser3})
        # User's token balance remains the same
        assert setup.mockToken.balanceOf(randomUser3.address) == 10e18
        # Test pricePerShare to equal 1
        assert setup.wrapper.pricePerShare() == 1e18
        # = User 2 sends 0.5 byvWBTC to user 3 for withdrawal = #
        setup.wrapper.transfer(randomUser3.address, 0.5e18, {"from": randomUser2})
        assert setup.wrapper.balanceOf(randomUser3.address) == 0.5e18
        # User 3 withdraws using the 0.5 shares received from user 2
        setup.wrapper.withdraw(0.5e18, {"from": randomUser3})
        # mockToken balance of user 3: 10 + 0.5 = 10.5
        assert setup.mockToken.balanceOf(randomUser3.address) == 10.5e18
        assert setup.wrapper.totalVaultBalance(setup.wrapper.address) == 0
| 8,133 |
myusearch.py
|
alexherns/biotite-scripts
| 1 |
2169070
|
#!/usr/bin/env python
import subprocess
import sys
import os
# Friendly names for commonly used UDB databases on this cluster; the --db
# option accepts either a shortcut key or an explicit path.
DB_SHORTCUTS = {
    'nr': '/work/blastdb/nr.fa.udb',
    'refseq': '/work/blastdb/refseq_protein.udb'
}
# Output flags supported by this wrapper (passed to usearch as -<format>).
USEARCH_FORMATS = [
    'alnout',
    'blast6out',
    'dbmatched'
]
def gen_usearch_cmd(args):
    # Assemble the usearch invocation as an argv list from the parsed
    # options namespace; -maxhits is emitted only when non-zero.
    cmd = ['usearch7.0.1001_i86linux64', '-ublast', args.f,
           '-db', args.db,
           '-evalue', args.evalue]
    if args.maxhits:
        cmd += ['-maxhits', str(args.maxhits)]
    cmd += ['-threads', str(args.t),
            '-{0}'.format(args.format), args.o]
    return cmd
def run_usearch(args):
    """Build the usearch command, echo it, and run it to completion."""
    usearch_cmd = gen_usearch_cmd(args)
    # BUG FIX: the Python-2-only `print expr` statement form made the file a
    # SyntaxError on Python 3.  The call form below prints identically on
    # Python 2 (single argument) and is valid Python 3.
    print(' '.join(usearch_cmd))
    p = subprocess.Popen(usearch_cmd)
    p.wait()
if __name__ == '__main__':
    import argparse
    # Command-line interface.  NOTE(review): the file targets Python 2
    # (print statement elsewhere); --verbose/--log/--clean are declared but
    # not consumed in this script -- presumably reserved for callers.
    parser = argparse.ArgumentParser(description = \
            'simple generation of usearch commands')
    parser.add_argument(\
            '-f', type = str, required = True, \
            help = 'fasta (required)')
    parser.add_argument(\
            '-o', type = str, required = True, \
            help = 'output results file')
    parser.add_argument(\
            '-t', type = int, default = 10, \
            help = '# threads for usearch')
    parser.add_argument(\
            '--db', type = str, default = 'nr', \
            help = 'path to udb file')
    parser.add_argument(\
            '--evalue', type = str, default = '1e-8', \
            help = 'e-value threshold for reporting hits')
    parser.add_argument(\
            '--maxhits', type = int, default = 0, \
            help = 'maximum number hits to report')
    parser.add_argument(\
            '--format', type = str, default = 'blast6out', \
            help = 'output format type')
    parser.add_argument(\
            '--verbose', action = 'store_true', \
            help = 'print progress metrics to stderr')
    parser.add_argument(\
            '--log', type = str, default = 'log.txt', \
            help = 'log file for stderr logging when not run with verbose')
    parser.add_argument(\
            '--clean', action = 'store_true', \
            help = 'clean up temporary files after hits found')
    args = parser.parse_args()
    # Expand db shortcut names to full paths before validation.
    if args.db in DB_SHORTCUTS:
        args.db = DB_SHORTCUTS[args.db]
    if args.format not in USEARCH_FORMATS:
        sys.exit('Output format {0} not one of [{1}]'.format(args.format, ', '.join(USEARCH_FORMATS)))
    run_usearch(args)
| 2,512 |
src/vodka/resources/blank_app/application.py
|
20c/vodka
| 3 |
2168764
|
import vodka.app
# @vodka.app.register('my_app')
# class MyApplication(vodka.app.Application):
# pass
| 106 |
httprunner/events.py
|
QiChangYin/MultipleInterfaceManager
| 0 |
2168744
|
# encoding: utf-8
from httprunner.exception import MyBaseError
class EventHook(object):
    """
    Simple event class used to provide hooks for different types of events in HttpRunner.

    Handlers are registered with ``+=`` and removed with ``-=``; ``fire``
    invokes every registered handler with the given keyword arguments, e.g.::

        my_event = EventHook()
        my_event += on_my_event
        my_event.fire(a="foo", b="bar")
    """
    def __init__(self):
        self._handlers = []

    def __iadd__(self, handler):
        # "event += handler" registers a callback; returns self so the
        # augmented assignment rebinds to the same object.
        self._handlers.append(handler)
        return self

    def __isub__(self, handler):
        # "event -= handler" unregisters; removing an unknown handler is an
        # error rather than a silent no-op.
        if handler not in self._handlers:
            raise MyBaseError("handler not found: {}".format(handler))
        self._handlers.remove(handler)
        return self

    def fire(self, **kwargs):
        # Invoke handlers in registration order, all with identical kwargs.
        for handler in self._handlers:
            handler(**kwargs)
| 979 |
003 - Exercicios Loop - While.py/007 - Num Maior que Num.py
|
rodrigoviannini/meus_Primeiros_Codigos
| 2 |
2169966
|
"Faça um script que leia dois números e informe o maior deles."
primeiroNum = int(input("Digite um número inteiro: "))
segundoNum = int(input("Digite um número inteiro: "))
if primeiroNum > segundoNum:
print(f"\nO maior número é o primeiro: {primeiroNum}")
else:
print(f"\nO maior número é o segundo: {segundoNum}")
| 325 |
hexrd/ui/overlays/rotation_series.py
|
HEXRD/hexrdgui
| 13 |
2170084
|
import numpy as np
from hexrd import constants
from hexrd.ui.constants import ViewType
class RotationSeriesSpotOverlay:
    """Simulates Bragg-spot positions for a monochromatic rotation series.

    Wraps an instrument, plane data and crystal parameters and produces,
    per detector, spot positions (and optional tth/eta range boxes) in the
    coordinate system of the requested display mode.
    """
    def __init__(self, plane_data, instr,
                 crystal_params=None,
                 eta_ranges=None,
                 ome_ranges=None,
                 ome_period=None,
                 eta_period=np.r_[-180., 180.],
                 aggregated=True,
                 ome_width=5.0,
                 tth_width=None,
                 eta_width=None):
        # FIXME: eta_period is currently not in use
        self._plane_data = plane_data
        self._instrument = instr
        if crystal_params is None:
            # Default: zero orientation, zero translation, identity stretch.
            self._crystal_params = np.hstack(
                [constants.zeros_3,
                 constants.zeros_3,
                 constants.identity_6x1]
            )
        else:
            assert len(crystal_params) == 12, \
                "crystal parameters must have length 12"
            self._crystal_params = crystal_params
        if eta_ranges is None:
            self._eta_ranges = [(-np.pi, np.pi), ]
        else:
            assert hasattr(eta_ranges, '__len__'), \
                'eta ranges must be a list of 2-tuples'
            # !!! need better check
            self._eta_ranges = eta_ranges
        if ome_ranges is None:
            self._ome_ranges = [(-np.pi, np.pi), ]
        else:
            assert hasattr(ome_ranges, '__len__'), \
                'ome ranges must be a list of 2-tuples'
            # !!! need better check
            self._ome_ranges = ome_ranges
        if ome_period is None:
            # Default period: one full turn starting at the first omega range.
            self._ome_period = self._ome_ranges[0][0] + np.r_[0., 2*np.pi]
        else:
            self._ome_period = ome_period
        self.aggregated = aggregated
        self.ome_width = ome_width
        self.tth_width = tth_width
        self.eta_width = eta_width

    @property
    def plane_data(self):
        return self._plane_data

    @property
    def instrument(self):
        return self._instrument

    @property
    def crystal_params(self):
        return self._crystal_params

    @crystal_params.setter
    def crystal_params(self, x):
        assert len(x) == 12, 'input must be array-like with length 12'
        # BUG FIX: the assignment below was missing, so setting this
        # property validated the input but silently discarded it.
        self._crystal_params = x

    @property
    def widths_enabled(self):
        # Range boxes are drawn only when both angular widths are set.
        widths = ['tth_width', 'eta_width']
        return all(getattr(self, x) is not None for x in widths)

    @property
    def eta_ranges(self):
        return self._eta_ranges

    @eta_ranges.setter
    def eta_ranges(self, x):
        # FIXME: need a check here
        self._eta_ranges = x

    @property
    def ome_ranges(self):
        return self._ome_ranges

    @ome_ranges.setter
    def ome_ranges(self, x):
        # FIXME: need a check here
        self._ome_ranges = x

    @property
    def ome_period(self):
        return self._ome_period

    def overlay(self, display_mode=ViewType.raw):
        """
        Returns appropriate point groups for displaying bragg reflection
        locations for a monochromatic rotation series.
        Parameters
        ----------
        display_mode : TYPE, optional
            DESCRIPTION. The default is ViewType.raw.
        Raises
        ------
        NotImplementedError
            TODO: bin omega output as frames; functions exist in
            imageseries.omega
        Returns
        -------
        point_groups : TYPE
            DESCRIPTION.
        """
        sim_data = self.instrument.simulate_rotation_series(
            self.plane_data, [self.crystal_params, ],
            eta_ranges=self.eta_ranges,
            ome_ranges=self.ome_ranges,
            ome_period=self.ome_period
        )
        point_groups = {}
        for det_key, psim in sim_data.items():
            panel = self.instrument.detectors[det_key]
            valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size = psim
            angles = valid_angs[0][:, :2]
            omegas = valid_angs[0][:, 2]
            if display_mode == ViewType.polar:
                data = np.degrees(angles)
            elif display_mode in [ViewType.raw, ViewType.cartesian]:
                data = valid_xys[0]
                if display_mode == ViewType.raw:
                    # If raw, convert to pixels
                    data[:] = panel.cartToPixel(data)
                    data[:, [0, 1]] = data[:, [1, 0]]
            else:
                # ROBUSTNESS: previously an unrecognized mode fell through
                # and raised NameError on the unbound `data` below.
                raise NotImplementedError(
                    'unknown display mode: {}'.format(display_mode))
            ranges = self.range_data(angles, display_mode, panel)
            point_groups[det_key] = {
                'data': data,
                'aggregated': self.aggregated,
                'omegas': np.degrees(omegas),
                'omega_width': self.ome_width,
                'ranges': ranges,
            }
        return point_groups

    @property
    def tvec_c(self):
        # Crystal translation vector as a (3, 1) column, or None when unset.
        if self.crystal_params is None:
            return None
        return self.crystal_params[3:6].reshape(3, 1)

    def range_corners(self, spots):
        # spots should be in degrees
        if not self.widths_enabled:
            return []
        widths = (self.tth_width, self.eta_width)
        # Put the first point at the end to complete the square
        tol_box = np.array(
            [[0.5, 0.5],
             [0.5, -0.5],
             [-0.5, -0.5],
             [-0.5, 0.5],
             [0.5, 0.5]]
        )
        ranges = []
        for spot in spots:
            corners = np.tile(spot, (5, 1)) + tol_box*np.tile(widths, (5, 1))
            ranges.append(corners)
        return ranges

    def range_data(self, spots, display_mode, panel):
        return self.rectangular_range_data(spots, display_mode, panel)

    def rectangular_range_data(self, spots, display_mode, panel):
        range_corners = self.range_corners(spots)
        if display_mode == ViewType.polar:
            # All done...
            return np.degrees(range_corners)
        # The range data is curved for raw and cartesian.
        # Get more intermediate points so the data reflects this.
        results = []
        for corners in range_corners:
            data = []
            for i in range(len(corners) - 1):
                tmp = np.linspace(corners[i], corners[i + 1])
                data.extend(panel.angles_to_cart(tmp, tvec_c=self.tvec_c))
            data = np.array(data)
            if display_mode == ViewType.raw:
                data = panel.cartToPixel(data)
                data[:, [0, 1]] = data[:, [1, 0]]
            results.append(data)
        return results
| 6,424 |
tests/unit/dataactcore/factories/user.py
|
chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend
| 0 |
2168990
|
import factory
from factory import fuzzy
from dataactcore.models import userModel
class UserFactory(factory.Factory):
    """factory_boy factory producing User model instances with fuzzed text fields."""
    class Meta:
        model = userModel.User
    # All attributes default to random text; tests override what they care about.
    username = fuzzy.FuzzyText()
    email = fuzzy.FuzzyText()
    name = fuzzy.FuzzyText()
    cgac_code = fuzzy.FuzzyText()
    title = fuzzy.FuzzyText()
| 322 |
src/utility/test_latency.py
|
mgely/pi-looper
| 0 |
2169383
|
# Note: input is plugged into output
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from core import Looper
from time import sleep
import matplotlib.pyplot as plt
import numpy as np
N_tests = 2  # NOTE(review): unused below -- presumably meant to loop the test

# The audio input is physically looped back into the output, so each
# recording contains the previous one delayed by the round-trip latency.
with Looper() as l:
    sleep(3)
    data_1 = l.loop
    l.release_rec_button()
    sleep(2)
    l.release_play_button()
    sleep(2)
    data_2 = l.loop
    l.release_rec_button()
    sleep(2)
    l.release_play_button()
    sleep(2)
    data_3 = l.loops[1]
fig = plt.figure()
plt.plot(data_1[:,0])
plt.plot(data_2[:,0])
fig.savefig('/home/pi/Desktop/plot.pdf')
# Latency = sample offset between the peaks of consecutive recordings,
# converted to milliseconds via the looper's sample rate.
latency_samples = np.argmax(data_2) - np.argmax(data_1)
latency_time = latency_samples/float(l.sample_rate)
print('LATENCY 1 = %.0f ms'%(1e3*latency_time))
latency_samples = np.argmax(data_3) - np.argmax(data_2)
latency_time = latency_samples/float(l.sample_rate)
print('LATENCY 2 = %.0f ms'%(1e3*latency_time))
| 929 |
squerly/boolean.py
|
csams/queryable
| 1 |
2167955
|
"""
The boolean module lets you create complicated boolean expressions by
composing objects. They can then be evaluated against multiple values.
"""
import logging
import operator
import re
from functools import partial, wraps
log = logging.getLogger(__name__)
__all__ = [
"pred",
"flip",
"TRUE",
"FALSE",
"lt",
"le",
"eq",
"ge",
"gt",
"isin",
"contains",
"search",
"matches",
"startswith",
"endswith",
]
class Boolean:
    """Base class for composable boolean predicates.

    Subclasses implement ``test``; instances compose with ``&``, ``|`` and
    ``~`` into All / Any / Not trees that can be evaluated against values.
    """
    def test(self, value):
        raise NotImplementedError()

    def __and__(self, other):
        # a & b -> true only when both predicates accept the value
        return All(self, other)

    def __or__(self, other):
        # a | b -> true when either predicate accepts the value
        return Any(self, other)

    def __invert__(self):
        # ~a -> logical negation
        return Not(self)
class Any(Boolean):
    """True when at least one of the wrapped predicates accepts the value."""
    def __init__(self, *predicates):
        self.predicates = predicates

    def test(self, value):
        # Short-circuits on the first accepting predicate.
        for candidate in self.predicates:
            if candidate.test(value):
                return True
        return False
class All(Boolean):
    """True only when every wrapped predicate accepts the value."""
    def __init__(self, *predicates):
        self.predicates = predicates

    def test(self, value):
        # Short-circuits on the first rejecting predicate.
        for candidate in self.predicates:
            if not candidate.test(value):
                return False
        return True
class Not(Boolean):
    """Logical negation of a wrapped predicate."""
    def __init__(self, predicate):
        self.predicate = predicate
    def test(self, value):
        # `not` also normalizes a non-bool result from the inner predicate.
        return not self.predicate.test(value)
class Predicate(Boolean):
    """ Calls a function to determine truth value. """
    def __init__(self, predicate, *args, **kwargs):
        # Extra args/kwargs are forwarded to the callable after the tested
        # value on every `test` invocation.
        self.predicate = predicate
        self.args = args
        self.kwargs = kwargs
    def test(self, value):
        try:
            return self.predicate(value, *self.args, **self.kwargs)
        except Exception as ex:
            # Deliberate best-effort: any failure inside the callable counts
            # as "does not match"; the exception is only logged at DEBUG.
            if log.isEnabledFor(logging.DEBUG):
                log.debug(ex)
            return False
def pred(predicate, *args, **kwargs):
    """Return a factory that builds :class:`Predicate` objects for *predicate*.

    BUG FIX: *args and **kwargs were accepted by the signature but silently
    dropped; they are now forwarded to the Predicate constructor.  All
    existing call sites pass only *predicate*, so behavior there is unchanged.
    """
    return partial(Predicate, predicate, *args, **kwargs)
def flip(f):
    """
    Switches position of the first two arguments to f and ensures
    its result is a bool.
    """
    @wraps(f)
    def flipped(a, b, *args, **kwargs):
        result = f(b, a, *args, **kwargs)
        return bool(result)
    return flipped
class TRUE(Boolean):
    # Always-true predicate; the class name is rebound to a singleton
    # instance right after both classes are defined.
    def test(self, value):
        return True
class FALSE(Boolean):
    # Always-false predicate; rebound to a singleton instance below.
    def test(self, value):
        return False
# Replace the classes with singleton instances; one of each is enough.
TRUE = TRUE()
FALSE = FALSE()
# Ready-made predicate factories: each call like ``lt(5)`` builds a Boolean
# that tests ``value < 5``.  ``flip`` swaps argument order where the
# operator expects the tested value second.
lt = pred(operator.lt)
le = pred(operator.le)
eq = pred(operator.eq)
ge = pred(operator.ge)
gt = pred(operator.gt)
isin = pred(flip(operator.contains))   # value in container
contains = pred(operator.contains)     # container contains value
search = pred(flip(re.search))         # pattern search against the value
matches = search
startswith = pred(str.startswith)
endswith = pred(str.endswith)
| 2,555 |
src/evesrp/util/fields.py
|
paxswill/evesrp
| 17 |
2170003
|
from __future__ import absolute_import
import wtforms
import wtforms.widgets
import wtforms.fields
from wtforms.utils import unset_value
class ImageInput(wtforms.widgets.Input):
    """WTForms widget for image inputs (<input type="image">)

    The ``src``/``alt`` attributes are fixed at construction time and
    overwrite any same-named kwargs passed at render time.
    """
    input_type = u'image'
    def __init__(self, src='', alt=''):
        super(ImageInput, self).__init__()
        self.src = src
        self.alt = alt
    def __call__(self, field, **kwargs):
        # Inject the stored attributes into every render.
        kwargs['src'] = self.src
        kwargs['alt'] = self.alt
        return super(ImageInput, self).__call__(field, **kwargs)
class ImageField(wtforms.fields.BooleanField):
    """WTForms field for image fields.

    Acts like a boolean: True when the submitted form data contains any key
    starting with this field's name (image inputs submit coordinate keys
    such as ``name.x`` / ``name.y``).
    """
    def __init__(self, src, alt='', **kwargs):
        widget = ImageInput(src, alt)
        # NOTE(review): super() is anchored at BooleanField, so this skips
        # BooleanField.__init__ and calls its parent's directly -- confirm
        # this is intentional and not a typo for super(ImageField, self).
        super(wtforms.fields.BooleanField, self).__init__(widget=widget,
                **kwargs)
    def process(self, formdata, data=unset_value):
        # Any key prefixed with the field name means the image was clicked.
        if formdata:
            for key in formdata:
                if key.startswith(self.name):
                    self.data = True
                    break
            else:
                self.data = False
        else:
            self.data = False
| 1,173 |
Figures/Figure_2_covariances_processes.py
|
jemil-butt/kernel_inference
| 3 |
2169676
|
"""
The goal of this script is to showcase different covariance matrices arising
from different model choices together with example realizations of the
associated stochastic processes. In this way, produce figure 2 of the paper
'Inference of instationary covariance functions for optimal estimation in
spatial statistics'.
For this, do the following:
1. Imports and definitions
2. Create covariance matrices
3. Sample from stochastic processes
4. Plots and illustrations
The simulations are based on a fixed random seed, to generate data deviating
from the ones shown in the paper and different for each run, please comment out
the entry 'np.random.seed(x)' in section 1.
"""
"""
1. Imports and definitions -----------------------------------------------
"""
# i) Imports
import numpy as np
import numpy.linalg as lina
from scipy.stats import wishart
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 5})
# ii) Definition of auxiliary quantities
# n grid points on the unit interval; the fixed seed makes every run
# reproduce the exact figure from the paper (see module docstring).
n=300
t=np.linspace(0,1,n)
np.random.seed(1)
"""
2. Create covariance matrices --------------------------------------------
"""
# i) Squared exponential covariance matrices
# Two stationary kernels differing only in correlation length (d_1 > d_2).
d_1=0.4; d_2=0.1;
def cov_fun_sqexp_1(t1,t2):
    return np.exp(-(lina.norm(t1-t2)/d_1)**2)
def cov_fun_sqexp_2(t1,t2):
    return np.exp(-(lina.norm(t1-t2)/d_2)**2)
K_sqexp_1=np.zeros([n,n])
K_sqexp_2=np.zeros([n,n])
for k in range(n):
    for l in range(n):
        K_sqexp_1[k,l]=cov_fun_sqexp_1(t[k],t[l])
        K_sqexp_2[k,l]=cov_fun_sqexp_2(t[k],t[l])
# ii) Bochner covariance functions
# Build random stationary covariances from complex exponentials weighted by
# a (diagonal) Wishart draw scaled with the truncated sqexp spectra.
n_exp=10
[U_1,Lambda_1,V_1]=lina.svd(K_sqexp_1)
Lambda_1=np.diag(Lambda_1)
Lambda_cut_1=Lambda_1[:n_exp,:n_exp]
U_1_cut=U_1[:,:n_exp]
[U_2,Lambda_2,V_2]=lina.svd(K_sqexp_2)
Lambda_2=np.diag(Lambda_2)
Lambda_cut_2=Lambda_2[:n_exp,:n_exp]
U_2_cut=U_2[:,:n_exp]
weight_fun_1=np.diag(np.diag((1/n_exp)*wishart.rvs(n_exp,scale=Lambda_cut_1)))
weight_fun_2=np.diag(np.diag((1/n_exp)*wishart.rvs(n_exp,scale=Lambda_cut_2)))
omega=np.logspace(-1,1,n_exp)
def complex_exp(t1,t2,omega):
    return np.exp(2*np.pi*(1j)*omega*(t1-t2))
basis_vectors=np.zeros([n,n_exp])+0j*np.zeros([n,n_exp])
for k in range(n):
    for l in range(n_exp):
        basis_vectors[k,l]=(complex_exp(t[k],0,omega[l]))
K_Bochner_1=np.real(basis_vectors@weight_fun_1@basis_vectors.conj().T)
K_Bochner_2=np.real(basis_vectors@weight_fun_2@basis_vectors.conj().T)
# iii) Nondiagonal Mercer covariance
# Full (non-diagonal) Wishart draw in the truncated eigenbasis gives an
# instationary covariance.
gamma_1=(1/n_exp)*wishart.rvs(n_exp,scale=Lambda_cut_1)
gamma_2=(1/n_exp)*wishart.rvs(n_exp,scale=Lambda_cut_2)
K_nondiag_Mercer_1=U_1_cut@gamma_1@U_1_cut.T
K_nondiag_Mercer_2=U_2_cut@gamma_2@U_2_cut.T
"""
3. Sample from stochastic processes --------------------------------------
"""
# i) Prepare simulation
# Draw n_simu zero-mean Gaussian process realizations per covariance model;
# one column per realization.
n_simu=10
x_sqexp_1=np.zeros([n,n_simu])
x_sqexp_2=np.zeros([n,n_simu])
x_Bochner_1=np.zeros([n,n_simu])
x_Bochner_2=np.zeros([n,n_simu])
x_nondiag_Mercer_1=np.zeros([n,n_simu])
x_nondiag_Mercer_2=np.zeros([n,n_simu])
for k in range(n_simu):
    x_sqexp_1[:,k]=np.random.multivariate_normal(np.zeros([n]),K_sqexp_1)
    x_sqexp_2[:,k]=np.random.multivariate_normal(np.zeros([n]),K_sqexp_2)
    x_Bochner_1[:,k]=np.random.multivariate_normal(np.zeros([n]),K_Bochner_1)
    x_Bochner_2[:,k]=np.random.multivariate_normal(np.zeros([n]),K_Bochner_2)
    x_nondiag_Mercer_1[:,k]=np.random.multivariate_normal(np.zeros([n]),K_nondiag_Mercer_1)
    x_nondiag_Mercer_2[:,k]=np.random.multivariate_normal(np.zeros([n]),K_nondiag_Mercer_2)
"""
3. Plots and illustrations -----------------------------------------------
"""
# i) Invoke figure
# 3x4 grid: rows are the three covariance models, column pairs are
# (covariance image, realizations) for parameter sets 1 and 2.
fig1 = plt.figure(dpi=200,constrained_layout=True)
gs1 = fig1.add_gridspec(3, 4)
# ii) Squared exponential covariance matrices
f1_ax1 = fig1.add_subplot(gs1[0, 0])
f1_ax1.imshow(K_sqexp_1)
f1_ax1.set_title('Squared exponential covariance 1')
f1_ax1.axis('off')
f1_ax2 = fig1.add_subplot(gs1[0, 2])
f1_ax2.imshow(K_sqexp_2)
f1_ax2.set_title('Squared exponential covariance 2')
f1_ax2.axis('off')
# iii) Bochner covariance matrices
f1_ax3 = fig1.add_subplot(gs1[1, 0])
f1_ax3.imshow(K_Bochner_1)
f1_ax3.set_title('Bochner covariance 1')
f1_ax3.axis('off')
f1_ax4 = fig1.add_subplot(gs1[1, 2])
f1_ax4.imshow(K_Bochner_2)
f1_ax4.set_title('Bochner covariance 2')
f1_ax4.axis('off')
# iv) Nondiagonal Mercer covariance matrices
f1_ax5 = fig1.add_subplot(gs1[2, 0])
f1_ax5.imshow(K_nondiag_Mercer_1)
f1_ax5.set_title('Nondiagonal Mercer covariance 1')
f1_ax5.axis('off')
f1_ax6 = fig1.add_subplot(gs1[2, 2])
f1_ax6.imshow(K_nondiag_Mercer_2)
f1_ax6.set_title('Nondiagonal Mercer covariance 2')
f1_ax6.axis('off')
# v) Squared exponential realizations
# Realization panels next to each covariance image.  Note: the axis
# variables f1_ax7/f1_ax8 are reused for each row.
f1_ax7 = fig1.add_subplot(gs1[0, 1])
f1_ax7.plot(t,x_sqexp_1,color='0')
f1_ax7.set_title('Example realizations')
f1_ax7.axis('off')
f1_ax8 = fig1.add_subplot(gs1[0, 3])
f1_ax8.plot(t,x_sqexp_2,color='0')
# Fix: the title was mistakenly set on f1_ax7 a second time (copy-paste),
# leaving this panel untitled.
f1_ax8.set_title('Example realizations')
f1_ax8.axis('off')
# vi) Bochner realizations
f1_ax7 = fig1.add_subplot(gs1[1, 1])
f1_ax7.plot(t,x_Bochner_1,color='0')
f1_ax7.axis('off')
f1_ax8 = fig1.add_subplot(gs1[1, 3])
f1_ax8.plot(t,x_Bochner_2,color='0')
f1_ax8.axis('off')
# vi) Nondiagonal mercer realizations
f1_ax7 = fig1.add_subplot(gs1[2, 1])
f1_ax7.plot(t,x_nondiag_Mercer_1,color='0')
f1_ax7.axis('off')
f1_ax8 = fig1.add_subplot(gs1[2, 3])
f1_ax8.plot(t,x_nondiag_Mercer_2,color='0')
f1_ax8.axis('off')
# # Save the figure
# plt.savefig('Figure_2',dpi=400)
| 5,528 |
Python_Exercicios/Mundo2/Condições em Python (if..elif)/python_041.py
|
jbauermanncode/Curso_Em_Video_Python
| 0 |
2169277
|
'''
Create a program that makes the computer play Jokenpo (rock-paper-scissors)
with you.
'''
# Random choice for the computer
from random import randint
# Pause one second between the "JO / KEN / PO" announcements
from time import sleep
# Read the player's menu choice
print('[1] PEDRA: ')
print('[2] PAPEL: ')
print('[3] TESOURA: ')
usuario = int(input('Escolha: '))
# Map the menu number to the move name.
# NOTE(review): there is no else branch -- an out-of-range entry (e.g. 4)
# leaves `usuario` as an int, so the computer always wins; confirm whether
# input validation is wanted.
if usuario == 1:
    usuario = 'PEDRA'
elif usuario == 2:
    usuario = 'PAPEL'
elif usuario == 3:
    usuario = 'TESOURA'
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO!!!')
sleep(1)
# The computer picks its move at random
escolha = randint(1, 3)
if escolha == 1:
    escolha = 'PEDRA'
elif escolha == 2:
    escolha = 'PAPEL'
elif escolha == 3:
    escolha = 'TESOURA'
# Resolve the round: rock beats scissors, paper beats rock, scissors beat paper
print('A escolha do usuário foi {} e a escolha do computador foi {}.'.format(usuario, escolha))
usuario_vencedor = (usuario == 'PEDRA' and escolha == 'TESOURA') or (usuario == 'PAPEL' and escolha == 'PEDRA') or (usuario == 'TESOURA' and escolha == 'PAPEL')
empate = usuario == escolha
print()
print('-=='*10)
if empate:
    print('O JOGO EMPATOU!')
elif usuario_vencedor:
    print('O jogador VENCEU!!!')
else:
    print('O computador VENCEU!!!')
print('-=='*10)
| 1,206 |
tests/throttled_server.py
|
xinhuang/throttle-bag
| 2 |
2169415
|
import datetime
class ThrottledException(Exception):
    """Raised by ThrottledServer when the call-rate limit is exceeded."""
    pass
class ThrottledServer(object):
    """Test double that raises ThrottledException when called too often.

    Allows at most ``times`` invocations per ``seconds``-long sliding
    window; foo/bar (and their async variants) share one call log.
    """
    def __init__(self, seconds, times=1):
        self._seconds = seconds
        self._times = times
        self._calls = []  # timestamps of recorded invocations

    def _check(self):
        """Record one invocation, raising if the rate limit is exceeded."""
        now = self.now()
        if self._times == 1:
            # Fix: prune timestamps that fell out of the window.  The
            # original never pruned on this branch, so _calls grew without
            # bound for long-lived instances.
            recent = [c for c in self._calls if now - c < self._seconds]
            self._calls = recent
            if recent:
                # Same payload as before: age of the oldest in-window call.
                raise ThrottledException(now - recent[0])
            self._calls.append(now)
        else:
            # A throttled call still counts toward the window (append before
            # checking), matching the original semantics.
            self._calls.append(now)
            self._calls = [c for c in self._calls if now - c < self._seconds]
            if len(self._calls) > self._times:
                msg = 'too many invocations in a short time: {}'.format(
                    len(self._calls))
                raise ThrottledException(msg)
        return now

    def now(self):
        # UTC POSIX timestamp; overridable in tests.
        return datetime.datetime.utcnow().timestamp()

    def foo(self):
        return self._check()

    def bar(self):
        return self._check()

    async def async_foo(self):
        return self._check()

    async def async_bar(self):
        return self._check()
| 1,114 |
blog/receivers.py
|
davidmoss/blogit
| 0 |
2169728
|
from django.db import models
from .models import Blog, Comment, Post
def add_post_id(sender, instance, **kwargs):
    """post_save handler: record a new post's id on the singleton blog."""
    blog, __ = Blog.objects.get_or_create(name='blogit')
    # A freshly created Blog row has no id list yet.
    if not blog.post_ids:
        blog.post_ids = []
    blog.post_ids.append(instance.id)
    blog.save()
def add_comment_id(sender, instance, **kwargs):
    """post_save handler: record a new comment's id on its parent post."""
    post = instance.post
    # A post that never had comments carries no id list yet.
    if not post.comment_ids:
        post.comment_ids = []
    post.comment_ids.append(instance.id)
    post.save()
def remove_post_id(sender, instance, **kwargs):
    """post_delete handler: drop the deleted post's id from the blog index.

    Guards against ``post_ids`` being None (a freshly created Blog row has
    no list yet), which previously raised TypeError on the ``in`` test.
    """
    blog, __ = Blog.objects.get_or_create(name='blogit')
    if blog.post_ids and instance.id in blog.post_ids:
        blog.post_ids.remove(instance.id)
        blog.save()
def remove_comment_id(sender, instance, **kwargs):
    """post_delete handler: drop the deleted comment's id from its post.

    Guards against ``comment_ids`` being None (a post that never had
    comments), which previously raised TypeError on the ``in`` test.
    """
    post = instance.post
    if post.comment_ids and instance.id in post.comment_ids:
        post.comment_ids.remove(instance.id)
        post.save()
# Keep the denormalized id lists in sync with Post/Comment lifecycle events.
models.signals.post_save.connect(add_post_id, sender=Post)
models.signals.post_save.connect(add_comment_id, sender=Comment)
models.signals.post_delete.connect(remove_post_id, sender=Post)
models.signals.post_delete.connect(remove_comment_id, sender=Comment)
| 1,163 |
shortLinks/admin.py
|
NightTarlis/URLShortener
| 0 |
2169020
|
from django.contrib import admin
from shortLinks.models import Link
class ShorterAdmin(admin.ModelAdmin):
    """Read-only admin for shortened links.

    Every field is listed and marked read-only; entries are filterable
    by the owning login.
    """
    readonly_fields = [
        'short', 'full',
        'count', 'login'
    ]
    list_display = [
        'short', 'full',
        'count', 'login'
    ]
    list_filter = ['login']
admin.site.register(Link, ShorterAdmin)
| 335 |
chapter-03/exercise004.py
|
krastin/pp-cs3.0
| 0 |
2169636
|
def absolute_difference(x:float, y:float) -> float:
    """
    return the absolute value of the difference between x and y
    >>> absolute_difference(3, 5)
    2
    >>> absolute_difference(10, 7)
    3
    """
    difference = x - y
    return difference if difference >= 0 else -difference
# Demo calls mirroring the doctest examples.
print(absolute_difference(3,5))
print(absolute_difference(10,7))
| 301 |
tests/vmap_test.py
|
RandallBalestriero/TheanoXLA
| 67 |
2169408
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import symjax
import symjax as sj
def test_vectorize():
    """Vectorized symjax function: variable-length batch, side-effect update on w."""
    sj.current_graph().reset()
    # Leading dimension 0 marks a variadic batch axis.
    x = symjax.tensor.Placeholder((0, 2), "float32")
    w = symjax.tensor.Variable(1.0, dtype="float32")
    p = x.sum(1)
    f = symjax.function(x, outputs=p, updates={w: x.sum()})
    assert np.array_equal(f(np.ones((1, 2))), [2.0])
    assert w.value == 2.0
    assert np.array_equal(f(np.ones((2, 2))), [2.0, 2.0])
    assert w.value == 4.0
def test_vectorize_sgd():
    """SGD on a vectorized least-squares loss must decrease the loss."""
    sj.current_graph().reset()
    x = symjax.tensor.Placeholder((0, 2), "float32")
    y = symjax.tensor.Placeholder((0,), "float32")
    w = symjax.tensor.Variable((1, 1), dtype="float32")
    loss = ((x.dot(w) - y) ** 2).mean()
    g = symjax.gradients(loss, [w])[0]
    other_g = symjax.gradients(x.dot(w).sum(), [w])[0]
    f = symjax.function(x, y, outputs=loss, updates={w: w - 0.1 * g})
    other_f = symjax.function(x, outputs=other_g)
    L = [10]
    for i in range(10):
        # Batch size grows with i; loss should fall monotonically at the end.
        L.append(f(np.ones((i + 1, 2)), -1 * np.ones(i + 1)))
    assert L[-1] < L[-2]
    assert np.array_equal(other_f(np.ones((i + 1, 2))), [i + 1.0, i + 1.0])
if __name__ == "__main__":
    test_vectorize_sgd()
    test_vectorize()
| 1,265 |
hard-gists/2032745/snippet.py
|
jjhenkel/dockerizeme
| 21 |
2169478
|
# java -cp ~/Downloads/Sikuli-X-1.0rc3\ \(r905\)-linux-x86_64/Sikuli-IDE/sikuli-script.jar org.python.util.jython test.py
from __future__ import with_statement
import unittest
from sikuli.Sikuli import *
class Firefox(object):
    """
    very simple firefox browser context manager

    Opens Firefox on entry, navigates to *url*, and quits on exit.
    """
    def __init__(self, url):
        self.url = url
    def __enter__(self):
        # Focus screen 0, launch the browser and type the URL into the
        # location bar (Ctrl+L).  `type`/`wait` are Sikuli globals that
        # shadow the Python builtins of the same name.
        Screen(0)
        app = App.open('firefox')
        wait(2)
        type('l', KEY_CTRL)
        type("%s\n" % self.url)
    def __exit__(self, type_, value, traceback):
        # Quit the browser (Ctrl+Q) regardless of test outcome.
        type('q', KEY_CTRL)
class TestBasicScenario(unittest.TestCase):
    """
    + check the existence of the download overlay and the toggling of its
    visibility with the visiblity of the customer list
    + edit, delete and save (undelete) of a customer yaml (basic scenario)

    Tests are image-driven (Sikuli) and order-dependent: the numeric
    prefixes force alphabetical execution order.
    """
    def test_01_on_index_download_overlay_visible(self):
        assert exists("download-overlay.png")
    def test_02_click_settings_overlay_disappera(self):
        click("settings.png")
        wait("new-customer.png" )
        assert not exists("download-overlay.png")
    def test_03_edit_customer(self):
        click("semansarada.png")
        wait("semansarada-editing.png")
        assert exists("semansarada-yaml.png")
    def test_04_deleting_customer(self):
        click("trash.png")
        wait("semansarada-deleted.png")
    def test_05_undeleting_customer(self):
        click("semansarada.png")
        wait("semansarada-editing.png")
        click("save.png")
        wait("semansarada-saved.png")
    def test_06_closing_customer_list(self):
        click("settings.png")
        assert exists("download-overlay.png")
# Sikuli settings (no logs)
Settings.ActionLogs = False
Settings.InfoLogs = False
Settings.DebugLogs = False
# Run the suite inside a managed browser session.
with Firefox('http://localhost:3000'):
    suite = unittest.TestLoader().loadTestsFromTestCase(TestBasicScenario)
    unittest.TextTestRunner(verbosity=2).run(suite)
"""
[eugene]~/Code/sikuli/tollbooth.sikuli$ java -cp ~/Downloads/Sikuli-X-1.0rc3\ \(r905\)-linux-x86_64/Sikuli-IDE/sikuli-script.jar org.python.util.jython test.py
[info] Sikuli vision engine loaded.
[info] VDictProxy loaded.
test_01_on_index_download_overlay_visible (__main__.TestBasicScenario) ... ok
test_02_click_settings_overlay_disappera (__main__.TestBasicScenario) ... ok
test_03_edit_customer (__main__.TestBasicScenario) ... ok
test_04_deleting_customer (__main__.TestBasicScenario) ... ok
test_05_undeleting_customer (__main__.TestBasicScenario) ... ok
test_06_closing_customer_list (__main__.TestBasicScenario) ... ok
----------------------------------------------------------------------
Ran 6 tests in 23.367s
OK
"""
| 2,701 |
easydatalab/tests/monitoring/test_filemonitor.py
|
cfalguiere/datalab-utils
| 0 |
2169843
|
#-*- coding: utf-8 -*-
from __future__ import print_function
import unittest
from easydatalab.monitoring.filemonitor import FileMonitor
class TestFileMonitor(unittest.TestCase):
    """Unit tests for FileMonitor lifecycle (track / pre / post conditions)."""
    def test_constructor(self):
        fm = FileMonitor()
        self.assertIsNotNone(fm)
    def test_track(self):
        fm = FileMonitor()
        monitor = fm.track('A', 'input', 'output')
        self.assertIsNotNone(monitor)
    def test_pre(self):
        fm = FileMonitor()
        monitor = fm.track('A', 'input', 'output')
        monitor.pre_condition()
        # assertEquals was a deprecated alias and was removed in Python 3.12;
        # assertEqual is the supported spelling.
        self.assertEqual(monitor.status, 'Started')
    def test_post(self):
        fm = FileMonitor()
        monitor = fm.track('A', 'input', 'output')
        monitor.post_condition()
        self.assertEqual(monitor.status, 'Done')
| 801 |
tests/bot/test_send_response.py
|
sedders123/phial
| 13 |
2169432
|
"""Test send_response."""
import io
import pytest
from phial import Attachment, Phial, Response
from tests.helpers import wildpatch
def test_send_response_string() -> None:
    """Test send_response works with a string."""
    def mock_send_message(instance: Phial, response: Response) -> None:
        # A bare string must be wrapped into a Response on the given channel.
        assert response.text == "test"
        assert response.channel == "channel"
    bot = Phial("token")
    wildpatch(Phial, "send_message", mock_send_message)
    bot._send_response("test", "channel")
def test_send_response_with_none() -> None:
    """Test send_response works with None."""
    # None means "no response": must be a silent no-op.
    bot = Phial("token")
    bot._send_response(None, "channel")
def test_send_response_fails_with_non_response_object() -> None:
    """Test send_response fails with an invalid type."""
    bot = Phial("token")
    with pytest.raises(ValueError):
        bot._send_response(True, "channel")  # type: ignore
def test_send_response() -> None:
    """Test send_response works with response."""
    expected_response = Response(
        "channel",
        "message",
        original_ts="orig_time",
        ephemeral=False,
        user="user",
        attachments=[{"foo": "bar"}],
    )
    def mock_send_message(instance: Phial, response: Response) -> None:
        # A fully-formed Response must be forwarded unchanged.
        assert response == expected_response
    bot = Phial("token")
    wildpatch(Phial, "send_message", mock_send_message)
    bot._send_response(expected_response, "channel")
def test_send_response_fails_with_text_and_reaction() -> None:
    """Test send_response fails when a response has both text and reaction."""
    expected_response = Response(
        "channel", "message", original_ts="orig_time", reaction="reaction"
    )
    def mock_send_message(instance: Phial, response: Response) -> None:
        assert response == expected_response
    bot = Phial("token")
    wildpatch(Phial, "send_message", mock_send_message)
    with pytest.raises(ValueError):
        bot._send_response(expected_response, "channel")
def test_send_response_with_attachment() -> None:
    """Test send_response works with an Attachment."""
    def mock_send_attachment(instance: Phial, response: Attachment) -> None:
        assert response.channel == "channel"
        assert response.filename == "file_name"
    bot = Phial("token")
    wildpatch(Phial, "upload_attachment", mock_send_attachment)
    output = io.StringIO()
    output.write("content")
    attachment = Attachment("channel", "file_name", output)
    bot._send_response(attachment, "channel")
def test_send_response_reaction() -> None:
    """Test send_response works with an reaction."""
    expected_response = Response(
        "channel", original_ts="orig_time", reaction="reaction"
    )
    def mock_send_reaction(instance: Phial, response: Response) -> None:
        assert response == expected_response
    bot = Phial("token")
    wildpatch(Phial, "send_reaction", mock_send_reaction)
    bot._send_response(expected_response, "channel")
| 2,990 |
src/pythonbasico-oo/veiculo.py
|
kessiarodrigues/Python-Course
| 0 |
2169362
|
class Veiculo:
    """A simple vehicle with color, wheel count, brand and a fuel tank.

    Fixes the original, which was a syntax error
    (``def __init__(self):_init_(self, cor, rodas, marca):``) and
    referenced an undefined ``tanque`` name.
    """
    def __init__(self, cor, rodas, marca, tanque=0):
        self.cor = cor
        self.rodas = rodas
        self.marca = marca
        # Fuel currently in the tank (litres); starts empty by default.
        self.tanque = tanque
    def abastecer(self, litros):
        """Add *litros* litres of fuel to the tank."""
        self.tanque += litros
| 293 |
tests/util.py
|
Arlula/python-core-sdk
| 0 |
2167929
|
import arlulacore
import arlulacore
import os
def create_test_session() -> arlulacore.Session:
    """Build an arlulacore Session from API_KEY/API_SECRET/API_HOST env vars.

    Missing variables come back as None from os.getenv and are left for
    Session itself to reject.
    """
    return arlulacore.Session(os.getenv("API_KEY"), os.getenv("API_SECRET"), url=os.getenv("API_HOST"))
| 200 |
digideep/utility/stats/gpu.py
|
sharif1093/digideep
| 11 |
2169584
|
"""This module is inspired by https://github.com/anderskm/gputil/blob/master/GPUtil/GPUtil.py.
This module provides tools to monitor the GPU utilization using Visdom.
"""
from distutils import spawn
from subprocess import Popen, PIPE
import os, platform
#####################
##### GPU STATS #####
#####################
# GPU = collections.namedtuple("GPU", "id uuid load memoryTotal memoryUsed memoryFree driver gpu_name serial display_active display_mode temp_gpu")
def safeFloatCast(strNumber):
    """Convert *strNumber* to float, mapping unparsable input to NaN."""
    try:
        return float(strNumber)
    except ValueError:
        return float('nan')
def get_gpu_lines():
    """Run nvidia-smi in CSV mode and return its raw output lines.

    Returns [] when nvidia-smi cannot be launched (a warning is emitted).
    The last element is the empty string produced by the trailing newline.
    """
    if platform.system() == "Windows":
        # If the platform is Windows and nvidia-smi
        # could not be found from the environment path,
        # try to find it from system drive with default installation path
        nvidia_smi = spawn.find_executable('nvidia-smi')
        if nvidia_smi is None:
            nvidia_smi = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ['systemdrive']
    else:
        nvidia_smi = "nvidia-smi"
    # Get ID, processing and memory utilization for all GPUs
    try:
        p = Popen([nvidia_smi,"--query-gpu=index,uuid,utilization.gpu,memory.total,memory.used,memory.free,driver_version,name,gpu_serial,display_active,display_mode,temperature.gpu", "--format=csv,noheader,nounits"], stdout=PIPE)
        stdout, stderror = p.communicate()
    except Exception as ex:
        import warnings
        warnings.warn(str(ex))
        return []
    output = stdout.decode('UTF-8')
    # output = output[2:-1] # Remove b' and ' from string added by python
    #print(output)
    ## Parse output
    # Split on line break
    # NOTE(review): splitting on os.linesep assumes nvidia-smi emits the
    # platform line separator; if it emits plain '\n' on Windows this split
    # would not break lines -- confirm on a Windows host.
    lines = output.split(os.linesep)
    return lines
def get_gpu_count():
    """Return the number of GPUs reported by nvidia-smi."""
    # The raw output ends with a trailing empty entry, hence the -1.
    return len(get_gpu_lines()) - 1
def get_gpu_stats():
    """Parse nvidia-smi CSV output into a dict of per-GPU attribute lists.

    Each key (id, uuid, load, memoryTotal, ...) maps to a list with one
    entry per device, in the positional order of the --query-gpu fields
    requested by get_gpu_lines.
    """
    lines = get_gpu_lines()
    # Last line is the empty trailer from the final newline.
    numDevices = len(lines)-1
    # TODO: We don't need all information for statistics. Only those that may change ...
    gpus = dict(id=[], uuid=[], load=[],
                memoryTotal=[], memoryUsed=[], memoryFree=[],
                driver=[], gpu_name=[], serial=[], display_active=[], display_mode=[], temp_gpu=[])
    for i in range(numDevices):
        line = lines[i]
        #print(line)
        # Fields arrive comma-separated in the exact order queried.
        vals = line.split(', ')
        #print(vals)
        gpus["id"].append(int(vals[0]))
        gpus["uuid"].append(vals[1])
        gpus["load"].append(safeFloatCast(vals[2]))
        gpus["memoryTotal"].append(safeFloatCast(vals[3]))
        gpus["memoryUsed"].append(safeFloatCast(vals[4]))
        gpus["memoryFree"].append(safeFloatCast(vals[5]))
        gpus["driver"].append(vals[6])
        gpus["gpu_name"].append(vals[7])
        gpus["serial"].append(vals[8])
        gpus["display_active"].append(vals[9])
        gpus["display_mode"].append(vals[10])
        gpus["temp_gpu"].append(safeFloatCast(vals[11]))
    return gpus
| 2,978 |
ideas/migrations/0004_auto_20200315_0743.py
|
abhiabhi94/idea-fare
| 0 |
2168008
|
# Generated by Django 3.0.4 on 2020-03-15 07:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter Idea.slug (default '', max_length 80).

    Do not hand-edit once applied; create a follow-up migration instead.
    """
    dependencies = [
        ('ideas', '0003_auto_20200308_1450'),
    ]
    operations = [
        migrations.AlterField(
            model_name='idea',
            name='slug',
            field=models.SlugField(default='', max_length=80),
        ),
    ]
| 392 |
cride/users/models/users.py
|
JuanDavidArce/ComparteRide
| 0 |
2169535
|
"""User model"""
#django
from django.core import validators
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
#Utilities
from cride.utils.models import CRideModel
class User(CRideModel,AbstractUser):
    """User model.

    Extend from django's abstract user, change the username field
    to email and add some extra fields.
    """
    email = models.EmailField(
        'email_address',
        unique=True,
        error_messages={
            'unique':'A user with that email already exists'
        }
    )
    # NOTE(review): without a leading '^' anchor, RegexValidator (re.search)
    # also accepts strings that merely end with a valid number -- confirm
    # whether that is intended before tightening.
    phone_regex =RegexValidator(
        regex=r'\+?1?\d{9,15}$',
        message="phone number must be entered in the format +999999999, Up to 15 digits allowed"
    )
    phone_number = models.CharField(validators=[phone_regex], max_length=17,blank=True)
    # Authenticate with email; these extra fields are still prompted by
    # createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS =['username','first_name','last_name']
    is_client=models.BooleanField(
        'client status',
        default=True,
        # Fixed help text: the two implicitly concatenated fragments ran
        # together ("...queriesclients...") and contained a typo ("easili").
        help_text=(
            'Help easily distinguish users and perform queries. '
            'Clients are the main type of user.'
        )
    )
    is_verified = models.BooleanField(
        'verified',
        default=False,
        help_text='set to true when the user have verified its email address'
    )
    def __str__(self):
        """Return username"""
        return self.username
    def get_short_name(self):
        """Return username."""
        return self.username
| 1,529 |
example/example/models.py
|
yourcelf/django-ajax-selects
| 1 |
2169605
|
# -*- coding: utf8 -*-
from django.db import models
class Person(models.Model):
    """ an actual singular human being """
    name = models.CharField(blank=True, max_length=100)
    email = models.EmailField()
    def __unicode__(self):
        # Python 2-style display name (this codebase predates Django 2 /
        # __str__-only models).
        return self.name
class Group(models.Model):
    """ a music group """
    name = models.CharField(max_length=200, unique=True, help_text="Name of the group")
    members = models.ManyToManyField(Person, blank=True, help_text="Enter text to search for and add each member of the group.")
    url = models.URLField(blank=True)
    def __unicode__(self):
        return self.name
class Label(models.Model):
    """ a record label """
    name = models.CharField(max_length=200, unique=True)
    # NOTE: positional ForeignKey without on_delete -- valid only on
    # Django < 2.0, consistent with the rest of this example app.
    owner = models.ForeignKey(Person, blank=True, null=True)
    url = models.URLField(blank=True)
    def __unicode__(self):
        return self.name
class Song(models.Model):
    """ a song """
    title = models.CharField(blank=False, max_length=200)
    group = models.ForeignKey(Group)
    def __unicode__(self):
        return self.title
class Release(models.Model):
    """ a music release/product """
    title = models.CharField(max_length=100)
    catalog = models.CharField(blank=True, max_length=100)
    group = models.ForeignKey(Group, blank=True, null=True, verbose_name=u"Русский текст (group)")
    label = models.ForeignKey(Label, blank=False, null=False)
    songs = models.ManyToManyField(Song, blank=True)
    def __unicode__(self):
        return self.title
class Author(models.Model):
    """ Author has multiple books,
        via foreign keys
    """
    name = models.CharField(max_length=100)
    def __unicode__(self):
        return self.name
class Book(models.Model):
    """ Book has no admin, its an inline in the Author admin"""
    author = models.ForeignKey(Author)
    title = models.CharField(max_length=100)
    about_group = models.ForeignKey(Group)
    mentions_persons = models.ManyToManyField(Person, help_text="Person lookup renders html in menu")
    def __unicode__(self):
        return self.title
| 2,114 |
test/tdm_test.py
|
vibinabraham/FermiCluster
| 3 |
2170087
|
import numpy as np
import scipy
import itertools
import time
from math import factorial
import copy as cp
import sys
from fermicluster import *
from pyscf_helper import *
import pyscf
ttt = time.time()
np.set_printoptions(suppress=True, precision=10, linewidth=1500)
# Record the exact git commit for reproducibility of the numbers below.
print("GITHUB TREE")
import subprocess
label = subprocess.check_output(["git","rev-parse", "HEAD"]).strip()
print(label)
def test_1():
    """Compare the TPSCI transition density matrix against pyscf FCI.

    Builds an H6 model system, computes the 1-particle transition density
    matrix between states state_i/state_j with both codes and asserts they
    agree up to an overall sign (eigenvectors are defined up to a phase).
    """
    ttt = time.time()
    ### PYSCF INPUT
    molecule = '''
    H 0.00 0.00 0.00
    H 2.00 0.00 2.00
    H 0.00 2.20 2.00
    H 2.10 2.00 0.00
    H 4.00 2.20 2.00
    H 4.10 2.00 0.00
    '''
    charge = 0
    spin = 0
    basis_set = 'sto-3g'
    ### TPSCI BASIS INPUT
    orb_basis = 'scf'
    cas = False
    #cas_nstart = 2
    #cas_nstop = 10
    #cas_nel = 10
    ### TPSCI CLUSTER INPUT
    # Only the last blocks/init_fspace assignment is effective; the earlier
    # ones are alternative clusterings kept for quick switching.
    blocks = [[0,1,2,3],[4,5,6,7]]
    init_fspace = ((2, 2), (0, 0))
    blocks = [[0,1,2,3],[4,5]]
    init_fspace = ((2, 2), (1, 1))
    blocks = [[0,1],[2,3]]
    init_fspace = ((1, 1), (1, 1))
    blocks = [[0,1],[2,3],[4,5]]
    init_fspace = ((1, 1), (1, 1), (1, 1))
    nelec = tuple([sum(x) for x in zip(*init_fspace)])
    if cas == True:
        assert(cas_nel == nelec)
        nelec = cas_nel
    #Integrals from pyscf
    #Integrals from pyscf
    pmol = PyscfHelper()
    pmol.init(molecule,charge,spin,basis_set,orb_basis)
    h = pmol.h
    g = pmol.g
    ecore = pmol.ecore
    #cluster using hcore
    #idx = e1_order(h,cut_off = 1e-2)
    #h,g = reorder_integrals(idx,h,g)
    state_i = 0
    state_j = 1
    do_tci = 1
    if do_tci:
        clusters, clustered_ham, ci_vector, cmf_out = system_setup(h, g, ecore, blocks, init_fspace,
                cmf_maxiter = 20,
                cmf_dm_guess = None,
                cmf_diis = False,
                max_roots = 100,
                delta_elec = 4
                )
        print(" Build exact eigenstate")
        ci_vector.expand_to_full_space(clusters)
        pt_vector = ci_vector.copy()
        H = build_full_hamiltonian(clustered_ham, ci_vector)
        print(" Diagonalize Hamiltonian Matrix:",flush=True)
        vguess = ci_vector.get_vector()
        # Use sparse Lanczos only for large spaces with a usable guess.
        if H.shape[0] > 100 and abs(np.sum(vguess)) >0:
            e,v = scipy.sparse.linalg.eigsh(H,n_roots=5,v0=vguess,which='SA')
        else:
            e,v = np.linalg.eigh(H)
        idx = e.argsort()
        e = e[idx]
        e = e + ecore
        v = v[:,idx]
        print(np.linalg.norm(v))
        print(np.linalg.norm(v[:,state_j]))
        # Load the two eigenstates into cluster-basis vectors.
        ci_vector.zero()
        ci_vector.set_vector(v[:,state_i])
        pt_vector.zero()
        pt_vector.set_vector(v[:,state_j])
        #ci_vector.print_configs()
        pt_vector.print_configs()
        print(pt_vector.norm())
        print(ci_vector.norm())
        rdm_a1, rdm_b1 = build_tdm(ci_vector,pt_vector,clustered_ham)
        print("E i j %4d %4d %16.8f %16.8f"%(state_i,state_j,e[state_i],e[state_j]))
        print(rdm_a1+rdm_b1)
    do_fci = 1
    if do_fci:
        print("FCI")
        from pyscf import fci
        #efci, ci = fci.direct_spin1.kernel(h, g, h.shape[0], nelec,ecore=ecore, verbose=5) #DO NOT USE
        cisolver = fci.direct_spin1.FCI()
        cisolver.max_cycle = 200
        cisolver.conv_tol = 1e-14
        efci, ci = cisolver.kernel(h, g, h.shape[1], nelec=nelec, ecore=ecore,nroots =5,verbose=100)
        #d1 = cisolver.make_rdm1(ci, h.shape[1], nelec)
        #print(d1)
        #print("FCIS%10.8f"%(efci))
        #print(t)
        tdm = cisolver.trans_rdm1(ci[state_i], ci[state_j], h.shape[0], nelec=nelec, link_index=None)
        print("E i j %4d %4d %16.8f %16.8f"%(state_i,state_j,efci[state_i],efci[state_j]))
        print(tdm)
        print("Difference")
        print(tdm - rdm_a1 - rdm_b1)
        # Eigenvectors carry an arbitrary global sign, so accept either
        # tdm or -tdm.
        try:
            assert(np.allclose(rdm_a1 + rdm_b1, tdm, atol=1e-7))
        except:
            assert(np.allclose(rdm_a1 + rdm_b1, -1*tdm, atol=1e-7))
if __name__== "__main__":
    test_1()
| 4,361 |
src/movement/rightHand.py
|
Quanta-Robotics/Robot-Blueberry
| 25 |
2168872
|
from expression import *
# Scripted right-hand gesture: raise the arm, hold two seconds, then lower
# it and return to the neutral pose.  `takePosition`, `changeDegree` and
# `time` all come from the star import above.
# NOTE(review): the numeric channel indices (1, 3, 5, 7, 9) are presumably
# right-arm servo channels -- confirm against the servo wiring map.
takePosition()
changeDegree([3],[100])
changeDegree([1],[170])
changeDegree([7],[70])
changeDegree([5,9],[90,180])
time.sleep(2)
#stop
# Reverse the motion back down.
changeDegree([5,9],[170,60])
changeDegree([7],[170])
changeDegree([1],[50])
changeDegree([3],[0])
time.sleep(1)
takePosition()
| 289 |
download_user_unsplash_photos.py
|
Ourselp/unsplash-DigitalFrame
| 0 |
2168853
|
import pyunsplash
import requests
import os
import re
import json
import shutil
#If you have more than 50 photos on your account think about pushing your app for production otherwise you are limited to 50 api call / hour
pu = pyunsplash.PyUnsplash(api_key='YOUR API KEY')
def main():
    """Download any of the user's Unsplash photos missing locally, then
    restart the slideshow if the collection changed."""
    pageNumber = 10
    count = 0
    update = False
    retrievePic = 0
    while retrievePic < pageNumber:
        this_user = pu.user('ourselp', w=100, h=100)
        photos = this_user.photos(page=retrievePic, per_page=20)  # photos is an instance of class Photos
        retrievePic += 1
        if photos.entries:
            # While pages keep returning entries, allow one more page.
            pageNumber += 1
            for photo in photos.entries:
                count += 1
                filename = photo.id + '.jpeg'
                # NOTE(review): the client_id is hard-coded here; consider
                # loading it from configuration/environment instead.
                linkSourceImg = requests.get(photo.link_download_location + '/?client_id=UVtouHS8slGsncRIUtSKsI5BZdiI2dzCQ0hav80KQ4Y')
                data = linkSourceImg.json()
                url = data['url']
                path = '/home/pi/Desktop/photoframe/unsplash-pictures/%s' % filename
                # Fix: the original opened the file just to test existence
                # and never closed it, leaking one file handle per photo
                # already on disk.
                if os.path.exists(path):
                    print (" ---> Already have %s" % url)
                else:
                    print (" ---> Downloading %s" % url)
                    r = requests.get(url, stream=True)
                    if r.status_code == 200:
                        with open(path, 'wb') as f:
                            f.write(r.content)
                        update = True
    #if it added or removed a photo, update slideshow
    if update == True:
        os.system("kill $(ps aux | grep '[f]eh' | awk '{print $2}')")
        os.system("/home/pi/bin/script_slideshow")
# Script entry point.
if __name__ == '__main__':
    main()
| 1,815 |
src/report_manager/apps/loginApp.py
|
enryH/CKG
| 0 |
2169805
|
from apps import basicApp
import dash_core_components as dcc
import dash_html_components as html
class LoginApp(basicApp.BasicApp):
    """
    Defines the login App
    Enables user to access the reports
    """
    def __init__(self, title, subtitle, description, layout=[], logo=None, footer=None):
        # NOTE(review): mutable default `layout=[]` is shared across calls;
        # safe only if BasicApp never mutates it -- confirm.
        self.pageType = "loginPage"
        basicApp.BasicApp.__init__(self, title, subtitle, description, self.pageType, layout, logo, footer)
        self.buildPage()
    def buildPage(self):
        """
        Builds page with the basic layout from *basicApp.py* and adds the login form.
        """
        self.add_basic_layout()
        # Plain HTML form POSTing credentials to /apps/login (server-side
        # handler, not a Dash callback).
        login_form = html.Div([html.Form([
            dcc.Input(placeholder='username', name='username', type='text'),
            dcc.Input(placeholder='password', name='password', type='password'),
            html.Button('Login', type='submit')], action='/apps/login', method='post')])
        self.add_to_layout(login_form)
| 1,006 |
server/directory.py
|
pennlabs/labs-api-server
| 9 |
2169950
|
import datetime
from flask import jsonify, request
from server import app
from server.base import cached_route
from server.penndata import penn_dir
@app.route("/directory/search", methods=["GET"])
def detail_search():
    """Search the Penn faculty directory by name; cached for 30 days."""
    if "name" not in request.args:
        return jsonify({"error": "Please specify search parameters in the query string"})
    name = request.args["name"]
    def get_data():
        # Build first/last-name query permutations: "Last, First",
        # "First Last" (tried both ways), or a single token matched
        # against either field.
        arr = name.split()
        params = []
        if len(arr) > 1:
            if arr[0][-1] == ",":
                params = [{"last_name": arr[0][:-1], "first_name": arr[1]}]
            else:
                params = [
                    {"last_name": arr[-1], "first_name": arr[0]},
                    {"last_name": arr[0], "first_name": arr[-1]},
                ]
        else:
            params = [{"last_name": name}, {"first_name": name}]
        # Restrict to faculty and de-duplicate across the permutations.
        ids = set()
        final = []
        for param in params:
            param["affiliation"] = "FAC"
        for param in params:
            data = penn_dir.search(param)
            for result in data["result_data"]:
                person_id = result["person_id"]
                if person_id not in ids:
                    final.append(result)
                    ids.add(person_id)
        return {"result_data": final}
    td = datetime.timedelta(days=30)
    return cached_route("directory:search:%s" % name, td, get_data)
@app.route("/directory/person/<person_id>", methods=["GET"])
def person_details(person_id):
    """Return one person's directory record; cached for 30 days."""
    td = datetime.timedelta(days=30)
    def get_data():
        return penn_dir.person_details(person_id)["result_data"][0]
    return cached_route("directory:person:%s" % person_id, td, get_data)
| 1,696 |
old/multihead/cut.py
|
kkltcjk/face
| 0 |
2169612
|
import os
import subprocess
def cut(base_dir, output_dir):
    """Run _cut on every capture directory under *base_dir* except 'cluster'."""
    for entry in os.listdir(base_dir):
        if entry == 'cluster':
            continue
        capture_dir = os.path.join(base_dir, entry)
        _cut(capture_dir,
             os.path.join(capture_dir, 'video'),
             os.path.join(capture_dir, 'yitu_orgin'),
             output_dir)
def _cut(base_dir, video_dir, yitu_dir, output_dir):
    """Invoke the external exe.sh cutter for one capture directory.

    Output (stdout+stderr) is appended to <base_dir>/result.log; the call
    blocks until the subprocess finishes.
    """
    # Directory names look like "<prefix>_ipcNN_..."; the second component
    # carries the IPC number after the 'ipc' prefix, scaled by 100.
    ipc_no = int(os.path.basename(base_dir).split('_')[1][3:]) * 100
    with open(os.path.join(base_dir, 'result.log'), 'a+') as f:
        cmd = './exe.sh {} {} {} {} {}'.format(base_dir, video_dir,
                                               yitu_dir, output_dir, ipc_no)
        p = subprocess.Popen(cmd, shell=True, stdout=f.fileno(),
                             stderr=f.fileno(), executable='/bin/bash',
                             cwd='/root/temp_face/yitu_sdk_sj_crack20180308')
        p.communicate()
if __name__ == '__main__':
    cut('/home/data/test/s8_20180503', '/home/data/test/s8_20180503/cluster/output')
| 1,032 |
courses/python/idor/vuln_app/patched_payments.py
|
tank1st99/securitygym
| 49 |
2169844
|
from flask import jsonify
from flask import Blueprint
from flask import g
from flask import request
from flask import url_for
from flask import redirect
from flask import render_template
from flask import flash
import functools
from idor.vuln_app.db import get_db
bp = Blueprint("payments", __name__)
def login_required(view):
    """Decorator: redirect anonymous visitors to the login page."""
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        # A logged-in user (set by the auth blueprint) passes straight through.
        if g.user is not None:
            return view(**kwargs)
        return redirect(url_for("auth.login"))
    return wrapped_view
@bp.route("/")
@login_required
def user_payments():
    """List the current user's payments, ordered by id."""
    query = "SELECT * FROM payment WHERE user_id = ? ORDER BY id"
    rows = get_db().execute(query, (g.user["id"],)).fetchall()
    return render_template("payments/payments_list.html", payments=rows)
@bp.route("/payment/<int:payment_id>")
@login_required
def view_payment(payment_id):
    """Show one payment; the query is scoped to the logged-in user (anti-IDOR)."""
    row = get_db().execute(
        "SELECT * FROM payment WHERE user_id = ? AND id = ?",
        (g.user["id"], payment_id,),
    ).fetchone()
    if row:
        return render_template("payments/payment_details.html", payment=row)
    flash("Payment not found")
    return redirect(url_for("payments.user_payments"))
@bp.route("/add_payment", methods=("GET", "POST"))
@login_required
def add_payment():
    """Render the add-payment form (GET) or create a payment (POST).

    On validation failure the form is re-rendered and the error message is
    flashed so the template can display it.
    """
    if request.method == "POST":
        db = get_db()
        amount = request.form["amount"]
        description = request.form["description"]
        error = None
        if not amount:
            error = "Amount required"
        if not description:
            error = "Description required"
        if error is None:
            db.execute("INSERT INTO payment (user_id, amount, description) VALUES (?, ?, ?)",
                       (g.user["id"], amount, description)
                       )
            db.commit()
            return redirect(url_for("payments.user_payments"))
        # Bug fix: the error was computed but never shown to the user.
        flash(error)
    return render_template("payments/add_payment.html")
| 1,958 |
odoo-13.0/addons/website_partner/models/res_partner.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
| 0 |
2169711
|
# -*- coding: utf-8 -*-
from odoo import api, fields, models
from odoo.addons.http_routing.models.ir_http import slug
from odoo.tools.translate import html_translate
class WebsiteResPartner(models.Model):
    """Extend res.partner with website SEO metadata and web-facing descriptions."""
    _name = 'res.partner'
    _inherit = ['res.partner', 'website.seo.metadata']
    # Rich-text description shown on the partner's public page (translatable).
    website_description = fields.Html('Website Partner Full Description', strip_style=True, translate=html_translate)
    # Short teaser text used in partner listings (translatable).
    website_short_description = fields.Text('Website Partner Short Description', translate=True)
    def _compute_website_url(self):
        # Override the mixin's URL with the partner-specific /partners/<slug> route.
        super(WebsiteResPartner, self)._compute_website_url()
        for partner in self:
            partner.website_url = "/partners/%s" % slug(partner)
| 698 |
systemcheck/systems/generic/gui/widgets/generic_main_widget.py
|
team-fasel/SystemCheck
| 2 |
2168760
|
from PyQt5 import QtWidgets, QtGui, QtCore
from systemcheck.gui.models import PolyMorphicFilterProxyModel
from systemcheck.systems.generic.gui.widgets import GenericSystemWidget
from systemcheck.checks.gui.widgets.checks_widget import ChecksWidget
from systemcheck.results.gui.widgets.result_widget import ResultWidget
from systemcheck.resources import icon_rc
import logging
import systemcheck.plugins
from pprint import pprint, pformat
from typing import Union
from collections import namedtuple
class Signals(QtCore.QObject):
    """Central hub of Qt signals exchanged between the main widget's panes.

    Signals are grouped by prefix: checks* (checks tree actions), result*
    (results pane actions) and systems* (systems tree actions).
    """
    # Checks tree actions
    checksDelete = QtCore.pyqtSignal()
    checksExport = QtCore.pyqtSignal()
    checksImport = QtCore.pyqtSignal()
    checksNew = QtCore.pyqtSignal()
    checksNewFolder = QtCore.pyqtSignal()
    checksPause = QtCore.pyqtSignal()
    checksRun = QtCore.pyqtSignal()
    checksStop = QtCore.pyqtSignal()
    # Results pane actions
    resultClear = QtCore.pyqtSignal()
    resultExport = QtCore.pyqtSignal()
    resultImport = QtCore.pyqtSignal()
    # Systems tree actions
    systemsCheckLogon = QtCore.pyqtSignal()
    systemsDelete = QtCore.pyqtSignal()
    systemsDisable = QtCore.pyqtSignal()
    systemsEnable = QtCore.pyqtSignal()
    systemsExport = QtCore.pyqtSignal()
    systemsImport = QtCore.pyqtSignal()
    systemsNew = QtCore.pyqtSignal()
    systemsNewFolder = QtCore.pyqtSignal()
    def __init__(self):
        super().__init__()
class GenericSystemMainWidget(QtWidgets.QWidget):
    """Main widget combining a systems tree, a checks tree and a results pane.

    The three panes are arranged in nested splitters.  Check and system trees
    are restricted to the configured system type through polymorphic filter
    proxy models, and all actions are routed through the Signals hub.
    """

    def __init__(self, systemType:str='generic', systemFilter:list = None,
                 systemsWidget:QtWidgets.QWidget=None):
        """Build the UI for one system type.

        :param systemType: plugin category used to filter checks and systems
        :param systemFilter: SQLAlchemy classes the system tree may display
        :param systemsWidget: optional widget class supplied by the systems
                              plugin; GenericSystemWidget is used when absent
        """
        super().__init__()
        self.logger = logging.getLogger(self.__class__.__name__)
        self.signals = Signals()
        self.systemType = systemType
        self.initializePluginManager()
        self.setupCommonUi(systemsWidget)
        self.systemFilter = systemFilter
        # Bug fix: checkModel was assigned None three times in a row; a single
        # assignment suffices (each one rebuilds the filter proxy model).
        self.checkModel = None

    def buildTaskList(self, systems:set, checks:set)->set:
        """ Build the Task List

        :param systems: A set of Systems that were checked
        :param checks: A set of Checks that were checked
        """
        Task = namedtuple('Task', 'system check')
        taskList = set()
        if systems and checks:
            # Cartesian product: every check runs against every system.
            for system in systems:
                for check in checks:
                    task = Task(system=system, check=check)
                    taskList.add(task)
        return taskList

    def checkedChecks(self)->set:
        """ Get List of checked Checks

        Only nodes whose type matches a registered check plugin for this
        system type are returned (folders and foreign types are skipped).
        """
        pluginNames = {plugin.name
                       for plugin in self.pm.getPlugins(category='check', systemtype=self.systemType)}
        checkedChecks = {node
                         for node in self.checks.tree.model().checkedNodes()
                         if node.type in pluginNames}
        return checkedChecks

    def checkedSystems(self)->set:
        """ Get List of checked Systems

        Nodes without logon information (e.g. folders) are excluded.
        """
        checkedSystems = {node
                          for node in self.systems.tree.model().checkedNodes()
                          if node.logon_info() is not None}
        return checkedSystems

    @property
    def checkFilter(self):
        """ Return the Relevant Check Objects for the system type """
        self.logger.debug('Building Check Filter')
        checksFilterList = set()
        pm = systemcheck.plugins.SysCheckPM()
        for plugin in pm.getPlugins(category='check', systemtype=self.systemType):
            for object in plugin.plugin_object.alchemyObjects:
                checksFilterList.add(object)
        self.logger.debug('CheckFilter determined: %s', pformat(checksFilterList))
        return checksFilterList

    @property
    def checkModel(self):
        """ The QAbstractItem Model for Checks """
        return self.__checkModel

    @checkModel.setter
    def checkModel(self, model):
        """ Sets the data model for the checks

        By default the checks are restricted using a QSortFilterProxyModel.
        The filter is generated from the plugin manager based on the system
        type.
        """
        self.__checkModel = model
        self.checkSortFilterProxyModel = PolyMorphicFilterProxyModel(self.checkFilter)
        self.checkSortFilterProxyModel.setSourceModel(self.checkModel)
        self.checks.setModel(self.checkSortFilterProxyModel)

    @property
    def checkSortFilterProxyModel(self):
        """ Proxy model that restricts the checks tree to relevant types. """
        return self.__checkSortFilterProxyModel

    @checkSortFilterProxyModel.setter
    def checkSortFilterProxyModel(self, model):
        self.__checkSortFilterProxyModel = model

    def initializePluginManager(self):
        """ Instantiate the plugin manager used for checks and actions. """
        self.pm = systemcheck.plugins.SysCheckPM()

    def on_checksRun(self):
        """ Execute every checked check against every checked system and
        emit each result to the results pane. """
        systems = self.checkedSystems()
        checks = self.checkedChecks()
        tasklist = self.buildTaskList(systems=systems, checks=checks)
        # Bug fix: a stray no-op expression statement (`self.results`) was
        # removed here.
        for task in tasklist:
            plugin = self.pm.getPlugin(task.check.type)
            result = plugin.plugin_object.executeAction(task.system, task.check)
            self.results.resultAdd_signal.emit(result)

    def setupCommonUi(self, systemsWidget:QtWidgets.QWidget=None):
        """ Assemble splitters, sub-widgets and signal wiring.

        :param systemsWidget: widget class supplied by the systems plugin;
                              GenericSystemWidget is used when None
        """
        layout = QtWidgets.QVBoxLayout()
        self.setLayout(layout)
        self.setWindowTitle('Generic Main Widget')
        self.setWindowIcon(QtGui.QIcon(':Checked'))
        self.checksResults_splitter = QtWidgets.QSplitter()
        self.checksResults_splitter.setOrientation(QtCore.Qt.Vertical)
        self.systemsChecks_splitter = QtWidgets.QSplitter()
        self.systemsChecks_splitter.setOrientation(QtCore.Qt.Horizontal)
        # Verify whether the plugin provides a systems widget. If not, use the generic.
        if systemsWidget:
            self.logger.debug('systems widget provided by systems plugin')
            self.systems = systemsWidget()
        else:
            self.logger.debug('systems widget not provided by systems plugin, using generic widget')
            self.systems = GenericSystemWidget()
        self.systemsChecks_splitter.addWidget(self.systems)
        self.systemsChecks_splitter.addWidget(self.checksResults_splitter)
        self.checks = ChecksWidget(self.systemType)
        self.checksResults_splitter.addWidget(self.checks)
        self.results = ResultWidget()
        self.checksResults_splitter.addWidget(self.results)
        layout.addWidget(self.systemsChecks_splitter)
        self.show()
        # Route the central signal hub to the concrete pane handlers.
        self.signals.checksDelete.connect(self.checks.on_checkDelete)
        self.signals.checksExport.connect(self.checks.on_checkExport)
        self.signals.checksImport.connect(self.checks.on_checkImport)
        self.signals.checksNew.connect(self.checks.on_checkNew)
        self.signals.checksNewFolder.connect(self.checks.on_checkNewFolder)
        self.signals.checksPause.connect(self.checks.on_checkPause)
        self.signals.checksRun.connect(self.on_checksRun)
        self.signals.checksStop.connect(self.checks.on_checkStop)
        self.signals.resultClear.connect(self.results.on_resultClear)
        self.signals.resultExport.connect(self.results.resultHandler.on_resultExport)
        self.signals.resultImport.connect(self.results.resultHandler.on_resultImport)
        self.signals.systemsCheckLogon.connect(self.systems.on_checkLogon)
        self.signals.systemsNewFolder.connect(self.systems.on_newFolder)
        self.signals.systemsNew.connect(self.systems.on_new)
        self.signals.systemsDelete.connect(self.systems.on_delete)
        self.signals.systemsImport.connect(self.systems.on_import)
        self.signals.systemsExport.connect(self.systems.on_export)
        self.signals.systemsDisable.connect(self.systems.on_disable)
        self.signals.systemsEnable.connect(self.systems.on_enable)

    @property
    def systemModel(self)->QtCore.QAbstractItemModel:
        """ Get the System QAbstractItemModel """
        return self.__systemModel

    @systemModel.setter
    def systemModel(self, model:QtCore.QAbstractItemModel)->bool:
        """ Set the System QAbstractItemModel and wrap it in a filter proxy. """
        self.__systemModel = model
        sm = PolyMorphicFilterProxyModel(self.systemFilter)
        sm.setSourceModel(self.systemModel)
        self.systemSortFilterProxyModel = sm
        self.systems.setModel(self.systemSortFilterProxyModel)
        return True

    @property
    def saFolderClass(self):
        """ SQLAlchemy class used for folder nodes in the system tree. """
        return self.__saFolderClass

    @saFolderClass.setter
    def saFolderClass(self, folderClass):
        self.__saFolderClass = folderClass

    @property
    def systemSortFilterProxyModel(self):
        """ Proxy model that restricts the systems tree to relevant types. """
        return self.__systemSortFilterProxyModel

    @systemSortFilterProxyModel.setter
    def systemSortFilterProxyModel(self, proxymodel):
        self.__systemSortFilterProxyModel = proxymodel

    @property
    def systemFilter(self):
        """ SQLAlchemy classes the systems tree may display. """
        return self.__systemFilter

    @systemFilter.setter
    def systemFilter(self, systemFilter:list):
        self.__systemFilter = systemFilter

    @property
    def systemType(self):
        """ Plugin system-type key this widget was built for. """
        return self.__systemType

    @systemType.setter
    def systemType(self, systemType:str):
        self.__systemType = systemType
| 9,109 |
clean/311_service/311sql.py
|
chrislin009/trafficDispatcher
| 1 |
2170018
|
from pyspark.sql import Row
from pyspark import SparkContext
from pyspark.sql.functions import to_timestamp
from pyspark.sql.types import TimestampType
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Bug fix: the SparkSession must exist before createDataFrame is called
    # (it was used before being defined), and the DataFrame must be bound to
    # the name df311 that the temp view and later code actually reference
    # (it was assigned to service311, causing a NameError).
    spark = SparkSession.builder.appName("Python Spark SQL basic example").config("spark.some.config.option", "some-value").getOrCreate()
    # Reuse the session's context instead of constructing a second SparkContext.
    sc = spark.sparkContext
    lines = sc.textFile("/user/bl2514/311Result.txt")
    parts = lines.map(lambda l: l.split(","))
    rdd311 = parts.map(lambda p: Row(nid=p[0], datetime=p[1], borough=p[2], count=int(p[3])))
    df311 = spark.createDataFrame(rdd311)
    df311.createOrReplaceTempView("df311")
    # Drop rows whose neighborhood id is unknown.
    result = spark.sql("select nid, datetime, borough, count from df311 where nid not like 'UNKNOWN'")
    result.write.format("com.databricks.spark.csv").option("header", "true").save("311-Service-2016-Cleaned.csv")
    #no head
    result.select("*").write.save("311clean.csv", format="csv")
| 924 |
Vector.py
|
deanearlwright/chippy
| 0 |
2168890
|
# ==============================================================
# V e c t o r . p y
# ==============================================================
# Author: <NAME>
# Created: 25 August 2011
# Purpose: Reimplementation of the Q-learning perturbation
# testbed for multiple metacognition levels.
# ==============================================================
# imports
# ==============================================================
import unittest
from Constants import *
# ==============================================================
# Vector
# ==============================================================
class Vector(object):
    """A thin wrapper around a list supporting element-wise + and /.

    The result length follows map() semantics: it is truncated to the
    shorter of the two operands.
    """

    def __init__(self, data=None):
        """Create a vector from any iterable; empty when data is falsy."""
        if data:
            self.__vec = list(data)
        else:
            self.__vec = []

    def __repr__(self):
        return 'Vector(%s)' % repr(self.__vec)

    def __add__(self, other):
        """Element-wise sum of two vectors."""
        return Vector(map(lambda x, y: x+y, self, other))

    def __div__(self, other):
        """Element-wise division (Python 2 `/` operator)."""
        return Vector(map(lambda x, y: x/y, self, other))

    # Bug fix: Python 3 dispatches the / operator to __truediv__, not
    # __div__; without this alias `v1 / v2` raises TypeError under Python 3.
    __truediv__ = __div__

    def __getitem__(self, index):
        return self.__vec[index]

    def __len__(self):
        return len(self.__vec)

    def __str__(self):
        return str(self.__vec)
# ==============================================================
# unit tests
# ==============================================================
class TestVector(unittest.TestCase):
    """Unit tests for Vector.

    Bug fix: the division expectations assumed Python 2 integer division;
    under Python 3 the / operator performs true division and yields floats,
    so testDiv's expected values are updated accordingly.
    """

    def testEmptyConstructor(self):
        v = Vector()
        self.assertEqual(len(v), 0)
        self.assertEqual(str(v), '[]')
        self.assertEqual(repr(v), 'Vector([])')

    def testConstructor(self):
        v = Vector([1,2,3])
        self.assertEqual(len(v), 3)
        self.assertEqual(v[1], 2)
        self.assertEqual(str(v), '[1, 2, 3]')
        self.assertEqual(repr(v), 'Vector([1, 2, 3])')

    def testAdd(self):
        v1 = Vector([1,2,3])
        v2 = Vector([22,9,13])
        v = v1 + v2
        self.assertEqual(len(v), 3)
        self.assertEqual(v[1], 11)
        self.assertEqual(str(v), '[23, 11, 16]')
        self.assertEqual(repr(v), 'Vector([23, 11, 16])')

    def testDiv(self):
        v1 = Vector([6,6,16])
        v2 = Vector([2,3,4])
        v = v1 / v2
        self.assertEqual(len(v), 3)
        self.assertEqual(v[1], 2.0)
        self.assertEqual(str(v), '[3.0, 2.0, 4.0]')
        self.assertEqual(repr(v), 'Vector([3.0, 2.0, 4.0])')
# ==============================================================
# main
# ==============================================================
def main():
    """Run the module's unit tests via the unittest CLI runner."""
    unittest.main()
# ==============================================================
# module initialization
# ==============================================================
if __name__ == "__main__":
    # Execute the test suite when run as a script.
    main()
# ==============================================================
# end V e c t o r . p y end
# ==============================================================
| 3,426 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.