max_stars_repo_path (string, 4–182 chars) | max_stars_repo_name (string, 6–116 chars) | max_stars_count (int64, 0–191k) | id (string, 7 chars) | content (string, 100–10k chars) | size (int64, 100–10k)
---|---|---|---|---|---
games.py
|
wilrop/Cyclic-Equilibria-MONFG
| 0 |
2022671
|
import numpy as np
# Game 1: The (im)balancing act game. There are no NE under SER.
monfg1 = [np.array([[(4, 0), (3, 1), (2, 2)],
[(3, 1), (2, 2), (1, 3)],
[(2, 2), (1, 3), (0, 4)]], dtype=float),
np.array([[(4, 0), (3, 1), (2, 2)],
[(3, 1), (2, 2), (1, 3)],
[(2, 2), (1, 3), (0, 4)]], dtype=float)]
# Game 2: The (im)balancing act game without M. There are no NE under SER.
monfg2 = [np.array([[(4, 0), (2, 2)],
[(2, 2), (0, 4)]], dtype=float),
np.array([[(4, 0), (2, 2)],
[(2, 2), (0, 4)]], dtype=float)]
# Game 3: The (im)balancing act game without R. (L, M) is a pure NE under SER.
monfg3 = [np.array([[(4, 0), (3, 1)],
[(3, 1), (2, 2)]], dtype=float),
np.array([[(4, 0), (3, 1)],
[(3, 1), (2, 2)]], dtype=float)]
# Game 4: A two action game. There are NE under SER for (L,L) and (M,M).
monfg4 = [np.array([[(4, 1), (1, 2)],
[(3, 1), (3, 2)]], dtype=float),
np.array([[(4, 1), (1, 2)],
[(3, 1), (3, 2)]], dtype=float)]
# Game 5: A three action game. There are NE under SER for (L,L), (M,M) and (R,R).
monfg5 = [np.array([[(4, 1), (1, 2), (2, 1)],
[(3, 1), (3, 2), (1, 2)],
[(1, 2), (2, 1), (1, 3)]], dtype=float),
np.array([[(4, 1), (1, 2), (2, 1)],
[(3, 1), (3, 2), (1, 2)],
[(1, 2), (2, 1), (1, 3)]], dtype=float)]
# Game 6: A multi-objectivised version of the game of chicken. We use the utility function u2 for both players.
# The cyclic equilibrium is to play your own preferred action with probability 2/3 and the other action with probability 1/3.
monfg6 = [np.array([[(0, 0), (7, 2)],
[(2, 7), (6, 2.32502)]], dtype=float),
np.array([[(0, 0), (2, 7)],
[(7, 2), (6, 2.32502)]], dtype=float)]
# Game 7: An example of a game where commitment may be exploited.
monfg7 = [np.array([[(-1, -1), (-1, 1)],
[(1, -1), (1, 1)]], dtype=float),
np.array([[(-1, -1), (-1, 1)],
[(1, -1), (1, 1)]], dtype=float)]
# Game 8: A two action game. There are two NE when both agents use utility function u2 under SER: (L,L) and (R, R).
# The cyclic equilibrium is to mix uniformly over these.
monfg8 = [np.array([[(10, 2), (0, 0)],
[(0, 0), (2, 10)]], dtype=float),
np.array([[(10, 2), (0, 0)],
[(0, 0), (2, 10)]], dtype=float)]
# Game 9: A noisy version of game 8.
monfg9 = [np.array([[(10, 2), (2, 3)],
[(4, 2), (6, 3)]], dtype=float),
np.array([[(10, 2), (2, 3)],
[(4, 2), (6, 3)]], dtype=float)]
# Game 10: A game without Nash equilibria that still has a cyclic Nash equilibrium.
monfg10 = [np.array([[(2, 0), (0, 1)],
[(1, 0), (0, 2)]], dtype=float),
np.array([[(2, 0), (1, 1)],
[(1, 1), (0, 2)]], dtype=float)]
# Game 11: The same game as game 10 but intended to be used with the utility functions reversed.
monfg11 = [np.array([[(2, 0), (1, 1)],
[(1, 1), (0, 2)]], dtype=float),
np.array([[(2, 0), (0, 1)],
[(1, 0), (0, 2)]], dtype=float)]
def u1(vector):
"""
This function calculates the utility for agent 1.
:param vector: The reward vector.
:return: The utility for agent 1.
"""
utility = vector[0] ** 2 + vector[1] ** 2
return utility
def gradient_u1(vector):
"""
This function returns the partial derivatives for the two objectives for agent 1.
:param vector: The reward vector.
:return: An array of the two partial derivatives for agent 1.
"""
dx = 2 * vector[0]
dy = 2 * vector[1]
return np.array([dx, dy])
def u2(vector):
"""
This function calculates the utility for agent 2.
:param vector: The reward vector.
:return: The utility for agent 2.
"""
utility = vector[0] * vector[1]
return utility
def gradient_u2(vector):
"""
This function returns the partial derivatives for the two objectives for agent 2.
:param vector: The reward vector.
:return: An array of the two partial derivatives for agent 2.
"""
dx = vector[1]
dy = vector[0]
return np.array([dx, dy])
def u3(vector):
"""
This function calculates the utility from a vector.
:param vector: The reward vector.
:return: The utility from this vector.
"""
utility = vector[0] * vector[1] - vector[1] ** 2 # i.e. balanced
return utility
def gradient_u3(vector):
"""
This function returns the partial derivatives for the two objectives for utility function 3.
:param vector: The reward vector.
:return: An array of the two partial derivatives.
"""
dx = vector[1] # = y
dy = vector[0] - 2 * vector[1] # = x - 2y
return np.array([dx, dy])
def u4(vector):
"""
A utility function that is a constant.
:param vector: The input payoff vector.
:return: A constant utility k.
"""
k = 2
return k
def gradient_u4(vector):
"""
This function returns the partial derivatives for the two objectives for utility function 4.
:param vector: The reward vector.
:return: An array of the two partial derivatives.
"""
dx = 0
dy = 0
return np.array([dx, dy])
def get_monfg(game):
"""
This function will provide the correct payoffs based on the game we play.
:param game: The current game.
:return: A list of payoff matrices.
"""
games = {'game1': monfg1, 'game2': monfg2, 'game3': monfg3, 'game4': monfg4, 'game5': monfg5, 'game6': monfg6, 'game7': monfg7, 'game8': monfg8, 'game9': monfg9, 'game10': monfg10, 'game11': monfg11}
if game not in games:
raise Exception("The provided game does not exist.")
return games[game]
def get_u_and_du(u_str):
"""
Get the utility function and derivative of the utility function.
:param u_str: The string of this utility function.
:return: A utility function and derivative.
"""
utility_functions = {'u1': (u1, gradient_u1), 'u2': (u2, gradient_u2), 'u3': (u3, gradient_u3), 'u4': (u4, gradient_u4)}
if u_str not in utility_functions:
raise Exception('The provided utility function does not exist.')
return utility_functions[u_str]
def scalarise_matrix(payoff_matrix, u):
"""
This function scalarises an input matrix using a provided utility function.
:param payoff_matrix: An input payoff matrix.
:param u: A utility function.
:return: The trade-off game.
"""
scalarised_matrix = np.zeros((payoff_matrix.shape[0], payoff_matrix.shape[1]))
for i in range(scalarised_matrix.shape[0]):
for j in range(scalarised_matrix.shape[1]):
utility = u(payoff_matrix[i, j])
scalarised_matrix[i, j] = utility
return scalarised_matrix
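# Example usage (editor's sketch, not in the original file): build game 1,
# scalarise agent 1's payoff matrix under utility function u1, and print the
# resulting single-objective trade-off game.
if __name__ == '__main__':
    example_monfg = get_monfg('game1')
    example_u, example_du = get_u_and_du('u1')
    print(scalarise_matrix(example_monfg[0], example_u))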
| 7,351 |
app/__init__.py
|
MashSoftware/the-button
| 5 |
2023994
|
import logging
from config import Config
from flask import Flask
from flask_compress import Compress
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_talisman import Talisman
db = SQLAlchemy()
migrate = Migrate()
login = LoginManager()
login.login_view = "auth.login"
login.login_message_category = "info"
login.refresh_view = "auth.login"
login.needs_refresh_message_category = "info"
login.needs_refresh_message = "To protect your account, please log in again to access this page."
limiter = Limiter(key_func=get_remote_address, default_limits=["2 per second", "60 per minute"])
compress = Compress()
talisman = Talisman()
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(config_class)
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
db.init_app(app)
migrate.init_app(app, db)
login.init_app(app)
limiter.init_app(app)
compress.init_app(app)
csp = {
"default-src": "'self'",
"style-src": ["cdn.jsdelivr.net", "use.fontawesome.com"],
"font-src": "use.fontawesome.com",
"script-src": "cdn.jsdelivr.net",
"img-src": ["data:", "'self'"],
}
talisman.init_app(app, content_security_policy=csp, content_security_policy_nonce_in=["style-src"])
# Register blueprints
from app.main import bp as main_bp
app.register_blueprint(main_bp)
from app.auth import bp as auth_bp
app.register_blueprint(auth_bp)
from app.account import bp as account_bp
app.register_blueprint(account_bp, url_prefix="/account")
from app.entry import bp as entry_bp
app.register_blueprint(entry_bp, url_prefix="/entries")
from app.tag import bp as tag_bp
app.register_blueprint(tag_bp, url_prefix="/tags")
from app.search import bp as search_bp
app.register_blueprint(search_bp, url_prefix="/search")
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
app.logger.addHandler(stream_handler)
app.logger.setLevel(logging.INFO)
app.logger.info("Startup")
return app
# Imported at the bottom to avoid a circular import between the app factory and the models.
from app import models  # noqa: E402,F401
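# Editor's sketch (hypothetical wsgi.py, not part of this package): the factory
# above is typically consumed like this.
#
#     from app import create_app
#     app = create_app()
#     if __name__ == "__main__":
#         app.run()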
| 2,272 |
django_fastdev/__init__.py
|
boxed/django-fastdev
| 26 |
2022879
|
__version__ = '1.0.2'
default_app_config = 'django_fastdev.django_app.FastDevConfig'
from django.core.management.commands.runserver import Command
# Speed up `runserver` by replacing Django's system checks and migration
# checks with no-ops that simply report that they were skipped.
Command.check = lambda *_, **__: print('Skipped model validations')
Command.check_migrations = lambda *_, **__: print('Skipped migrations checks')
| 298 |
COM220/Trabalhos/prova2/produto.py
|
MarcosPaul0/Materias-de-Programacao
| 0 |
2023786
|
import tkinter as tk
from tkinter import messagebox
import os.path
import pickle
class Produto:
def __init__(self, codigo, descricao, valor):
self.__codigo = codigo
self.__descricao = descricao
self.__valor = valor
def getCodigo(self):
return self.__codigo
def getDescricao(self):
return self.__descricao
def getValor(self):
return self.__valor
class ViewInsereProduto(tk.Toplevel):
def __init__(self, controle):
tk.Toplevel.__init__(self)
self.geometry('300x200')
self.title("Produto Cadastro")
self.controle = controle
self.frameCodigo = tk.Frame(self, pady=10)
self.frameDescricao = tk.Frame(self)
self.frameValor = tk.Frame(self, pady=10)
self.frameBotao = tk.Frame(self)
self.frameCodigo.pack()
self.frameDescricao.pack()
self.frameValor.pack()
self.frameBotao.pack()
self.labelCodigo = tk.Label(self.frameCodigo,text="Código: ")
self.labelCodigo.pack(side="left")
self.inputCodigo = tk.Entry(self.frameCodigo, width=20)
self.inputCodigo.pack(side="left")
self.labelDescricao = tk.Label(self.frameDescricao,text="Descrição: ")
self.labelDescricao.pack(side="left")
self.inputDescricao = tk.Entry(self.frameDescricao, width=20)
self.inputDescricao.pack(side="left")
self.labelValor = tk.Label(self.frameValor,text="Valor: ")
self.labelValor.pack(side="left")
self.inputValor = tk.Entry(self.frameValor, width=20)
self.inputValor.pack(side="left")
self.buttonEnter = tk.Button(self.frameBotao ,text="Registrar")
self.buttonEnter.pack(side="left")
self.buttonEnter.bind("<Button>", controle.enterHandlerIn)
self.buttonLimpar = tk.Button(self.frameBotao ,text="Limpar")
self.buttonLimpar.pack(side="left")
self.buttonLimpar.bind("<Button>", controle.limpaHandlerIn)
self.buttonFechar = tk.Button(self.frameBotao ,text="Concluído")
self.buttonFechar.pack(side="left")
self.buttonFechar.bind("<Button>", controle.fechaHandlerIn)
def mostraJanela(self, titulo, msg):
messagebox.showinfo(titulo, msg)
class ViewConsultaProduto(tk.Toplevel):
def __init__(self, controle):
tk.Toplevel.__init__(self)
self.geometry('300x200')
self.title("Produto Consulta")
self.controle = controle
self.frameCodigo = tk.Frame(self, pady=10)
self.frameBotao = tk.Frame(self)
self.frameCodigo.pack()
self.frameBotao.pack()
self.labelCodigo = tk.Label(self.frameCodigo,text="Código: ")
self.labelCodigo.pack(side="left")
self.inputCodigo = tk.Entry(self.frameCodigo, width=20)
self.inputCodigo.pack(side="left")
self.buttonEnter = tk.Button(self.frameBotao ,text="Buscar")
self.buttonEnter.pack(side="left")
self.buttonEnter.bind("<Button>", controle.enterHandlerCon)
self.buttonLimpar = tk.Button(self.frameBotao ,text="Limpar")
self.buttonLimpar.pack(side="left")
self.buttonLimpar.bind("<Button>", controle.limpaHandlerCon)
self.buttonFechar = tk.Button(self.frameBotao ,text="Concluído")
self.buttonFechar.pack(side="left")
self.buttonFechar.bind("<Button>", controle.fechaHandlerCon)
def mostraJanela(self, titulo, msg):
messagebox.showinfo(titulo, msg)
class ControleProduto:
def __init__(self, controlePrincipal):
self.controlePrincipal = controlePrincipal
if not os.path.isfile("produto.pickle"):
self.listaProduto = []
else:
with open("produto.pickle", "rb") as f:
self.listaProduto = pickle.load(f)
def salvarProduto(self):
if len(self.listaProduto) != 0:
with open("produto.pickle","wb") as f:
pickle.dump(self.listaProduto, f)
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= HELPER FUNCTION -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
def getProdutos(self):
return self.listaProduto
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= INSERTION FUNCTIONS -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
def insereProduto(self):
self.viewInsereProduto = ViewInsereProduto(self)
def enterHandlerIn(self, event):
codigo = self.viewInsereProduto.inputCodigo.get()
descricao = self.viewInsereProduto.inputDescricao.get()
valor = self.viewInsereProduto.inputValor.get()
produto = Produto(codigo, descricao, float(valor.replace(',', '.')))
self.listaProduto.append(produto)
self.viewInsereProduto.mostraJanela('Sucesso', 'Produto inserido com sucesso!')
self.limpaHandlerIn(event)
def limpaHandlerIn(self, event):
self.viewInsereProduto.inputCodigo.delete(0, tk.END)
self.viewInsereProduto.inputDescricao.delete(0, tk.END)
self.viewInsereProduto.inputValor.delete(0, tk.END)
def fechaHandlerIn(self, event):
self.viewInsereProduto.destroy()
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= QUERY FUNCTIONS -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
def consultaProduto(self):
self.viewConsultaProduto = ViewConsultaProduto(self)
def enterHandlerCon(self, event):
codigo = self.viewConsultaProduto.inputCodigo.get()
resposta = ''
for produto in self.listaProduto:
if produto.getCodigo() == codigo:
resposta += '{} / {} / {}\n'.format(produto.getCodigo(), produto.getDescricao(), str(produto.getValor()).replace('.', ','))
if resposta:
self.viewConsultaProduto.mostraJanela(str(codigo), resposta)
else:
self.viewConsultaProduto.mostraJanela(str(codigo), 'Produto não encontrado!')
self.limpaHandlerCon(event)
def limpaHandlerCon(self, event):
self.viewConsultaProduto.inputCodigo.delete(0, tk.END)
def fechaHandlerCon(self, event):
self.viewConsultaProduto.destroy()
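# Editor's sketch (hypothetical driver, not in the original file): run the
# insertion dialog standalone. Passing None works here because the parent
# controller is only stored, never called, in this module.
if __name__ == '__main__':
    root = tk.Tk()
    root.withdraw()  # hide the empty root window; only the Toplevel views are shown
    controle = ControleProduto(controlePrincipal=None)
    controle.insereProduto()
    root.mainloop()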
| 5,765 |
rabbitai/utils/url_map_converters.py
|
psbsgic/rabbitai
| 0 |
2023423
|
# -*- coding: utf-8 -*-
from typing import Any
from werkzeug.routing import BaseConverter, Map
from rabbitai.models.tags import ObjectTypes
class RegexConverter(BaseConverter):
def __init__(self, url_map: Map, *items: str) -> None:
super().__init__(url_map)  # type: ignore
self.regex = items[0]
class ObjectTypeConverter(BaseConverter):
"""Validate that object_type is indeed an object type."""
def to_python(self, value: str) -> Any:
return ObjectTypes[value]
def to_url(self, value: Any) -> str:
return value.name
| 606 |
server/player/mahjong_soul/views.py
|
eIGato/mahjong-portal
| 10 |
2023762
|
from django.db.models import Q
from django.http import Http404
from django.shortcuts import render
from player.mahjong_soul.models import MSAccountStatistic
def ms_accounts(request, stat_type="four"):
if stat_type not in ["four", "three"]:
raise Http404
four_players = False
statistics = MSAccountStatistic.objects.filter(rank__isnull=False)
if stat_type == "four":
four_players = True
statistics = statistics.filter(game_type=MSAccountStatistic.FOUR_PLAYERS)
else:
statistics = statistics.filter(game_type=MSAccountStatistic.THREE_PLAYERS)
statistics = statistics.filter(Q(tonpusen_games__gt=0) | Q(hanchan_games__gt=0))
statistics = statistics.select_related("account", "account__player").order_by("-rank", "-points")
return render(request, "ms/ms_accounts.html", {"statistics": statistics, "four_players": four_players})
| 894 |
1. FUNDAMENTOS/3. PROGRAMACION ESTADISTICA CON PYTHON/2. ejercicios clase/4.clase17-12.py
|
alvarochiqui/edem
| 0 |
2023940
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 17 15:36:24 2021
@author: alvar
"""
# Import all the libraries needed for the project
import os  # operating system
import pandas as pd  # manage dataframes
import numpy as np  # numeric python, vectors
import matplotlib.pyplot as plt  # statistical plots
from pandas.api.types import CategoricalDtype  # for defining custom categorical data types (ordinal if necessary)
import seaborn as sns  # for high-level, Pandas-oriented graphics
import scipy.stats as stats  # for statistical inference
from statsmodels.formula.api import ols
from stargazer.stargazer import Stargazer  # for regression model tables
os.chdir(r'C:\Users\alvar\Desktop\EDEM\2. GITHUB\edem\Estadistica Python\code_and_data\datasets')
wbr = pd.read_csv ('WBR_11_12_denormalized_temp.csv', sep=';', decimal=',')
os.getcwd()
print(wbr.shape)
print(wbr.head())
print(wbr.info())
# Quality control OK
# OLS: Ordinary Least Squares
# ols('target_variable ~ predictor_variable').fit() fits the regression line
model1=ols('cnt~temp_celsius',data=wbr).fit()
model2=ols('cnt~temp_celsius+windspeed_kh',data=wbr).fit()
model3=ols('cnt~temp_celsius+windspeed_kh+hum',data=wbr).fit()
model4=ols('cnt~temp_celsius+windspeed_kh+hum+yr',data=wbr).fit()
model5=ols('cnt~temp_celsius+windspeed_kh+hum+workingday+yr',data=wbr).fit()
print(model5.summary2())
#!pip install stargazer
stargazer = Stargazer([model1, model2,model3])
stargazer.render_html()
stargazer = Stargazer([model1, model2,model3,model4,model5])
stargazer.title('Table 1. A model of bicycle demand in Washington')
stargazer.significant_digits(1)
stargazer.covariate_order(['temp_celsius', 'windspeed_kh','hum','yr','workingday'])
stargazer.show_degrees_of_freedom(False)
stargazer.render_html()
#Get dummies / hot encoding
wbr.weathersit.hist()
wbr
dummies = pd.get_dummies(wbr.weathersit)
colnames = { 1:'sunny', 2:'cloudy', 3:'rainy'} #This is a dictionary
dummies.rename(colnames, inplace = True, axis=1) #Rename column labels
wbr = pd.concat([wbr,dummies],axis=1) #add new columns
wbr.columns
# recommendation: always encode dichotomous variables as binary
# most days are sunny
model6 = ols('cnt~temp_celsius + hum + workingday + windspeed_kh + yr + cloudy +rainy', data=wbr).fit()
################# off-road (logistic) regression
m=4500
print(m)
wbr.loc[(wbr['cnt']<m), 'goal']=0
wbr.loc[(wbr['cnt']>m), 'goal']=1
plt.scatter(wbr.cnt, wbr.goal)
from statsmodels.formula.api import logit
model_l1 = logit('goal ~ temp_celsius', data=wbr).fit()
print(model_l1.summary2())
model_l7 = logit('goal~temp_celsius + hum + workingday + windspeed_kh + yr + cloudy +rainy', data=wbr).fit()
print(model_l7.summary2())
# temp2 has decimal values; consider rounding
wbr['temp2'] = wbr.temp_celsius*wbr.temp_celsius
wbr['temp2b'] = wbr.temp_celsius**2
| 2,898 |
pylayers/antprop/examples/ex_antenna5.py
|
usmanwardag/pylayers
| 143 |
2023137
|
from pylayers.antprop.antenna import *
from pylayers.antprop.antvsh import *
import matplotlib.pylab as plt
from numpy import *
import pdb
"""
This test :
1 : loads a measured antenna
2 : applies an electrical delay obtained from data with getdelay method
3 : evaluate the antenna vsh coefficient with a downsampling factor of 2
4 : evaluates the relative error of reconstruction (vsh3) for various values of order l
5 : display the results
"""
filename = 'S1R1.mat'
A = Antenna(filename,'ant/UWBAN/Matfile')
B = Antenna(filename,'ant/UWBAN/Matfile')
#plot(freq,angle(A.Ftheta[:,maxPowerInd[1],maxPowerInd[2]]*exp(2j*pi*freq.reshape(len(freq))*electricalDelay)))
freq = A.fa.reshape(104,1,1)
delayCandidates = arange(-10,10,0.001)
electricalDelay = A.getdelay(freq,delayCandidates)
print('Electrical Delay = ' + str(electricalDelay) + ' ns')
A.Ftheta = A.Ftheta*exp(2*1j*pi*freq*electricalDelay)
B.Ftheta = B.Ftheta*exp(2*1j*pi*freq*electricalDelay)
A.Fphi = A.Fphi*exp(2*1j*pi*freq*electricalDelay)
B.Fphi = B.Fphi*exp(2*1j*pi*freq*electricalDelay)
dsf = 2
A = vsh(A,dsf)
B = vsh(B,dsf)
tn = []
tet = []
tep = []
te = []
tmse = []
l = 20
A.C.s1tos2(l)
B.C.s1tos2(l)
u = np.shape(A.C.Br.s2)
Nf = u[0]
Nk = u[1]
tr = np.arange(2,Nk)
A.C.s2tos3_new(Nk)
B.C.s2tos3(1e-6)
UA = np.sum(A.C.Cr.s3*np.conj(A.C.Cr.s3),axis=0)
UB = np.sum(B.C.Cr.s3*np.conj(B.C.Cr.s3),axis=0)
ua = A.C.Cr.ind3
ub = B.C.Cr.ind3
da ={}
db ={}
for k in range(Nk):
da[str(ua[k])]=UA[k]
db[str(ub[k])]=UB[k]
tu = []
for t in sorted(da.keys()):
tu.append(da[t] - db[t])
errelTha,errelPha,errela = A.errel(l,20,dsf,typ='s3')
errelThb,errelPhb,errelb = B.errel(l,20,dsf,typ='s3')
print "a: nok",errela,errelPha,errelTha
print "b: ok ",errelb,errelPhb,errelThb
for r in tr:
E = A.C.s2tos3_new(r)
errelTh,errelPh,errel = A.errel(l,20,dsf,typ='s3')
print('r : ', r, errel, E)
tet.append(errelTh)
tep.append(errelPh)
te.append(errel)
#
line1 = plt.plot(array(tr),10*log10(array(tep)),'b')
line2 = plt.plot(array(tr),10*log10(array(tet)),'r')
line3 = plt.plot(array(tr),10*log10(array(te)),'g')
#
plt.xlabel('order l')
plt.ylabel(u'$\epsilon_{rel}$ (dB)',fontsize=18)
plt.title('Evolution of reconstruction relative error wrt order')
plt.legend((u'$\epsilon_{rel}^{\phi}$',u'$\epsilon_{rel}^{\\theta}$',u'$\epsilon_{rel}^{total}$'))
plt.legend((line1,line2,line3),('a','b','c'))
plt.show()
plt.legend(('errel_phi','errel_theta','errel'))
| 2,472 |
quadraticCalculator.py
|
michal-kalina/QuadraticCalculator
| 0 |
2023603
|
# import modules
import sys
import math
# To take coefficient input from the users
a = float(input("Podaj 'a': "))
# Checking if a is equal to zero
if a == 0:
print("Wartosc zmiennej 'a' nie moze byc zerem.")
print("Uruchom skrypt ponownie podajac wartosc 'a' rozna od zera.")
# A was zero, exiting the script
sys.exit()
# Getting the other coefficients from the user
b = float(input("Podaj 'b': "))
c = float(input("Podaj 'c': "))
if a > 0:
print("Wartosc wspolczynnika 'a' jest wieksza od zera.")
print("Ramiona funkcji f(x) = %dx**2 + %dx + %d sa zwrocone do gory." %(a,b,c))
else:
print("Wartosc wspolczynnika 'a' jest mniejsza od zera.")
print("Ramiona funkcji f(x) = %dx**2 + %dx + %d sa zwrocone w dol." %(a,b,c))
# Calculating delta
delta = (b**2) - (4*a*c)
print('Delta jest rowna: %d' % delta)
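# Worked check: for a=1, b=-3, c=2 the discriminant is 9 - 8 = 1,
# giving roots x1 = (3 - 1) / 2 = 1 and x2 = (3 + 1) / 2 = 2.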
# Check how many solutions we have
if delta == 0:
# We have only one solution
x = -b / (2 * a)
print("f(x) = %dx**2 + %dx + %d posiada jedeno miejsce zerowe x = %d" %(a,b,c,x))
elif delta > 0:
# We have two solutions
x1 = (-b - math.sqrt(delta)) / (2*a)
x2 = (-b + math.sqrt(delta)) / (2*a)
print("f(x) = %dx**2 + %dx + %d posiada dwa miejsca zerowe x1 = %d i x2 = %d" %(a,b,c,x1,x2))
else:
# There is no solution
print("f(x) = %dx**2 + %dx + %d nie posiada rozwiazan." %(a,b,c))
# End of script
sys.exit()
| 1,377 |
6Hangul_Naratgul/yacc1.py
|
hschoi1/automata_for_hangul
| 0 |
2023627
|
# -*- coding: utf-8 -*-
import ply.yacc as yacc
import eNFA_to_mDFA
# Get the token map from the lexer. This is required.
from lex1 import tokens
def p_expression_union(p):
'expression : expression UNION expression'
p[0]=('union',p[1],p[3])
def p_expression_expr(p):
'expression : LPAREN expression RPAREN'
p[0]=p[2]
def p_expression_concat(p):
'expression : expression CONCAT expression'
p[0]=('concatenation',p[1],p[3])
def p_expression_closure(p):
'expression : expression CLOSURE'
p[0]=('closure',p[1])
def p_expression_term(p):
'expression : term'
p[0]=('term',p[1])
def p_term_sym(p):
'term : SYMBOL'
p[0]=p[1]
precedence = (
('left', 'UNION'),
('left', 'CONCAT'),
('left','CLOSURE'),
)
# Error rule for syntax errors
def p_error(p):
print("Syntax error in input!")
# Build the parser
parser = yacc.yacc()
| 944 |
mesonbuild/templates/valatemplates.py
|
paper42/meson
| 2 |
2023962
|
# Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mesonbuild.templates.sampleimpl import SampleImpl
import re
hello_vala_template = '''void main (string[] args) {{
stdout.printf ("Hello {project_name}!\\n");
}}
'''
hello_vala_meson_template = '''project('{project_name}', ['c', 'vala'],
version : '{version}')
dependencies = [
dependency('glib-2.0'),
dependency('gobject-2.0'),
]
exe = executable('{exe_name}', '{source_name}', dependencies : dependencies,
install : true)
test('basic', exe)
'''
lib_vala_template = '''namespace {namespace} {{
public int sum(int a, int b) {{
return(a + b);
}}
public int square(int a) {{
return(a * a);
}}
}}
'''
lib_vala_test_template = '''using {namespace};
public void main() {{
stdout.printf("\nTesting shlib");
stdout.printf("\n\t2 + 3 is %d", sum(2, 3));
stdout.printf("\n\t8 squared is %d\\n", square(8));
}}
'''
lib_vala_meson_template = '''project('{project_name}', ['c', 'vala'],
version : '{version}')
dependencies = [
dependency('glib-2.0'),
dependency('gobject-2.0'),
]
# These arguments are only used to build the shared library
# not the executables that use the library.
shlib = shared_library('foo', '{source_file}',
dependencies: dependencies,
install: true,
install_dir: [true, true, true])
test_exe = executable('{test_exe_name}', '{test_source_file}', dependencies : dependencies,
link_with : shlib)
test('{test_name}', test_exe)
# Make this library usable as a Meson subproject.
{ltoken}_dep = declare_dependency(
include_directories: include_directories('.'),
link_with : shlib)
'''
class ValaProject(SampleImpl):
def __init__(self, options):
super().__init__()
self.name = options.name
self.version = options.version
def create_executable(self) -> None:
lowercase_token = re.sub(r'[^a-z0-9]', '_', self.name.lower())
source_name = lowercase_token + '.vala'
open(source_name, 'w', encoding='utf-8').write(hello_vala_template.format(project_name=self.name))
open('meson.build', 'w', encoding='utf-8').write(
hello_vala_meson_template.format(project_name=self.name,
exe_name=lowercase_token,
source_name=source_name,
version=self.version))
def create_library(self) -> None:
lowercase_token = re.sub(r'[^a-z0-9]', '_', self.name.lower())
uppercase_token = lowercase_token.upper()
class_name = uppercase_token[0] + lowercase_token[1:]
test_exe_name = lowercase_token + '_test'
namespace = lowercase_token
lib_vala_name = lowercase_token + '.vala'
test_vala_name = lowercase_token + '_test.vala'
kwargs = {'utoken': uppercase_token,
'ltoken': lowercase_token,
'header_dir': lowercase_token,
'class_name': class_name,
'namespace': namespace,
'source_file': lib_vala_name,
'test_source_file': test_vala_name,
'test_exe_name': test_exe_name,
'project_name': self.name,
'lib_name': lowercase_token,
'test_name': lowercase_token,
'version': self.version,
}
open(lib_vala_name, 'w', encoding='utf-8').write(lib_vala_template.format(**kwargs))
open(test_vala_name, 'w', encoding='utf-8').write(lib_vala_test_template.format(**kwargs))
open('meson.build', 'w', encoding='utf-8').write(lib_vala_meson_template.format(**kwargs))
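# Editor's sketch (not part of the original module): Meson normally constructs
# the options object; a SimpleNamespace with the same attributes stands in here.
#
#     from types import SimpleNamespace
#     ValaProject(SimpleNamespace(name='demo', version='0.1')).create_library()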
| 4,270 |
main/python-setuptools_scm/template.py
|
matu3ba/cports
| 46 |
2022870
|
pkgname = "python-setuptools_scm"
pkgver = "6.3.2"
pkgrel = 0
build_style = "python_module"
hostmakedepends = ["python-setuptools", "python-tomli"]
depends = ["python-setuptools", "python-tomli", "python-packaging"]
pkgdesc = "Manage Python package versions with SCM tags"
maintainer = "q66 <<EMAIL>>"
license = "MIT"
url = "https://github.com/pypa/setuptools_scm"
source = f"$(PYPI_SITE)/s/setuptools_scm/setuptools_scm-{pkgver}.tar.gz"
sha256 = "a49aa8081eeb3514eb9728fa5040f2eaa962d6c6f4ec9c32f6c1fba88f88a0f2"
# tests fail when the package is not installed
options = ["!check"]
def post_install(self):
self.install_license("LICENSE")
| 643 |
app/migrations/0024_remove_board_photo_delete.py
|
ckr3453/knut_festival
| 4 |
2023908
|
# Generated by Django 2.2.5 on 2019-09-20 19:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0023_board_photo_delete'),
]
operations = [
migrations.RemoveField(
model_name='board',
name='photo_delete',
),
]
| 329 |
reaction/rpc/base.py
|
Inkln/reaction
| 73 |
2023415
|
from abc import ABC, abstractmethod
import functools
import pickle
from .common import read_config, RPCHandler, RPCRequest, RPCResponse
class FunctionOrMethod:
def __init__(self, func, **attrs):
self._func = func
self._attrs = attrs
self._method = False
def __getattr__(self, item):
return self._attrs.get(item)
def __get__(self, instance, owner):
if not self._method:
self._func = functools.partial(self._func, instance)
self._method = True
return self
def __call__(self, *args, **kwargs):
return self._func(*args, **kwargs)
class BaseRPC(ABC):
@staticmethod
def encode_request(val: RPCRequest) -> bytes:
return pickle.dumps(val)
@staticmethod
def decode_request(val: bytes) -> RPCRequest:
return pickle.loads(val)
@staticmethod
def encode_response(val: RPCResponse) -> bytes:
return pickle.dumps(val)
@staticmethod
def decode_response(val: bytes) -> RPCResponse:
return pickle.loads(val)
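# Editor's note: the four codecs above are symmetric pickle round-trips, so
# decode_request(encode_request(req)) == req for any picklable RPCRequest.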
@abstractmethod
async def consume(self):
pass
@abstractmethod
async def call(self, msg: RPCRequest) -> RPCResponse:
pass
def __call__(self, handler: RPCHandler) -> RPCHandler:
if self._name is None:
self._name = handler.__name__
self._handler = FunctionOrMethod(
handler, consume=self.consume, call=self.call,
)
return self._handler
@classmethod
def configure(cls, filename: str) -> "BaseRPC":
config = read_config(filename)
return cls(**config)
| 1,627 |
server.py
|
commonid369/Medinform
| 0 |
2023662
|
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.models import load_model
from keras.preprocessing import image
from keras.applications.resnet50 import ResNet50
from starlette.applications import Starlette
from starlette.responses import HTMLResponse
from starlette.staticfiles import StaticFiles
from starlette.middleware.cors import CORSMiddleware
from pathlib import Path
import uvicorn, aiohttp, asyncio
import base64, sys, numpy as np
import cv2
path = "C:/Users/Amit/Documents/101 btp/medicare/MedInform/app"
model_file_url = 'https://github.com/guramritpalsaggu/Medical_Image_Analysis/blob/master/flask-app-live/app/models/malaria2.h5?raw=true' #DIRECT / RAW DOWNLOAD URL HERE!'
model_file_name = 'malaria2'
app = Starlette()
app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type'])
app.mount('/static', StaticFiles(directory='C:/Users/Amit/Documents/101 btp/medicare/MedInform/app/static'))
MODEL_PATH = str(path / 'models' / 'malaria2.h5')
IMG_FILE_SRC = str(path / 'static' / 'saved_image.png')
# IMG_FILE_SRC_2 = 'static/saved_image.png'
PREDICTION_FILE_SRC = str(path / 'static' / 'predictions.txt')
async def download_file(url, dest):
if dest.exists(): return
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
data = await response.read()
with open(dest, 'wb') as f: f.write(data)
async def setup_model():
#UNCOMMENT HERE FOR CUSTOM TRAINED MODEL
# await download_file(model_file_url, MODEL_PATH)
model = load_model(MODEL_PATH) # Load your Custom trained model
#model._make_predict_function()
# model = ResNet50(weights='imagenet') # COMMENT, IF you have Custom trained model
return model
# Asynchronous Steps
loop = asyncio.get_event_loop()
tasks = [asyncio.ensure_future(setup_model())]
model = loop.run_until_complete(asyncio.gather(*tasks))[0]
loop.close()
@app.route("/upload", methods=["POST"])
async def upload(request):
data = await request.form()
img_bytes = (data["img"])
bytes = base64.b64decode(img_bytes)
with open(IMG_FILE_SRC, 'wb') as f: f.write(bytes)
return model_predict(IMG_FILE_SRC, model)
def model_predict(img_path, model):
result = []; img = image.load_img(img_path, target_size=(125, 125))
# img = cv2.resize(img, dsize=(125, 125), interpolation=cv2.INTER_CUBIC)
img = np.array(img)
kernel = np.array([[0,-1,0],[-1,6,-1],[0,-1,0]])
img = cv2.filter2D(img, -1, kernel)
img_yuv = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
x = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2RGB)
x = np.expand_dims(x/255., axis=0)
# predictions = decode_predictions(model.predict(x), top=3)[0] # Get Top-3 Accuracy
# for p in predictions: _,label,accuracy = p; result.append((label,accuracy))
predictions = model.predict(x)
predictions = float(predictions)
if predictions <= 0.5:
result.append('parasitic')
result.append(round(100*(1-predictions), 2))
else:
result.append('normal')
result.append(round(100*predictions, 2))
result_html1 = path/'static'/'result1.html'
result_html2 = path/'static'/'result2.html'
result_html = str(result_html1.open().read() +str(result) + result_html2.open().read())
return HTMLResponse(result_html)
@app.route("/")
def form(request):
index_html = path / 'static' / 'index.html'
return HTMLResponse(index_html.open().read())
if __name__ == "__main__":
uvicorn.run(app)
| 3,660 |
amazon-ion-handler/Json.py
|
vegegoku/iot-reference-architectures
| 0 |
2023624
|
#!/usr/bin/env python
import amazon.ion.simpleion as ion
import os
import boto3
iot_data_client = boto3.client('iot-data')
output_topic = os.environ['OutputTopic']
def function_handler(event, context):
return iot_data_client.publish(topic = output_topic, qos = 0, payload = ion.dumps(event, binary=True))
| 311 |
validator/views/__init__.py
|
s-scherrer/qa4sm
| 10 |
2023054
|
from .general import *
from .signup import *
from .validation import *
from .results import *
from .user_profile import *
from .about import *
from .datasets import *
from .help import *
from .published_results import *
| 220 |
accounts/views.py
|
nab5m/ToDo
| 0 |
2024026
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.urls import reverse
from accounts.forms import AccountCreationForm
from django.contrib.auth import views as auth_views
def custom_login(request):
if request.user.is_authenticated:
return redirect(reverse('accounts:profile'))
else:
# Delegate to Django's built-in login view.
return auth_views.LoginView.as_view()(request)
def register(request):
user = request.user
if user.is_authenticated:
return redirect(reverse('accounts:profile'))
if request.method == "POST":
form = AccountCreationForm(request.POST)
if form.is_valid():
form.save()
messages.add_message(request, messages.INFO, '회원가입 성공')
return redirect(reverse('accounts:login'))
else:
messages.add_message(request, messages.INFO, '회원가입 실패')
else:
form = AccountCreationForm()
return render(request, 'accounts/register.html', {'form': form})
@login_required(login_url='/accounts/login/')
def profile(request):
user = request.user
return render(request, 'accounts/profile.html', {'user': user})
| 1,170 |
plot_data.py
|
daniel-muthukrishna/covid19
| 0 |
2023757
|
from urllib.request import urlopen
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy.optimize import curve_fit
plt.rcParams['text.usetex'] = True
plt.rcParams['font.serif'] = ['Computer Modern Roman'] + plt.rcParams['font.serif']
country_names = ['australia', 'uk', 'us', 'italy', 'spain', 'france', 'germany', 'iran']
colours = ['tab:green', 'tab:orange', 'tab:red', 'tab:blue', 'tab:purple', 'tab:pink', 'tab:brown', 'tab:cyan',
'tab:olive', '#FF1493', 'navy', '#aaffc3', 'lightcoral', '#228B22', '#aa6e28', '#FFA07A']
lookback_time_in_days = -10
base_url = 'https://www.worldometers.info/coronavirus/country/'
font = {'family': 'sans-serif',
'size': 15}
matplotlib.rc('font', **font)
def get_data(webpage):
dates = str(webpage).split('categories: ')[1].split('\\n')[0].replace('[', '').replace(']', '').replace('"','').split(',')
names = []
data = []
for line in str(webpage).split('series: ')[1:]:
keys = line.split(': ')
done = 0
for k, key in enumerate(keys):
if 'name' in key:
name = keys[k + 1].replace("\\'", "").split(',')[0]
names.append(name)
done += 1
if 'data' in key:
datum = keys[k + 1].replace('[', '').split(']')[0].split(',')
data.append(datum)
done += 1
if done == 2:
break
return dates, names, data
country_data = {}
for i, country in enumerate(country_names):
url = base_url + country
f = urlopen(url)
webpage = f.read()
dates, names, data = get_data(webpage)
country_data[country] = {'dates': dates, 'titles': names, 'data': data}
titles = ['Cases', 'Deaths']
for title in titles:
fig_linear, ax_linear = plt.subplots(1, 1, figsize=(9,6))
fig_log, ax_log = plt.subplots(1,1, figsize=(9,6))
for i, c in enumerate(country_names):
print(c)
title_index = country_data[c]['titles'].index(title)
dates = country_data[c]['dates']
xdata = np.arange(len(dates))
ydata = country_data[c]['data'][title_index]
ydata = np.array(ydata).astype('float')
sidx = lookback_time_in_days
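# Least-squares fit of log(y) = b*t + log(A), i.e. y = A * exp(b*t); the
# doubling time shown in the legend below is ln(2) / b.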
b, logA = np.polyfit(xdata[sidx:], np.log(ydata[sidx:]), 1)
log_yfit = b * xdata[sidx:] + logA
lin_yfit = np.exp(logA) * np.exp(b * xdata[sidx:])
ax_linear.plot(dates, ydata, label=f'{c.upper():<12s}: ${np.exp(b):.2f}^t$ ({np.log(2)/b:.1f} days to double)', marker='.', color=colours[i])
ax_linear.plot(xdata[sidx:], lin_yfit , color=colours[i], linestyle=':', alpha=1)
ax_log.plot(dates, np.log(ydata), label=f'${c.upper()}: {np.exp(b):.2f}^t$', marker='.', color=colours[i])
ax_log.plot(xdata[sidx:], log_yfit, color=colours[i], linestyle=':', alpha=1)
# label=f'{c.upper()}: {np.exp(logA):.2f}*{np.exp(b):.2f}^t'
ax_linear.tick_params(rotation=90)
ax_linear.set_ylabel(title)
ax_linear.legend()
fig_linear.tight_layout()
fig_linear.savefig(f'{title}_linear.pdf')
ax_log.tick_params(rotation=90)
ax_log.set_ylabel('log ' + title)
ax_log.legend()
# ax_log.set_yscale('log')
fig_log.tight_layout()
fig_log.savefig(f'{title}_log.pdf')
plt.show()
| 3,318 |
source/_sample/ptt/mutes-users-create.py
|
showa-yojyo/notebook
| 14 |
2023270
|
#!/usr/bin/env python
# Demonstration POST mutes/users/create
# See https://dev.twitter.com/rest/reference/post/mutes/users/create
from secret import twitter_instance
from json import dump
import sys
tw = twitter_instance()
# [1]
response = tw.mutes.users.create(screen_name='showa_yojyo')
# [2]
dump(response, sys.stdout, ensure_ascii=False, indent=4, sort_keys=True)
| 374 |
get_path.py
|
lmbaeza/Laberinto-SI
| 0 |
2023528
|
import sys
import os
import platform
def get_path(level):
if platform.system() == 'Linux' or platform.system() == 'Darwin':
# Commands for Linux and macOS
os.system('g++ -std=c++17 -o Labyrinth Labyrinth.cpp')
os.system('./Labyrinth < ascii/map.txt > ascii/path.txt')
elif platform.system() == 'Windows':
# Commands for Windows
os.system('g++ -std=c++17 -o Labyrinth.exe Labyrinth.cpp')
os.system('Labyrinth.exe < ascii/map.txt > ascii/path.txt')
path = ''
with open('ascii/path.txt', 'r') as reader:
path += reader.read()
if level == 3:
path = path[0] + path
return path
| 679 |
[9]-MD5-Digest.py
|
anuj0809/Fundamentals-Of-Cryptography
| 0 |
2023497
|
import hashlib
result = hashlib.md5(b'GeeksforGeeks')
print("The byte equivalent of hash is : " + str(result.digest()) )
| 123 |
backend/logger/admin.py
|
AstroMatt/subjective-time-perception
| 0 |
2022680
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from backend.logger.models import HTTPRequest
from import_export.admin import ImportExportModelAdmin
class RequestsWithoutResultsFilter(admin.SimpleListFilter):
title = _('Requests without Results')
parameter_name = 'need_recalculate'
def lookups(self, request, model_admin):
return [
('yes', _('Need recalculate')),
]
def queryset(self, request, queryset):
from backend.api_v3.models import Result
all_results = Result.objects.all().values_list('request_sha1', flat=True)
if self.value() == 'yes':
return queryset.exclude(sha1__in=list(all_results))
@admin.register(HTTPRequest)
class HTTPRequestAdmin(ImportExportModelAdmin):
change_list_template = 'admin/change_list_import_export.html'
change_list_filter_template = 'admin/filter_listing.html'
actions = ['recalculate', 'mark_as_problematic']
list_display = ['integrity', 'field_datetime', 'ip', 'method', 'api_version', 'sha1', 'error_log']
list_display_links = ['field_datetime']
list_filter = ['integrity', RequestsWithoutResultsFilter, 'method', 'api_version', 'modified']
search_fields = ['ip', 'sha1']
date_hierarchy = 'added'
ordering = ['-modified']
list_per_page = 100
readonly_fields = ['sha1', 'ip', 'method', 'api_version']
def field_datetime(self, obj):
return f'{obj.added:%Y-%m-%d %H:%M}'
field_datetime.admin_order_field = 'added'
field_datetime.short_description = _('Datetime [UTC]')
def mark_as_problematic(modeladmin, request, queryset):
queryset.update(integrity=HTTPRequest.INTEGRITY_ERROR)
def recalculate(modeladmin, request, queryset):
import json
from backend.api_v3.models import Result
from backend.common.utils import json_datetime_decoder
for http_request in queryset:
data = json.loads(http_request.data, object_hook=json_datetime_decoder)
Result.add(
request_sha1=http_request.sha1,
clicks=data.pop('clicks'),
result=data,
)
recalculate.short_description = _('Recalculate Results from HTTP Request')
class Media:
css = {'all': [
'logger/css/httprequest-resize-body.css',
]}
| 2,364 |
lib/optimizer/optimizer/ensemble/diversity.py
|
spaenigs/ensemble-performance
| 1 |
2023162
|
from itertools import combinations
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold
import numpy as np
import pandas as pd
def kappa(y_pred_1, y_pred_2):
a, b, c, d = 0, 0, 0, 0
for i, j in zip(y_pred_1, y_pred_2):
if (i, j) == (1, 1):
a += 1
elif (i, j) == (0, 0):
d += 1
elif (i, j) == (1, 0):
b += 1
elif (i, j) == (0, 1):
c += 1
a, b, c, d = \
[v/len(y_pred_1) for v in [a, b, c, d]]
dividend = 2 * (a*d-b*c)
divisor = ((a+b) * (b+d)) + ((a+c) * (c+d))
return dividend / divisor
def kappa_error_plot(meta_clf, X_list, y):
train_index, test_index = \
list(StratifiedKFold(n_splits=2).split(X_list[0], y))[0]
X_train_list, X_test_list = \
[X[train_index] for X in X_list], \
[X[test_index] for X in X_list]
y_train, y_test = y[train_index], y[test_index]
meta_clf.fit(X_train_list, y_train)
res = []
for ((enc_name_1, clf_1), X_test_1), ((enc_name_2, clf_2), X_test_2) in \
combinations(zip(meta_clf.estimators_, X_test_list), 2):
y_pred_tree_1, y_pred_tree_2 = \
clf_1.predict(X_test_1), clf_2.predict(X_test_2)
error_1, error_2 = \
1 - accuracy_score(y_pred_tree_1, y_test), \
1 - accuracy_score(y_pred_tree_2, y_test)
mean_pairwise_error = np.mean([error_1, error_2])
k = kappa(y_pred_tree_1, y_pred_tree_2)
res += [[k, mean_pairwise_error, f"{enc_name_1}_{enc_name_2}"]]
return pd.DataFrame(res, columns=["kappa", "mean_pairwise_error", "encoding_pair"])
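# Editor's sketch: pairwise kappa on two toy prediction vectors. Identical
# predictions give kappa = 1; this pair disagrees on one sample.
if __name__ == "__main__":
    print(kappa([1, 0, 1, 1], [1, 0, 0, 1]))  # 0.4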
| 1,669 |
build.py
|
KorySchneider/mintab
| 12 |
2023775
|
import subprocess, os, shutil
output_file = 'index.html'
output = ''
def write(output):
# Overwrite the output file with the new contents.
with open(output_file, 'w') as f:
f.write(output)
# set up
if not os.path.exists('build/'):
os.makedirs('build/')
# autoprefix css
autoprefix_cmd = 'postcss src/style.css --use autoprefixer --dir build/'
subprocess.run(autoprefix_cmd.split(), stdout=subprocess.PIPE).stdout.decode('utf-8')
# autoprefixer prints confirmation/timing
# compile ts
compile_cmd = 'ntsc src/tab.ts --outFile build/tab.js'
subprocess.run(compile_cmd.split(), stdout=subprocess.PIPE).stdout.decode('utf-8')
print('✔ Compiled src/tab.ts')
# combine
html, css, js = '', '', ''
with open('src/page.html', 'r') as f:
html += f.read()
with open('build/style.css', 'r') as f:
css += f.read()
with open('build/tab.js', 'r') as f:
js += f.read()
output += html
output += '<script>' + js + '</script>'
output += '<style>' + css + '</style>'
write(output)
print('✔ Wrote index.html')
# minify
minify_command = 'html-minifier --collapse-boolean-attributes --collapse-whitespace --decode-entities --html5 --minify-css --minify-js --process-conditional-comments --remove-attribute-quotes --remove-comments --remove-empty-attributes --remove-optional-tags --remove-redundant-attributes --remove-script-type-attributes --remove-style-link-type-attributes --remove-tag-whitespace --sort-attributes --sort-class-name --trim-custom-fragments --use-short-doctype ' + output_file
mini_output = subprocess.run(minify_command.split(), stdout=subprocess.PIPE).stdout.decode('utf-8')
write(mini_output)
print('✔ Minified index.html')
# clean up
shutil.rmtree('build/')
| 1,671 |
elasticmon/client.py
|
kkirsche/elasticmon
| 0 |
2023712
|
from elasticsearch import Elasticsearch
from flatten_json import flatten
from logging import getLogger, basicConfig, ERROR, WARN, INFO, DEBUG
from logging.handlers import SysLogHandler
from urllib3 import disable_warnings
class ElasticmonClient(object):
"""ElasticmonClient is the primary client for monitoring Elasticsearch.
This class is used to poll Elasticsearch for information about the cluster
health, node statistics, etc. and log that to the logging systems.
Attributes:
es_client (:obj:`elasticsearch.Elasticsearch`): Elasticsearch client
logger (:obj:`logging.Logger`): Class logger.
"""
def __init__(self, verbosity=2, hosts=[], environment=''):
"""Initialize a new ElasticmonClient instance.
Args:
hosts (list[str]): List of Elasticsearch hosts to connect to.
verbosity (int): The level at which to log, higher numbers offer
more logging.
environment (str): The environment this is logging in.
Returns:
None
"""
disable_warnings()
self.es_client = Elasticsearch(hosts)
self.verbosity = verbosity
self.environment = environment
self.logger = self.setup_logger()
def setup_logger(self):
"""Setup the class logging instance.
Args:
verbose (bool): True for debug logging, False for info logging
Returns:
logging.Logger
"""
log_format = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
basicConfig(format=log_format)
logger = getLogger("elasticmon")
if self.verbosity >= 3:
logger.setLevel(DEBUG)
elif self.verbosity == 2:
logger.setLevel(INFO)
elif self.verbosity == 1:
logger.setLevel(WARN)
else:
logger.setLevel(ERROR)
logger.addHandler(self.setup_syslog_handler())
return logger
def setup_syslog_handler(self, address='/dev/log'):
"""Setup the class logging instance.
Args:
address (string or tuple): Where to log syslog information to.
Returns:
logging.handler.SysLogHandler
"""
handler = SysLogHandler(address=address)
return handler
def cluster_health(self):
"""Retrieve the cluster health information.
Returns:
dict: the cluster health information
"""
self.logger.debug("start ElasticmonClient.cluster_health")
chealth = self.es_client.cluster.health()
self.logger.debug("end ElasticmonClient.cluster_health")
return chealth
def cluster_stats(self):
"""Retrieve the cluster statistics.
Returns:
dict: the cluster state information
"""
self.logger.debug("start ElasticmonClient.cluster_stats")
cstats = self.es_client.cluster.stats()
self.logger.debug("end ElasticmonClient.cluster_stats")
return cstats
def indices_stats(self):
"""Retrieve the index statistics.
Returns:
dict: the index statistics
"""
self.logger.debug("start ElasticmonClient.indices_stats")
istats = self.es_client.indices.stats(index='_all')
self.logger.debug("end ElasticmonClient.indices_stats")
return istats
def node_stats(self):
"""Retrieve the node statistics information.
Returns:
dict: the node statistics information
"""
self.logger.debug("start ElasticmonClient.node_stats")
nstats = self.es_client.nodes.stats()
self.logger.debug("end ElasticmonClient.node_stats")
return nstats
def print_cluster_health_flattened(self, j):
"""Print the flattened data
Args:
j (dict): The dictionary that should be flattened.
"""
self.logger.debug(
"start ElasticmonClient.print_cluster_health_flattened")
fields = [
'application=elasticmon', 'data_type=cluster_health',
'environment={env}'.format(env=self.environment)
]
self.print_flattened(j=j, fields=fields)
self.logger.debug("end ElasticmonClient.print_cluster_health_flattened")
def print_cluster_stats_flattened(self, j):
"""Print the flattened data
Args:
j (dict): The dictionary that should be flattened.
"""
self.logger.debug(
"start ElasticmonClient.print_cluster_stats_flattened")
fields = [
'application=elasticmon', 'data_type=cluster_stats',
'environment={env}'.format(env=self.environment)
]
self.print_flattened(j=j, fields=fields)
self.logger.debug("end ElasticmonClient.print_cluster_stats_flattened")
def print_indices_stats_flattened(self, j):
"""Print the flattened data
Args:
j (dict): The dictionary that should be flattened.
"""
self.logger.debug(
"start ElasticmonClient.print_indices_stats_flattened")
fields = [
'application=elasticmon', 'data_type=indices_stats',
'environment={env}'.format(env=self.environment)
]
self.print_flattened(j=j, fields=fields)
self.logger.debug("end ElasticmonClient.print_indices_stats_flattened")
def print_node_stats_flattened(self, j):
"""Print the flattened data
Args:
j (dict): The dictionary that should be flattened.
"""
self.logger.debug(
"start ElasticmonClient.print_cluster_health_flattened")
node_names = []
if 'nodes' in j:
node_names = j['nodes'].keys()
for node in node_names:
fields = [
'node={node}'.format(node=node), 'application=elasticmon',
'data_type=node_stats', 'environment={env}'.format(
env=self.environment)
]
self.print_flattened(j=j['nodes'][node], fields=fields)
self.logger.debug("end ElasticmonClient.print_cluster_health_flattened")
def chunks(self, l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def print_flattened(self, j, fields=['application=elasticmon']):
"""Print the flattened data
Args:
j (dict): The dictionary that should be flattened.
"""
syslog_line = []
self.logger.debug("start ElasticmonClient.print_flattened")
flat = flatten(j)
for k, v in list(flat.items()):
syslog_line.append('{key}={value}'.format(key=k, value=v))
print('{key}: {value}'.format(key=k, value=v))
for output_row in self.chunks(syslog_line, 10):
output_row += fields
self.logger.info(' '.join(output_row))
self.logger.debug("end ElasticmonClient.print_flattened")
| 6,993 |
demos/blues.py
|
ricktaube/musx
| 9 |
2022996
|
################################################################################
"""
blues.py generates a blusey piece using the `spray()` generator in paint.py.
Composer: <NAME>
To run this script cd to the parent directory of demos/ and do:
```bash
python3 -m demos.blues
```
"""
if __name__ == '__main__':
from musx import Score, Seq, MidiFile, pick
from musx.paint import spray
# The blues scale.
blues = [0, 3, 5, 6, 7, 10, 12]
# It's good practice to add any metadata such as tempo, midi instrument
# assignments, micro tuning, etc. to track 0 in your midi file.
track0 = MidiFile.metatrack()
# Track 1 will hold the composition.
track1 = Seq()
# Create a score and give it track1 to hold the score event data.
score = Score(out=track1)
# The sections of the piece
s1=spray(score, duration=.2, rhythm=.2, band=[0, 3, 5], pitch=30, amplitude=0.35, end=36)
s2=spray(score, duration=.2, rhythm=[-.2, -.4, .2, .2], band=[3, 7, 6], pitch=pick(30, 42), amplitude=0.5, end=25)
s3=spray(score, duration=.2, rhythm=[-.2, .2, .2], band=blues, pitch=pick(42, 54), instrument=2, end=20)
s4=spray(score, duration=.2, rhythm=[-.6, .4, .4], band=blues, pitch=66, amplitude=0.4, end=15)
s5=spray(score, duration=.2, rhythm=.2, band=[0, 3, 5], pitch=30, amplitude=0.5, end=10)
s6=spray(score, duration=.2, rhythm=[-.2, -.4, .2, .2], band=[3, 7, 6], pitch=pick(30, 42), amplitude=0.8, end=10)
s7=spray(score, duration=.2, rhythm=[-.2, .2, .2], band=blues, pitch=pick(42, 54), instrument=2, end=10)
s8=spray(score, duration=.2, rhythm=[-.6, .4, .4], band=blues, pitch=66, amplitude=0.6, end=10)
s9=spray(score, duration=.2, rhythm=.2, band=blues, pitch=66, amplitude=0.4, end=6)
# Create the composition.
score.compose([[0,s1], [5, s2], [10, s3], [15, s4], [37, s5], [37, s6], [37, s7], [37,s8], [47,s9]])
# Write the seqs to a midi file in the current directory.
file = MidiFile("blues.mid", [track0, track1]).write()
print(f"Wrote '{file.pathname}'.")
# To automatically play demos use setmidiplayer() and playfile().
# Example:
# setmidiplayer("fluidsynth -iq -g1 /usr/local/sf/MuseScore_General.sf2")
# playfile(file.pathname)
| 2,260 |
alembic/versions/00002_a4cb7899f08f_starting.py
|
awesome-archive/ReadableWebProxy
| 193 |
2023123
|
"""starting
Revision ID: a4cb7899f08f
Revises: <PASSWORD>
Create Date: 2016-01-10 18:55:20.339504
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'fba1df4514fa'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('web_page_history', sa.Column('contenthash', sa.Text(), nullable=True))
op.add_column('web_page_history', sa.Column('is_diff', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('web_page_history', 'is_diff')
op.drop_column('web_page_history', 'contenthash')
### end Alembic commands ###
| 1,280 |
beep/protocol/arbin.py
|
abillscmu/beep
| 55 |
2022940
|
# Copyright [2020] [Toyota Research Institute]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for Arbin-compatible schedule file
parsing and parameter insertion
"""
import re
import warnings
from copy import deepcopy
from collections import OrderedDict
from beep.utils import DashOrderedDict
class Schedule(DashOrderedDict):
"""
Schedule file utility. Provides the ability to read
an Arbin type schedule file. Note that __init__ works
identically to that of an OrderedDict, i. e. with
tuple or dictionary inputs and inherits from pydash
getters and setters with Schedule.get, Schedule.set
and Schedule.unset e.g.
>>> schedule = Schedule.from_file("arbin_file_1.sdu")
>>> schedule.set("Schedule.Step7.m_szLabel", "CC1")
>>> print(schedule['Schedule']['Step7']['m_szLabel'])
>>> "CC1"
"""
@classmethod
def from_file(cls, filename, encoding="latin-1"):
"""
Schedule file ingestion. Converts a schedule file with section headers
to an ordered dict with section headers as nested dicts.
Args:
filename (str): Schedule file name (tested with FastCharge schedule file)
encoding (str): encoding of schedule file
Returns:
(Schedule): Ordered dictionary with keys corresponding to options
or control variables. Section headers are nested dicts within
the dict
"""
obj = cls()
with open(filename, "rb") as f:
text = f.read()
text = text.decode(encoding)
split_text = re.split(r"\[(.+)\]", text)
for heading, body in zip(split_text[1::2], split_text[2::2]):
body_lines = re.split(r"[\r\n]+", body.strip())
body_dict = OrderedDict([line.split("=", 1) for line in body_lines])
heading = heading.replace("_", ".")
obj.set(heading, body_dict)
return obj
def to_file(self, filename, encoding="latin-1", linesep="\r\n"):
"""
Schedule file output. Converts an dictionary to a schedule file with
the appropriate section headers. This function
DOES NOT check the flow control or limits set in the steps. The dictionary
must represent a valid schedule before it is passed to this function.
Args:
filename (str): string corresponding to the file to
output the schedule to
encoding (str): text encoding for the file
linesep (str): line separator for the file,
default Windows-compatible "\r\n"
"""
# Flatten dict
data = deepcopy(self)
flat_keys = _get_headings(data, delimiter=".")
flat_keys.reverse() # Reverse ensures sub-dicts are removed first
data_tuples = []
for flat_key in flat_keys:
data_tuple = (flat_key.replace(".", "_"), data.get_path(flat_key))
data_tuples.append(data_tuple)
data.unset(flat_key)
data_tuples.reverse()
# Construct text
blocks = []
for section_title, body_data in data_tuples:
section_header = "[{}]".format(section_title)
body = linesep.join(
["=".join([key, value]) for key, value in body_data.items()]
)
blocks.append(linesep.join([section_header, body]))
contents = linesep.join(blocks) + linesep
# Write file
with open(filename, "wb") as f:
f.write(contents.encode(encoding))
@classmethod
def from_fast_charge(cls, CC1, CC1_capacity, CC2, template_filename):
"""
Function takes parameters for the FastCharge Project
and creates the schedule files necessary to run each of
these parameter combinations. Assumes that control type
is CCCV.
Args:
CC1 (float): Constant current value for charge section 1 in Amps
CC1_capacity (float): Capacity to charge to for section 1 in Amp-hours
CC2 (float): Constant current value for charge section 2 in Amps
template_filename (str): File path to pull the template schedule
file from
"""
obj = cls.from_file(template_filename)
obj.set_labelled_steps(
"CC1", "m_szCtrlValue", step_value="{0:.3f}".format(CC1).rstrip("0")
)
obj.set_labelled_limits(
"CC1",
"PV_CHAN_Charge_Capacity",
comparator=">",
value="{0:.3f}".format(CC1_capacity).rstrip("0"),
)
obj.set_labelled_steps(
"CC2", "m_szCtrlValue", step_value="{0:.3f}".format(CC2).rstrip("0")
)
return obj
def get_labelled_steps(self, step_label):
"""
Get the steps in the schedule section that match a given label
Args:
step_label (str): The user determined step label for the step.
If there are multiple identical labels this will operate
on the first one it encounters
Returns:
(iterator): iterator for subkeys of schedule which match
the label value
"""
# Find all step labels
labelled_steps = filter(
lambda x: self.get_path("Schedule.{}.m_szLabel".format(x)) == step_label,
self["Schedule"].keys(),
)
return labelled_steps
def set_labelled_steps(self, step_label, step_key, step_value, mode="first"):
"""
Insert values for steps in the schedule section
Args:
step_label (str): The user determined step label for the step.
If there are multiple identical labels this will operate
on the first one it encounters
step_key (str): Key in the step to set, e.g. ('m_szStepCtrlType')
step_value (str): Value to set for the key
mode (str): accepts 'first' or 'all',
for 'first' updates only first step with matching label
for 'all' updates all steps with matching labels
Returns:
dict: Altered ordered dictionary with keys corresponding to
options or control variables.
"""
# Find all step labels
labelled_steps = self.get_labelled_steps(step_label)
# TODO: should update happen in place or return new?
for step in labelled_steps:
self.set("Schedule.{}.{}".format(step, step_key), step_value)
if mode == "first":
break
return self
def set_labelled_limits(self, step_label, limit_var, comparator, value):
"""
Insert values for the limits in the steps in the schedule section
Args:
step_label (str): The user determined step label for the step. If there
are multiple identical labels this will operate on the first one it
encounters
limit_var (str): Variable being used for this particular limit in the step
value (int or str): threshold value to trip limit
comparator (str): str-represented comparator to trip limit,
e.g. '>' or '<'
Returns:
dict: Altered ordered dictionary with keys corresponding to options or control
variables.
"""
labelled_steps = self.get_labelled_steps(step_label)
for step in labelled_steps:
# Get all matching limit keys
step_data = self.get_path("Schedule.{}".format(step))
limits = [
heading
for heading in _get_headings(step_data)
if heading.startswith("Limit")
]
# Set limit of first limit step with matching code
for limit in limits:
limit_data = step_data[limit]
if limit_data["m_bStepLimit"] == "1": # Code corresponding to stop
if limit_data["Equation0_szLeft"] == limit_var:
limit_prefix = "Schedule.{}.{}".format(step, limit)
self.set(
"{}.Equation0_szCompareSign".format(limit_prefix),
comparator,
)
self.set("{}.Equation0_szRight".format(limit_prefix), value)
else:
warnings.warn(
"Additional step limit at {}.{}".format(step, limit)
)
return self
def _get_headings(obj, delimiter="."):
"""
Utility function for getting all nested keys
of a dictionary whose values are themselves
a dict
Args:
obj (dict): nested dictionary to be searched
delimiter (str): string delimiter for nested
sub_headings, e.g. top_middle_low for
'top', 'middle', and 'low' nested keys
"""
headings = []
for heading, body in obj.items():
if isinstance(body, dict):
headings.append(heading)
sub_headings = _get_headings(body, delimiter=delimiter)
headings.extend(
[delimiter.join([heading, sub_heading]) for sub_heading in sub_headings]
)
return headings
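# The lines below are a minimal, hypothetical usage sketch; the schedule file
# names are placeholders, not files shipped with beep. Read a template,
# insert FastCharge parameters, and write the modified schedule back out.
if __name__ == "__main__":
    schedule = Schedule.from_fast_charge(
        CC1=2.0, CC1_capacity=1.1, CC2=1.5,
        template_filename="template.sdu",
    )
    schedule.to_file("fastcharge_CC1_2.0A_CC2_1.5A.sdu")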
| 9,778 |
opencv_demos/strokeEdges.py
|
perfectbullet/albumy
| 0 |
2023958
|
#!/usr/bin/python
# coding=utf-8
import time
import cv2
def strokeEdges(src, blurKsize=5, edgeKsize=3):
graySrc = src.copy()
if blurKsize >= 3:
graySrc = cv2.medianBlur(graySrc, blurKsize)
graySrc = cv2.Laplacian(graySrc, cv2.CV_8U, ksize=edgeKsize)
# Normalize: invert the edge map and scale it to [0, 1]
normalizedInverseAlpha = (1.0 / 255) * (255 - graySrc)
channel = graySrc * normalizedInverseAlpha
# channels = cv2.split(src)
# for channel in channels:
# channel[:] = channel * normalizedInverseAlpha
return channel
# return cv2.merge([channel, ])
def main(img_path):
src = cv2.imread(img_path, 0)
t1 = time.time()
dst = strokeEdges(src)
t2 = time.time()
print('T={}'.format(t2 - t1))
cv2.imshow("src", src)
cv2.imshow("dst", dst)
cv2.waitKey()
cv2.destroyAllWindows()
if __name__ == '__main__':
main('C_ZZ_JG_1079_1749_1210_1876.jpg')
| 896 |
styleTransferCNN/util/util.py
|
prography/honeybee-deeplearning
| 0 |
2022978
|
import torch
import PIL.Image
def load_image(file_name, size=None):
image = PIL.Image.open(file_name)
if size is not None:
image = image.resize((size, size), PIL.Image.ANTIALIAS)
return image
def gram_matrix(y):
(b, ch, h, w) = y.size()
features = y.view(b, ch, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * h * w)
return gram
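# Illustrative sanity check (not part of the original module): for a feature
# map of shape (batch, channels, height, width), gram_matrix returns a
# (batch, channels, channels) tensor of channel correlations.
if __name__ == "__main__":
    features = torch.randn(2, 8, 16, 16)
    print(gram_matrix(features).shape)  # torch.Size([2, 8, 8])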
| 403 |
my intereseted short codes/pytest/test_expectation.py
|
Jalilnkh/KartalOl-Fluent-Python-2022
| 0 |
2022732
|
import pytest
@pytest.mark.parametrize(
"test_input, expected", [("3+5", 8), ("2+4", 6), pytest.param("6*9", 42, marks=pytest.mark.xfail)],
)
def test_eval(test_input, expected):
assert eval(test_input) == expected
| 225 |
line_track_designer/printer.py
|
Quentin18/Line-Track-Designer
| 1 |
2024109
|
"""
With the **printer** module, you can print the tracks built with the
library. It uses *CUPS*.
Warnings:
This module can be used only on Linux and macOS.
"""
import os
import logging
try:
import cups
except ImportError:
pass
from line_track_designer.error import LineTrackDesignerError
class Printer:
"""
Manage the printer to print a track. It is composed of three fields:
* **conn**: connection to the *CUPS* server
* **printer_name**: the name of the default printer
* **file_tiles**: the path to the PDF document with the tiles to print
Raises:
LineTrackDesignerError: no printers found
Note:
If no printer is found, you need to add one in your devices.
"""
def __init__(self):
"""Init a Printer object."""
try:
self._conn = cups.Connection()
printers = self._conn.getPrinters()
if printers:
self._printer_name = list(printers.keys())[0]
cwd = os.path.dirname(os.path.abspath(__file__))
self._file_tiles = os.path.join(
cwd, 'pdf', 'linefollowtiles.pdf')
else:
raise LineTrackDesignerError('no printers found')
logging.info('Printer {} found'.format(self.printer_name))
except NameError:
# cups was not imported (unsupported platform); leave the printer unconfigured
pass
@property
def conn(self):
"""Get the connection."""
return self._conn
@property
def printer_name(self):
"""Get the name of the printer."""
return self._printer_name
@property
def file_tiles(self):
"""Get the path of the PDF file."""
return self._file_tiles
def __str__(self):
"""
Return the string representation of the Printer object.
It returns the name of the printer.
"""
return self.printer_name
def __repr__(self):
"""
Return the repr of the Printer object.
It is the same as the string format.
"""
return str(self)
def print_page(self, copies, pages, title, media='a4'):
"""
Ask the printer to print pages of the PDF file.
Args:
copies (int): number of copies to print
pages (int or str): page range to print, e.g. '1-2'
title (str): name of the print job
media (str): format (default: 'a4')
Raises:
LineTrackDesignerError: printing failed
"""
try:
self.conn.printFile(
self.printer_name,
self.file_tiles,
title,
{
'copies': str(copies),
'page-ranges': str(pages),
'media': media,
'sides': 'one-sided'})
except Exception:
raise LineTrackDesignerError('printing failed')
logging.info('Pages {} printed'.format(pages))
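# Hypothetical usage sketch; it assumes a reachable CUPS server with at least
# one configured printer. Print two copies of pages 1-2 of the tiles document.
if __name__ == "__main__":
    printer = Printer()
    printer.print_page(copies=2, pages='1-2', title='tiles')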
| 2,917 |
qmt/geometry/geo_2d_data.py
|
basnijholt/qmt
| 0 |
2023657
|
from shapely.geometry import LinearRing, LineString, MultiLineString, Polygon
from shapely.ops import unary_union
from typing import List, Optional, Sequence, Union
import numpy as np
from matplotlib.axes import Axes
import matplotlib._color_data as mcd
from .geo_data_base import GeoData
class Geo2DData(GeoData):
def __init__(self, lunit="nm"):
"""Class for holding a 2D geometry specification. The parts dict can contain
shapely Polygon or LineString objects. Polygon are intended to be 2D domains,
while LineString are used for setting boundary conditions and surface
conditions.
Parameters
----------
lunit : str, optional
Length unit, by default "nm"
"""
super().__init__(lunit)
def add_part(
self, part_name: str, part: Union[LineString, Polygon], overwrite: bool = False
):
"""Add a part to this geometry.
Parameters
----------
part_name : str
Name of the part to add
part : Union[LineString, Polygon]
Part to add
overwrite : bool, optional
Whether we allow this to overwrite an existing part, by default False
Raises
------
ValueError
Part is not a valid Polygon
"""
if isinstance(part, Polygon) and not part.is_valid:
raise ValueError(f"Part {part_name} is not a valid polygon.")
part.virtual = False
super().add_part(
part_name,
part,
overwrite,
lambda p: self.build_order.append(part_name) if p is not None else None,
)
def remove_part(self, part_name: str, ignore_if_absent: bool = False):
"""Remove a part from this geometry.
Parameters
----------
part_name : str
Name of the part to remove
ignore_if_absent : bool, optional
Whether we ignore an attempted removal if the part name is not present, by
default False
"""
super().remove_part(
part_name,
ignore_if_absent,
lambda p: self.build_order.remove(p) if p is not None else None,
)
@property
def polygons(self):
"""Return dictionary of parts that are polygons."""
return {k: v for k, v in self.parts.items() if isinstance(v, Polygon)}
@property
def edges(self):
"""Return dictionary of parts that are lines."""
return {k: v for k, v in self.parts.items() if isinstance(v, LineString)}
def compute_bb(self) -> List[float]:
"""Compute the bounding box of all of the parts in the geometry.
Returns
-------
List of [min_x, max_x, min_y, max_y].
"""
all_shapes = list(self.parts.values())
bbox_vertices = unary_union(all_shapes).envelope.exterior.coords.xy
min_x = min(bbox_vertices[0])
max_x = max(bbox_vertices[0])
min_y = min(bbox_vertices[1])
max_y = max(bbox_vertices[1])
return [min_x, max_x, min_y, max_y]
def part_build_order(self) -> List[str]:
"""Returns the build order restricted to parts.
Returns
-------
build order restricted to parts.
"""
priority = []
for geo_item in self.build_order:
if geo_item in self.parts and isinstance(self.parts[geo_item], Polygon):
priority += [geo_item]
return priority
def coord_list(self, part_name: str) -> List:
"""Get the list of vertex coordinates for a part
Parameters
----------
part_name : str
Name of the part
Returns
-------
coord_list
"""
part = self.parts[part_name]
if isinstance(part, Polygon):
# Note that in shapely, the first coord is repeated at the end, which we
# trim off:
return list(np.array(part.exterior.coords.xy).T)[:-1]
elif isinstance(part, LineString):
return list(np.array(part.coords.xy).T)[:]
def plot(
self,
parts_to_exclude: Optional[Sequence[str]] = None,
line_width: float = 20.0,
ax: Optional[Axes] = None,
colors: Optional[Sequence] = None,
) -> Axes:
""" Plots the 2d geometry
Parameters
----------
parts_to_exclude : Sequence[str]
Part/edge names that won't be plotted (Default value = None)
line_width : float
Thickness of lines (only for edge lines). (Default value = 20.0)
ax : Optional[Axes]
You can pass in a matplotlib axes to plot in. If it's None, a new
figure with its corresponding axes will be created
(Default value = None)
colors : Sequence[str]
Colors to use for plotting the parts
(Default value = None)
Returns
-------
Axes object.
"""
from matplotlib import pyplot as plt
import descartes
if parts_to_exclude is None:
parts_to_exclude = []
if colors is None:
colors = list(mcd.XKCD_COLORS.values())
if not ax:
ax = plt.figure().gca()
pn = 0
for part_name, part in self.parts.items():
if part_name in parts_to_exclude:
continue
if isinstance(part, LineString):
if len(part.coords) == 2:
coords = np.asarray(part.coords)
vec = np.asarray(coords[0]) - np.asarray(coords[1])
vec /= np.linalg.norm(vec)
perp_vec = np.array([-vec[1], vec[0]])
half_width = line_width / 2
part1 = LineString(
[
coords[0] + half_width * perp_vec,
coords[1] + half_width * perp_vec,
coords[1] - half_width * perp_vec,
coords[0] - half_width * perp_vec,
]
)
else:
part1 = part
pgn = Polygon(LinearRing(part1))
else:
pgn = part
patch = descartes.PolygonPatch(pgn, fc=colors[pn].upper(), label=part_name)
ax.add_patch(patch)
plt.text(
list(*part.representative_point().coords)[0],
list(*part.representative_point().coords)[1],
part_name,
ha="center",
va="center",
)
pn += 1
# Set axis to auto. The user can change this later if they wish
ax.axis("auto")
return ax
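# Minimal usage sketch (illustrative only; assumes the GeoData base class
# imported above behaves as these methods expect): add one square polygon
# part and query the bounding box of the geometry.
if __name__ == "__main__":
    geo = Geo2DData()
    geo.add_part("square", Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]))
    print(geo.compute_bb())  # [0.0, 1.0, 0.0, 1.0]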
| 6,872 |
datachecker.py
|
cjybyjk/android_YellowPageServer
| 4 |
2023687
|
import iso3166
def check_type(x):
return int(x) in (-1, 0, 1)
def check_region(x):
return x.upper() in iso3166.countries_by_alpha2
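# Illustrative checks (requires the iso3166 package): valid types are
# -1, 0 or 1, and regions are ISO 3166-1 alpha-2 codes.
if __name__ == "__main__":
    print(check_type("1"), check_type("5"))        # True False
    print(check_region("kr"), check_region("zz"))  # True False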
| 141 |
bin/sibus_destinations_depth_soap.py
|
denis-guillemenot/sibus_jms
| 2 |
2023800
|
# ===========================================
# ./wsadmin.sh -lang jython -host <dmgr_host> -port <dmgr_port> -f sibus_destinations_depth_soap.py
# version 1.2
# ===========================================
script_name = "sibus_destinations_depth_soap.py"
import sys
def print_usage():
print('\nUsage: wsadmin.sh -conntype SOAP -lang jython -f <%s>' % script_name)
print
return
def show_depth( *argv):
"""
Display J2CConnectionFactory connections factories to SIBus
Display J2CAdminObject Queues and Topics depths
"""
# first check if we are really connected: AdminTask should be available
try:
cellName = AdminControl.getCell()
except:
print('\nWARNING: Not connected')
print_usage()
# sys.exit()
return
# get all J2CConnectionFactory to link (JNDI) to (SIBus)
j2c_cf = AdminConfig.list( "J2CConnectionFactory").split( lineSeparator)
cf_to_sibus = {}
for c in j2c_cf:
c_name = AdminConfig.showAttribute( c, 'name')
c_jndi = AdminConfig.showAttribute( c, 'jndiName')
cf_to_sibus[ c_name] = {}
cf_to_sibus[ c_name][ 'jndi'] = c_jndi
c_props = AdminConfig.showAttribute( c, 'propertySet')
if c_props:
c_def_props = AdminConfig.showAttribute( c_props, 'resourceProperties')[1:-1].split()
c_sibus = ''
for p in c_def_props:
p_name = AdminConfig.showAttribute( p, 'name')
p_value = AdminConfig.showAttribute( p, 'value')
if ( p_name.lower() == 'busname') and ( len( p_value)): c_sibus = p_value
cf_to_sibus[ c_name][ 'sibus'] = c_sibus
# reverse dictionary
sibus_to_cf = {}
# print cf_to_sibus
print
for j in cf_to_sibus.keys():
sibus_name = cf_to_sibus[ j][ 'sibus']
if len( sibus_name):
if sibus_to_cf.has_key( sibus_name) : sibus_to_cf[ sibus_name].append( j)
else : sibus_to_cf[ sibus_name] = [ j]
# print SIBus connection factories
print( "\n%s %s\n" % ( "#--- JMS Connections to SIBus", "-" * 30))
for s in sibus_to_cf.keys():
sibus_name = s
print( "\n %d to %s \n" % ( int( len( sibus_to_cf[ s])), sibus_name))
# print connections factories
for c in sibus_to_cf[ s]:
c_jndi = cf_to_sibus[ c][ 'jndi']
print( " %s (%s)" %( c, c_jndi))
# get all J2CAdminObject to link (JNDI) JMS Queue and Topic to (SIBus) Destination
j2c = AdminConfig.list( "J2CAdminObject").split( lineSeparator)
jms_to_dest = {}
for i in j2c:
jndi_name = AdminConfig.showAttribute( i, 'jndiName')
jms_to_dest[ jndi_name] = {}
for j in AdminConfig.showAttribute( i, 'properties')[1:-1].split():
prop_name = AdminConfig.showAttribute( j, 'name')
prop_value = AdminConfig.showAttribute( j, 'value')
jms_to_dest[ jndi_name][ prop_name] = prop_value
dest_to_jms = {}
for k, v in jms_to_dest.items():
bus_name = jms_to_dest[ k][ 'BusName']
if jms_to_dest[ k].has_key( 'QueueName'):
dest_name = 'Queue:' + jms_to_dest[ k][ 'QueueName']
else:
# TopicName : JMS Topic name, TopicSpace: SIBus Topic name
dest_name = 'Topic:' + jms_to_dest[ k][ 'TopicSpace']
dest_to_jms[ bus_name + ':' + dest_name] = k
# check queue depth
print( "\n%s %s\n" % ( "#--- SIBus Queue(s) Depth -- [SIBus:queue_name (JMS JNDI)]", "-" * 30))
sibqueuepoints = AdminControl.queryNames('type=SIBQueuePoint,*').split( lineSeparator)
for d in sibqueuepoints:
if d != '':
# sibus_name = AdminControl.makeObjectName( sibqueuepoints[0]).getKeyProperty( 'SIBus')
sibus_name = AdminControl.makeObjectName( d).getKeyProperty( 'SIBus')
queue_name = AdminControl.getAttribute(d, 'identifier')
queue_depth= AdminControl.getAttribute(d, 'depth')
if dest_to_jms.has_key( sibus_name + ':Queue:' + queue_name):
jms_name = '(%s)' % dest_to_jms[ sibus_name + ':Queue:' + queue_name]
else:
jms_name = ''
print( " %5d messages in %s:%s %s" %( int( queue_depth), sibus_name, queue_name, jms_name))
# check topic depth
print( "\n%s %s" % ( "#--- SIBus Topic(s) Depth -- [SIBus:topic_name (JMS JNDI)]", "-" * 30))
print( "%s %s\n" % ( "#--- durable subscriber -- [client_id##subscriber_id]", "-" * 30))
sibpubpoints = AdminControl.queryNames('type=SIBPublicationPoint,*').split( lineSeparator)
if sibpubpoints:
for d in sibpubpoints:
if d != '':
# sibus_name = AdminControl.makeObjectName( sibqueuepoints[0]).getKeyProperty( 'SIBus')
sibus_name = AdminControl.makeObjectName( d).getKeyProperty( 'SIBus')
topic_name = AdminControl.getAttribute(d, 'identifier')
topic_depth= AdminControl.getAttribute(d, 'depth')
if dest_to_jms.has_key( sibus_name + ':Topic:' + topic_name):
jms_name = '(%s)' % dest_to_jms[ sibus_name + ':Topic:' + topic_name]
else:
jms_name = ''
print( " %5d messages in %s:%s %s" % ( int( topic_depth), sibus_name, topic_name, jms_name))
# check all subscribers depth
subs = AdminControl.invoke_jmx( AdminControl.makeObjectName( d), 'getSubscriptions', [], [])
for s in subs:
print( " %5d messages for subscriber %s" % ( int( s.getDepth()), s.getSubscriberId()))
if (len(subs) > 0): print
# =======================================================================================================================
# for WAS 6: __name__ == "main"
if __name__ == "__main__" or __name__ == "main":
show_depth( *sys.argv)
# sys.exit()
else:
try:
import AdminConfig, AdminControl, AdminApp, AdminTask, Help
import lineSeparator
except ImportError:
pass
| 5,607 |
examples/mplot3d/wire3d_animation_demo.py
|
SoftwareDev/mat-plot-lib
| 3 |
2023492
|
from __future__ import print_function
"""
A very simple 'animation' of a 3D plot
"""
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
import time
def generate(X, Y, phi):
R = 1 - np.sqrt(X**2 + Y**2)
return np.cos(2 * np.pi * X + phi) * R
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = np.linspace(-1, 1, 50)
ys = np.linspace(-1, 1, 50)
X, Y = np.meshgrid(xs, ys)
Z = generate(X, Y, 0.0)
wframe = None
tstart = time.time()
for phi in np.linspace(0, 360 / 2 / np.pi, 100):
oldcol = wframe
Z = generate(X, Y, phi)
wframe = ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2)
# Remove old line collection before drawing
if oldcol is not None:
ax.collections.remove(oldcol)
plt.pause(.001)
print ('FPS: %f' % (100 / (time.time() - tstart)))
| 855 |
code/r3b_bsc/symplectic.py
|
GandalfSaxe/letomes
| 0 |
2023584
|
"""
Reduced 3-Body Problem Solver Module
====================================
A collection of various numerical solvers for the reduced 3-body problem consisting of two larger masses (Earth, Moon) and one smaller moving in their gravitational field (a satellite). The solution assumes Earth-Moon center of mass as origin and a cartesian x-y coordinate system rotating with the lines connecting the Earth and Moon (non-inertial frame accounted for in the equations of motion).
Functions:
euler: Solves by Euler method explicitly, implicitly or symplectically.
We assume TODO: FILL OUT HERE!
"""
from math import pi, sqrt
import numpy as np
from numba import jit
from orbsim import DAY, EARTH_RADIUS
from orbsim.r3b_2d import (
ORBITAL_TOLERANCE,
EARTH_POSITION_X,
k,
LLO_RADIUS,
LLO_VELOCITY,
LUNAR_POSITION_X,
UNIT_LENGTH,
UNIT_VELOCITY,
h_DEFAULT,
h_MIN_DEFAULT,
STEP_ERROR_TOLERANCE,
)
from orbsim.r3b_2d.analyticals import get_pdot_x, get_pdot_y, get_v_x, get_v_y
from orbsim.r3b_2d.integrators import euler_step_symplectic, verlet_step_symplectic
@jit
def symplectic(
n, duration, x, y, p_x, p_y, xs, ys, p_xs, p_ys, step_errors, h_list, info
):
# Initialize values
h = h_DEFAULT
h_min = h_MIN_DEFAULT
# STEP_ERROR_TOLERANCE = STEP_ERROR_TOLERANCE
# max_steps = duration
step_error = 1e-15
status = 1
target_dist = 1
target = 1
target_pos_x = LUNAR_POSITION_X
# target = 2; target_pos_x = L1_position_x
target_pos_y = 0
# Time reset
t = 0
for i in range(n):
# Store position
xs[i] = x
ys[i] = y
p_xs[i] = p_x
p_ys[i] = p_y
step_errors[i] = step_error
h_list[i] = h
# Integrate time period
dt = duration * (i + 1) / n
count = 0
while t < dt:
# Safety on iterations
count += 1
if count > 10000000:
count = 0
h_min = 2 * h_min
# Adaptive symplectic euler/midpoint
x1, y1, p1_x, p1_y = euler_step_symplectic(h, x, y, p_x, p_y)
x2, y2, p2_x, p2_y = verlet_step_symplectic(h, x, y, p_x, p_y)
# Relative local error of step
step_error = sqrt(
((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1)) / (x2 * x2 + y2 * y2)
)
# Accept the step only if the weighted error is no more than the
# tolerance STEP_ERROR_TOLERANCE. Estimate an h that will yield an error of STEP_ERROR_TOLERANCE on
# the next step and use 0.8 of this value to avoid failures.
if step_error < STEP_ERROR_TOLERANCE or h <= h_min:
# Accept step
x = x2
y = y2
p_x = p2_x
p_y = p2_y
# Forward time by step
t = t + h
h = max(
h_min, h * max(0.1, 0.8 * sqrt(STEP_ERROR_TOLERANCE / step_error))
)
else:
# No accept, reduce h to half
h = max(h_min, 0.5 * h)
# How close are we to the moon?
rx = x - target_pos_x
ry = y - target_pos_y
r = sqrt(rx * rx + ry * ry)
target_dist = min(target_dist, r)
# Check if we hit the target
if status == 1:
if target == 1:
r_low = (LLO_RADIUS - ORBITAL_TOLERANCE) / UNIT_LENGTH
r_high = (LLO_RADIUS + ORBITAL_TOLERANCE) / UNIT_LENGTH
else:
r_low = 0
r_high = ORBITAL_TOLERANCE / UNIT_LENGTH
if r > r_low and r < r_high:
# Current velocity
v_x = p_x + y
v_y = p_y - x
if target == 1:
# Project velocity onto radius vector and subtract
# so velocity vector is along orbit
vr = (v_x * rx + v_y * ry) / r ## FIXME: Check if vr is correct
v_x = v_x - vr * rx / r
v_y = v_y - vr * ry / r
# Now adjust velocity to lunar orbit velocity
vt = sqrt(v_x * v_x + v_y * v_y)
p_x = (LLO_VELOCITY / UNIT_VELOCITY) * v_x / vt - y
p_y = (LLO_VELOCITY / UNIT_VELOCITY) * v_y / vt + x
# Total velocity change
dv = sqrt(
vr * vr
+ (vt - LLO_VELOCITY / UNIT_VELOCITY)
* (vt - LLO_VELOCITY / UNIT_VELOCITY)
)
else:
dv = sqrt(v_x * v_x + v_y * v_y)
# Store info
info[0] = dv
info[1] = t
# Finish?
status = -10000 + dv
if n == 1:
return status
# Check if we hit the earth
r = (x - EARTH_POSITION_X) * (x - EARTH_POSITION_X) + y * y  # squared distance; compared against r_high**2 below, so no sqrt needed
r_high = EARTH_RADIUS / UNIT_LENGTH
if r < r_high * r_high:
return 100 # Hit earth surface
if status >= 0:
status = target_dist
return status
| 5,427 |
examples/Structural Engineering Examples/Ex2 - OpenSees Optimization of a Truss/Ex2.2 - Truss Node Positions/functions.py
|
cslotboom/Naturalize
| 0 |
2022701
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 22:59:59 2020
@author: CS
"""
import naturalize as nat
import numpy as np
import random
import sys
sys.path.append('..')
import TrussAnalysis as ta
mm = 0.001
class environment:
"""
The environment will act as a container for the data in the problem.
The boundary node coordinates and element fixities are defined as static
values.
"""
def __init__(self, xlims, ylims, trussMat = 11, forces = np.array([1000., 1000., 0.])):
"""
Set the limits and define their size.
"""
self.xlims = xlims
self.ylims = ylims
self.dx = self.xlims[1] - self.xlims[0]
self.dy = self.ylims[1] - self.ylims[0]
"""
Set the truss material and forces and define their size.
"""
self.trussMat = trussMat
self.forces = forces
"""
Set the static boundary nodes, and the connectivities between nodes.
"""
self.xCoordBasic = np.array([0.,1.,1.])
self.yCoordBasic = np.array([0.,0.,3.])
self.Connectivity = [[1,2],[1,4],[2,4],[1,3], [3,4],[3,6],[3,5],[4,6], [5,6], [5,7], [6,7]]
def testIndividual(individual, env):
"""
Tests an individual and returns the result of that test.
This function is essentially a wrapper that converts data from our
individual and environment into a form usable by the truss analysis
functions.
Note several values are returned as a result.
This will be processed later to define the value of fitness.
Parameters
----------
individual : Naturalize Individual
The input individual.
environment : Naturalize Environment
The input environment.
"""
Areas = np.ones(11)*100*mm**2
Forces = env.forces
xCoords = individual.genotype[0]
yCoords = individual.genotype[1]
xCoordBasic = env.xCoordBasic
yCoordBasic = env.yCoordBasic
connectiviy = env.Connectivity
# Add the basic node and the final fixed node.
xCoords = np.concatenate([xCoordBasic[:2], xCoords, [xCoordBasic[-1]]])
yCoords = np.concatenate([yCoordBasic[:2], yCoords, [yCoordBasic[-1]]])
gen = str(individual.gen)
nodeIDs = np.array([1,2,3,4,5,6,7])
disp, volumes, force = ta.runTrussAnalysis(Areas, Forces, nodeIDs, xCoords, yCoords, connectiviy, env.trussMat)
return disp, volumes, force
def ftest(individual, environment):
"""
The fitness function: this is what we actually want to minimize.
In this case, we'll minimize the displacement in the x direction.
Parameters
----------
individual : Naturalize Individual
The input individual.
environment : Naturalize Environment
The input environment.
Returns
-------
disp : float
The output displacement of the analysis.
"""
"""
Note that our OpenSees analysis may actually fail to converge for some
genotypes. Here we use a try/except block to try and catch these errors.
It's not perfect; sometimes OpenSees still manages to crash the kernel.
This is beyond our control, but doesn't happen very often.
"""
try:
result = testIndividual(individual, environment)
# print('result') # debugging
except:
result = [10**6, [10**6], 10**6]  # fallback matches the (disp, volumes, force) shape
print(str(individual.name) + ' failed')
# if result[0] == 0:
# result[0] = 10**6
individual.result = result
return result
def fitness_basic(individual, environment):
"""
Determines how good each solution is; this is what is minimized.
In this case, minimize the displacement in the x direction.
Parameters
----------
individual : Naturalize Individual
The input individual.
environment : Naturalize Environment
The input environment.
Returns
-------
disp : float
The output displacement of the analysis.
"""
# disp, volumes, forces = individual.result
disp, volumes, _ = individual.result
"""
Sometimes OpenSees freaks out and returns a negative displacement.
We don't want those solutions!"""
if disp[0] <= 0:
fitness = 100
else:
fitness = disp[0]
return fitness
def fitness_Complex(individual, environment):
"""
Determines how good each solution is; this is what is minimized.
In this function, the displacement multiplied by volume is minimized.
This will make solutions with a lower volume more attractive.
Parameters
----------
individual : Naturalize Individual
The input individual.
environment : Naturalize Environment
The input environment.
Returns
-------
fitness : float
The output displacement multiplied by system volume.
"""
dispLim = 3/250
# forceLim = 16000
disp, volumes, _ = individual.result
vol = np.sum(volumes)
fitness = disp[0]*vol
# Sometimes opensees freaks out and returns a negative displacement.
# We don't want those solutions!
if disp[0] <= 0:
fitness = 100
return fitness
def fitnessLength(individual, environment):
"""
Determines how good each solution is.
Here we used a normalized displacement, which is the displacement multiplied
by the total volume.
The best solution will have the lowest combination of displacement and volume.
"""
Llim = 1.5
disp, volumes, _ = individual.result
Lmax = np.max(np.array(volumes) / 0.001)
Lmin = np.min(np.array(volumes) / 0.001)
fitness = disp[0]
if disp[0] <= 0:
fitness = 100
# normDisp = 100
if Llim < Lmax:
fitness = 100
return fitness
def plotIndividual(individual, env):
xCoordBasic = env.xCoordBasic
yCoordBasic = env.yCoordBasic
connectiviy = env.Connectivity
xCoords = individual.genotype[0]
yCoords = individual.genotype[1]
xCoords = np.concatenate([xCoordBasic[:2], xCoords, [xCoordBasic[-1]]])
yCoords = np.concatenate([yCoordBasic[:2], yCoords, [yCoordBasic[-1]]])
nodeIDs = np.array([1,2,3,4,5,6,7])
Areas = np.ones(11)*0.001
fig, ax = ta.plotTruss(Areas,nodeIDs, xCoords, yCoords, connectiviy)
areas = np.ones(len(connectiviy))
maxArea = 1
style_blue(fig, ax, areas, maxArea)
return fig, ax
def style_blue(fig, ax, areas, maxArea):
fig.set_figwidth(8)
fig.set_figheight(6)
for text in ax.texts:
text.set_fontsize(10)
ax.texts = []
for ii, line in enumerate(ax.lines):
line.set_linewidth(5*areas[ii]/maxArea)
line.set_color("steelblue")
ax.set_facecolor("skyblue")
ax.collections[0].set_color('cornsilk')
ax.collections[0].set_zorder(10)
ax.collections[0].set_linewidth(2)
# fig.savefig("mygraph.png")
# ax.axis('off')
ax.set_xlim([-1.5, 2.5])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# ax.annotate("", xy=(0.9, 3), xytext=(0.5, 3), arrowprops=dict(arrowstyle="->", color = 'red') )
return fig, ax
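# Hypothetical setup sketch (the limits are illustrative values): build the
# environment object that the test and fitness functions above consume.
if __name__ == "__main__":
    env = environment(xlims=[0., 1.], ylims=[0., 3.])
    print(env.dx, env.dy)  # 1.0 3.0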
| 7,450 |
text_recognition/recognize.py
|
kstar1996/Car_Plate_DataGenerator
| 3 |
2023888
|
import easyocr
import os
# Vehicles were classified by usage according to the character on the plate, as follows.
# 1. Private, non-commercial vehicles
# 가/나/다/라/마/거/너/더/러/머/버/서/어/저/고/노/도/로/모/보/소/오/조/구/누/두/루/무/부/수/우/주
# 2. Commercial vehicles (taxis, buses)
# 아/바/사/자
# 3. Rental cars (corporate or rental)
# 하/허/호
# 4. Delivery vehicles
# 배
# https://post.naver.com/viewer/postView.nhn?volumeNo=15128707&memberNo=40864363
def korean_recog(img):
string = ''
reader = easyocr.Reader(
lang_list=['ko'],
gpu=False,
detector='./craft_mlt_25k.pth',
recognizer='./korean_g2.pth',
download_enabled=False
)
# Make sure that it only recognizes certain characters
result = reader.readtext(img, detail=0, allowlist='0123456789가나다라마거너더러머버서어저고노도로모보소오조구누두루무부수우주아바사자하허호배서울경기인천강원충남대전충북부산울대구경북남전광제')
for i in result:
string += i
return string
# korean_recog('../detection/crop_black8361.jpg') #126차 2861
num = 0
for file in sorted(os.listdir("../data_generate_license/generated_plate/test_plate/")):
num += 1
file_name = "../data_generate_license/generated_plate/test_plate/" + file
# plate = cv2.imread(file_name)
print(file)
print(korean_recog(file_name))
| 1,122 |
algorithms_for_searching_sorting_indexing/test/test_search_sort.py
|
bijanshokrollahi/data_structures_and_algos
| 1 |
2023323
|
import unittest
import insertion_sort
class MyTestCase(unittest.TestCase):
def test_something(self):
x1 = [0, 2, 4, 5, 6, 7, 8, 10]
y1 = [-2, 0, 2, 4, 7, 8, 10, 12]
crossover = insertion_sort.findCrossoverIndex(x1, y1)
self.assertEqual(crossover, 3)
def test_something_else(self):
x2 = [0, 1, 4, 5, 6, 7, 8, 10]
y2 = [-2, 1.5, 2, 4, 7, 8, 10, 12]
crossover = insertion_sort.findCrossoverIndex(x2, y2)
self.assertIn(crossover, [0, 3])
def test_3(self):
x1 = [0, 1, 2, 3, 4, 5, 6, 7]
y1 = [-2, 0, 4, 5, 6, 7, 8, 9]
crossover = insertion_sort.findCrossoverIndex(x1, y1)
self.assertEqual(crossover, 1)
def test_4(self):
x1 = [0, 1, 2, 3, 4, 5, 6, 7]
y1 = [-2, 0, 4, 4.2, 4.3, 4.5, 8, 9]
crossover = insertion_sort.findCrossoverIndex(x1, y1)
self.assertIn(crossover, [1, 5])
def test_5(self):
x1 = [0, 1]
y1 = [-10, 10]
crossover = insertion_sort.findCrossoverIndex(x1, y1)
self.assertEqual(crossover, 0)
def test_6(self):
x1 = [0, 1, 2, 3]
y1 = [-10, -9, -8, 5]
crossover = insertion_sort.findCrossoverIndex(x1, y1)
self.assertEqual(crossover, 2)
def test_7(self):
n = insertion_sort.integerCubeRoot(7)
self.assertEqual(n, 1)
def test_8(self):
n = insertion_sort.integerCubeRoot(8)
self.assertEqual(n, 2)
def test_9(self):
n = insertion_sort.integerCubeRoot(20)
self.assertEqual(n, 2)
def test_10(self):
n = insertion_sort.integerCubeRoot(26)
self.assertEqual(n, 2)
def test_11(self):
for j in range(27, 64):
self.assertEqual(insertion_sort.integerCubeRoot(j), 3)
def test_12(self):
for j in range(64, 125):
self.assertEqual(insertion_sort.integerCubeRoot(j), 4)
def test_14(self):
for j in range(125, 216):
self.assertEqual(insertion_sort.integerCubeRoot(j), 5)
def test_13(self):
for j in range(216, 343):
self.assertEqual(insertion_sort.integerCubeRoot(j), 6)
def test_18(self):
for j in range(343, 512):
self.assertEqual(insertion_sort.integerCubeRoot(j), 7)
def test_15(self):
lst1 = insertion_sort.kWayMerge([[1, 2, 3], [4, 5, 7], [-2, 0, 6], [5]])
expected_output = [-2, 0, 1, 2, 3, 4, 5, 5, 6, 7]
self.assertEqual(lst1, expected_output)
def test_16(self):
lst1 = insertion_sort.kWayMerge([[-2, 4, 5, 8], [0, 1, 2], [-1, 3, 6, 7]])
expected_output = [-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]
self.assertEqual(lst1, expected_output)
def test_17(self):
lst1 = insertion_sort.kWayMerge([[-1, 1, 2, 3, 4, 5]])
expected_output = [-1, 1, 2, 3, 4, 5]
self.assertEqual(lst1, expected_output)
if __name__ == '__main__':
unittest.main()
| 2,949 |
utils/resize_smaller.py
|
ace19-dev/image-retrieval-tf
| 6 |
2024082
|
# ========================================================================
# Resize, Pad Image to Square Shape and Keep Its Aspect Ratio With Python
# ========================================================================
import os
import argparse
import sys
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import tensorflow as tf
FLAGS = None
def main(_):
tf.compat.v1.logging.set_verbosity(tf.logging.INFO)
cls_lst = os.listdir(FLAGS.original_dir)
cls_lst.sort()
# print(cls_lst)
size = FLAGS.desired_size.split(',')
size = tuple(int(s) for s in size)
for classname in cls_lst:
class_path = os.path.join(FLAGS.original_dir, str(classname))
data_category = os.listdir(class_path)
# total = len(data_cate)
for cate in data_category:
target_dir = os.path.join(FLAGS.target_dir, str(classname), cate)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
img_dir = os.path.join(class_path, cate)
images = os.listdir(img_dir)
total = len(images)
for idx, img in enumerate(images):
if idx % 100 == 0:
tf.logging.info('On image %d of %d', idx, total)
image_path = os.path.join(img_dir, img)
im = Image.open(image_path)
im.thumbnail(size, Image.ANTIALIAS)
# im.resize(size)
outfile = os.path.join(target_dir, img)
im.save(outfile)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--original_dir',
type=str,
default='/home/ace19/dl_data/v2-plant-seedlings-dataset/classes',
help='Where is image to load.')
parser.add_argument(
'--target_dir',
type=str,
default='/home/ace19/dl_data/v2-plant-seedlings-dataset_resized/classes',
help='Where is resized image to save.')
parser.add_argument(
'--desired_size',
type=str,
default='224,224',
help='how do you want image resize height, width.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 2,258 |
test/__init__.py
|
andrewdge/Pyot
| 0 |
2024028
|
import platform
import os
from pyot.core import Settings
# Fix: Windows `asyncio.run()` will throw `RuntimeError: Event loop is closed`.
# Refer: https://github.com/aio-libs/aiohttp/issues/4324
if platform.system() == 'Windows':
from pyot.utils.internal import silence_proactor_pipe_deallocation
silence_proactor_pipe_deallocation()
Settings(
MODEL="LOL",
DEFAULT_PLATFORM="NA1",
DEFAULT_REGION="AMERICAS",
DEFAULT_LOCALE="EN_US",
PIPELINE=[
{
"BACKEND": "pyot.stores.Omnistone",
"LOG_LEVEL": 30,
"EXPIRATIONS": {
"summoner_v4_by_name": 100,
"match_v4_match": 600,
"match_v4_timeline": 600,
}
},
# {
# "BACKEND": "pyot.stores.RedisCache",
# "LOG_LEVEL": 30,
# "DB": 1,
# "EXPIRATIONS": {
# "match_v4_match": 600,
# "match_v4_timeline": 600,
# }
# },
{
"BACKEND": "pyot.stores.MerakiCDN",
"LOG_LEVEL": 30,
"ERROR_HANDLING": {
404: ("T", []),
500: ("R", [3])
}
},
{
"BACKEND": "pyot.stores.CDragon",
"LOG_LEVEL": 30,
"ERROR_HANDLING": {
404: ("T", []),
500: ("R", [3])
}
},
{
"BACKEND": "pyot.stores.RiotAPI",
"LOG_LEVEL": 30,
"API_KEY": os.environ["RIOT_API_KEY"],
"RATE_LIMITER": {
"BACKEND": "pyot.limiters.RedisLimiter",
"LIMITING_SHARE": 1,
"HOST": "127.0.0.1",
"PORT": 6379,
"DB": 0,
},
"ERROR_HANDLING": {
400: ("T", []),
503: ("E", [3, 3])
}
}
]
).activate()
Settings(
MODEL="LOR",
DEFAULT_REGION="AMERICAS",
DEFAULT_LOCALE="EN_US",
PIPELINE=[
{
"BACKEND": "pyot.stores.Omnistone",
"LOG_LEVEL": 30,
"EXPIRATIONS": {
"match_v1_match": 10
}
},
{
"BACKEND": "pyot.stores.DDragon",
"LOG_LEVEL": 30,
"ERROR_HANDLING": {
404: ("T", []),
500: ("R", [3])
}
},
{
"BACKEND": "pyot.stores.RiotAPI",
"API_KEY": os.environ["LOR_API_KEY"],
"LOG_LEVEL": 30,
"RATE_LIMITER": {
"BACKEND": "pyot.limiters.MemoryLimiter",
"LIMITING_SHARE": 1,
},
"ERROR_HANDLING": {
400: ("T", []),
503: ("E", [3, 3])
}
}
]
).activate()
Settings(
MODEL="TFT",
DEFAULT_PLATFORM="NA1",
DEFAULT_REGION="AMERICAS",
DEFAULT_LOCALE="EN_US",
PIPELINE=[
{
"BACKEND": "pyot.stores.Omnistone",
"LOG_LEVEL": 30,
},
{
"BACKEND": "pyot.stores.CDragon",
"LOG_LEVEL": 30,
"ERROR_HANDLING": {
404: ("T", []),
500: ("R", [3])
}
},
{
"BACKEND": "pyot.stores.RiotAPI",
"API_KEY": os.environ["TFT_API_KEY"],
"RATE_LIMITER": {
"BACKEND": "pyot.limiters.MemoryLimiter",
"LIMITING_SHARE": 1,
},
"ERROR_HANDLING": {
400: ("T", []),
503: ("E", [3, 3])
}
}
]
).activate()
Settings(
MODEL="VAL",
DEFAULT_PLATFORM="NA",
DEFAULT_REGION="AMERICAS",
DEFAULT_LOCALE="EN-US",
PIPELINE=[
{
"BACKEND": "pyot.stores.Omnistone",
"LOG_LEVEL": 30,
},
{
"BACKEND": "pyot.stores.RiotAPI",
"API_KEY": os.environ["VALORANT_DEV_KEY"],
"RATE_LIMITER": {
"BACKEND": "pyot.limiters.MemoryLimiter",
"LIMITING_SHARE": 1,
},
"ERROR_HANDLING": {
400: ("T", []),
503: ("E", [3, 3])
}
}
]
).activate()
| 4,274 |
src/reader/_http_utils.py
|
mirekdlugosz/reader
| 205 |
2023414
|
"""
HTTP utilities. Contains no business logic.
This mainly exists because we didn't want to depend on werkzeug.
"""
import re
from typing import Dict
from typing import Iterable
from typing import List
from typing import Tuple
# copied from werkzeug.http
_accept_re = re.compile(
r"""
( # media-range capturing-parenthesis
[^\s;,]+ # type/subtype
(?:[ \t]*;[ \t]* # ";"
(?: # parameter non-capturing-parenthesis
[^\s;,q][^\s;,]* # token that doesn't start with "q"
| # or
q[^\s;,=][^\s;,]* # token that is more than just "q"
)
)* # zero or more parameters
) # end of media-range
(?:[ \t]*;[ \t]*q= # weight is a "q" parameter
(\d*(?:\.\d+)?) # qvalue capturing-parentheses
[^,]* # "extension" accept params: who cares?
)? # accept params are optional
""",
re.VERBOSE,
)
def parse_accept_header(value: str) -> List[Tuple[str, float]]:
"""Like werkzeug.http.parse_accept_header(), but returns a plain list."""
# copied from werkzeug.http, with some modifications
if not value:
return []
result = []
for match in _accept_re.finditer(value):
quality_match = match.group(2)
if not quality_match:
quality: float = 1
else:
quality = max(min(float(quality_match), 1), 0)
result.append((match.group(1), quality))
result.sort(key=lambda t: t[1], reverse=True)
return result
def unparse_accept_header(values: Iterable[Tuple[str, float]]) -> str:
"""Like werkzeug.datastructures.MIMEAccept(values).to_header()."""
parts = []
for value, quality in sorted(values, key=lambda t: t[1], reverse=True):
if quality != 1:
value = f"{value};q={quality}"
parts.append(value)
return ','.join(parts)
def parse_options_header(value: str) -> Tuple[str, Dict[str, str]]:
"""Like werkzeug.http.parse_options_header(), but ignores the options."""
return value.partition(';')[0].strip(), {}
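# Illustrative round trip (not part of the module API): parse an Accept
# header into (value, quality) pairs sorted by quality, then serialize back.
if __name__ == "__main__":
    pairs = parse_accept_header("text/html,application/json;q=0.9")
    print(pairs)  # [('text/html', 1), ('application/json', 0.9)]
    print(unparse_accept_header(pairs))  # text/html,application/json;q=0.9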
| 2,186 |
libs/stats/equalizer.py
|
ttuananh112/OBPDataCollection
| 1 |
2023943
|
import glob
import os.path
import shutil
import pandas as pd
import numpy as np
from stats.utils import get_turning, get_velocity
class Equalizer:
def __init__(
self,
folder_path: str
):
"""
Equalize the training set across the
four turning-direction types.
Args:
folder_path (str): should be path to dynamics_by_ts folder
"""
self._list_file = glob.glob(f"{folder_path}/*.csv")
self.container = {
"left": list(),
"right": list(),
"stay": list(),
"straight": list()
}
self._do_turning_direction_stats()
def _do_turning_direction_stats(self):
"""
Examine the turning direction of the AGENT.
self.container should be:
(Dict(List)):
+ key: turning direction type
+ value: list of file_path corresponding to agent's turning direction
Returns:
(None)
"""
for file_path in self._list_file:
df = pd.read_csv(file_path)
df_agent = df.loc[df["object_type"] == "AGENT"]
avg_vel = get_velocity(df_agent["status"])
turn_dir = get_turning(df_agent["heading"], avg_vel)
self.container[turn_dir].append(file_path)
def run(
self,
save_folder: str
):
"""
Equalize the number of samples per turning-direction type
and copy the balanced subset to save_folder
Args:
save_folder (str): folder to copy new data to
Returns:
(None)
"""
if not os.path.exists(save_folder):
os.makedirs(save_folder)
# shuffling
for turn_dir in self.container.keys():
np.random.shuffle(self.container[turn_dir])
# get min number of files
min_length = 1e9
for list_files in self.container.values():
if len(list_files) < min_length:
min_length = len(list_files)
# cut by min number of files
# and copy to new folder
for turn_dir, list_files in self.container.items():
new_list_files = list_files[:min_length]
for file_path in new_list_files:
f_name = os.path.basename(file_path)
shutil.copyfile(file_path, f"{save_folder}/{f_name}")
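# Hypothetical usage sketch (folder paths are placeholders): balance the four
# turning-direction classes and copy the balanced subset to a new folder.
if __name__ == "__main__":
    eq = Equalizer("data/dynamics_by_ts")
    eq.run(save_folder="data/dynamics_by_ts_balanced")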
| 2,374 |
Python3/0355-Design-Twitter/soln.py
|
wyaadarsh/LeetCode-Solutions
| 5 |
2022937
|
import collections
import heapq
import itertools
class Twitter:
def __init__(self):
"""
Initialize your data structure here.
"""
self.tweets = collections.defaultdict(collections.deque)
self.time = itertools.count(10000, -1)
self.follows = collections.defaultdict(set)
def postTweet(self, userId, tweetId):
"""
Compose a new tweet.
:type userId: int
:type tweetId: int
:rtype: void
"""
self.tweets[userId].appendleft((next(self.time), tweetId))
def getNewsFeed(self, userId):
"""
Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.
:type userId: int
:rtype: List[int]
"""
merges = heapq.merge(iter(self.tweets[userId]),
*[iter(self.tweets[followeeId])
for followeeId in self.follows[userId]])
return [tweetId for _, tweetId in itertools.islice(merges, 10)]
def follow(self, followerId, followeeId):
"""
Follower follows a followee. If the operation is invalid, it should be a no-op.
:type followerId: int
:type followeeId: int
:rtype: void
"""
if followeeId != followerId:
self.follows[followerId].add(followeeId)
def unfollow(self, followerId, followeeId):
"""
Follower unfollows a followee. If the operation is invalid, it should be a no-op.
:type followerId: int
:type followeeId: int
:rtype: void
"""
self.follows[followerId].discard(followeeId)
# Your Twitter object will be instantiated and called as such:
# obj = Twitter()
# obj.postTweet(userId,tweetId)
# param_2 = obj.getNewsFeed(userId)
# obj.follow(followerId,followeeId)
# obj.unfollow(followerId,followeeId)
| 1,971 |
data/istat_stats/pystatToTable.py
|
ghirardinicola/hackLavoro
| 2 |
2023628
|
from pyjstat import pyjstat
from collections import OrderedDict
from pprint import pprint
import urllib2
import json
#dataset_url_1 = 'https://raw.githubusercontent.com/ghirardinicola/hackLavoro/master/italiaRAW2014soloProfessioni.json'
dataset_url_1 = 'https://gist.githubusercontent.com/ghirardinicola/dd2aaa5841f5a5d7e4b3/raw/05854048e9ef2dd33df33db6d7fd335d60918e98/occupatiSTAT.json'
# Important: JSON data must be retrieved in order;
# this can be accomplished using the object_pairs_hook parameter
# of the json.load method.
occupati_json_data = json.load(urllib2.urlopen(dataset_url_1),
object_pairs_hook=OrderedDict)
#with open('occupati.json') as data_file:
# occupati_json_data = json.load(data_file,object_pairs_hook=OrderedDict)
#pprint(occupati_json_data)
#occupati_json_data = json.load(json_data,
# object_pairs_hook=OrderedDict)
occupati_results = pyjstat.from_json_stat(occupati_json_data, naming="label")
# Get the first result, since we're using only one input dataset
occupati_dataset = occupati_results[0]
occupati_dataset.to_csv('occupati.csv', sep='\t', encoding='utf-8')
#print occupati_dataset
| 1,182 |
surrogate/models.py
|
hfzhang31/I-BAU_Adversarial_Unlearning_of-Backdoors_via_implicit_Hypergradient
| 0 |
2023730
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.optim import Optimizer
import torch.backends.cudnn as cudnn
import torchvision
from torch.utils.data import TensorDataset, DataLoader
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import hypergrad as hg
from itertools import repeat
from poi_util import poison_dataset,patching_test
import poi_util
import tqdm
import h5py
import torchvision
import torchvision.transforms as transforms
import kornia.augmentation as A
from output_util import draw_plot
from PIL import Image
import torchvision.models as models
# ResNet-18 model for ISSBA
def get_issba_model(name, num_class=200):
if name.lower() == 'res18':
#Load Resnet18
model = models.resnet18(True)
model.fc = nn.Linear(model.fc.in_features, num_class)
return model
cfg = {'small_VGG16': [32, 32, 'M', 64, 64, 'M', 128, 128, 'M'],
"VGG16": [64, 64, "M", 128, 128, "M", 256, 256, 256, "M", 512, 512, 512, "M", 512, 512, 512, "M"],}
drop_rate = [0.3,0.4,0.4]
class VGG(nn.Module):
def __init__(self, vgg_name, num_classes):
super(VGG, self).__init__()
self.features = self._make_layers(cfg[vgg_name])
self.classifier = nn.Linear(2048, num_classes)
def forward(self, x):
out = self.features(x)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def _make_layers(self, cfg):
layers = []
in_channels = 3
key = 0
for x in cfg:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout(drop_rate[key])]
key += 1
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ELU(inplace=True)]
in_channels = x
return nn.Sequential(*layers)
class wanetVGG(nn.Module):
def __init__(self, vgg_name):
super(wanetVGG, self).__init__()
self.features = self._make_layers(cfg[vgg_name])
self.classifier = nn.Linear(512, 10)
def forward(self, x):
out = self.features(x)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def _make_layers(self, cfg):
layers = []
in_channels = 3
for x in cfg:
if x == "M":
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [
nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True),
]
in_channels = x
layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
return nn.Sequential(*layers)
class PreActBlock(nn.Module):
"""Pre-activation version of the BasicBlock."""
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.ind = None
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, "shortcut") else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
if self.ind is not None:
out += shortcut[:, self.ind, :, :]
else:
out += shortcut
return out
class PreActBottleneck(nn.Module):
"""Pre-activation version of the original Bottleneck module."""
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(PreActBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, "shortcut") else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out = self.conv3(F.relu(self.bn3(out)))
out += shortcut
return out
class PreActResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(PreActResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.avgpool = nn.AvgPool2d(4)
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def PreActResNet18(num_classes=10):
return PreActResNet(PreActBlock, [2, 2, 2, 2], num_classes=num_classes)
class SM(torch.nn.Module):
def __init__(self, channels, kernel_size, stride, epsilon = 1, unconstrained = False):
super(SM, self).__init__()
self.model = nn.Sequential(
nn.Conv2d(channels, channels, kernel_size, stride = stride, padding = int((kernel_size-1)/2)),
nn.Conv2d(channels, channels, kernel_size, stride = stride, padding = int((kernel_size-1)/2)),
nn.Conv2d(channels, channels, kernel_size, stride = stride, padding = int((kernel_size-1)/2)),
nn.Conv2d(channels, channels, kernel_size, stride = stride, padding = int((kernel_size-1)/2)),
)
self.epsilon = epsilon
self.unconstrained = unconstrained
def forward(self, input):
x = self.model(input)
if not self.unconstrained:
x = torch.tanh(x)*self.epsilon
return x
""" class SM(torch.nn.Module):
def __init__(self, sample):
super(SM, self).__init__()
self.pert = torch.nn.Parameter(torch.zeros_like(sample, requires_grad=True))
def forward(self, input):
return self.pert """
kepsilon = None
kconstrained = None
def get_sm_model(params = None, constants = None):
global kepsilon
global kconstrained
if constants is not None:
epsilon, constrained = constants
assert not (kepsilon == None and epsilon == None)
if kepsilon == None and epsilon != None:
kepsilon = epsilon
assert not (kconstrained == None and constrained == None)
if kconstrained == None and constrained != None:
kconstrained = constrained
sm_model = SM(3,3,1, kepsilon, kconstrained)
if params != None:
sd = dict()
sd['model.0.weight'] = params[0]
sd['model.0.bias'] = params[1]
sd['model.1.weight'] = params[2]
sd['model.1.bias'] = params[3]
sd['model.2.weight'] = params[4]
sd['model.2.bias'] = params[5]
sd['model.3.weight'] = params[6]
sd['model.3.bias'] = params[7]
sm_model.load_state_dict(sd)
return sm_model
def normalize_sm_model(model, p=2, div=1.):
params = list(model.parameters())
sd = dict()
sd['model.0.weight'] = torch.div(F.normalize(params[0], p=p), div)
sd['model.0.bias'] = torch.div(F.normalize(params[1], p=p, dim=0), div)
sd['model.1.weight'] = torch.div(F.normalize(params[2], p=p), div)
sd['model.1.bias'] = torch.div(F.normalize(params[3], p=p, dim=0), div)
sd['model.2.weight'] = torch.div(F.normalize(params[4], p=p), div)
sd['model.2.bias'] = torch.div(F.normalize(params[5], p=p, dim=0), div)
sd['model.3.weight'] = torch.div(F.normalize(params[6], p=p), div)
sd['model.3.bias'] = torch.div(F.normalize(params[7], p=p, dim=0), div)
model.load_state_dict(sd)
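# Shape sanity check (illustrative only; assumes the module's imports above
# resolve): run a CIFAR-sized batch through PreActResNet18.
if __name__ == "__main__":
    net = PreActResNet18(num_classes=10)
    out = net(torch.randn(2, 3, 32, 32))
    print(out.shape)  # torch.Size([2, 10])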
| 9,326 |
django/apps/attachment/migrations/0010_attachment_size.py
|
wykys/project-thesaurus
| 0 |
2023984
|
# Generated by Django 3.0.6 on 2020-05-28 08:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('attachment', '0009_auto_20200524_1426'),
]
operations = [
migrations.AddField(
model_name='attachment',
name='size',
field=models.IntegerField(default=0, verbose_name='Size of attachment'),
preserve_default=False,
),
]
| 459 |
Raspberry/packages/rmnetwork/tcpfileclient.py
|
peter9teufel/raspmedia
| 29 |
2023870
|
import socket
import sys, os, platform
import messages
from packages.rmmedia import mediaplayer
observers = []
_BLOCK_SIZE = 1024
def sendFile(filePath, host):
s = socket.socket()
s.connect((host,60020))
f = open(unicode(filePath), "rb")
filename = os.path.basename(f.name)
filenameEnc = filename.encode('utf-8')
# print "Filename encoded: ", filenameEnc
fnSize = len(filenameEnc)
numFiles = 1
numFilesBytes = [int(numFiles >> i & 0xff) for i in (24,16,8,0)]
sizeBytes = [int(fnSize >> i & 0xff) for i in (24,16,8,0)]
data = bytearray()
for b in numFilesBytes:
data.append(int(b))
for b in sizeBytes:
data.append(int(b))
filesize = os.stat(filePath).st_size
fileSizeBytes = [int(filesize >> i & 0xff) for i in (24,16,8,0)]
for b in fileSizeBytes:
data.append(int(b))
s.send(data)
s.send(filenameEnc)
bytesSent = 0
l = f.read(_BLOCK_SIZE)
while (l):
s.send(l)
bytesSent += _BLOCK_SIZE
if bytesSent > filesize:
bytesSent = filesize
l = f.read(_BLOCK_SIZE)
s.close()
def sendFiles(files, basePath, host):
s = socket.socket()
s.connect((host,60029))
msgData = messages.getTcpFileMessage(files, basePath)
msgSize = len(msgData)
bytesSent = 0
index = 0
while bytesSent < msgSize:
packEnd = index + _BLOCK_SIZE
# print "INDEX: %d PACKEND: %d MESSAGE SIZE: %d" % (index,packEnd,msgSize)
if packEnd > msgSize:
curPacket = msgData[index:]
else:
curPacket = msgData[index:packEnd]
s.send(curPacket)
bytesSent += _BLOCK_SIZE
if bytesSent > msgSize:
bytesSent = msgSize
index += _BLOCK_SIZE
print "Done, closing TCP connection..."
s.close()
def sendAllImageFiles(host):
mediaPath = "/home/pi/raspmedia/Raspberry/media/thumbs"
imgs = mediaplayer.getImageFilelist()
sendFiles(imgs, mediaPath, host)
| 2,000 |
model_api.py
|
tailongnguyen/petsite
| 2 |
2022977
|
from flask import Flask,request,json
import numpy as np
from flask_cors import CORS
from recognitor import *
import cv2
import os
import pickle
app = Flask(__name__)
CORS(app)
model = get_model()
classes = ['abyssinian', 'alaskanmalamute', 'american bobtail', 'american shorthair', 'americanpitbullterrier', 'americanstaffordshireterrier', 'turkishangora', 'balinese', 'beagle', 'bengal', 'berger', 'birman', 'bombay', 'boxer', 'bullmastiff', 'burmese', 'cavalierkingcharlesspaniel', 'chihuahua', 'chowchow', 'dachshund', 'dalmatian', 'dobermannpinscher', 'englishcockerspaniel', 'englishmastiff',
'greatdane', 'greatpyrenees', 'greyhound', 'huskysibir', 'japanese bobtail', 'labradorretriever', 'leonberger', 'maltese', 'newfoundland', 'pekingese', 'pembrokewelshcorgi', 'persian', 'pomeranian', 'pug', 'rottweiler', 'samoyed', 'shihtzu', 'sphynx', 'st.bernard', 'staffordshirebullterrier', 'tabby', 'vizsla', 'weimaraner', 'westhighlandwhiteterrier', 'whippet', 'yorkshireterrier']
@app.route("/classify", methods=['POST'])
def predict():
image = request.data
im = pickle.loads(image)
img = cv2.resize(im, IMG_SHAPE[:2])
cl = model.predict(np.expand_dims(img, axis=0))
rs = classes[np.argmax(cl)]
print(rs)
return rs
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5050)
| 1,328 |
gym_minigrid/__init__.py
|
patras91/gym-minigrid
| 1 |
2023309
|
# Import the envs module so that envs register themselves
import gym_minigrid.envs
# Import wrappers so it's accessible when installing with pip
import gym_minigrid.wrappers
| 174 |
diagnostics/inspection.py
|
tianyu-lu/torchsde
| 0 |
2023034
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import torch
import tqdm
from torchsde import BaseBrownian, BaseSDE, sdeint
from torchsde.settings import SDE_TYPES
from torchsde.types import Tensor, Vector, Scalar, Tuple, Optional, Callable
from . import utils
def inspect_samples(y0: Tensor,
ts: Vector,
dt: Scalar,
sde: BaseSDE,
bm: BaseBrownian,
img_dir: str,
methods: Tuple[str, ...],
options: Optional[Tuple] = None,
vis_dim=0,
dt_true: Optional[float] = 2 ** -14,
labels: Optional[Tuple[str, ...]] = None):
if options is None:
options = (None,) * len(methods)
if labels is None:
labels = methods
sde = copy.deepcopy(sde).requires_grad_(False)
solns = [
sdeint(sde, y0, ts, bm, method=method, dt=dt, options=options_)
for method, options_ in zip(methods, options)
]
method_for_true = 'euler' if sde.sde_type == SDE_TYPES.ito else 'midpoint'
true = sdeint(sde, y0, ts, bm, method=method_for_true, dt=dt_true)
labels += ('true',)
solns += [true]
# (T, batch_size, d) -> (T, batch_size) -> (batch_size, T).
solns = [soln[..., vis_dim].t() for soln in solns]
for i, samples in enumerate(zip(*solns)):
utils.swiss_knife_plotter(
img_path=os.path.join(img_dir, f'{i}'),
plots=[
{'x': ts, 'y': sample, 'label': label, 'marker': 'x'}
for sample, label in zip(samples, labels)
]
)
def inspect_orders(y0: Tensor,
t0: Scalar,
t1: Scalar,
dts: Vector,
sde: BaseSDE,
bm: BaseBrownian,
img_dir: str,
methods: Tuple[str, ...],
options: Optional[Tuple] = None,
dt_true: Optional[float] = 2 ** -14,
labels: Optional[Tuple[str, ...]] = None,
test_func: Optional[Callable] = lambda x: (x ** 2).flatten(start_dim=1).sum(dim=1)):
if options is None:
options = (None,) * len(methods)
if labels is None:
labels = methods
sde = copy.deepcopy(sde).requires_grad_(False)
ts = torch.tensor([t0, t1], device=y0.device)
solns = [
[
sdeint(sde, y0, ts, bm, method=method, dt=dt, options=options_)[-1]
for method, options_ in zip(methods, options)
]
for dt in tqdm.tqdm(dts)
]
if hasattr(sde, 'analytical_sample'):
true = sde.analytical_sample(y0, ts, bm)[-1]
else:
method_for_true = 'euler' if sde.sde_type == SDE_TYPES.ito else 'midpoint'
true = sdeint(sde, y0, ts, bm, method=method_for_true, dt=dt_true)[-1]
mses = []
maes = []
for dt, solns_ in zip(dts, solns):
mses_for_dt = [utils.mse(soln, true) for soln in solns_]
mses.append(mses_for_dt)
maes_for_dt = [utils.mae(soln, true, test_func) for soln in solns_]
maes.append(maes_for_dt)
strong_order_slopes = [
utils.linregress_slope(utils.log(dts), .5 * utils.log(mses_for_method))
for mses_for_method in zip(*mses)
]
weak_order_slopes = [
utils.linregress_slope(utils.log(dts), utils.log(maes_for_method))
for maes_for_method in zip(*maes)
]
utils.swiss_knife_plotter(
img_path=os.path.join(img_dir, 'strong_order'),
plots=[
{'x': dts, 'y': mses_for_method, 'label': f'{label}(k={slope:.4f})', 'marker': 'x'}
for mses_for_method, label, slope in zip(zip(*mses), labels, strong_order_slopes)
],
options={'xscale': 'log', 'yscale': 'log', 'cycle_line_style': True}
)
utils.swiss_knife_plotter(
img_path=os.path.join(img_dir, 'weak_order'),
plots=[
            {'x': dts, 'y': maes_for_method, 'label': f'{label}(k={slope:.4f})', 'marker': 'x'}
            for maes_for_method, label, slope in zip(zip(*maes), labels, weak_order_slopes)
],
options={'xscale': 'log', 'yscale': 'log', 'cycle_line_style': True}
)
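# A minimal usage sketch (an assumption, not part of the original diagnostics):
# a toy scalar Ito SDE with diagonal noise run through inspect_orders. It
# assumes the local utils helpers accept plain float sequences for dts; the
# BrownianInterval construction follows the public torchsde API.
def _example_inspect_orders(img_dir='./plots'):
    import torchsde

    class ToySDE(torch.nn.Module):
        sde_type = 'ito'
        noise_type = 'diagonal'

        def f(self, t, y):
            return -y  # mean-reverting drift

        def g(self, t, y):
            return 0.5 * torch.ones_like(y)  # constant diagonal diffusion

    y0 = torch.full((4, 1), 0.1)
    bm = torchsde.BrownianInterval(t0=0.0, t1=1.0, size=(4, 1))
    inspect_orders(
        y0=y0, t0=0.0, t1=1.0,
        dts=[2 ** -i for i in range(1, 7)],
        sde=ToySDE(), bm=bm, img_dir=img_dir,
        methods=('euler', 'milstein'),
    )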
| 4,799 |
server.py
|
cmfcmf/mopidy-snapcast
| 4 |
2023650
|
#!/usr/bin/python3
import json
import argparse
import sys
import time
import logging
import os
from typing import Optional, List, Dict
import tornado.ioloop
import tornado.gen
import tornado.web
from snapcast.control import Snapserver
from tornado.escape import to_unicode
from tornado.httpclient import AsyncHTTPClient
from tornado.platform.asyncio import AsyncIOMainLoop
import asyncio
from zeroconf import Zeroconf, ServiceListener, ServiceInfo
from serializer import Serializer
SNAPCAST_ZERO_NAME = '_snapcast-tcp._tcp.local.'
def noop(*args, **kws):
return None
class ZeroListener(ServiceListener):
def __init__(self, container: list, on_add_service = noop, on_remove_service = noop):
self.container = container
self.on_add_service = on_add_service
self.on_remove_service = on_remove_service
def add_service(self, zeroconf, type, name):
info = zeroconf.get_service_info(type, name)
logging.info("Service %s added, service info: %s" % (name, info))
self.container.append(info)
self.on_add_service(info)
def update_service(self, zeroconf, type, name):
info = zeroconf.get_service_info(type, name)
logging.info("Service %s updated, service info: %s" % (name, info))
def remove_service(self, zeroconf, type, name):
logging.info("Service %s removed" % name)
for i, info in enumerate(self.container):
if info.name == name:
self.container.pop(i)
self.on_remove_service(info)
break
class BaseHandler(tornado.web.RequestHandler):
def initialize(self):
super().initialize()
self.serializer = Serializer()
def set_default_headers(self, *args, **kwargs):
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Content-Type', 'application/json')
async def mopidy_rpc_request(self, server_name, method, params=None):
body = json.dumps({
"method": method,
"jsonrpc": "2.0",
"params": params if params is not None else {},
"id": 1
})
headers = dict()
headers['Content-Type'] = 'application/json'
mopidy_server = self.get_mopidy_server_from_name(server_name)
url = 'http://{}:{}/mopidy/rpc'.format(mopidy_server.server, mopidy_server.port)
response = await AsyncHTTPClient().fetch(url, method='POST', body=body, headers=headers)
return json.loads(to_unicode(response.body))['result']
@staticmethod
def get_mopidy_server_from_name(name):
return list(filter(lambda mopidy_server: mopidy_server.name == name, zero_mopidy_servers))[0]
def write_json(self, data):
self.write(json.dumps(self.serializer.serialize(data)))
class BrowseHandler(BaseHandler):
@tornado.gen.coroutine
def get(self):
uri = self.get_argument('uri', None)
name = self.get_argument('name')
items = yield self.mopidy_rpc_request(name, "core.library.browse", {'uri': uri})
self.write_json(items)
class PlayHandler(BaseHandler):
@tornado.gen.coroutine
def get(self):
name = self.get_argument('name')
uris = self.get_arguments('uri')
yield self.mopidy_rpc_request(name, "core.tracklist.clear")
tracks = yield self.mopidy_rpc_request(name, "core.tracklist.add", {'uris': uris})
yield self.mopidy_rpc_request(name, "core.playback.play", {'tlid': tracks[0]['tlid']})
self.write_json({})
class MopidyStopPlaybackHandler(BaseHandler):
@tornado.gen.coroutine
def get(self):
name = self.get_argument('name')
yield self.mopidy_rpc_request(name, "core.tracklist.clear")
yield self.mopidy_rpc_request(name, "core.playback.stop")
self.write_json({})
class SnapServersHandler(BaseHandler):
def get(self):
self.write_json({name: { 'streams': server.streams, 'clients': server.clients } for name, server in snap_servers.items()})
class MopidyServersHandler(BaseHandler):
def get(self):
self.write_json(zero_mopidy_servers)
class ClientSettingsHandler(BaseHandler):
@tornado.gen.coroutine
def get(self):
server_name = self.get_argument('server_name')
client_id = self.get_argument('id')
action = self.get_argument('action')
server = snap_servers[server_name]
client = server.client(client_id)
if action == 'mute':
yield from client.set_muted(True)
elif action == 'unmute':
yield from client.set_muted(False)
elif action == 'delete':
yield from server.delete_client(client.identifier)
elif action == 'set_latency':
latency = int(self.get_argument('latency'))
yield from client.set_latency(latency)
elif action == 'set_stream':
stream_id = self.get_argument('stream')
yield from client.group.set_stream(stream_id)
else:
logging.error('Unknown action!')
pass
self.write_json({})
class StaticFileHandler(tornado.web.StaticFileHandler):
def validate_absolute_path(self, root: str, absolute_path: str) -> Optional[str]:
try:
return super().validate_absolute_path(root, absolute_path)
except tornado.web.HTTPError as e:
if self.request.method == "GET" and e.status_code == 404:
self.redirect("/")
return None
else:
raise e
def make_app(debug):
return tornado.web.Application([
(r"/snap_servers.json", SnapServersHandler),
(r"/mopidy_servers.json", MopidyServersHandler),
(r"/client", ClientSettingsHandler),
(r"/browse.json", BrowseHandler),
(r"/play", PlayHandler),
(r"/stop", MopidyStopPlaybackHandler),
(r"/(.*)", StaticFileHandler, {
'path': os.path.join(os.path.dirname(__file__), 'frontend-react', 'build'),
'default_filename': 'index.html'
}),
], debug=debug)
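# A standalone sketch (not used by the handlers above) showing the same
# JSON-RPC 2.0 call that BaseHandler.mopidy_rpc_request makes, but with the
# blocking urllib client; the host and port defaults are assumptions.
def example_mopidy_rpc(method, params=None, host='mopidy.local', port=6680):
    import urllib.request
    body = json.dumps({
        "method": method,
        "jsonrpc": "2.0",
        "params": params if params is not None else {},
        "id": 1,
    }).encode('utf-8')
    req = urllib.request.Request(
        'http://{}:{}/mopidy/rpc'.format(host, port), data=body,
        headers={'Content-Type': 'application/json'})
    with urllib.request.urlopen(req) as response:
        return json.loads(response.read().decode('utf-8'))['result']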
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Snapcast control')
parser.add_argument("--debug", help="run tornado in debug mode", action="store_true")
parser.add_argument("--loglevel", help="loglevel", default='DEBUG')
parser.add_argument("--port", help="web server port", default=8080, type=int)
args = parser.parse_args()
logging_file_handler = logging.FileHandler("server.log", encoding='utf-8')
logging_file_handler.setLevel(logging.INFO)
logging_file_handler.addFilter(lambda r: not (r.name == "tornado.access"))
logging_stdout_handler = logging.StreamHandler(sys.stdout)
logging.basicConfig(
level=getattr(logging, args.loglevel.upper()),
handlers=[logging_file_handler, logging_stdout_handler],
datefmt='%Y-%m-%d %H:%M:%S',
format='[%(asctime)s] [%(name)s] %(levelname)s: %(message)s'
)
AsyncIOMainLoop().install()
ioloop = asyncio.get_event_loop()
zero_snap_servers = []
zero_mopidy_servers = []
snap_servers = {}
def info_to_name(info: ServiceInfo):
return info.name.replace('.' + SNAPCAST_ZERO_NAME, '')
def on_add_snapserver(info: ServiceInfo):
snap_server = Snapserver(ioloop, host=info.parsed_addresses()[0],
port=info.port, reconnect=True)
snap_servers[info_to_name(info)] = snap_server
ioloop.create_task(snap_server.start())
def on_remove_snapserver(info: ServiceInfo):
snap_servers.pop(info_to_name(info))
zeroconf = Zeroconf()
zeroconf.add_service_listener('_mopidy-http._tcp.local.', ZeroListener(zero_mopidy_servers))
zeroconf.add_service_listener(SNAPCAST_ZERO_NAME, ZeroListener(zero_snap_servers,
on_add_snapserver,
on_remove_snapserver))
@asyncio.coroutine
def sync_snapserver():
while True:
servers = [server for server in snap_servers.values() if server._protocol is not None]
logging.info('Synchronizing %s snapservers' % len(servers))
for server in servers:
status = yield from server.status()
server.synchronize(status)
yield from asyncio.sleep(60)
ioloop.create_task(sync_snapserver())
logging.info("Starting web app")
app = make_app(args.debug)
app.listen(args.port)
ioloop.run_forever()
| 8,573 |
fungraph/internal/lockedcache.py
|
davehadley/graci
| 1 |
2024021
|
import os
import shelve
import threading
from filelock import FileLock
from fungraph.cacheabc import Cache
class LockedCache(Cache):
def __init__(self, dirname: str, timeout=120):
self._timeout = timeout
os.makedirs(dirname, exist_ok=True)
self.filename = os.sep.join((dirname, "fungraphcache.shelve.db"))
@property
def _threadlock(self):
try:
return self._backing_threadlock
except AttributeError:
self._backing_threadlock = threading.RLock()
return self._backing_threadlock
@property
def _lockname(self):
return f"{self.filename}.lock"
def _lock(self):
return FileLock(self._lockname)
def __getitem__(self, key):
with self._threadlock:
with self._lock().acquire(self._timeout):
with shelve.open(self.filename) as s:
return s[key]
def __setitem__(self, key, value):
with self._threadlock:
with self._lock().acquire(self._timeout):
with shelve.open(self.filename) as s:
s[key] = value
def __contains__(self, __x: object) -> bool:
with self._threadlock:
with self._lock().acquire(self._timeout):
with shelve.open(self.filename) as s:
return __x in s
def __getstate__(self):
return (self._timeout, self.filename)
def __setstate__(self, state):
self._timeout, self.filename = state
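# A minimal usage sketch; the cache directory is an assumption, and it assumes
# the Cache ABC declares no abstract members beyond those implemented here.
# Keys follow shelve semantics (strings); access is guarded by both a thread
# lock and a file lock, so separate processes can share the same directory.
if __name__ == '__main__':
    cache = LockedCache('/tmp/fungraph-cache')
    cache['answer'] = 42
    assert 'answer' in cache
    print(cache['answer'])  # -> 42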
| 1,511 |
examples/c08_2_write_to_range/openpyxl/index.py
|
simkimsia/ug-read-write-excel-using-python
| 1 |
2024075
|
from openpyxl import load_workbook
from examples.c08_0_convert_indices_coordinates.openpyxl \
import index as convert_index
def write_to_range(file_path, sheet_name, start_cell, end_cell, data):
# data is expected to be 2 dimensional list
start_cell_index = convert_index.coordinate_to_index(start_cell, False)
end_cell_index = convert_index.coordinate_to_index(end_cell, False)
start_col = start_cell_index[0]
start_row = start_cell_index[1]
end_col = end_cell_index[0]
end_row = end_cell_index[1]
wb = load_workbook(file_path)
ws = wb[sheet_name]
count_row = 0
for i in range(start_row, end_row+1):
count_col = 0
for j in range(start_col, end_col+1):
ws.cell(row=i, column=j).value = data[count_row][count_col]
count_col += 1
count_row += 1
return wb.save(file_path)
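# A minimal usage sketch; the workbook path, sheet name and cell range are
# assumptions. data must be a 2-D list whose shape matches the range
# (here 2 rows x 2 columns for B2:C3).
if __name__ == '__main__':
    write_to_range(
        file_path='example.xlsx',
        sheet_name='Sheet1',
        start_cell='B2',
        end_cell='C3',
        data=[[1, 2], [3, 4]],
    )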
| 871 |
src/tests/test_id_repository.py
|
whorvath2/identifier
| 0 |
2023789
|
"""
Copyright © 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
import pytest
import re
import os
from co.deability.identifier import config
from co.deability.identifier.errors.BadRepositoryError import BadRepositoryError
from co.deability.identifier.errors.IllegalArgumentError import IllegalArgumentError
from co.deability.identifier.errors.IllegalIdentifierError import IllegalIdentifierError
from co.deability.identifier.api.repositories.id_repository import (
IdRepository,
_is_valid,
_generate_id,
)
from co.deability.identifier.api.repositories.id_repository_type import IdRepositoryType
from co.deability.identifier.errors.TooManyRetriesError import TooManyRetriesError
from conftest import test_path
def test_id_repository_construction():
with pytest.raises(IllegalArgumentError):
IdRepository(repository_type="aString", base_path="./")
with pytest.raises(BadRepositoryError):
IdRepository(
repository_type=IdRepositoryType.WRITER,
base_path="./foobar",
)
IdRepository._writer = None
repository: IdRepository = IdRepository(
repository_type=IdRepositoryType.WRITER,
base_path=test_path,
)
assert repository._writer == repository
assert len(repository._readers) <= config.MAX_READER_COUNT
IdRepository._readers = []
repository = IdRepository(
repository_type=IdRepositoryType.READER,
base_path=test_path,
)
assert IdRepository._readers[0] == repository
os.environ["ID_MAX_READER_COUNT"] = "1"
another_repository: IdRepository = IdRepository(
repository_type=IdRepositoryType.READER,
base_path=test_path,
)
assert IdRepository._readers[0] == repository
def test_validity_tester():
an_id = "aShortId" # too short
assert not _is_valid(an_id)
an_id = "x" * 32 # illegal characters
assert not _is_valid(an_id)
an_id = "a" * 32
assert _is_valid(an_id)
def test_id_generator():
an_id = _generate_id()
assert re.compile("[a-f0-9]{32}").match(an_id)
def test_repository_create_id_fails_with_illegal_retries_values(
mock_id_repository_writer,
):
with pytest.raises(IllegalArgumentError):
mock_id_repository_writer.create_id(retries=None)
with pytest.raises(IllegalArgumentError):
mock_id_repository_writer.create_id(retries="foobar")
with pytest.raises(IllegalArgumentError):
mock_id_repository_writer.create_id(retries=-1)
def test_repository_calculates_paths_correctly(mock_id_repository_writer):
an_id: str = "abc"
assert Path(test_path, "a/b/c") == mock_id_repository_writer._path_calculator(
identifier=an_id
)
def test_repository_creates_ids(mock_id_repository_writer):
an_id = mock_id_repository_writer.create_id()
assert (
an_id
and len(an_id) == 32
and Path(mock_id_repository_writer._path_calculator(an_id)).exists()
)
def test_create_id_with_bad_retries_raises_error(mock_id_repository_writer):
with pytest.raises(IllegalArgumentError):
mock_id_repository_writer.create_id(retries=None)
with pytest.raises(IllegalArgumentError):
mock_id_repository_writer.create_id(retries=-1)
def test_create_id_with_failed_retries_raises_correct_error(mock_id_repository_writer):
holder = mock_id_repository_writer._serialize
mock_id_repository_writer._serialize = lambda identifier: False
with pytest.raises(TooManyRetriesError):
mock_id_repository_writer.create_id(retries=0)
mock_id_repository_writer._serialize = holder
def test_repository_checks_identifier_existence_correctly(mock_id_repository_writer):
an_id = mock_id_repository_writer.create_id()
assert mock_id_repository_writer.exists(identifier=an_id)
with pytest.raises(IllegalIdentifierError):
mock_id_repository_writer.exists(identifier="foobar")
| 4,409 |
tests/cardinality/test_probabilistic_counter.py
|
victox5/pdsa
| 0 |
2023646
|
import pytest
from math import sqrt
from pdsa.cardinality.probabilistic_counter import ProbabilisticCounter
def test_init():
pc = ProbabilisticCounter(10)
assert pc.sizeof() == 40, "Unexpected size in bytes"
with pytest.raises(ValueError) as excinfo:
pc = ProbabilisticCounter(0)
assert str(excinfo.value) == 'At least one simple counter is required'
def test_repr():
pc = ProbabilisticCounter(10)
assert repr(pc) == (
"<ProbabilisticCounter (length: 320, num_of_counters: 10)>")
def test_add():
pc = ProbabilisticCounter(10)
for word in ["test", 1, {"hello": "world"}]:
pc.add(word)
def test_count():
num_of_counters = 256
pc = ProbabilisticCounter(num_of_counters)
std = 0.78 / sqrt(num_of_counters)
errors = []
boundary = 20 * num_of_counters
cardinality = 0
for i in range(10000):
cardinality += 1
element = "element_{}".format(i)
pc.add(element)
if cardinality < boundary:
# For small cardinalities we need to use correction,
# that we will test in another case.
continue
error = (cardinality - pc.count()) / float(cardinality)
errors.append(error)
avg_error = abs(sum(errors)) / float(len(errors))
assert avg_error >= 0
assert avg_error <= std
def test_count_small():
num_of_counters = 256
pc = ProbabilisticCounter(
num_of_counters, with_small_cardinality_correction=True)
std = 0.78 / sqrt(num_of_counters)
errors = []
cardinality = 0
for i in range(1000):
cardinality += 1
element = "element_{}".format(i)
pc.add(element)
error = (cardinality - pc.count()) / float(cardinality)
errors.append(error)
avg_error = abs(sum(errors)) / float(len(errors))
assert avg_error >= 0
assert avg_error <= 2 * std # Even with correction, still not so good
def test_len():
pc = ProbabilisticCounter(10)
assert len(pc) == 320
| 2,020 |
backend/autoControllers/inclinationController.py
|
MarioBartolome/GII_0_17.02_SNSI
| 1 |
2022615
|
from backend.autoControllers import abcControllerPID
from typing import Dict, List, Tuple
class InclinationController(abcControllerPID.abcControllerPID):
def __init__(self,
upper_limit: int = 1600,
lower_limit: int = 1400,
kP: float = 0.04,
kI: float = 0.0,
kD: float = 0.01
):
super(InclinationController, self).__init__(kP, kI, kD, upper_limit, lower_limit)
self.setActualRAWRC(1500)
    def setMeasurement(self, measurement: Tuple[Dict, int]):
"""
Sets the real measurement.
:param measurement: The measurement[0] taken from Accel/Gyro and the desired target, measurement[1]
"""
self._measurement = measurement[0]['x']
self._target = measurement[1]
def getChannels(self) -> List:
"""
Returns the channels values as a list.
:return: a list with the RAW RC values.
"""
return [self.computePID()]
def getLock(self):
return None
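# A minimal usage sketch (an assumption, not part of the original module); it
# relies on the abcControllerPID base class providing computePID() and
# setActualRAWRC() as used above. The measurement is a pair: an accel/gyro
# reading dict and the desired target angle.
if __name__ == '__main__':
    controller = InclinationController()
    controller.setMeasurement(({'x': 3.2}, 0.0))  # current 3.2, target 0.0
    print(controller.getChannels())  # one RAW RC value from the PID loop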
| 939 |
token_auth/views.py
|
josl/COMPARE-Uploader-Docker
| 0 |
2023049
|
from django.views.generic.base import View
from rest_framework_jwt.settings import api_settings as settings
from django import forms
import jwt
import json
try:
from django.utils.encoding import smart_text
except ImportError:
from django.utils.encoding import smart_unicode as smart_text
from uploader.settings import SECRET_KEY
from django.contrib.auth.models import User
from calendar import timegm
from datetime import datetime, timedelta
from chunked_upload.response import Response
# Create your views here.
class Refresh(View):
def _check_payload(self, token):
# Check payload valid (based off of JSONWebTokenAuthentication,
# may want to refactor)
try:
print token
print SECRET_KEY
payload = jwt.decode(token, SECRET_KEY)
print payload
except jwt.ExpiredSignature:
msg = 'Signature has expired'
raise forms.ValidationError(msg)
except jwt.DecodeError:
msg = 'Error decoding signature'
raise forms.ValidationError(msg)
return payload
def _check_user(self, payload):
username = payload.get('username')
if not username:
msg = 'Invalid payload'
raise forms.ValidationError(msg)
# Make sure user exists
try:
user = User.objects.get_by_natural_key(username)
except User.DoesNotExist:
msg = 'User doesn\'t exist.'
raise forms.ValidationError(msg)
if not user.is_active:
msg = 'User account is disabled'
raise forms.ValidationError(msg)
return user
def refresh(self, token):
payload = self._check_payload(token=token)
user = self._check_user(payload=payload)
print 'lalla'
print user
# Get and check 'orig_iat'
orig_iat = payload.get('orig_iat')
print orig_iat
if orig_iat:
# Verify expiration
refresh_limit = settings.JWT_REFRESH_EXPIRATION_DELTA
print refresh_limit
if isinstance(refresh_limit, timedelta):
refresh_limit = (refresh_limit.days * 24 * 3600 +
refresh_limit.seconds)
expiration_timestamp = orig_iat + int(refresh_limit)
now_timestamp = timegm(datetime.utcnow().utctimetuple())
print now_timestamp
if now_timestamp > expiration_timestamp:
msg = 'Refresh has expired'
raise forms.ValidationError(msg)
else:
msg = 'orig_iat field is required'
raise forms.ValidationError(msg)
new_payload = {
'user_id': user.pk,
'email': user.email,
'username': user.username,
'exp': datetime.utcnow() + settings.JWT_EXPIRATION_DELTA
}
print new_payload
if settings.JWT_ALLOW_REFRESH:
new_payload['orig_iat'] = timegm(
datetime.utcnow().utctimetuple()
)
new_payload['orig_iat'] = orig_iat
return {
'token': jwt.encode(new_payload, SECRET_KEY),
'user': user.username
}
def post(self, request):
request_json = json.loads(smart_text(request.body))
print request_json
token = request_json['token']
new_payload = self.refresh(token)
return Response(json.dumps(new_payload))
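# Worked example of the refresh-window check above (illustrative values):
# with orig_iat = 1600000000 and JWT_REFRESH_EXPIRATION_DELTA = 7 days,
# refresh_limit becomes 7 * 24 * 3600 = 604800 seconds, so refreshes are
# accepted until expiration_timestamp = 1600604800; any now_timestamp
# beyond that raises 'Refresh has expired'.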
| 3,467 |
negative-affect/main_nps_allsubjs.py
|
bjsmith/reversallearning
| 0 |
2024079
|
from pain_regression_allsubjs import *
rlPain=RLPain()
rlPain.fMRI_dir='/Users/benjaminsmith/Dropbox/joint-modeling/reversal-learning/behavioral-analysis/data/preprocessed'
rlPain.onset_dir='/Users/benjaminsmith/Dropbox/joint-modeling/reversal-learning/behavioral-analysis/data/runfiles'
#rlPain.decoder_file='/Users/benjaminsmith/GDrive/joint-modeling/reversal-learning/behavioral-analysis/data/pain_decoder.pkl'
rlPain.nps_map_filepath = '/Users/benjaminsmith/Dropbox/joint-modeling/wager-pain-dataset/NPS_share/weights_NSF_grouppred_cvpcr.img'
rlPain.regressor_output_filepathprefix = '/Users/benjaminsmith/Dropbox/joint-modeling/reversal-learning/behavioral-analysis/data/batch1/'
rlPain.get_wager_nps_map()
rlPain.onset_file_version='20171020T012118'
#sid=113
#rid=2
#nifti_file = rlPain.fMRI_dir + '/sub' + str(sid) + 'ReversalLearningPunishrun' + str(rid)
#onset_file = rlPain.onset_dir + '/runfiledetail20170820T001729_s' + str(sid) + '_punishment_r' + str(
# rid) + '.txt'
#print(rlPain.get_trialtype_pain_regressors(nifti_file,onset_file))
rlPain.process_detailed_regressors()
#rlPain.process_all_punishment_subjects()
| 1,138 |
setup.py
|
krishnadasmallya/eero-client
| 1 |
2022911
|
from setuptools import setup, find_packages
import os.path
import re
VERSIONFILE = "eero/version.py"
verstrline = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
README_FILE = next(
r
for r in ['./README.txt', './README']
if os.path.isfile(r)
)
setup(name='eero-km',
version=verstr,
description="Extract eero network details and insights",
long_description=open(README_FILE, "r").read(),
long_description_content_type="text/plain",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
keywords='eero-km',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/krishnadasmallya/eero-client',
license='MIT License',
packages=find_packages(exclude=[
'ez_setup', 'example', 'tests', 'external']),
include_package_data=True,
zip_safe=False,
install_requires=[
'requests',
'click',
'pandas',
'PyYAML'
]
)
| 1,505 |
PDPTW/code/generat_example.py
|
banbanfen/Extension_of_the_vehicle_routing_problem
| 4 |
2023466
|
# -*- coding: utf-8 -*-
# @author: wangqianlong
# @email: <EMAIL>
# @date: 2021/08/07
# This module defines the model's data structures and instance generator
import pandas as pd
import random
import math
class Data_parameter():
def __init__(self,path_of_file):
        # number of orders
        self.number_of_orders = 10  # count
        # number of trucks
        self.number_of_trucks = 2  # vehicles
        # radius of the demand distribution; the diameter is 2*R
        self.R = 3  # kilometers
        # length of each demand time window
        self.TWL = 0.5  # hours
        # half the fluctuation range of the time-window length
        self.TWL_wave = 0.1  # hours
        # upper end of the planning horizon; the lower end is 0
        self.Tmax = 4  # hours
        # lower bound on demand weight
        self.demand_min = 0.5  # kilograms
        # upper bound on demand weight
        self.demand_max = 0.75  # kilograms
        # pickup service time
        self.time_of_pickup = 2 / 60  # hours
        # delivery service time
        self.time_of_delivery = 3 / 60  # hours
        # output location for generated instances
        self.outpath = path_of_file
# order generation
def orders_generate(data_parameter):
orders_title = ['OID', 'Pickup', 'Delivery']
orders_data = [[o, o, o + data_parameter.number_of_orders] for o in range(1, (data_parameter.number_of_orders + 1))]
orders = pd.DataFrame(orders_data, columns=orders_title)
orders_to_path = data_parameter.outpath + '\\orders-%s.csv' % data_parameter.number_of_orders
orders.to_csv(orders_to_path, index=False)
# generator for the pickup/delivery pair of order o
def o_Pickup_Delivery(o,data_parameter):
    # time window of order o
a = random.uniform(0,data_parameter.Tmax - data_parameter.TWL)
    # the time-window length fluctuates within a given range
o_TWL = data_parameter.TWL + random.uniform(-data_parameter.TWL_wave,data_parameter.TWL_wave)
b = a + o_TWL
    # demand weight (also fluctuates within a range)
dm = random.uniform(data_parameter.demand_min, data_parameter.demand_max)
    # coordinates and attributes of the pickup node
o_Pickup_r = random.uniform(0,data_parameter.R)
o_Pickup_sita = random.uniform(0,2 * math.pi)
o_Pickup = [o,'pickup%s'%o,o_Pickup_r * math.cos(o_Pickup_sita),o_Pickup_r * math.sin(o_Pickup_sita),a,b,dm,data_parameter.time_of_pickup]
    # coordinates and attributes of the delivery node
o_Delivery_r = random.uniform(0,data_parameter.R)
o_Delivery_sita = random.uniform(0,2 * math.pi)
o_Delivery = [o + data_parameter.number_of_orders,'delivery%s'%o,o_Delivery_r * math.cos(o_Delivery_sita),o_Delivery_r * math.sin(o_Delivery_sita),a,b,-dm,data_parameter.time_of_delivery]
return o_Pickup,o_Delivery
# node generation: pickup nodes are indexed 1..n and delivery nodes n+1..2n
def nodes_generate(data_parameter):
P = []
D = []
trucks = []
for o in range(1, (data_parameter.number_of_orders + 1)):
o_Pickup, o_Deliver = o_Pickup_Delivery(o,data_parameter)
P.append(o_Pickup)
D.append(o_Deliver)
    # generate truck nodes, indexed 2n+1..2n+m
for truck in range(1, (data_parameter.number_of_trucks + 1)):
Nodes_truck_r = random.uniform(0, data_parameter.R)
Nodes_truck_sita = random.uniform(0, 2 * math.pi)
Node_truck = [2 * data_parameter.number_of_orders + truck,'truck%s' % truck, Nodes_truck_r * math.cos(Nodes_truck_sita),
Nodes_truck_r * math.sin(Nodes_truck_sita), 0, data_parameter.Tmax, 0, 0]
trucks.append(Node_truck)
Nodes_title = ['ID', 'ID_name', 'x', 'y', 'a', 'b', 'dm', 'st']
Nodes_data = P + D + trucks
Nodes = pd.DataFrame(Nodes_data, columns=Nodes_title)
Nodes_to_path = data_parameter.outpath + '\\Nodes-%s-%s.csv' % (data_parameter.number_of_orders, data_parameter.number_of_trucks)
Nodes.to_csv(Nodes_to_path, index=False)
def log_generate(data_parameter):
    txt_path = data_parameter.outpath + '\\log-%s-%s-%s.txt' % (data_parameter.number_of_orders, data_parameter.number_of_trucks, data_parameter.R)
with open(txt_path, 'w') as f:
        text = ['number of orders\t', 'number of trucks\t', 'node radius\t', 'order time-window length\t', 'time-window fluctuation length\t', 'total time horizon\t', 'demand lower bound\t', 'demand upper bound\t', 'pickup service time\t', 'delivery service time\t']
text_value = [data_parameter.number_of_orders, data_parameter.number_of_trucks, data_parameter.R, data_parameter.TWL, 2 * data_parameter.TWL_wave, data_parameter.Tmax, data_parameter.demand_min, data_parameter.demand_max, data_parameter.time_of_pickup,data_parameter.time_of_delivery]
for text_index in range(len(text)):
f.write(text[text_index] + str(text_value[text_index]) + '\n')
# instance generation
def data_generate(data_parameter):
orders_generate(data_parameter)
nodes_generate(data_parameter)
log_generate(data_parameter)
if __name__ == '__main__' :
path_of_file = 'C:\\Users\\Administrator\\Desktop\\公众号\\模型复现\\2006-Stefan\\test_data'
data_parameter = Data_parameter(path_of_file)
data_generate(data_parameter)
| 4,489 |
automatewithpython/.snippets/nesteddictionary.py
|
Coalemus/Python-Projects
| 0 |
2023748
|
#!/usr/bin/env python3
allguests = {'Alice': {'apples': 5,'pretzels': 12},
'Bob': {'ham sandwiches': 3, 'apples': 2},
'Carol': {'cups': 3, 'apple pies': 1}}
def totalbrought(guests, item):
numbrought = 0
for k, v in guests.items():
numbrought = numbrought + v.get(item, 0)
return numbrought
print("Number of things brought:")
print(' - apples ' + str(totalbrought(allguests, 'apples')))
print(' - cups ' + str(totalbrought(allguests, 'cups')))
print(' - cakes ' + str(totalbrought(allguests, 'cakes')))
print(' - ham sandwiches ' + str(totalbrought(allguests, 'ham sandwiches')))
print(' - apple pies ' + str(totalbrought(allguests, 'apple pies')))
| 733 |
Archive/aupyom-master/tests/test_import.py
|
MatthijsBrem/wheelchair-design-platform
| 0 |
2022753
|
import unittest
class ImportTestCase(unittest.TestCase):
def test_import(self):
import aupyom
| 107 |
fairsharing_proxy/api_client.py
|
ds-wizard/fairsharing-proxy
| 0 |
2023704
|
import asyncio
import httpx
from fairsharing_proxy.config import ProxyConfig
from fairsharing_proxy.model import Token, Record, SearchQuery
_NEED_LOGIN_MESSAGE = 'please login before continuing'
def _headers_with(token: Token):
return {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': token.auth_header,
}
class FAIRSharingUnauthorizedError(Exception):
    CONTENT = {
        'message': _NEED_LOGIN_MESSAGE
    }
    MESSAGE = _NEED_LOGIN_MESSAGE
class FAIRSharingClient:
def __init__(self, cfg: ProxyConfig):
self.api = cfg.fairsharing.api
self.url_sign_in = f'{self.api}/users/sign_in'
self.url_list = f'{self.api}/fairsharing_records'
self.url_search = f'{self.api}/search/fairsharing_records'
@staticmethod
def _check_response(response: httpx.Response):
        # FAIRSharing does not use HTTP status codes for this, so we need to
        # check the human-readable message string instead
if response.is_success:
msg = response.json().get('message', '').lower()
if msg == _NEED_LOGIN_MESSAGE:
raise FAIRSharingUnauthorizedError()
response.raise_for_status()
async def client_login(
self, client: httpx.AsyncClient,
username: str, password: str,
) -> Token:
response = await client.post(
url=self.url_sign_in,
json={
'user': {
'login': username,
'password': password,
}
}
)
response.raise_for_status()
result = response.json()
return Token(result)
async def login(self, username: str, password: str) -> Token:
async with httpx.AsyncClient() as client:
return await self.client_login(
client=client,
username=username,
password=password,
)
async def client_search(
self, client: httpx.AsyncClient,
query: SearchQuery, token: Token,
) -> list[Record]:
# TODO: page size? page number?
response = await client.post(
url=self.url_search,
params=query.params,
headers=_headers_with(token),
)
self._check_response(response)
result = response.json().get('data', [])
return [rec for rec in (Record(**item) for item in result)
if rec.is_valid()]
async def search(
self, query: SearchQuery, token: Token,
) -> list[Record]:
async with httpx.AsyncClient() as client:
return await self.client_search(client, query, token)
async def client_list_records_url(
self, client: httpx.AsyncClient, url: str, token: Token,
) -> list[Record]:
response = await client.get(
url=url,
headers=_headers_with(token),
)
self._check_response(response)
result = response.json().get('data', [])
return [rec for rec in (Record(**item) for item in result)
if rec.is_valid()]
async def client_list_records(
        self, client: httpx.AsyncClient, token: Token,
        page_size=25, page_number=1,
) -> list[Record]:
return await self.client_list_records_url(
client=client,
token=token,
url=f'{self.url_list}'
f'?page[number]={page_number}'
f'&page[size]={page_size}'
)
async def list_records_url(
self, url: str, token: Token,
) -> list[Record]:
async with httpx.AsyncClient() as client:
return await self.client_list_records_url(
client=client,
url=url,
token=token,
)
async def list_records(
self, token: Token, page_size=1, page_number=25,
) -> list[Record]:
async with httpx.AsyncClient() as client:
return await self.client_list_records(
client=client,
token=token,
page_size=page_size,
page_number=page_number,
)
async def client_list_records_all(
self, client: httpx.AsyncClient, token: Token,
page_size=500, timeout=25, page_delay=None,
) -> list[Record]:
next_url = f'{self.url_list}?page[number]=1&page[size]={page_size}'
records = list() # type: list[Record]
while next_url is not None:
response = await client.get(
url=next_url,
headers=_headers_with(token),
timeout=timeout,
)
self._check_response(response)
result = response.json().get('data', [])
records.extend((rec for rec in (Record(**item) for item in result)
if rec.is_valid()))
next_url = response.json().get('links', {}).get('next', None)
if page_delay is not None:
await asyncio.sleep(page_delay)
return records
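# A minimal usage sketch (an assumption): the ProxyConfig and SearchQuery
# construction details are not shown here, and the credentials are
# placeholders.
async def example_search(cfg: ProxyConfig, query: SearchQuery) -> list[Record]:
    client = FAIRSharingClient(cfg)
    token = await client.login('user@example.org', 'secret')
    return await client.search(query, token)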
| 5,135 |
torchflare/experiments/commons.py
|
Atharva-Phatak/torchflare
| 86 |
2023318
|
from enum import Enum
class EVENTS(Enum):
"""Events that are used by experiment class."""
ON_EXPERIMENT_START = "on_experiment_start"
ON_EXPERIMENT_END = "on_experiment_end"
ON_EPOCH_START = "on_epoch_start"
ON_EPOCH_END = "on_epoch_end"
ON_LOADER_START = "on_loader_start"
ON_LOADER_END = "on_loader_end"
ON_BATCH_START = "on_batch_start"
ON_BATCH_END = "on_batch_end"
TRAIN_ATTRS = ["nn_module", "optimizer", "criterion"]
ATTR_TO_INTERNAL = {"nn_module": "model", "optimizer": "optimizer", "criterion": "criterion"}
__all__ = ["TRAIN_ATTRS", "ATTR_TO_INTERNAL", "EVENTS"]
| 616 |
crawler.py
|
0ssifrage/tang300
| 4 |
2023652
|
# -*- coding: utf-8 -*-
import json
import re
import urllib
import urllib2
def main():
iurl = 'https://zh.wikisource.org/zh-hant/%E5%94%90%E8%A9%A9%E4%B8%89%E7%99%BE%E9%A6%96'
content = urllib2.urlopen(iurl).read().decode('utf8')
re_url = re.compile('<li>([^<]*?) <a href="(.*?)".*?>(.*?)</li>')
ps = re_url.findall(content)
res = []
# t_re = re.compile('dth:50%;"><b>(.*?)</b>')
# a_re = re.compile(u'作者:</span>.*?>(.*?)</a>')
c_re = re.compile('poem">\s*?<p>([\W\w]*?)</p>')
rm_re = re.compile('</*span.*?>|<small[\W\w]*?/small>|\s|</*a>|</*sub>')
urlbase = "http://diy.fwg.hk/download/chi/learnandteach/software/poem300/"
i = 0
for pp in ps:
i += 1
if i % 10 == 0:
print i
url = "https://zh.wikisource.org" + pp[1]
# print url
try:
content = urllib2.urlopen(url).read().decode('utf8')
# t = t_re.findall(content)[0]
# a = a_re.findall(content)[0]
c = rm_re.sub('', c_re.findall(content)[0]).split('<br/>')
res.append([url, rm_re.sub('', pp[2]), pp[0], c])
except:
print url
f = open('./tang300.v0.json', 'w')
s = json.dumps(res, ensure_ascii=False, indent=2)
# print s
print len(res)
f.write(s.encode('utf-8'))
f.close()
main()
| 1,341 |
utils/time_compute.py
|
Wanggcong/SolutionSimilarityLearning
| 7 |
2023436
|
import time
import math
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
| 169 |
script.py
|
edyirdaw/emotion-recognition-service
| 7 |
2022635
|
import base64
import tempfile
import grpc
import time
from concurrent import futures
from demo import load_model_from_args, start_image_demo
from service_spec.EmotionService_pb2 import RecognizeResponse, BoundingBox
from service_spec.EmotionService_pb2_grpc import EmotionRecognitionServicer, add_EmotionRecognitionServicer_to_server
class Args:
"""
With this class, we call the function to load the model
"""
def __init__(self):
self.json = 'models/models/model-ff.json'
self.weights = 'models/models/model-ff.h5'
self.model_input = 'image'
self.snet = False
self.gui = False
self.path = ''
self.image = ''
class Model:
def __init__(self):
self.args = Args()
self.model = load_model_from_args(self.args)
def predict(self):
return start_image_demo(self.args, self.model)
class EmotionRecognitionServicer(EmotionRecognitionServicer):
def classify(self, request, context):
if request.image is None:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details("Image is required")
            return RecognizeResponse()
if request.image == '':
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details("Image is empty")
            return RecognizeResponse()
# TODO additional checks for valid magic parameters for the input image is required.
if request.image_type is None:
pass
binary_image = base64.b64decode(request.image)
bounding_boxes, emotions = self._classify(binary_image)
response = RecognizeResponse()
for d, e in zip(bounding_boxes, emotions):
response.faces.add(bounding_box=BoundingBox(x=d.left(), y=d.top(), w=d.right() - d.left(), h=d.bottom() - d.top()),
emotion=e)
return response
def _classify(self, image):
import tensorflow as tf
from keras import backend as K
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
sess = tf.Session(config=tf_config)
K.set_session(sess)
model = Model()
# Requires us to save the file to disk
f = tempfile.NamedTemporaryFile()
f.write(image)
#f.close()
# close vs. flush because flush apparently won't work on windows
model.args.path = f.name
print(f.name)
bounding_boxes, emotions = model.predict()
#os.unlink(f.name) # cleanup temp file
del model
sess.close()
return bounding_boxes, emotions
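# A minimal client sketch (an assumption, run as a separate script): it
# presumes the generated proto also defines a RecognizeRequest message with
# 'image' and 'image_type' fields and an EmotionRecognitionStub class --
# neither is imported above, so the names and field types may differ in
# service_spec.
def example_client(image_path, host='localhost:8001'):
    from service_spec.EmotionService_pb2 import RecognizeRequest  # assumed name
    from service_spec.EmotionService_pb2_grpc import EmotionRecognitionStub  # assumed name
    with open(image_path, 'rb') as f:
        payload = base64.b64encode(f.read())
    stub = EmotionRecognitionStub(grpc.insecure_channel(host))
    response = stub.classify(RecognizeRequest(image=payload, image_type='jpg'))
    for face in response.faces:
        print(face.bounding_box, face.emotion)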
def create_server(port=8001):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
add_EmotionRecognitionServicer_to_server(EmotionRecognitionServicer(), server)
server.add_insecure_port('[::]:' + str(port))
return server
if __name__ == '__main__':
server = create_server()
server.start()
_ONE_DAY = 60*60*24
try:
while True:
time.sleep(_ONE_DAY)
except KeyboardInterrupt:
server.stop(0)
| 3,110 |
tests/test_angle_tools.py
|
Sunmish/Aegean
| 40 |
2024004
|
#! /usr/bin/env python
"""
Test the angle_tools module
"""
from __future__ import print_function
__author__ = '<NAME>'
from AegeanTools import angle_tools as at
from astropy.coordinates import Angle
import astropy.units as u
import numpy as np
from numpy.testing import assert_approx_equal, assert_almost_equal
def test_ra2dec():
"""Test ra2dec against astropy conversion"""
# Test against the astropy calculations
for ra in ['14:21:45.003', '-12 04 22', '-00 01 12.003']:
ans = at.ra2dec(ra)
desired = Angle(ra, unit=u.hourangle).hour * 15
assert_approx_equal(ans, desired, "{0} != {1}".format(ans, desired))
def test_dec2dec():
"""Test dec2dec against astropy conversion"""
# Test against the astropy calculations
for dec in ['+14:21:45.003', '-99 04 22', '-00 01 23.456', '00 01']:
ans = at.dec2dec(dec)
desired = Angle(dec, unit=u.degree).degree
assert_approx_equal(ans, desired, err_msg="{0} != {1}".format(ans, desired))
def test_dec2dms():
"""Test conversion of dec to DMS strings"""
for dec, dstr in [(-0.12345, "-00:07:24.42"),
(80.0, "+80:00:00.00"),
(np.nan, "XX:XX:XX.XX"),
(np.inf, "XX:XX:XX.XX")]:
ans = at.dec2dms(dec)
if not ans == dstr:
raise AssertionError("{0} != {1}".format(ans, dstr))
def test_dec2hms():
"""Test conversion of RA to HMS strings"""
for dec, dstr in [(-15, "23:00:00.00"),
(15, "01:00:00.00"),
(23.5678, "01:34:16.27"),
(np.nan, "XX:XX:XX.XX"),
(np.inf, "XX:XX:XX.XX")]:
ans = at.dec2hms(dec)
if not ans == dstr:
raise AssertionError("{0} != {1}".format(ans, dstr))
def test_gcd():
"""Test the calculation of great circle distance"""
for ra1, dec1, ra2, dec2, dist in [(0, 0, 0, 1, 1), # simple 1 deg offset
(0, -90, 180, 90, 180), # pole to pole
(120, 89, 300, 89, 2.), # over the pole
(0, 0, 179.99999, 0, 179.99999), # distances very close to 180deg
(12.0, -90, 45, -90, 0) # at the south pole
]:
ans = at.gcd(ra1, dec1, ra2, dec2)
assert_almost_equal(ans, dist, err_msg="{0:5.2f},{1:5.2f} <-> {2:5.2f},{3:5.2f} == {4:g} != {5:g}".format(ra1, dec1, ra2, dec2, dist, ans))
def test_bear():
"""Test bearing calculation"""
for ra1, dec1, ra2, dec2, bear in [(0, 0, 0, 1, 0),
(0, 0, 180, 90, 0),
(0, 0, 179.99999, 0, 90),
(0, 0, 180.00001, 0, -90)
]:
ans = at.bear(ra1, dec1, ra2, dec2)
assert_almost_equal(ans, bear, err_msg="{0:5.2f},{1:5.2f} <-> {2:5.2f},{3:5.2f} == {4:g} != {5:g}".format(ra1, dec1, ra2, dec2, bear, ans))
def test_translate():
"""Test the translate function"""
for (ra1, dec1), (r, theta), (ra2, dec2) in [((0, 0), (1, 0), (0, 1)),
((45, 89.75), (0.5, 0), (225, 89.75)), # over the pole
((12, -45), (-1, 180), (12, -44)) # negative r
]:
ans = at.translate(ra1, dec1, r, theta)
assert_almost_equal(ans, (ra2, dec2), err_msg="{0:5.2f},{1:5.2f} -> {2:g},{3:g} -> {4:5.2f},{5:5.2f} != {6:g},{7:g}".format(ra1, dec1, r, theta, ra2, dec2, *ans))
def test_dist_rhumb():
"""Test rhumb distance calculation"""
for ra1, dec1, ra2, dec2, dist in [(0, 0, 0, 1, 1),
(0, 0, 180, 0, 180)
]:
ans = at.dist_rhumb(ra1, dec1, ra2, dec2)
assert_almost_equal(ans, dist)
def test_bear_rhumb():
"""Test rhumb bearing calculation"""
for ra1, dec1, ra2, dec2, bear in [(0, 0, 0, 1, 0),
(0, 0, 180, 0, 90)
]:
ans = at.bear_rhumb(ra1, dec1, ra2, dec2)
assert_almost_equal(ans, bear)
def test_translate_rhumb():
"""Test translate along rhumb line"""
for (ra1, dec1), (r, theta), (ra2, dec2) in [((0, 0), (1, 0), (0, 1)),
((12, -45), (-1, 180), (12, -44)) # negative r
]:
ans = at.translate_rhumb(ra1, dec1, r, theta)
assert_almost_equal(ans, (ra2, dec2), err_msg="{0:5.2f},{1:5.2f} -> {2:g},{3:g} -> {4:5.2f},{5:5.2f} != {6:g},{7:g}".format(ra1, dec1, r, theta, ra2, dec2, *ans))
if __name__ == "__main__":
# introspect and run all the functions starting with 'test'
for f in dir():
if f.startswith('test'):
print(f)
globals()[f]()
| 5,008 |
python/1_basic_blockchain/blockchain.py
|
Younes-Charfaoui/Blockchain
| 13 |
2023983
|
"""Copyright [2018] [<NAME>]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Module 1 - Create a Blockchain
#importing the libraries.
import datetime
import hashlib
import json
from flask import Flask , jsonify
# Building The Blockchain Architecture.
class Blockchain:
def __init__(self):
self.chain = []
self.create_block(proof = 1, previous_hash = '0')
    # this is a generic block; we can add any fields to it
def create_block(self, proof, previous_hash):
block = { 'index' : len(self.chain) + 1,
'timestamp' : str(datetime.datetime.now()),
'proof' : proof,
'previous_hash' : previous_hash }
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
    # this is the core of the blockchain: it finds the nonce for the current block that satisfies the target
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
            # the operation must not be symmetrical
hash_operation = hashlib.sha256(str(new_proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof+=1
return new_proof
# helper function to hash a block
def hash(self, block):
encoded_block = json.dumps(block, sort_keys = True).encode()
return hashlib.sha256(encoded_block).hexdigest()
# helper function to check if the blockchain is valid
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
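    # Quick illustration of the proof-of-work rule used above: a candidate
    # proof is accepted when sha256(str(proof**2 - previous_proof**2))
    # starts with '0000'. This standalone checker mirrors proof_of_work and
    # is an illustrative addition, not part of the original module.
    @staticmethod
    def check_proof_pair(previous_proof, proof):
        digest = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()
        return digest[:4] == '0000'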
# The Implementation in the web app.
# Creating web app.
app = Flask(__name__)
# Create a blockchain.
blockchain = Blockchain()
# Mining a block.
@app.route('/mine_block', methods = ['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
block = blockchain.create_block(proof, previous_hash)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash']}
return jsonify(response) , 200
# Getting Full blockchain.
@app.route('/get_chain',methods = ['GET'])
def get_chain():
response = {'chain' : blockchain.chain,
'length': len(blockchain.chain)}
return jsonify(response), 200
# Checking if the chain is valid via the helper function.
@app.route('/is_valid', methods = ['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {'message': 'Houston, we have a problem. The Blockchain is not valid.'}
return jsonify(response), 200
#Running the app
app.run(host = '0.0.0.0' , port = 5000)
| 4,183 |
mugicli/shared.py
|
mugiseyebrows/mugi-cli
| 0 |
2023458
|
import glob
import sys
import io
import subprocess
import os
import re
NUM_RX = r'([-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?)'
def glob_paths(paths):
    return _glob_paths_pred(paths, lambda path: True)
def glob_paths_files(paths):
return _glob_paths_pred(paths, lambda path: os.path.isfile(path))
def has_magic(path):
    # brackets in a path are ambiguous
if os.path.exists(path):
return False
return glob.has_magic(path) # ok
def _glob_paths_pred(paths, pred):
res = []
for path in paths:
if has_magic(path):
for item in glob.glob(path):
if pred(item):
res.append(item)
else:
if pred(path):
res.append(path)
return res
def glob_paths_dirs(paths):
res = []
for path in paths:
if has_magic(path):
for item in glob.glob(path):
if os.path.isdir(item):
res.append(item)
else:
res.append(path)
return res
def drop_last_empty_line(lines):
if lines[-1].strip() == "":
lines.pop()
"""
def print(arg, encoding='utf-8'):
if isinstance(arg, bytes):
print_bytes(arg)
elif isinstance(arg, list):
print_lines(arg, encoding)
elif isinstance(arg, str):
print_lines([arg], encoding)
"""
def print_bytes(bytes_):
sys.stdout.buffer.write(bytes_)
def print_lines(lines, encoding='utf-8'):
for line in lines:
#sys.stdout.write(line)
print_bytes(line.encode(encoding))
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def read_bytes(paths):
if isinstance(paths, list):
return _read_bytes_many(paths)
return _read_bytes_one(paths)
def _read_bytes_one(path):
if path is None:
return sys.stdin.buffer.read()
with open(path, 'rb') as f:
return f.read()
def _read_bytes_many(paths):
stdin_mode = len(paths) == 0
if stdin_mode:
return read_bytes(None)
else:
return b''.join([read_bytes(path) for path in paths])
def split_to_lines(text):
lines = text.split('\n')
line = lines[-1]
lines = [line + '\n' for line in lines]
lines[-1] = line
return lines
def read_lines(paths, encoding='utf-8', drop_last_empty_line_ = False):
if isinstance(paths, list):
return _read_lines_many(paths, encoding, drop_last_empty_line_)
return _read_lines_one(paths, encoding, drop_last_empty_line_)
def line_reader(paths, from_stdin, encoding='utf-8', drop_last_empty_line_ = False):
if from_stdin:
for i, line in enumerate(_read_lines_one(None, encoding, drop_last_empty_line_)):
yield i, line, '-'
else:
for path in paths:
for i, line in enumerate(_read_lines_one(path, encoding, drop_last_empty_line_)):
yield i, line, path
def _read_lines_many(paths, encoding='utf-8', drop_last_empty_line_ = False):
stdin_mode = len(paths) == 0
if stdin_mode:
return _read_lines_one(None, encoding, drop_last_empty_line_)
lines = []
for path in paths:
lines += _read_lines_one(path, encoding, drop_last_empty_line_)
return lines
def _read_lines_one(path, encoding='utf-8', drop_last_empty_line_ = False):
bytes_ = read_bytes(path)
lines = split_to_lines(bytes_.decode(encoding))
if drop_last_empty_line_ and lines[-1] in ['\r', '']:
lines.pop()
return lines
def read_lines_(paths, drop_last_empty_line_ = False):
lines = []
stdin_mode = len(paths) == 0
if stdin_mode:
lines = list(sys.stdin)
if drop_last_empty_line_:
drop_last_empty_line(lines)
else:
for path in paths:
with open(path, 'r', encoding='utf-8') as f:
lines_ = f.readlines()
if drop_last_empty_line_:
drop_last_empty_line(lines_)
lines += lines_
return lines
def run(args, cwd = None):
shell = (args[0] in ['type', 'echo', 'copy']) or '|' in args
subprocess.run(args, shell=shell, cwd=cwd)
def index_of_int(args):
for i, arg in enumerate(args):
if re.match('^-[0-9]+$', arg):
return i
def parse_args(short, long, short_val, long_val, args):
opts = {k: False for k in short + long}
for n in short_val + long_val:
opts[n] = None
while len(args) > 0:
arg = args[0]
if arg.startswith('--') and arg[2:] in long:
opts[arg[2:]] = True
elif arg.startswith('--') and arg[2:] in long_val:
opts[arg[2:]] = args[1]
args.pop(0)
elif arg.startswith('-') and arg[1:] in short_val:
opts[arg[1:]] = args[1]
args.pop(0)
elif arg.startswith('-') and all([c in short for c in arg[1:]]):
for c in arg[1:]:
opts[c] = True
else:
return opts, args
args.pop(0)
return opts, args
def print_utf8(s, end=b'\n'):
if not isinstance(end, bytes):
end = end.encode('utf-8')
sys.stdout.buffer.write(s.encode('utf-8') + end)
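# A minimal usage sketch of parse_args above: '-n' takes a value, '-v' and
# '--verbose' are flags, and anything unrecognized stops parsing and is
# returned untouched.
if __name__ == '__main__':
    opts, rest = parse_args(
        short=['v'], long=['verbose'],
        short_val=['n'], long_val=['name'],
        args=['-v', '-n', '3', 'file.txt'])
    print(opts)  # {'v': True, 'verbose': False, 'n': '3', 'name': None}
    print(rest)  # ['file.txt']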
| 5,385 |
2015/Day 6-1.py
|
exp111/Advent-Of-Code
| 1 |
2022618
|
with open('Day 6 - input', 'r') as f:
lights = [[False for _ in range(1000)] for _ in range(1000)]
for instruction in f:
instruction = instruction.split()
if instruction[0] == 'turn':
if instruction[1] == 'on':
top_left = tuple(int(i) for i in instruction[2].split(','))
bottom_right = tuple(int(i) for i in instruction[4].split(','))
for i in range(top_left[0], bottom_right[0]+1):
for j in range(top_left[1], bottom_right[1]+1):
lights[i][j] = True
else:
top_left = tuple(int(i) for i in instruction[2].split(','))
bottom_right = tuple(int(i) for i in instruction[4].split(','))
for i in range(top_left[0], bottom_right[0]+1):
for j in range(top_left[1], bottom_right[1]+1):
lights[i][j] = False
else:
top_left = tuple(int(i) for i in instruction[1].split(','))
bottom_right = tuple(int(i) for i in instruction[3].split(','))
for i in range(top_left[0], bottom_right[0]+1):
for j in range(top_left[1], bottom_right[1]+1):
lights[i][j] = (False if lights[i][j] else True)
lights_on = 0
for i in range(1000):
for j in range(1000):
lights_on += 1 if lights[i][j] else 0
print(lights_on)
| 1,430 |
compsci courses/CPSC231 - Intro to CompSci in Python/assignments/as4/python_implementation/manager.py
|
q-omar/UofC
| 1 |
2023039
|
#<NAME>
#ID 10086638
#TA: <NAME>
#T03
#v1.60 (last modified 4:39pm, June 26, 2017)
#This file, in conjunction with Target.py and Pursuer.py, creates objects that interact with each other.
#The Target object generates a user-entered probability of a true/false event. The Pursuer also
#generates a true/false event that is initially set to 50/50. After each interaction, the Pursuer
#adjusts its probability to produce better matching results. A final statistic report is then displayed.
#Limitations: the ctrl+c break-out-of-loop command is handled by the exception and so
#it doesn't break the loop, which can make it a little annoying to debug the functions when needed.
#I also had trouble getting a globals.py named-constants file that could be read across all files.
#It would be convenient to have a separate named-constants file, but I tried that and couldn't
#get it to work, so I copy-pasted my named constants at the top of all files.
from Target import *
from Pursuer import *
LOWER_BOUND_RANDOM = 0  # I tried to create a globals.py file with all the named constants
UPPER_BOUND_RANDOM = 100  # so that all files could use it simultaneously, but couldn't
COUNT_STEP = 1  # get it to work for some reason, so I just repeated the starting constants
STARTING_COUNTER = 0  # in each file
def interactionsChecker(): #this function checks user input by recursion
try:
interactions=int(input("Enter amount of dates (1 or more): "))
if interactions<1:
print("Number must be greater than 0, try again!")
interactions=interactionsChecker()
except:
print("Non numerical input - Try again!")
interactions=interactionsChecker()
return(interactions)
def probabilityChecker():#this function checks user input by recursion
aTarget=Target()
try:
aTarget.xIntProb=int(input("Enter the probability of x (0 - 100): "))
if aTarget.xIntProb<LOWER_BOUND_RANDOM or aTarget.xIntProb>UPPER_BOUND_RANDOM:
print("Number must be between 0 - 100, try again!")
aTarget.xIntProb=probabilityChecker()
except:
print("Non numerical input - Try again!")
aTarget.xIntProb=probabilityChecker()
return(aTarget.xIntProb)
def start():
aTarget=Target() #initializes both objects
aPursuer=Pursuer()
    interactions = interactionsChecker()  # calling these two functions to validate user input
aTarget.xIntProb=probabilityChecker()
interactionsCount = STARTING_COUNTER
numTotMatches=STARTING_COUNTER
while (interactionsCount<interactions): #runs while interactions are all complete
targXBhvr=aTarget.behaviorGeneration() #generates Target behavior, returns it is variable to pass
xCount,yCount=aTarget.tallyTarget() #tallies results each loop, returns it as variable to pass
aPursuer.behaviorGeneration() #generates Pursuer behavior
aPursuer.tallyPursuer() #tally results each loop
aPursuer.tallySuccessful(targXBhvr) #checks each interaction, tallies successful ones
aPursuer.probabilityShift() #calls shifting probability function from Pursuer to adjust x based on successful interactions
interactionsCount=interactionsCount+1 #update loop control
aPursuer.displayEnd(interactions,xCount,yCount) #finalresults after loop done
start()
| 3,315 |
exercise_3/find_usernames.py
|
DJO3/regex_grounds
| 2 |
2022924
|
import sys
import requests
import re
# Parses all URLs and returns a sorted list of all unique usernames (@handles) found.
def find_usernames(urls):
names = []
names_lower = []
for url in urls:
response = requests.get(url)
txt = response.text
found_names = re.findall(r'@[a-zA-Z0-9_]+\b', txt)
for name in found_names:
if name not in names and name.lower() not in names_lower:
names.append(name)
names_lower.append(name.lower())
return sorted(names)
if __name__ == "__main__":
urls = sys.argv[1:]
usernames = find_usernames(urls)
for name in usernames:
sys.stdout.write(name + '\n')
print "Found {0} unique usernames!".format(len(usernames))
| 754 |
Music_Player/music_play.py
|
13jacole/5e_GM_Tools
| 2 |
2024047
|
#!/usr/bin/env python
#imports
import os
import sys
import argparse
import glob
import random as rando
from playsound import playsound
"""
music_play.py: Music Player
Usage: python music_play.py
"""
# meta information
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
#arguments
pa = argparse.ArgumentParser()
pa.add_argument('-l', '--loop', action='store_true',
help='Flag specifies if music should loop')
pa.add_argument('-r', '--random', action='store_true',
help='Flag specifies whether music will be randomized or not')
pa.add_argument('-p', '--playlist', action='store_true',
help='Flag specifies whether music will be playlist or song')
args = pa.parse_args()
#argument parsing
if args.loop:
loop = True
else:
loop = False
if args.random:
random = True
else:
random = False
if args.playlist:
playlist = True
else:
playlist = False
#skeleton code
currDir = os.getcwd()
if playlist:
print("What playlist do you want to play?\n")
else:
print("From what playlist do you want to select your song?\n")
dirList = [x[0] for x in os.walk('.')]
print(dirList)
print("Please type the name of the subdirectory: \n")
decision = input()
decision = decision.replace(" ", "_")
newDir = os.path.join(currDir, decision)  # portable join; the original hard-coded a Windows "\\" separator
print(os.getcwd())
os.chdir(newDir)
mp3List = glob.glob1(newDir,"*.mp3")
mp3Counter = len(mp3List)
if playlist:
y = True
while y:
if mp3Counter > 0:
if random:
rando.shuffle(mp3List)
else:
print("No MP3 files exist in this directory")
sys.exit()
        for song in mp3List:
            playsound(song)
y = loop
else:
if random:
x = rando.randint(0, mp3Counter - 1)
else:
print("\n")
print("Songs in this directory:")
print("\n")
for idx, song in enumerate(mp3List, start=1):
print("{}: {}\n".format(idx, song))
print("Please indicate what song you would like to play\n[Enter the number to the left of the song name]")
x = input()
        x = int(x) - 1  # convert the 1-based menu choice to a zero-based index
y = True
while y:
        try:
            playsound(mp3List[x])
            y = loop
            if y:
                print("looping...\n")
        except IndexError:  # narrowed from a bare except; a bad menu index is the expected failure
            print("Error: Invalid song number")
            sys.exit()
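# Example invocations (using the flags defined above):
#   python music_play.py -p -r    # play every song in a chosen playlist, shuffled
#   python music_play.py -l       # loop a single chosen song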
| 2,142 |
utils/mutual_information.py
|
DIAL-RPI/FreehandUSRecon
| 41 |
2023588
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 6 17:54:53 2018
@author: haskig (original), yan
Check registration quality by varying translation or rotation.
Compare with MI at the same time.
"""
# %%
from utils import registration_reader as rr
from utils import volume_data_generator as vdg
from utils import volume_resampler_3d as vr3d
import glob
from keras.models import load_model
from matplotlib import pyplot as plt
import numpy as np
from os import path
import SimpleITK as sitk
from resizeimage import resizeimage
from PIL import Image
import random
import scipy
from utils import volume_resampler_3d as vr3D
from stl import mesh
from keras import backend as K
# %% input image dimensions
img_rows, img_cols = 96, 96
depth = 32
#
img_channels = 2
batch_size = 64
tmp_folder = '/home/haskig/tmp'
# %%
a=0.1
if 'deep_network' not in globals():
print('Loading deep network...')
#fn_model = 'trained_binary_model_3d.h5'
"""
fn_model = 'trained_3d_regression_20170627_refined.h5'
folder_model = '/home/data/models'
fn_full = path.join(folder_model, fn_model)
"""
fn_model = '/zion/common/experimental_results/haskins_mrus_reg/trained_3d_regression_new_data.h5'
deep_network = load_model(fn_model)
#fn_model = '/home/haskig/tmp/trained_3d_regression_OG_smooth_stdev.h5'
#deep_network = load_model(fn_model, custom_objects={'mse_var_reg':mse_var_reg})
print('Deep network loaded from <{}>'.format(fn_model))
# %%
data_folder = '/home/data/uronav_data'
"""
if not 'vdg_train' in globals():
vdg_train = vdg.VolumeDataGenerator(data_folder, (1,500))
print('{} cases for using'.format(vdg_train.get_num_cases()))
"""
vdg_train = vdg.VolumeDataGenerator(data_folder, (71,750))
print('{} cases for using'.format(vdg_train.get_num_cases()))
#trainGen = vdg_train.generate_batch_with_parameters(batch_size=batch_size, shape=(img_cols,img_rows,depth))
# %% Generate samples and check predict values
case_idx = 1
case_folder = 'Case{:04d}'.format(case_idx)
full_case_path = path.join(data_folder, case_folder)
fn_stl = path.join(full_case_path, 'segmentationrtss.uronav.stl')
segMesh = mesh.Mesh.from_file(fn_stl)
folder = '/home/haskig/data/uronav_data'
"""
US_mat_path = path.join(folder, 'Case{:04}/SSC_US'.format(case_idx))
MR_mat_path = path.join(folder, 'Case{:04}/SSC_MR'.format(case_idx))
SSC_moving = scipy.io.loadmat(US_mat_path)['US_SSC']
SSC_fixed = scipy.io.loadmat(MR_mat_path)['MR_SSC']
"""
def get_array_from_itk_matrix(itk_mat):
mat = np.reshape(np.asarray(itk_mat), (3,3))
return mat
fns = glob.glob(path.join(full_case_path, '*.txt'))
if len(fns) < 1:
    raise FileNotFoundError('No registration file found in {}'.format(full_case_path))
fn_gt = fns[0]
for fn_registration in fns:
if 'refined' in fn_registration:
fn_gt = fn_registration
trans_gt = rr.load_registration(fn_gt)
print(trans_gt)
R = sitk.ImageRegistrationMethod()
R.SetMetricAsJointHistogramMutualInformation()
scores = []
mis = []
var_range = np.arange(-20, 20, 0.5)
n = 1
e = 0
for x in var_range:
trans = np.copy(trans_gt)
trans0 = np.copy(trans_gt)
x0 = x
score = 0
for j in range(n):
if j == 0:
trans0[2,3] = trans_gt[2,3] + x
sample0 = vdg_train.create_sample(case_idx, (img_cols,img_rows,depth), trans0)
sampledFixed0 = sample0[0]
sampledMoving0 = sample0[1]
x += random.uniform(-e,e)
trans[2,3] = trans_gt[2,3] + x
sample = vdg_train.create_sample(case_idx, (img_cols,img_rows,depth), trans)
sampledFixed = sample[0]
sampledMoving = sample[1]
        arr_fixed = sitk.GetArrayFromImage(sampledFixed)    # renamed: the original reused x/y and clobbered the loop offset
        arr_moving = sitk.GetArrayFromImage(sampledMoving)
#pos_neg = sample[2]
error_trans = sample[2]
(angleX, angleY, angleZ, tX, tY, tZ) = sample[3]
sample4D = np.zeros((1, 32, 96, 96, 2), dtype=np.ubyte)
#print(sample4D.shape)
sample4D[0, :,:,:, 0] = sitk.GetArrayFromImage(sampledFixed)
sample4D[0, :,:,:, 1] = sitk.GetArrayFromImage(sampledMoving)
prediction = deep_network.predict(sample4D)
score_dl = prediction[0,0]
score += score_dl
x=x0
score /= n
scores.append(score)
"""
SSD = 0
trans[2,3] = trans_gt[2,3] + x
for i in range(SSC_fixed.shape[3]):
resampler3D = vr3D.VolumeResampler(sitk.GetImageFromArray(SSC_fixed[:,:,:,i]), segMesh,
sitk.GetImageFromArray(SSC_moving[:,:,:,i]),
trans)
resampler3D.set_transform(trans)
sampledFixed, sampledMoving = resampler3D.resample(96, 96, 32)
fixed_img = sitk.GetArrayFromImage(sampledFixed)
moving_img = sitk.GetArrayFromImage(sampledMoving)
diff = np.subtract(fixed_img, moving_img)
sq_diff = np.square(diff)
SSD += np.sum(sq_diff)
SSC = SSD
"""
score_mi = R.MetricEvaluate(sampledFixed0, sampledMoving0)
mis.append(score_mi)
print('DL: %.4g <--> MI: %.4g' % (score, score_mi))
# %%
fig, ax1 = plt.subplots()
#num_pts = len(scores)
ax1.plot(var_range, scores, c='b', label='DL')
ax1.set_title('Translation along Z-axis', fontsize=14)
ax1.set_xlabel('Translation along Z axis (mm)', fontsize=14)  # dropped a no-op .format(n, e)
# Make the y-axis label, ticks and tick labels match the line color.
ax1.set_ylabel('CNN score (mm)', color='b', fontsize=14)
ax1.tick_params('y', colors='b')
ax1.legend(loc='lower left',prop={'size': 11})
ax2 = ax1.twinx()
ax2.plot(var_range, -np.asarray(mis), c='r', label='MI')
ax2.set_ylabel('Mutual Information', color='r', fontsize=14)
ax2.tick_params('y', colors='r')
ax2.legend(loc="lower right",prop={'size': 11})
fig.tight_layout()
plt.savefig('/home/haskig/Pictures/MIvLM_trans_bad_case_56.pdf', dpi=600, format='pdf')
# %% Rotations
#import transformations as tfms
scores = []
mis = []
var_range = np.arange(-20, 20, 0.5)
n = 1
e = 0
for x in var_range:
x0 = x
trans = np.copy(trans_gt)
trans0 = np.copy(trans_gt)
score = 0
#trans[2,3] = trans_gt[2,3] + x
#mat_rot = tfms.rotation_matrix(x/180.0 * np.pi, (0,0,1))
#trans = mat_rot.dot(trans)
rot0, t0 = vdg_train.create_transform(x0, x0, x0, 0, 0, 0, trans_gt)
trans0[:3,:3] = rot0
trans0[:3, 3] = t0 + trans_gt[:3,3]
sample0 = vdg_train.create_sample(case_idx, (img_cols,img_rows,depth), trans0)
sampledFixed0 = sample0[0]
sampledMoving0 = sample0[1]
#pos_neg = sample[2]
error_trans0 = sample0[2]
(angleX, angleY, angleZ, tX, tY, tZ) = sample0[3]
for j in range(n):
x += random.uniform(-e,e)
rot, t = vdg_train.create_transform(x, x, x, 0, 0, 0, trans_gt)
trans[:3,:3] = rot
trans[:3, 3] = t + trans_gt[:3,3]
sample = vdg_train.create_sample(case_idx, (img_cols,img_rows,depth), trans)
sampledFixed = sample[0]
sampledMoving = sample[1]
#pos_neg = sample[2]
error_trans0 = sample[2]
(angleX, angleY, angleZ, tX, tY, tZ) = sample[3]
sample4D = np.zeros((1, 32, 96, 96, 2), dtype=np.ubyte)
#print(sample4D.shape)
sample4D[0, :,:,:, 0] = sitk.GetArrayFromImage(sampledFixed)
sample4D[0, :,:,:, 1] = sitk.GetArrayFromImage(sampledMoving)
prediction = deep_network.predict(sample4D)
score_dl = prediction[0,0]
score += score_dl
score /= n
scores.append(score)
score_mi = R.MetricEvaluate(sampledFixed0, sampledMoving0)
mis.append(score_mi)
    print('DL: %.4g <--> MI: %.4g' % (score, score_mi))  # print the averaged score, as in the translation loop
# %%
fig, ax1 = plt.subplots()
#num_pts = len(scores)
ax1.plot(var_range, scores, c='b', label='DL')
ax1.set_title('Rotation around all axes simultaneously', fontsize=14)
ax1.set_xlabel('Rotation around all axes (degree)', fontsize=14)  # dropped a no-op .format(n, e)
# Make the y-axis label, ticks and tick labels match the line color.
ax1.set_ylabel('CNN score (mm)', color='b', fontsize=14)
ax1.tick_params('y', colors='b')
ax1.legend(loc='lower left',prop={'size': 11})
ax2 = ax1.twinx()
ax2.plot(var_range, -np.asarray(mis), c='r', label='MI')
ax2.set_ylabel('Mutual Information', color='r', fontsize=14)
ax2.tick_params('y', colors='r')
ax2.legend(loc="lower right",prop={'size': 11})
fig.tight_layout()
plt.savefig('/home/haskig/Pictures/MIvLM_rot_bad_case_56.pdf', dpi=600, format='pdf')
"""
image = Image.open('/home/haskig/Pictures/rotateAll_case10.png')
cover = resizeimage.resize_cover(image, [300,200])
cover.save('/home/haskig/Pictures/rotateAll_case30_resized.png', image.format)
"""
x = [7.84411,7.59224,7.34227,6.69355,6.69355,6.69355,6.69355,6.69355,6.69355,6.69355,6.69355,6.69355,6.69355,6.69355,6.69355,6.69355]
plt.plot(x)
plt.ylabel('CNN Predicted TRE')
plt.xlabel('Generation Number')
plt.title('Differential Evolution plot: bad case (Initial TRE = 16mm)')
plt.savefig('/home/haskig/test_cases/Case0002/diffevo_generation_plot.pdf', dpi=600, format='pdf')
| 9,118 |
src/solutions/solution005.py
|
samtcwong/daily-coding-problems
| 0 |
2024095
|
from typing import Any, Callable, List, Tuple
# Problem #5 [Medium]
# Good morning! Here's your coding interview problem for today.
# This problem was asked by <NAME>.
# cons(a, b) constructs a pair, and car(pair) and cdr(pair) returns the first and
# last element of that pair.
# For example, car(cons(3, 4)) returns 3, and cdr(cons(3, 4)) returns 4.
# Given this implementation of cons:
# def cons(a, b):
# def pair(f):
# return f(a, b)
# return pair
# Implement car and cdr.
def cons(a, b):
def pair(f):
return f(a, b)
return pair
def car(pair):
def left(a, _):
return a
return pair(left)
def cdr(pair):
def right(_, b):
return b
return pair(right)
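# Why this works: cons(3, 4) returns the closure pair(f), which applies f to the
# captured pair (a, b). Passing the selector `left` picks the first element and
# `right` picks the last:
#   car(cons(3, 4)) -> pair(left)  -> left(3, 4)  -> 3
#   cdr(cons(3, 4)) -> pair(right) -> right(3, 4) -> 4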
def eval_(function):
return function
tests = (
((car(cons(3, 4)),), 3),
((cdr(cons(3, 4)),), 4),
((car(cons(-1, 4)),), -1),
((cdr(cons(0, -1)),), -1),
)
solver = eval_
| 925 |
tests/table/test_datamining.py
|
Ovakefali13/pyiron_base
| 0 |
2023368
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import unittest
import numpy as np
from pyiron_base._tests import TestWithProject, ToyJob
class TestProjectData(TestWithProject):
@classmethod
def setUpClass(cls):
super().setUpClass()
for i, c in enumerate("abcd"):
j = cls.project.create_job(ToyJob, f"test_{c}")
j.input['input_energy'] = i
j.run()
def setUp(self):
self.table = self.project.create.table('test_table')
self.table.filter_function = lambda j: j.name in ["test_a", "test_b"]
self.table.add['name'] = lambda j: j.name
self.table.add['array'] = lambda j: np.arange(8)
self.table.run()
def tearDown(self):
self.project.remove_job(self.table.name)
def test_filter(self):
"""Filter functions should restrict jobs included in the table."""
df = self.table.get_dataframe()
self.assertEqual(2, len(df), "Table not correctly filtered.")
self.assertEqual(["test_a", "test_b"], df.name.to_list(), "Table not correctly filtered.")
def test_filter_reload(self):
"""Lambdas should work as filter functions even if read from HDF5."""
        try:
            self.project.load(self.table.name)
        except Exception:
            self.fail("Error on reloading table with filter lambda.")
def test_numpy_reload(self):
"""Numpy arrays should be reloaded as such, not as strings."""
# regression test: previously tables were converted to json then saved, which caused numpy arrays to be loaded
# as strings
table_loaded = self.project.load(self.table.name)
df = table_loaded.get_dataframe()
self.assertTrue(isinstance(df.array[0], np.ndarray),
"Numpy values not read correctly.")
if __name__ == '__main__':
unittest.main()
| 2,035 |
pyspawn/tests/integration_tests/pg_adapter_tests/_pgsql_utilities.py
|
Tsanton/pyspawn
| 0 |
2023949
|
from typing import List
from pyspawn._graph.table import Table
def _execute_query(pg_conn, query) -> None:
with pg_conn.cursor() as cur:
cur.execute(query)
def _execute_scalar(pg_conn, query):
with pg_conn.cursor() as cur:
cur.execute(query)
return cur.fetchone()[0]
def _insert_bulk(pg_conn, query, input:List):
with pg_conn.cursor() as cur:
cur.executemany(query, input)
def _create_schema(conn, schema_name:str) -> None:
query = f"create schema {schema_name}"
_execute_query(conn, query)
return
def _create_table(pg_conn, table: Table) -> None:
query = f"""
CREATE TABLE {table.to_string()}
(
id int NOT NULL CONSTRAINT pk_{table.table_name} PRIMARY KEY,
val int
)"""
_execute_query(pg_conn, query)
def _create_foreign_key_relationship(pg_conn, child_table: Table, parent_table: Table) -> None:
"""The parent_table is the table with the constraint pointing to the referenced_tables primary key."""
query = f"""
alter table {child_table.to_string()}
add constraint fk_{child_table.table_name.upper()}_reffing_{parent_table.table_name} foreign key (val) references {parent_table.to_string()} (id)
"""
_execute_query(pg_conn, query)
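# Hedged usage sketch (assumes an open psycopg2 connection `conn` and that
# Table takes (schema, table_name); verify against pyspawn._graph.table):
#   parent = Table("test", "parent")
#   child = Table("test", "child")
#   _create_schema(conn, "test")
#   _create_table(conn, parent)
#   _create_table(conn, child)
#   _create_foreign_key_relationship(conn, child, parent)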
| 1,261 |
request-management-api/migrations/versions/1aadec56e7f2_.py
|
bcgov/foi-flow
| 0 |
2024110
|
"""empty message
Revision ID: 1aadec56e7f2
Revises: <PASSWORD>
Create Date: 2021-07-21 15:28:33.814563
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1a<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('FOIRawRequests', sa.Column('assignedto', sa.String(length=120), nullable=True))
op.add_column('FOIRawRequests', sa.Column('updatedby', sa.String(length=120), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('FOIRawRequests', 'updatedby')
op.drop_column('FOIRawRequests', 'assignedto')
# ### end Alembic commands ###
| 829 |
tests/integration_tests/tests/agentless_tests/test_backwards_rest_api.py
|
TS-at-WS/cloudify-manager
| 0 |
2022806
|
########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import subprocess
from integration_tests.framework import utils
from integration_tests import AgentlessTestCase
from integration_tests.tests.utils import get_resource as resource
class RestApiBackwardsCompatibilityTest(AgentlessTestCase):
def test_3_2_client(self):
self._test_client(client_version='3.2',
url_version_postfix='')
def test_3_3_1_client(self):
self._test_client(client_version='3.3.1',
url_version_postfix='/api/v2')
def _test_client(self, client_version, url_version_postfix):
shell_script_path = resource('scripts/test_old_rest_client.sh')
python_script_path = resource('scripts/test_old_rest_client.py')
result_path = os.path.join(self.workdir, 'result.json')
env = os.environ.copy()
env.update({
'python_script_path': python_script_path,
'client_version': client_version,
'manager_ip': self.get_manager_ip(),
'manager_user': utils.get_manager_username(),
'manager_password': utils.get_manager_password(),
'manager_tenant': utils.get_manager_tenant(),
'url_version_postfix': url_version_postfix,
'result_path': result_path
})
subprocess.check_call(shell_script_path,
shell=True,
cwd=self.workdir,
env=env)
with open(result_path) as f:
result = json.load(f)
if result['failed']:
self.fail('Failed to get manager status from old client. '
'[error={0}]'.format(result['details']))
| 2,338 |
data_visualization/seaborn/example_04_regression_plots.py
|
software-foundations/learning-data-science
| 0 |
2023935
|
from datasets import load_dataframe_tips
from plots import lmplot
import matplotlib.pyplot as plt
# load dataset
tips = load_dataframe_tips()
# lmplot - linear model plot
# lmplot(tips, x='total_bill', y='tip')
# lmplot(tips, x='total_bill', y='tip', hue='sex')
# lmplot(tips, x='total_bill', y='tip', palette='coolwarm')
# lmplot(tips, x='total_bill', y='tip', hue='sex', palette='coolwarm')
# lmplot(tips, x='total_bill', y='tip', markers=['v'])
# lmplot(tips, x='total_bill', y='tip', hue='sex', markers=['o', 'v'])
# lmplot(tips, x='total_bill', y='tip', scatter_kws={'s': 1})
# lmplot(tips, x='total_bill', y='tip', hue='sex', col='sex')
# lmplot(tips, x='total_bill', y='tip', hue='sex', col='sex', row='time')
# lmplot(tips, x='total_bill', y='tip', col='sex', row='time')
# lmplot(tips, x='total_bill', y='tip', hue='sex', col='day')
lmplot(tips, x='total_bill', y='tip', hue='sex', col='day', aspect=0.6, height=16)
plt.show()
| 942 |
shenfun/fourier/__init__.py
|
jaisw7/shenfun
| 138 |
2023950
|
#pylint: disable=missing-docstring
import numpy as np
from .bases import *
from .matrices import *
def energy_fourier(u, T):
r"""Compute the energy of u using Parceval's theorem
.. math::
\int abs(u)^2 dx = N*\sum abs(u_hat)^2
Parameters
----------
u : Array
The Fourier coefficients
T : TensorProductSpace
See https://en.wikipedia.org/wiki/Parseval's_theorem
"""
if not hasattr(T, 'comm'):
# Just a 1D basis
assert u.ndim == 1
if isinstance(T, R2C):
if u.shape[0] % 2 == 0:
result = (2*np.sum(abs(u[1:-1])**2) +
np.sum(abs(u[0])**2) +
np.sum(abs(u[-1])**2))
else:
result = (2*np.sum(abs(u[1:])**2) +
np.sum(abs(u[0])**2))
else:
result = np.sum(abs(u)**2)
return result
comm = T.comm
assert np.all([isinstance(base, FourierBase) for base in T.bases])
real = False
for axis, base in enumerate(T.bases):
if isinstance(base, R2C):
real = True
break
if real:
s = [slice(None)]*u.ndim
uaxis = axis + u.ndim-len(T.bases)
if T.forward.output_pencil.subcomm[axis].Get_size() == 1:
# aligned in r2c direction
if base.N % 2 == 0:
s[uaxis] = slice(1, -1)
result = 2*np.sum(abs(u[tuple(s)])**2)
s[uaxis] = 0
result += np.sum(abs(u[tuple(s)])**2)
s[uaxis] = -1
result += np.sum(abs(u[tuple(s)])**2)
else:
s[uaxis] = slice(1, None)
result = 2*np.sum(abs(u[tuple(s)])**2)
s[uaxis] = 0
result += np.sum(abs(u[tuple(s)])**2)
else:
# Data not aligned along r2c axis. Need to check about 0 and -1
if base.N % 2 == 0:
s[uaxis] = slice(1, -1)
result = 2*np.sum(abs(u[tuple(s)])**2)
s[uaxis] = 0
if T.local_slice(True)[axis].start == 0:
result += np.sum(abs(u[tuple(s)])**2)
else:
result += 2*np.sum(abs(u[tuple(s)])**2)
s[uaxis] = -1
if T.local_slice(True)[axis].stop == T.dims()[axis]:
result += np.sum(abs(u[tuple(s)])**2)
else:
result += 2*np.sum(abs(u[tuple(s)])**2)
else:
s[uaxis] = slice(1, None)
result = 2*np.sum(abs(u[tuple(s)])**2)
s[uaxis] = 0
if T.local_slice(True)[axis].start == 0:
result += np.sum(abs(u[tuple(s)])**2)
else:
result += 2*np.sum(abs(u[tuple(s)])**2)
else:
result = np.sum(abs(u[...])**2)
result = comm.allreduce(result)
return result
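# Hedged numpy-only illustration of the Parseval identity used above (no
# shenfun objects involved; np.fft's unnormalized transform is assumed):
#   u = np.random.rand(16)
#   u_hat = np.fft.fft(u) / 16
#   assert np.allclose(np.sum(u**2), 16 * np.sum(np.abs(u_hat)**2))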
| 2,971 |
problem_44.py
|
dherault/project_euler
| 0 |
2022752
|
import math
print('___')
def p(n):
    # n-th pentagonal number; integer division keeps the values exact
    return n * (3 * n - 1) // 2
pentagonal_numbers = [p(i) for i in range(1, 300)]
pentagonal_set = set(pentagonal_numbers)  # O(1) membership tests instead of O(n) list scans
print(pentagonal_numbers)
minD = math.inf
for i in pentagonal_numbers:
    for j in pentagonal_numbers:
        d = abs(j - i)  # the original called math.abs, which does not exist
        if (d in pentagonal_set) and (j + i in pentagonal_set) and d < minD:
            minD = d
print(minD)
| 410 |
lib/googlecloudsdk/api_lib/auth/external_account.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
| 2 |
2023316
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages logic for external accounts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import json
from googlecloudsdk.core import exceptions
from googlecloudsdk.core.credentials import creds as c_creds
from googlecloudsdk.core.credentials import introspect as c_introspect
from googlecloudsdk.core.util import files
_EXTERNAL_ACCOUNT_TYPE = 'external_account'
class Error(exceptions.Error):
"""Errors raised by this module."""
class BadCredentialFileException(Error):
"""Raised when file cannot be read."""
class BadCredentialJsonFileException(Error):
"""Raised when the JSON file is in an invalid format."""
def GetExternalAccountCredentialsConfig(filename):
"""Returns the JSON content if the file corresponds to an external account.
  This function is useful when the content of a file needs to be inspected
  before determining how to handle it. More specifically, it checks whether a
  config file contains an external account credential and returns its content,
  which can then be used with CredentialsFromAdcDictGoogleAuth, avoiding having
  to open the file twice.
Args:
    filename (str): The filepath to the ADC file representing external
      account credentials.
Returns:
Optional(Mapping): The JSON content if the configuration represents an
external account. Otherwise None is returned.
Raises:
BadCredentialFileException: If JSON parsing of the file fails.
"""
content = files.ReadFileContents(filename)
try:
content_json = json.loads(content)
except ValueError as e:
# File has to be in JSON format.
raise BadCredentialFileException('Could not read json file {0}: {1}'.format(
filename, e))
if IsExternalAccountConfig(content_json):
return content_json
else:
return None
def IsExternalAccountConfig(content_json):
"""Returns whether a JSON content corresponds to an external account cred."""
return (content_json or {}).get('type') == _EXTERNAL_ACCOUNT_TYPE
def CredentialsFromAdcDictGoogleAuth(external_config):
"""Creates external account creds from a dict of application default creds.
Args:
external_config (Mapping): The configuration dictionary representing the
credentials. This is loaded from the ADC file typically.
Returns:
google.auth.external_account.Credentials: The initialized external account
credentials.
Raises:
BadCredentialJsonFileException: If the config format is invalid.
googlecloudsdk.core.credentials.creds.InvalidCredentialsError: If the
provided configuration is invalid or unsupported.
"""
if ('type' not in external_config or
external_config['type'] != _EXTERNAL_ACCOUNT_TYPE):
raise BadCredentialJsonFileException(
'The provided credentials configuration is not in a valid format.')
return c_creds.FromJsonGoogleAuth(json.dumps(external_config))
def GetExternalAccountId(creds):
"""Returns the account identifier corresponding to the external account creds.
Args:
creds (google.auth.credentials.Credentials): The credentials whose account
ID is to be returned.
Returns:
Optional(str): The corresponding account ID, or None if the credentials are
not external_account credentials.
"""
if (c_creds.IsExternalAccountCredentials(creds) or
c_creds.IsExternalAccountUserCredentials(creds)):
return (creds.service_account_email or
c_introspect.GetExternalAccountId(creds))
return None
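# Hedged usage sketch (the file path is a hypothetical placeholder):
#   config = GetExternalAccountCredentialsConfig('/path/to/adc.json')
#   if config is not None:
#     creds = CredentialsFromAdcDictGoogleAuth(config)
#     account_id = GetExternalAccountId(creds)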
| 4,194 |
lib/JumpScale/servers/socketserver/QSocketServer.py
|
Jumpscale/jumpscale_core8
| 8 |
2023526
|
from JumpScale import j
import struct
import socketserver
import socket
try:
    import gevent
    def sleep(sec):
        gevent.sleep(sec)
except ImportError:  # fall back to the standard library when gevent is unavailable
    import time
    def sleep(sec):
        time.sleep(sec)
import select
from QSocketServerClient import *
class QSocketServerHandler(socketserver.BaseRequestHandler):
# def __init__(self):
# SocketServer.BaseRequestHandler.__init__(self)
# SocketBase.__init__(self)
    def getsize(self, data):
        # wire format: b"A" + 4-byte length (struct "I") + payload
        check = data[0:1]
        if check != b"A":
            raise j.exceptions.RuntimeError("error in tcp stream, first byte needs to be 'A'")
        sizebytes = data[1:5]
        size = struct.unpack("I", sizebytes)[0]
        return data[5:], size
def _readdata(self, data):
print("select")
try:
ready = select.select([self.socket], [], [], self.timeout)
self.selectcounter += 1
if self.selectcounter > 100:
raise j.exceptions.RuntimeError("recverror")
except Exception as e:
print(e)
raise j.exceptions.RuntimeError("recverror")
if ready[0]:
try:
data += self.socket.recv(4096)
except Exception as e:
print(e)
raise j.exceptions.RuntimeError("recverror")
else:
print("timeout on select")
return data
    def readdata(self):
        """Read one length-prefixed message from the socket."""
        data = self.dataleftover  # the original misspelled this as dataleftoever, so leftovers were silently dropped
        self.dataleftover = b""
        # wait for the 5-byte header plus at least one byte of payload
        while len(data) < 6:
            data = self._readdata(data)
        data, size = self.getsize(data)  # 5 header bytes removed & size returned
        while len(data) < size:
            data = self._readdata(data)
        self.dataleftover = data[size:]
        self.selectcounter = 0
        return data[0:size]
    def senddata(self, data):
        if isinstance(data, str):
            data = data.encode()  # the protocol operates on bytes
        data = b"A" + struct.pack("I", len(data)) + data
        self.socket.sendall(data)
    def handle(self):
        self.timeout = 60
        self.type = "server"
        self.dataleftover = b""
        self.socket = self.request
        self.selectcounter = 0
while True:
try:
data = self.readdata()
except Exception as e:
if str(e) == 'recverror':
self.socket.close()
return
else:
raise j.exceptions.RuntimeError("Cannot read data from client, unknown error: %s" % e)
if data.find("**connect**") != -1:
try:
self.senddata("ok")
# print data
print(("new client connected: %s,%s" % self.client_address))
except Exception as e:
print(("send error during connect:%s, will close socket" % e))
self.socket.close()
return
else:
result = j.servers.socketserver._handledata(data)
if result is not None:
try:
self.senddata(result)
except Exception as e:
print(("send error:%s, will close socket" % e))
self.socket.close()
return
class QSocketServer:
def __init__(self, addr, port, key, datahandler):
"""
        @param datahandler: a method taking one argument (data), a byte string; whatever should be sent back is returned from the method
"""
self.port = port
self.addr = addr
self.key = key
j.servers.socketserver.key = key
self.type = "server"
j.servers.socketserver._handledata = datahandler
self.server = socketserver.TCPServer((self.addr, self.port), QSocketServerHandler)
def start(self):
print(("started on %s" % self.port))
self.server.serve_forever()
class QSocketServerFactory:
def __init__(self):
self.__jslocation__ = "j.servers.socketserver"
def get(self, port, key, datahandler):
return QSocketServer('', port, key, datahandler)
def getClient(self, addr, port, key):
return SocketServerClient(addr, port, key)
def _handledata(self, data):
print("default data handler for socketserver, please overrule, method is handledata")
print("data received")
print(data)
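# Hedged usage sketch (port and key are made up; the handler echoes back):
#   def echo(data):
#       return data
#   server = j.servers.socketserver.get(9999, "secret", echo)
#   server.start()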
| 4,453 |
{{cookiecutter.app_name}}/{{cookiecutter.app_slug}}/blueprints/{{cookiecutter.blueprint_name}}.py
|
atwalsh/skelly
| 0 |
2022908
|
from flask import Blueprint, render_template
{{cookiecutter.blueprint_name}} = Blueprint('{{cookiecutter.blueprint_name}}', __name__)
@{{cookiecutter.blueprint_name}}.route('/')
def index():
return render_template('index.html', app_name='{{cookiecutter.app_name}}')
| 272 |
loss_func.py
|
purushottamkar/aqi-satvam
| 2 |
2023733
|
import numpy as np
# Root mean squared error
def rmse( y_true, y_pred ):
rmse = np.sqrt(np.sum(np.square( y_pred - y_true )) / np.size(y_true))
return rmse
# Mean absolute percentage error
def mape( y_true, y_pred ):
mape = np.sum(np.abs(( y_pred - y_true ) / y_true))
mape = mape * 100 / np.size(y_true)
return mape
# Mean absolute error
def mae( y_true, y_pred ):
mae = np.sum(np.abs( y_true - y_pred ))
mae = mae / np.size(y_true)
return mae
# Pearson's coefficient
def pearson( y_true, y_pred ):
N = np.size(y_true)
num = np.sum(y_true * y_pred) * N - np.sum(y_true) * np.sum(y_pred)
den = (N * np.sum(np.square(y_true))) - np.square(np.sum(y_true))
den = den * ((N * np.sum(np.square(y_pred))) - np.square(np.sum(y_pred)))
den = np.sqrt(den)
if den == 0:
return 0
else :
return (num/den)
# R2 coefficient
def coeff_r2( y_true, y_pred ):
mu = np.mean(y_true)
ss_res = np.sum(np.square(y_true - y_pred))
ss_tot = np.sum(np.square(y_true - mu))
r2 = 1 - (ss_res / ss_tot)
return r2
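# Minimal sanity check on made-up toy data (illustrative only):
if __name__ == "__main__":
    y_true = np.array([3.0, -0.5, 2.0, 7.0])
    y_pred = np.array([2.5, 0.0, 2.0, 8.0])
    print("RMSE:", rmse(y_true, y_pred))
    print("MAPE:", mape(y_true, y_pred))
    print("MAE:", mae(y_true, y_pred))
    print("Pearson:", pearson(y_true, y_pred))
    print("R2:", coeff_r2(y_true, y_pred))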
| 1,041 |
src/pyongc/exceptions.py
|
mattiaverga/PyOngc
| 8 |
2023206
|
# -*- coding:utf-8 -*-
#
# MIT License
#
# Copyright (c) 2019 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Exceptions for PyONGC."""
from typing import Optional
class InvalidCoordinates(Exception):
"""
Raised when coordinates are not valid.
Maybe you're passing an object without registered coordinates (typically an `Unknown` object)
to some function; or you input coordinates as text in a wrong format: to be recognized
the input text must be in the format `HH:MM:SS.ss +/-DD:MM:SS.s`.
"""
def __init__(self, text: Optional[str] = None):
if text is not None:
super().__init__(text)
else: # pragma: no cover
super().__init__('Coordinates not recognized.')
class ObjectNotFound(Exception):
"""
Raised when a valid object identifier isn't found in the database.
The identifier is recognized to be part of one of the supported catalogs,
but the object isn't in the database (or doesn't exist at all).
For example, `pyongc.Dso('NGC7000A')` is valid, but it doesn't exist.
"""
def __init__(self, name: Optional[str] = None):
if name is not None:
super().__init__(f'Object named {name} not found in the database.')
else: # pragma: no cover
super().__init__('Object not found in the database.')
class UnknownIdentifier(Exception):
"""
Raised when input text can't be recognized as a valid object identifier.
You're asking for an identifier using the wrong format, or using an identifier
which refers to a catalog not supported by PyOngc.
"""
def __init__(self, text: Optional[str] = None):
if text is not None:
super().__init__(f'The name "{text}" is not recognized.')
else: # pragma: no cover
super().__init__('Unrecognized object name.')
| 2,883 |
static/ztp.py
|
CiscoSE/cisco-pnp-ztp-guestshell
| 1 |
2023735
|
"""
Cisco ZTP script through GuestShell
"""
import cli
import re
hostname = cli.execute('show version | i Processor board ID')
print(hostname)
match = re.search(r"\w+$", hostname)  # raw string avoids the invalid-escape warning; renamed so the filter builtin is not shadowed
print(match.group(0))
hostname = "ztp-%s" % match.group(0)
set_hostname = cli.configure('hostname %s' % hostname)
configuration = ['username user privilege 15 password 0 password',
'ip domain-name example.com',
'vtp mode transparent',
'line vty 0 4',
'login local',
]
set_vty = cli.configure(configuration)
| 584 |
software/pynguin/pynguin/utils/generic/genericaccessibleobject.py
|
se2p/artifact-pynguin-ssbse2020
| 3 |
2024027
|
# This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
"""
Provide wrappers around constructors, methods, function and fields.
Think of these like the reflection classes in Java.
"""
import abc
from typing import Callable, Optional, Set, Type
from pynguin.typeinference.strategy import InferredSignature
class GenericAccessibleObject(metaclass=abc.ABCMeta):
"""Abstract base class for something that can be accessed."""
def __init__(self, owner: Optional[Type]):
self._owner = owner
@abc.abstractmethod
def generated_type(self) -> Optional[Type]:
"""Provides the type that is generated by this accessible object."""
@property
def owner(self) -> Optional[Type]:
"""The type which owns this accessible object."""
return self._owner
# pylint: disable=no-self-use
def is_method(self) -> bool:
"""Is this a method?"""
return False
# pylint: disable=no-self-use
def is_constructor(self) -> bool:
"""Is this a constructor?"""
return False
# pylint: disable=no-self-use
def is_function(self) -> bool:
"""Is this a function?"""
return False
# pylint: disable=no-self-use
def is_field(self) -> bool:
"""Is this a field?"""
return False
# pylint: disable=no-self-use
def get_num_parameters(self) -> int:
"""Number of parameters."""
return 0
@abc.abstractmethod
def get_dependencies(self) -> Set[Type]:
"""A set of types that are required to use this accessible."""
class GenericCallableAccessibleObject(
GenericAccessibleObject, metaclass=abc.ABCMeta
): # pylint: disable=W0223
"""Abstract base class for something that can be called."""
def __init__(
self,
owner: Optional[Type],
callable_: Callable,
inferred_signature: InferredSignature,
) -> None:
super().__init__(owner)
self._callable = callable_
self._inferred_signature = inferred_signature
def generated_type(self) -> Optional[Type]:
return self._inferred_signature.return_type
@property
def inferred_signature(self) -> InferredSignature:
"""Provides access to the inferred type signature information."""
return self._inferred_signature
@property
def callable(self) -> Callable:
"""Provides the callable."""
return self._callable
def get_num_parameters(self) -> int:
return len(self.inferred_signature.parameters)
def get_dependencies(self) -> Set[Type]:
return {
value
for value in self.inferred_signature.parameters.values()
if value is not None
}
class GenericConstructor(GenericCallableAccessibleObject):
"""A constructor."""
def __init__(self, owner: Type, inferred_signature: InferredSignature) -> None:
super().__init__(owner, owner.__init__, inferred_signature)
assert owner
def generated_type(self) -> Optional[Type]:
return self.owner
def is_constructor(self) -> bool:
return True
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, GenericConstructor):
return False
return self._owner == other._owner
def __hash__(self):
return hash(self._owner)
def __repr__(self):
return f"{self.__class__.__name__}({self.owner}, {self.inferred_signature})"
class GenericMethod(GenericCallableAccessibleObject):
"""A method."""
def __init__(
self, owner: Type, method: Callable, inferred_signature: InferredSignature
) -> None:
super().__init__(owner, method, inferred_signature)
assert owner
def is_method(self) -> bool:
return True
def get_dependencies(self) -> Set[Type]:
assert self.owner, "Method must have an owner"
dependencies = super().get_dependencies()
dependencies.add(self.owner)
return dependencies
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, GenericMethod):
return False
return self._callable == other._callable
def __hash__(self):
return hash(self._callable)
def __repr__(self):
return (
f"{self.__class__.__name__}({self.owner},"
f" {self._callable.__name__}, {self.inferred_signature})"
)
class GenericFunction(GenericCallableAccessibleObject):
"""A function, which does not belong to any class."""
def __init__(
self, function: Callable, inferred_signature: InferredSignature
) -> None:
super().__init__(None, function, inferred_signature)
def is_function(self) -> bool:
return True
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, GenericFunction):
return False
return self._callable == other._callable
def __hash__(self):
return hash(self._callable)
def __repr__(self):
return f"{self.__class__.__name__}({self._callable.__name__}, {self.inferred_signature})"
class GenericField(GenericAccessibleObject):
"""A field."""
def __init__(self, owner: Type, field: str, field_type: Optional[Type]) -> None:
super().__init__(owner)
self._field = field
self._field_type = field_type
def is_field(self) -> bool:
return True
def get_dependencies(self) -> Set[Type]:
assert self.owner, "Field must have an owner"
return {self.owner}
def generated_type(self) -> Optional[Type]:
return self._field_type
@property
def field(self) -> str:
"""Provides the name of the field."""
return self._field
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, GenericField):
return False
        return self._owner == other._owner and self._field == other._field  # the original compared self._field with itself
def __hash__(self):
return 31 + 17 * hash(self._owner) + 17 * hash(self._field)
def __repr__(self):
return f"{self.__class__.__name__}({self.owner}, {self._field}, {self._field_type})"
| 6,874 |
bot/umj.py
|
bryanpalmer/AzsocamiBot
| 2 |
2022708
|
# umj.py
"""UMJ module provides database calls to TheUnderMineJournal items database."""
import json
import mysql.connector as mysql
import wowclasses
# DEFS for my implementation
HOUSE_ID = 68
# common price check, returns house, item, level, price, quantity, lastseen
# SELECT * FROM `tblItemSummary` where house=68 and item=171276
# get last ah datetime, returns house, nextcheck, lastdaily, lastcheck, lastchecksuccess
# SELECT * FROM `tblHouseCheck` where house=68
# READ https://medium.com/opex-analytics/database-connections-in-python-extensible-reusable-and-secure-56ebcf9c67fe
class umj_connection(object):
"""MySql db connection to UMJ"""
def __init__(self):
# self.connection_string = connection_string
self.connector = None
def __enter__(self):
self.connector = mysql.connect(
user="",
password="",
host="newswire.theunderminejournal.com",
port=3306,
database="newsstand",
)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_tb is None:
self.connector.commit()
else:
self.connector.rollback()
self.connector.close()
def umj_connector(func):
def with_connection_(*args, **kwargs):
# conn_str = os.environ["CONN"]
cnn = mysql.connect(
user="",
password="",
host="newswire.theunderminejournal.com",
port=3306,
database="newsstand",
)
try:
rv = func(cnn, *args, **kwargs)
except Exception:
cnn.rollback()
print("UMJ database connection error")
raise
else:
cnn.commit()
finally:
cnn.close()
return rv
return with_connection_
def create_connection():
conn = None
try:
conn = mysql.connect(
user="",
password="",
host="newswire.theunderminejournal.com",
port=3306,
database="newsstand",
)
except mysql.Error as e:
print(e)
finally:
return conn
@umj_connector
def getLastHouseCheck(cnn):
"""Returns last weekly datetime umj database was updated."""
cur = cnn.cursor()
cur.execute(f"SELECT lastcheck from tblHouseCheck WHERE house={HOUSE_ID};")
row = cur.fetchone()
return row[0]
# get last ah datetime, returns house, nextcheck, lastdaily, lastcheck, lastchecksuccess
# SELECT * FROM `tblHouseCheck` where house=68
@umj_connector
def getItemsListById(cnn, itemList):
retList = []
cur = cnn.cursor()
sql = """SELECT i.id, i.name_enus, i.quality, i.level, i.class, s.name_enus, i.icon, i.stacksize, i.buyfromvendor
FROM tblDBCItem i
LEFT JOIN tblDBCItemSubClass s ON s.class=i.class AND s.subclass=i.subclass
WHERE id=%s;"""
for item in itemList:
cur.execute(sql, (item[0],))
rec = cur.fetchone()
retList.append(wowclasses.Item(rec))
return retList
def getItemById(conn, itemId):
retVal = None
try:
sql = """SELECT i.id, i.name_enus, i.quality, i.level, i.class, s.name_enus, i.icon, i.stacksize, i.buyfromvendor
FROM tblDBCItem i
LEFT JOIN tblDBCItemSubClass s ON s.class=i.class AND s.subclass=i.subclass
WHERE id=%s;"""
cur = conn.cursor()
cur.execute(sql, (itemId,))
record = cur.fetchone()
# print(type(record))
retVal = wowclasses.Item(record)
except mysql.Error as e:
print(e.args[0])
return retVal
def getItemByName(conn, itemName):
retVal = None
searchVal = itemName.lower()
try:
sql = """SELECT i.id, i.name_enus, i.quality, i.level, i.class, s.name_enus, i.icon, i.stacksize, i.buyfromvendor
FROM tblDBCItem i
LEFT JOIN tblDBCItemSubClass s ON s.class=i.class AND s.subclass=i.subclass
WHERE lower(name_enus)=%s;"""
cur = conn.cursor()
cur.execute(sql, (searchVal,))
record = cur.fetchone()
# print(type(record))
retVal = wowclasses.Item(record)
except mysql.Error as e:
print(e.args[0])
return retVal
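# Hedged usage sketch (the item name is a made-up example):
#   conn = create_connection()
#   if conn is not None:
#       item = getItemByName(conn, "Some Item Name")
#       conn.close()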
| 4,240 |
mundo-2/ex046.py
|
RaoniSilvestre/Exercicios-Python
| 2 |
2023967
|
import time
import emoji
for c in range(10,0,-1):
print(c)
time.sleep(1)
print(emoji.emojize(' :collision::collision::collision: BUM!!! :collision::collision::collision:'))
| 186 |
abmt/models/resnet_AB_ibn.py
|
chenhao2345/ABMT
| 17 |
2024161
|
from __future__ import absolute_import
import random
from torch import nn
from torch.nn import functional as F
from torch.nn import init
import torchvision
import torch
from torchvision.models.resnet import Bottleneck
from copy import deepcopy
from .resnet_ibn_a import resnet50_ibn_a, resnet101_ibn_a
__all__ = ['resnet_ibn50a_AB']
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.normal_(m.weight, 1.0, 0.02)
nn.init.constant_(m.bias, 0.0)
class ResNetIBN(nn.Module):
__factory = {
50: resnet50_ibn_a,
101: resnet101_ibn_a
}
def __init__(self, depth, pretrained=True, cut_at_pooling=False,
num_features=0, norm=False, dropout=0, num_classes=0):
super(ResNetIBN, self).__init__()
self.pretrained = pretrained
self.depth = depth
self.cut_at_pooling = cut_at_pooling
# Construct base (pretrained) resnet
if depth not in ResNetIBN.__factory:
raise KeyError("Unsupported depth:", depth)
resnet = ResNetIBN.__factory[depth](pretrained=pretrained)
resnet.layer4[0].conv2.stride = (1,1)
resnet.layer4[0].downsample[0].stride = (1,1)
self.base = nn.Sequential(
resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool,
resnet.layer1, resnet.layer2, resnet.layer3)
self.num_classes = num_classes
out_planes = resnet.fc.in_features
self.num_features = out_planes
# 1st branch
self.global_branch = resnet.layer4
self.gap = nn.AdaptiveAvgPool2d(1)
self.feat_bn = nn.BatchNorm1d(self.num_features)
self.feat_bn.bias.requires_grad_(False)
if self.num_classes > 0:
self.classifier = nn.Linear(self.num_features, self.num_classes, bias=False)
init.normal_(self.classifier.weight, std=0.001)
init.constant_(self.feat_bn.weight, 1)
init.constant_(self.feat_bn.bias, 0)
# 2nd branch
self.max_branch = nn.Sequential(deepcopy(resnet.layer4), Bottleneck(2048, 512))
self.max_branch[1].apply(weights_init_kaiming)
self.gmp = nn.AdaptiveMaxPool2d(1)
self.feat_bn_max = nn.BatchNorm1d(self.num_features)
self.feat_bn_max.bias.requires_grad_(False)
if self.num_classes > 0:
self.classifier_max = nn.Linear(self.num_features, self.num_classes, bias=False)
init.normal_(self.classifier_max.weight, std=0.001)
init.constant_(self.feat_bn_max.weight, 1)
init.constant_(self.feat_bn_max.bias, 0)
if not pretrained:
self.reset_params()
def forward(self, x, feature_withbn=False):
x = self.base(x)
# 1st branch
x_g = self.global_branch(x)
x_g = self.gap(x_g)
x_g = x_g.view(x_g.size(0), -1)
bn_x_g = self.feat_bn(x_g)
# 2nd branch
x_m = self.max_branch(x)
x_m = self.gmp(x_m)
x_m = x_m.view(x_m.size(0), -1)
bn_x_m = self.feat_bn_max(x_m)
if self.training is False:
bn_x = F.normalize(torch.cat((bn_x_g, bn_x_m), dim=1))
return bn_x
if self.num_classes > 0:
prob_g = self.classifier(bn_x_g)
prob_m = self.classifier_max(bn_x_m)
else:
return x_g, bn_x_g, x_m, bn_x_m
if feature_withbn:
return bn_x_g, bn_x_m, prob_g, prob_m
return x_g, x_m, prob_g, prob_m
def reset_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
resnet = ResNetIBN.__factory[self.depth](pretrained=self.pretrained)
self.base[0].load_state_dict(resnet.conv1.state_dict())
self.base[1].load_state_dict(resnet.bn1.state_dict())
self.base[2].load_state_dict(resnet.relu.state_dict())
self.base[3].load_state_dict(resnet.maxpool.state_dict())
self.base[4].load_state_dict(resnet.layer1.state_dict())
self.base[5].load_state_dict(resnet.layer2.state_dict())
self.base[6].load_state_dict(resnet.layer3.state_dict())
        # self.base holds conv1..layer3 only (indices 0-6); layer4 lives in the branches
        self.global_branch.load_state_dict(resnet.layer4.state_dict())
def resnet_ibn50a_AB(**kwargs):
return ResNetIBN(50, **kwargs)
| 5,354 |
lesson_17.py
|
alexudracul/LearnPython3
| 0 |
2023532
|
# Create a function is_cat_here() that takes any number of arguments and checks
# whether the string 'cat' is among them. The function must return True if such
# an argument is present and False otherwise. The letters of 'cat' may be upper- or lowercase.
def is_cat_here(*args):
    line = [str(val).lower() for val in args]
    return 'cat' in line
# Create a function is_item_here(item, *args) that checks whether item is among args.
# The function must return True if such an argument is present and False otherwise.
def is_item_here(item, *args):
    return item in args
# Create a function your_favorite_color() with a positional parameter my_color and **kwargs,
# which prints 'My favorite color is (my_color value), what is your favorite color?'
# when kwargs has no 'color' key, and 'My favorite color is (my_color value),
# but (value under the color key) is also pretty good!' when the 'color' key is present.
def your_favorite_color(my_color, **kwargs):
    if 'color' in kwargs:
        print('My favorite color is {my_color}, but {color} is also pretty good!'
              .format(my_color=my_color, color=kwargs['color']))
    else:
        print('My favorite color is {}, what is your favorite color?'.format(my_color))
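# Quick illustrative checks for the three functions above:
#   is_cat_here('dog', 'Cat', 7)              # -> True
#   is_item_here(3, 1, 2, 3)                  # -> True
#   your_favorite_color('blue', color='red')  # prints "...but red is also pretty good!"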
| 1,447 |
pyteal/ast/seq.py
|
spapasoteriou/pyteal
| 184 |
2024060
|
from typing import List, cast, TYPE_CHECKING, overload
from ..types import TealType, require_type
from ..errors import TealInputError
from ..ir import TealSimpleBlock
from .expr import Expr
if TYPE_CHECKING:
from ..compiler import CompileOptions
class Seq(Expr):
"""A control flow expression to represent a sequence of expressions."""
@overload
def __init__(self, *exprs: Expr):
...
@overload
def __init__(self, exprs: List[Expr]):
...
def __init__(self, *exprs):
"""Create a new Seq expression.
The new Seq expression will take on the return value of the final expression in the sequence.
Args:
exprs: The expressions to include in this sequence. All expressions that are not the
final one in this list must not return any values.
Example:
.. code-block:: python
Seq([
App.localPut(Bytes("key"), Bytes("value")),
Int(1)
])
"""
super().__init__()
# Handle case where a list of expressions is provided
if len(exprs) == 1 and isinstance(exprs[0], list):
exprs = exprs[0]
for i, expr in enumerate(exprs):
if not isinstance(expr, Expr):
raise TealInputError("{} is not a pyteal expression.".format(expr))
if i + 1 < len(exprs):
require_type(expr.type_of(), TealType.none)
self.args = exprs
def __teal__(self, options: "CompileOptions"):
start = TealSimpleBlock([])
end = start
for arg in self.args:
argStart, argEnd = arg.__teal__(options)
end.setNextBlock(argStart)
end = argEnd
return start, end
def __str__(self):
ret_str = "(Seq"
for a in self.args:
ret_str += " " + a.__str__()
ret_str += ")"
return ret_str
def type_of(self):
if len(self.args) == 0:
return TealType.none
return self.args[-1].type_of()
def has_return(self):
# this expression declares it has a return op only if its final expression has a return op
# TODO: technically if ANY expression, not just the final one, returns true for has_return,
# this could return true as well. But in that case all expressions after the one that
# returns true for has_return is dead code, so it could be optimized away
if len(self.args) == 0:
return False
return self.args[-1].has_return()
Seq.__module__ = "pyteal"
| 2,606 |
src/model.py
|
AhmedBegggaUA/SetXAI
| 0 |
2024182
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init  # needed by the Xavier initialization loops below
from src.fspool import FSPool
############
# Encoders #
############
"""
FSEncoderDSPN class, using the invariant fspool operation (feature-wise sort pool)
"""
class FSEncoderDSPN(nn.Module):
    def __init__(self, input_channels, output_channels, dim):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv1d(input_channels + 1, dim, 1),
            nn.ReLU(),
            nn.Conv1d(dim, dim, 1),
            nn.ReLU(),
            nn.Conv1d(dim, output_channels, 1),
        )
        self.pool = FSPool(output_channels, 20, relaxed=False)
        # initialize weights after the submodules exist; the original ran this
        # loop before creating any layer, so it never touched a weight
        for m in self.modules():
            if (
                isinstance(m, nn.Linear)
                or isinstance(m, nn.Conv2d)
                or isinstance(m, nn.Conv1d)
            ):
                init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
def forward(self, x, mask=None):
mask = mask.unsqueeze(1)
x = torch.cat([x, mask], dim=1) # include mask as part of set
x = self.conv(x)
x = x / x.size(2) # normalise so that activations aren't too high with big sets
x, _ = self.pool(x)
return x
"""
FSEncoder class, using the invariant fspool operation (feature-wise sort pool), adapted
for classification
"""
class FSEncoder(nn.Module):
    def __init__(self, input_channels, output_channels, dim):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv1d(input_channels, dim, 1),
            nn.ReLU(inplace=True),
            nn.Conv1d(dim, dim, 1),
        )
        self.lin = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU(inplace=True),
            nn.Linear(dim, output_channels),
        )
        self.classifier = nn.Sequential(
            nn.Linear(output_channels, output_channels),
            nn.ReLU(),
            nn.Linear(output_channels, 10),
        )
        self.pool = FSPool(dim, 20, relaxed=True)
        # initialize weights after the submodules exist (see note in FSEncoderDSPN)
        for m in self.modules():
            if (
                isinstance(m, nn.Linear)
                or isinstance(m, nn.Conv2d)
                or isinstance(m, nn.Conv1d)
            ):
                init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
def forward(self, x, mask=None):
x = self.conv(x)
x, perm = self.pool(x)
x = self.lin(x)
x = self.classifier(x)
return x
"""
SumEncoder class, using the invariant sum operation, adapted for classification
"""
class SumEncoder(nn.Module):
def __init__(self, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
self.lin = nn.Sequential(
nn.Linear(dim, dim, 1),
nn.ReLU(inplace=True),
nn.Linear(dim, output_channels, 1),
)
self.classifier = nn.Sequential(
nn.Linear(output_channels, output_channels),
nn.ReLU(),
nn.Linear(output_channels, 10),
)
def forward(self, x, n_points, *args):
x = self.conv(x)
x = x.sum(2)
x = self.lin(x)
x = self.classifier(x)
return x
"""
SumEncoderDSPN class, using the invariant sum operation (DSPN variant; no classifier head)
"""
class SumEncoderDSPN(nn.Module):
def __init__(self, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels + 1, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
def forward(self, x, mask, *args):
mask = mask.unsqueeze(1)
x = torch.cat([x, mask], dim=1) # include mask as part of set
x = self.conv(x)
x = x.sum(2)
return x
"""
MaxEncoder class, using the invariant max operation, adapted for classification
"""
class MaxEncoder(nn.Module):
def __init__(self, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
self.lin = nn.Sequential(
nn.Linear(dim, dim, 1),
nn.ReLU(inplace=True),
nn.Linear(dim, output_channels, 1),
)
self.classifier = nn.Sequential(
nn.Linear(output_channels, output_channels),
nn.ReLU(),
nn.Linear(output_channels, 10),
)
def forward(self, x, n_points=None, *args):
x = self.conv(x)
x = x.max(2)[0]
x = self.lin(x)
x = self.classifier(x)
return x
"""
MaxEncoderDSPN class, using the invariant max operation (DSPN variant; no classifier head)
"""
class MaxEncoderDSPN(nn.Module):
def __init__(self, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels+ 1, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
self.salidaConv = torch.zeros(1,1)
def forward(self, x, mask, *args):
mask = mask.unsqueeze(1)
x = torch.cat([x, mask], dim=1) # include mask as part of set
x = self.conv(x)
self.salidaConv = x
x = x.max(2)[0]
return x
"""
MeanEncoder class, using the invariant mean operation, adapted for classification
"""
class MeanEncoder(nn.Module):
def __init__(self, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
self.lin = nn.Sequential(
nn.Linear(dim, dim, 1),
nn.ReLU(inplace=True),
nn.Linear(dim, output_channels, 1),
)
self.classifier = nn.Sequential(
nn.Linear(output_channels, output_channels),
nn.ReLU(),
nn.Linear(output_channels, 10),
)
def forward(self, x, n_points, *args):
x = self.conv(x)
x = x.sum(2) / n_points.size(1)
x = self.lin(x)
x = self.classifier(x)
return x
"""
MeanEncoderDSPN class, using the invariant mean operation (DSPN variant)
"""
class MeanEncoderDSPN(nn.Module):
def __init__(self, input_channels, output_channels, dim, **kwargs):
super().__init__()
self.conv = nn.Sequential(
nn.Conv1d(input_channels + 1, dim, 1),
nn.ReLU(inplace=True),
nn.Conv1d(dim, dim, 1),
)
    def forward(self, x, mask, *args):
        # signature aligned with the other DSPN encoders; the original referenced
        # an undefined `mask` while receiving `n_points`
        mask = mask.unsqueeze(1)
        x = torch.cat([x, mask], dim=1)  # include mask as part of set
        x = self.conv(x)
        x = x.sum(2) / x.size(2)  # mean over the set dimension (hedged fix; assumes full sets)
        return x
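# Hedged shape check (random tensors; the dimensions are made up):
#   enc = MaxEncoder(input_channels=2, output_channels=64, dim=128)
#   x = torch.rand(8, 2, 100)   # batch of 8 sets with 100 two-channel points
#   out = enc(x)                # -> torch.Size([8, 10]) class logits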
| 7,043 |
Codewars/opposite number/opposite number.py
|
adoreblvnk/code_solutions
| 0 |
2024127
|
def opposite(number):
    # simple negation; the original round-tripped through strings and floats
    return -number
opposite_soln = lambda x: -x
print(opposite_soln(-34))
| 206 |