max_stars_repo_path
stringlengths 4
182
| max_stars_repo_name
stringlengths 6
116
| max_stars_count
int64 0
191k
| id
stringlengths 7
7
| content
stringlengths 100
10k
| size
int64 100
10k
|
---|---|---|---|---|---|
tests/test/test_commands.py
|
vb64/oeg.infotech.xml
| 0 |
2024550
|
# coding: utf-8
"""
make test T=test_commands
"""
from . import TestInfotech
class TestCommands(TestInfotech):
    """Commands for xml export file."""

    def test_join(self):
        """Join command."""
        from oeg_infotech import Infotech

        # Baseline totals for the first fixture (1736.xml).
        info = Infotech.from_file(self.fixture('1736.xml'))
        dist1 = info.total_dist()
        welds1 = len(info.welds.items)
        defects1 = len(info.defects.items)
        lineobjects1 = len(info.lineobjects.items)
        assert dist1 == 17727
        assert welds1 == 23
        assert defects1 == 10
        assert lineobjects1 == 6

        # Baseline totals for the second fixture (1737.xml).
        info = Infotech.from_file(self.fixture('1737.xml'))
        dist2 = info.total_dist()
        welds2 = len(info.welds.items)
        defects2 = len(info.defects.items)
        lineobjects2 = len(info.lineobjects.items)
        assert dist2 == 5589
        assert welds2 == 5
        assert defects2 == 2
        assert lineobjects2 == 4

        # Join 1736.xml onto the current document with a spacing of 1100
        # distance units: total distance grows by both files plus the gap,
        # one weld is added at the junction, defect and lineobject counts
        # are simply summed.
        text = info.join(['1100', self.fixture('1736.xml')])
        assert 'IPL_INSPECT' in text
        assert info.total_dist() == dist1 + dist2 + 1100
        assert len(info.welds.items) == welds1 + welds2 + 1
        assert len(info.defects.items) == defects1 + defects2
        assert len(info.lineobjects.items) == lineobjects1 + lineobjects2

        # Joining an empty list still returns the serialized XML.
        text = info.join([])
        assert 'IPL_INSPECT' in text

        # A missing file is reported in the returned text, not raised.
        text = info.join(['not_exist_file'])
        assert 'No such file or directory' in text

    def test_reverse(self):
        """Reverse command."""
        from oeg_infotech import Infotech, lineobj, codes
        from oeg_infotech.base import DistItem

        info = Infotech.from_file(self.fixture('1736.xml'))
        assert info.total_dist() == 17727
        root = info.xml.getroot()
        l_section = root.find(lineobj.Section.tag)
        # State before reversal: first item at odometer 0, CASE_START /
        # CASE_END markers near the beginning of the section.
        assert int(l_section[0].get(DistItem.field_odometer)) == 0
        assert int(l_section[-1].get(DistItem.field_odometer)) == 17710
        assert l_section[1].get(DistItem.field_typeobj) == codes.Feature.CASE_START
        assert l_section[2].get(DistItem.field_typeobj) == codes.Feature.CASE_END
        text = info.reverse()
        assert 'IPL_INSPECT' in text
        # After reversal the case markers appear mirrored at the far end
        # (with start/end swapped) and odometer values are mirrored
        # against the total distance.
        assert l_section[1].get(DistItem.field_typeobj) != codes.Feature.CASE_START
        assert l_section[2].get(DistItem.field_typeobj) != codes.Feature.CASE_END
        assert l_section[-2].get(DistItem.field_typeobj) == codes.Feature.CASE_END
        assert l_section[-3].get(DistItem.field_typeobj) == codes.Feature.CASE_START
        assert int(l_section[0].get(DistItem.field_odometer)) == 17
        assert int(l_section[-1].get(DistItem.field_odometer)) == 17727

    def test_fix(self):
        """Repair command."""
        from oeg_infotech import Infotech

        info = Infotech.from_file(self.fixture('umdp-1400.xml'))
        # Second PIGPASS child carries the attributes under test.
        pig = info.xml.getroot().find('PIGPASS')[1]
        # Fixture state before repair.
        assert info.obj_dict['1'] == u'УМДП-1400'
        assert pig.attrib.get('IDTYPEOBJ', None) == '1'
        assert pig.attrib.get('MANUFACT_DATE', None) == ''
        assert pig.attrib.get('PIGTYPE', None) == '2'
        assert pig.attrib.get('OBSLTYPE', None) == '5'
        info.fix()
        # fix() fills in / rewrites the PIGPASS attributes.
        pig = info.xml.getroot().find('PIGPASS')[1]
        assert pig.attrib.get('IDTYPEOBJ', None) == '1'
        assert pig.attrib.get('MANUFACT_DATE', None) == '2017'
        assert pig.attrib.get('PIGTYPE', None) == '990004033563'
        assert pig.attrib.get('OBSLTYPE', None) == '2'
        # fix() must also cope with an empty document without raising.
        info = Infotech.from_file(self.fixture('empty.xml'))
        info.fix()
| 3,551 |
Biological_Questions/Sine_Wave_Alignments/Approach_Family_Fits/Optimiser_Fitting_Function.py
|
The-Kristina/CellComp
| 7 |
2024594
|
# TODO: Fit a sine wave onto a SINGLE 3-generational family lineage
# Tutorial here: https://astrofrog.github.io/py4sci/_static/15.%20Fitting%20models%20to%20data.html
import numpy as np
from scipy import optimize
def SineWaveFitting(file, print_stats=False):
    """Fit a 24-hour-period sine wave onto each 3-generational family.

    Curve fitting with 3 free parameters cannot be performed on only
    2 data points, so 2-generational families are skipped (the original
    code crashed on them).  Families with any cell cycle time (CCT)
    outside the 12.0-24.0 hour range are excluded as outliers.

    Sine wave:
        amp * np.sin(2 * np.pi / per * x + shift_h) + shift_v
        with the period fixed at 24 hours.

    Args:
        file (str): path to a tab-separated file whose data rows are
            Cell_ID / CCT / Gener triples for each generation, preceded by
            two header rows starting with 'Gen_1' and 'Cell_ID'.
        print_stats (bool): print per-family fit statistics when True.

    Returns:
        tuple of four parallel lists:
        - cell_IDs_list: [grandparent, parent, child] IDs per family
        - y_data_list: raw CCTs [hours] per family (np.ndarray)
        - params_list: fitted (amp, shift_h, shift_v) per family
        - phase_list: phase to add to each x value to restore a fit with
          positive amplitude and zero horizontal shift
    """
    def _sine_function(x, amp, shift_h, shift_v):
        # Fixed 24-hour period; hoisted out of the loop so the same
        # function object is reused for every fit.
        return amp * np.sin(2 * np.pi / 24 * x + shift_h) + shift_v

    cell_IDs_list = []
    y_data_list = []
    params_list = []
    phase_list = []

    with open(file, "r") as data_file:  # close the handle (was leaked)
        for line in data_file:
            line = line.rstrip().split("\t")
            if line[0] == "Gen_1" or line[0] == "Cell_ID":
                continue  # skip the two header rows
            line = [float(value) if "." in value else int(value) for value in line]

            # Bug fix: 2-generational rows used to fall through and crash
            # (curve_fit on 2 points with 3 parameters, and an IndexError
            # on lineage[2][0] below).  Skip them explicitly.
            if len(line) <= 6:
                continue
            lineage = [line[0:3], line[3:6], line[6:9]]
            y_data = np.array([lineage[0][1], lineage[1][1], lineage[2][1]])

            # Exclude outliers: every CCT must lie within 12-24 hours.
            if any(cct < 12.0 for cct in y_data) or any(cct > 24.0 for cct in y_data):
                continue

            # X-axis: cumulative CCTs starting at an (unknown) phase of 0.
            phase = 0.0
            x_data = [phase]
            for cct in y_data:
                phase += cct
                x_data.append(phase)
            x_data.pop(-1)  # drop the trailing point so len(x) == len(y)
            x_data = np.array(x_data)

            # Fit the sine wave onto the points of this family.
            params, _params_covariance = optimize.curve_fit(
                _sine_function, x_data, y_data)
            amp, shift_h, shift_v = params

            # Convert shift_h into a phase for the normalised wave
            # abs(amp) * np.sin(2*np.pi/24 * x + 0) + shift_v.
            phase = abs((shift_h * 24) / (2 * np.pi))
            if shift_h < 0:
                phase = 24 - phase
            if 6.00 < abs(shift_h) < 12.00:
                phase = 24 - abs(phase)
            if 12.00 < abs(shift_h) < 18.00:
                phase = phase + 48
            if amp < 0:
                phase = phase + 12

            if print_stats is True:
                print("\nData on the y-axis: {}".format(y_data))
                print("Data on the x-axis: {}".format(x_data))
                print("\tParameters:\nAmplitude\t= {}\nPeriod\t\t= 2*pi*24\nShift H:\t= {}\nShift V:\t= {}"
                      .format(round(amp, 2), round(shift_h, 2), round(shift_v, 2)))
                print("Calculated phase = {}\n\tfor 'y = abs(amp) * np.sin(2*np.pi/24 * x + 0) + shift_v'".format(phase))

            # Collect results for this family.
            cell_IDs_list.append([lineage[0][0], lineage[1][0], lineage[2][0]])
            y_data_list.append(y_data)
            params_list.append(params)
            phase_list.append(phase)

    return cell_IDs_list, y_data_list, params_list, phase_list
| 5,057 |
manager/migrations/0001_initial.py
|
walliski/tf-info
| 0 |
2022981
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: creates the Page model for scheduled info screens.

    A Page records a URL to display, how long to show it, and when it is
    visible (time-of-day window, date window and per-weekday flags).
    """

    dependencies = [
        # Page.edited_by points at whatever user model the project swaps in.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Display order within the rotation; managed by code, not the admin form.
                ('order', models.PositiveIntegerField(editable=False, db_index=True)),
                # NOTE(review): the b'...' verbose_name byte strings are a
                # Python 2 leftover; harmless here but str would be cleaner.
                ('url', models.CharField(max_length=90, verbose_name=b'Url of page to display (relative to root).')),
                ('duration', models.PositiveIntegerField(default=10, verbose_name=b'Duration (seconds)')),
                ('title', models.CharField(max_length=100)),
                ('description', models.TextField(null=True, blank=True)),
                ('pause_at', models.DateTimeField(null=True, blank=True)),
                ('hide_top_bar', models.BooleanField(default=False, verbose_name=b'Hide the top bar of the screen')),
                ('hide_bottom_bar', models.BooleanField(default=False, verbose_name=b'Hide the bottom bar of the screen')),
                # Daily visibility window; both default to midnight.
                ('active_time_start', models.TimeField(default=datetime.time(0, 0), verbose_name=b'Time of day to start displaying page.')),
                ('active_time_end', models.TimeField(default=datetime.time(0, 0), verbose_name=b'Time of day to stop displaying page. ')),
                ('active_date_start', models.DateField(default=datetime.date(2016, 4, 4), verbose_name=b'Date to start displayig page.')),
                ('active_date_end', models.DateField(null=True, verbose_name=b'Last date to display page.', blank=True)),
                # Per-weekday visibility flags; shown every day by default.
                ('monday', models.BooleanField(default=True)),
                ('tuesday', models.BooleanField(default=True)),
                ('wednesday', models.BooleanField(default=True)),
                ('thursday', models.BooleanField(default=True)),
                ('friday', models.BooleanField(default=True)),
                ('saturday', models.BooleanField(default=True)),
                ('sunday', models.BooleanField(default=True)),
                ('edited_by', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'ordering': ('order',),
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
| 2,539 |
src/utils.py
|
yewzijian/ChangeDet
| 4 |
2023421
|
import os
def get_eval_list(eval_path):
    """Return the list of images to evaluate for every scene.

    Args:
        eval_path: Folder containing one sub-folder per scene; each scene
            folder must contain 't0', 't1' and 'groundtruth' sub-folders
            holding the image files.  (The docstring previously documented
            a nonexistent ``folder`` argument.)

    Returns:
        dict mapping scene name -> list of (t0, t1, groundtruth) filename
        triples.  Filenames are relative to their respective sub-folders.
    """
    scenes = [entry for entry in os.listdir(eval_path)
              if os.path.isdir(os.path.join(eval_path, entry))]
    eval_data = {}
    for scene in scenes:
        scene_folder = os.path.join(eval_path, scene)
        # Sort so the i-th entry of each folder corresponds to the same frame.
        t0_paths = sorted(os.listdir(os.path.join(scene_folder, 't0')))
        t1_paths = sorted(os.listdir(os.path.join(scene_folder, 't1')))
        gt_paths = sorted(os.listdir(os.path.join(scene_folder, 'groundtruth')))
        # NOTE(review): zip silently truncates if the three folders hold
        # unequal file counts -- assumes upstream guarantees matching sets.
        eval_data[scene] = list(zip(t0_paths, t1_paths, gt_paths))
    return eval_data
| 792 |
tests/test_admin.py
|
jrcastro2/invenio-oauth2server
| 3 |
2024226
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
from flask import Flask, url_for
from flask_admin import Admin
from invenio_db import db
from invenio_oauth2server import InvenioOAuth2Server
from invenio_oauth2server.admin import ClientView, TokenView, \
oauth2server_clients_adminview, oauth2server_tokens_adminview
def test_admin(models_fixture):
    """Test flask-admin interface."""
    app = models_fixture
    InvenioOAuth2Server(app)

    # The admin views are exported as registration-descriptor dicts.
    assert isinstance(oauth2server_tokens_adminview, dict)
    assert isinstance(oauth2server_clients_adminview, dict)
    assert 'view_class' in oauth2server_tokens_adminview
    assert 'view_class' in oauth2server_clients_adminview

    admin = Admin(app, name="Test")

    # NOTE(review): pop() mutates the imported module-level dicts, so this
    # test cannot safely run twice in one process.
    clients_view = oauth2server_clients_adminview.pop('view_class')
    clients_model, clients_session = oauth2server_clients_adminview.pop('args')
    clients_kwargs = oauth2server_clients_adminview.pop('kwargs')
    tokens_view = oauth2server_tokens_adminview.pop('view_class')
    tokens_model, tokens_session = oauth2server_tokens_adminview.pop('args')
    tokens_kwargs = oauth2server_tokens_adminview.pop('kwargs')

    # NOTE(review): the popped *_session values are discarded in favour of
    # db.session -- presumably intentional, to bind to the test database.
    admin.add_view(clients_view(clients_model, db.session,
                                **clients_kwargs))
    admin.add_view(tokens_view(tokens_model, db.session,
                               **tokens_kwargs))

    # Both views must be registered under the 'User Management' category.
    menu_items = {str(item.name): item for item in admin.menu()}
    assert 'User Management' in menu_items
    assert menu_items['User Management'].is_category()
    submenu_items = {
        str(item.name): item for item in
        menu_items['User Management'].get_children()}
    assert 'OAuth Applications' in submenu_items
    assert 'OAuth Application Tokens' in submenu_items

    # Resolve every admin/settings URL once inside a request context.
    with app.test_request_context():
        token_request_url = url_for('token.index_view')
        client_request_url = url_for('client.index_view')
        client_view_url = url_for('invenio_oauth2server_settings.client_view',
                                  client_id='client_test_u1c1')
        client_reset_url = url_for(
            'invenio_oauth2server_settings.client_reset',
            client_id='client_test_u1c1')
        token_view_url = url_for(
            'invenio_oauth2server_settings.token_view',
            token_id='1')
        token_revoke_url = url_for(
            'invenio_oauth2server_settings.token_revoke',
            token_id='1')

    with app.app_context():
        with app.test_client() as client:
            # List and detail views should all render (200 after redirects).
            res = client.get(
                token_request_url,
                follow_redirects=True
            )
            assert res.status_code == 200
            res = client.get(
                client_request_url,
                follow_redirects=True
            )
            assert res.status_code == 200
            res = client.get(
                client_view_url,
                follow_redirects=True
            )
            assert res.status_code == 200
            res = client.post(
                client_reset_url,
                follow_redirects=True
            )
            assert res.status_code == 200
            res = client.get(
                token_view_url,
                follow_redirects=True
            )
            assert res.status_code == 200
            # Revoking via POST is not allowed here: expect 405.
            res = client.post(
                token_revoke_url,
                follow_redirects=True
            )
            assert res.status_code == 405
| 3,619 |
rllib_model.py
|
simplerick/sqlopt
| 1 |
2024644
|
import networkx as nx
from PIL import Image
import io
import gym
from gym.spaces import Box
import torch
import numpy as np
#from ray.rllib.agents.dqn.dqn_torch_model import DQNTorchModel
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork
from ray.rllib.utils.torch_ops import FLOAT_MIN, FLOAT_MAX
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
class CustomMaskedActModel(TorchModelV2, torch.nn.Module):
    """Action-masking RLlib model.

    Runs a FullyConnectedNetwork over the flattened 'real_obs' part of the
    observation and adds log(action_mask) to the logits, so masked-out
    actions receive a very large negative logit.
    """

    def __init__(self, obs_space, action_space, num_outputs,
                 model_config, name, **kw):
        """Build the wrapped FC network over the true observation shape."""
        TorchModelV2.__init__(self, obs_space, action_space,
                              num_outputs, model_config,
                              name,
                              **kw)
        torch.nn.Module.__init__(self)
        true_obs_shape = model_config['custom_model_config']['true_obs_shape']
        # Bug fix: np.int was removed in NumPy 1.24; it was always an alias
        # for the builtin int, which Box accepts directly.
        fc_obs_space = Box(low=0, high=1,
                           shape=true_obs_shape,
                           dtype=int)
        self.fc_net = FullyConnectedNetwork(fc_obs_space,
                                            action_space,
                                            num_outputs,
                                            model_config,
                                            name + 'fc_net')

    def forward(self, input_dict, state, seq_lens):
        """Return masked logits: log(mask) is 0 for allowed actions and
        -inf (clamped to FLOAT_MIN) for disallowed ones."""
        action_mask = input_dict['obs']['action_mask']
        obs = input_dict['obs']['real_obs'].float()
        obs = obs.reshape(obs.shape[0], -1)  # flatten all but the batch dim
        logits, _ = self.fc_net({'obs': obs})
        inf_mask = torch.clamp(torch.log(action_mask),
                               FLOAT_MIN, FLOAT_MAX)
        return logits + inf_mask, state

    def value_function(self):
        """Delegate the value estimate to the wrapped FC network."""
        return self.fc_net.value_function()
| 1,749 |
plantcv/plantcv/print_results.py
|
Howzit123/plantcv
| 1 |
2024534
|
# Print Numerical Data
import json
import os
from plantcv.plantcv import outputs
def print_results(filename):
    """Write the current observations to ``filename`` as JSON.

    If the file already exists its JSON content is loaded and only the
    "observations" key is replaced; otherwise a fresh document with empty
    metadata is created.

    Inputs:
    filename = filename

    :param filename: str
    :return:
    """
    hierarchical_data = {"metadata": {}}
    if os.path.isfile(filename):
        with open(filename, 'r') as data_file:
            hierarchical_data = json.load(data_file)
    # Always overwrite observations with the in-memory results.
    hierarchical_data["observations"] = outputs.observations
    with open(filename, mode='w') as data_file:
        json.dump(hierarchical_data, data_file)
| 584 |
examples/common/python/config/config.py
|
adityasingh177/trusted-compute-framework
| 0 |
2024399
|
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
config.py -- Functions to load configuration files with support for
variable expansion.
NOTE: Functions defined in this file are designed to be run
before logging is enabled.
"""
import os
import sys
import warnings
import logging
import re
import toml
from string import Template
from utility.file_utils import find_file_in_paths
# Public API of this module.
__all__ = ["ConfigurationException",
           "parse_configuration_files",
           "parse_configuration_file",
           "read_config_from_toml"]

# Config loading runs before logging is configured (see module docstring),
# so this logger falls back to default handler behaviour.
logger = logging.getLogger(__name__)

# TCF_HOME is used below to build the default configuration search path;
# fail fast at import time if it is missing.
try:
    TCFHOME = os.environ["TCF_HOME"]
except KeyError:
    raise KeyError("'TCF_HOME' environment variable not set.")
# -----------------------------------------------------------------
class ConfigurationException(Exception):
    """Raised when a configuration file is missing or cannot be parsed.

    Carries a single formatted message naming the offending file and the
    underlying problem.
    """

    def __init__(self, filename, message):
        # Bug fix: the original called super().__init__(self, "..."), which
        # passed the exception object itself as an extra positional
        # argument, garbling str(exc) and exc.args.
        super().__init__(
            "Error in configuration file {0}: {1}".format(filename, message))
# -----------------------------------------------------------------
def parse_configuration_files(cfiles, search_path, variable_map=None):
    """
    Locate and parse a collection of configuration files stored in a
    TOML format.

    :param list(str) cfiles: list of configuration files to load
    :param list(str) search_path: list of directories where the files may be located
    :param dict variable_map: a set of substitutions for variables in the files
    :return dict: an aggregated dictionary of configuration information
    :raises ConfigurationException: if a file is missing or cannot be parsed
    """
    config = {}
    files_found = []
    # Resolve every file first so a missing file fails before any parsing.
    try:
        for cfile in cfiles:
            files_found.append(find_file_in_paths(cfile, search_path))
    except FileNotFoundError as e:
        raise ConfigurationException(e.filename, e.strerror) from e
    for filename in files_found:
        try:
            config.update(parse_configuration_file(filename, variable_map))
        except IOError as detail:
            raise ConfigurationException(filename, "IO error; {0}".format(str(detail)))
        except ValueError as detail:
            raise ConfigurationException(filename, "Value error; {0}".format(str(detail)))
        except NameError as detail:
            raise ConfigurationException(filename, "Name error; {0}".format(str(detail)))
        except KeyError as detail:
            raise ConfigurationException(filename, "Key error; {0}".format(str(detail)))
        except Exception as detail:
            # Bug fix: a bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt and discarded the original traceback;
            # chain the cause instead.
            raise ConfigurationException(filename, "Unknown error") from detail
    return config
# -----------------------------------------------------------------
# -----------------------------------------------------------------
def parse_configuration_file(filename, variable_map):
    """
    Parse a configuration file expanding variable references
    using the Python Template library (variables are $var format)

    :param string filename: name of the configuration file
    :param dict variable_map: dictionary of expansions to use
    :returns dict: dictionary of configuration information
    """
    comment_pattern = re.compile('##.*$')
    with open(filename) as config_file:
        raw_lines = config_file.readlines()
    # Strip '##' comments from each line, then flatten to a single string.
    text = "".join(re.sub(comment_pattern, '', raw_line) + ' '
                   for raw_line in raw_lines)
    if variable_map:
        text = Template(text).substitute(variable_map)
    return toml.loads(text)
# -----------------------------------------------------------------
def read_config_from_toml(input_file, config_name=None, confpaths=None):
    """
    Function to read toml file and returns the toml content as a list

    Parameters:
    - input_file is any toml file which need to be read
    - config_name is particular configuration to pull
    - confpaths is the directory structure in which the toml file exists;
      defaults to the current directory and $TCF_HOME/config
    """
    # Bug fix: the default search path used to be a mutable default
    # argument, evaluated once at import time; build it per call instead.
    if confpaths is None:
        confpaths = [".", TCFHOME + "/" + "config"]
    conf_files = [input_file]
    config = parse_configuration_files(conf_files, confpaths)
    if config_name is None:
        return config
    result = config.get(config_name)
    if result is None:
        logger.error("%s is missing in toml file %s", config_name, input_file)
    return result
| 4,652 |
estoque.py
|
victorhugodostos/Telsa-System
| 2 |
2023216
|
from tkinter import *
from tkinter import ttk
import sqlite3
import menu as menupy
class Estoque():
    """Stock ("Estoque") window.

    Lists every product registered in database/produtos.db in a read-only
    text area and offers an editor sub-window to change a product's name,
    description and sale price.
    """

    def __init__(self):
        # Main stock window: fixed 1200x600, not resizable.
        self.EstoqueWindow = Tk()
        self.EstoqueWindow.title("TELSA SYSTEM - ESTOQUE")
        self.EstoqueWindow.minsize(width=1200,height=600)
        self.EstoqueWindow.resizable(False,False)
        self.EstoqueCanvas = Canvas(self.EstoqueWindow)
        self.EstoqueCanvas.pack(expand=1,fill=BOTH)
        self.Background = PhotoImage(file="image/menuback.png")
        self.EstoqueCanvas.create_image(0,0,image=self.Background,anchor=NW)
        ## TITLE
        self.Estoquetitle = self.EstoqueCanvas.create_text(600,125,text="ESTOQUE",font=("Times New Roman",48,"bold"),fill="white")
        self.EstoqueCanvas.create_line(0,150,1200,150,width=3,fill="white")
        self.Estoquetitle2 = self.EstoqueCanvas.create_text(600,230,text="PRODUTOS CADASTRADOS",font=("Times New Roman",18,"bold"),fill="white")
        ## BUTTONS AND LABELS
        self.txt_estoque = self.EstoqueCanvas.create_text(400,180,text="TODOS OS PRODUTOS:",font=("Times New Roman",12,"bold"),fill="white")
        self.select_estoque = ttk.Combobox(self.EstoqueCanvas,values=[],width=20,state='readonly')
        self.EstoqueCanvas.create_window(600,180,window=self.select_estoque)

        def Voltar():
            # Close this window and rebuild the main menu in its place.
            self.EstoqueWindow.destroy()
            menupy.Menu.__init__(self)

        def Editar():
            # Secondary window for editing a single product.
            self.EditarWindow = Tk()
            self.EditarWindow.title("TELSA SYSTEM - EDITAR PRODUTO")
            self.EditarWindow.minsize(width=500, height=500)
            self.EditarWindow.resizable(False,False)
            self.EditarCanvas = Canvas(self.EditarWindow,bg="black")
            self.EditarCanvas.pack(expand=1,fill=BOTH)
            self.selecione_txt = self.EditarCanvas.create_text(250,30,text="SELECIONE O PRODUTO:",font=("Times New Roman",12,"bold"),fill="white")
            self.produto_combobox = ttk.Combobox(self.EditarCanvas,values=[],width=20)
            self.EditarCanvas.create_window(250,60,window=self.produto_combobox)
            # Reuse the product names loaded for the main combobox below.
            self.produto_combobox['values'] = self.produtos
            self.busca_lista = []

            def Buscar():
                # Load the selected product's row into the entry fields.
                self.nome_entry.delete(0,END)
                self.descricao_entry.delete(0,END)
                self.valorv_entry.delete(0,END)
                self.connect = sqlite3.connect("database/produtos.db")
                self.cursor = self.connect.cursor()
                self.cursor.execute("SELECT * FROM produtos WHERE nome=?",(self.produto_combobox.get(),))
                for names in self.cursor.fetchone():
                    self.busca_lista.append(names)
                self.connect.close()
                # Row layout: [0]=id, [1]=name, [2]=description, [3]=price.
                self.nome_entry.insert(0,str(self.busca_lista[1]))
                self.descricao_entry.insert(0,str(self.busca_lista[2]))
                self.valorv_entry.insert(0,str(self.busca_lista[3]))
                self.busca_lista.clear()

            self.btn_buscar = Button(self.EditarCanvas,text="BUSCAR",font=("Impact",10,"bold"),fg="black",bg="red",activebackground="#B22222",width=10,command=Buscar)
            self.EditarCanvas.create_window(250,90,window=self.btn_buscar)
            self.nome_txt = self.EditarCanvas.create_text(250,120,text="NOME:",font=("Times New Roman",12,"bold"),fill="white")
            self.nome_entry = Entry(self.EditarCanvas,width=20,justify="center")
            self.EditarCanvas.create_window(250,150,window=self.nome_entry)
            self.descricao_txt = self.EditarCanvas.create_text(250,180,text="DESCRIÇÃO:",font=("Times New Roman",12,"bold"),fill="white")
            self.descricao_entry = Entry(self.EditarCanvas,width=20,justify="center")
            self.EditarCanvas.create_window(250,210,window=self.descricao_entry)
            self.valorv_txt = self.EditarCanvas.create_text(250,240,text="VALOR DE VENDA:",font=("Times New Roman",12,"bold"),fill="white")
            self.valorv_entry = Entry(self.EditarCanvas,width=20,justify="center")
            self.EditarCanvas.create_window(250,270,window=self.valorv_entry)

            def Salvar():
                # Persist the edited fields, then rebuild the stock window
                # so the listing reflects the change.
                try:
                    self.connect = sqlite3.connect("database/produtos.db")
                    self.cursor = self.connect.cursor()
                    self.cursor.execute("UPDATE produtos SET nome=?,descricao=?,valor=? WHERE nome=?",
                        (self.nome_entry.get(),self.descricao_entry.get(),self.valorv_entry.get(),self.produto_combobox.get(),))
                    self.connect.commit()
                    self.connect.close()
                    self.EditarWindow.destroy()
                    self.EstoqueWindow.destroy()
                    Estoque()
                except:
                    # Generic error dialog.
                    # NOTE(review): the bare except hides the real failure;
                    # it should at least catch sqlite3.Error specifically.
                    self.erro = Tk()
                    self.erro.title("ERRO - TENTE NOVAMENTE")
                    self.erro.minsize(width=300,height=300)
                    self.erro.resizable(False,False)
                    self.erro_canvas = Canvas(self.erro)
                    self.erro_canvas.pack(expand=1,fill=BOTH)
                    self.erro_canvas.create_text(150,150,text="ERRO INESPERADO, TENTE NOVAMENTE!")
                    def OK():
                        self.erro.destroy()
                    self.btn_OK = Button(self.erro_canvas,text="OK")
                    # NOTE(review): 'command=OK' is passed to create_window,
                    # not to the Button above -- the OK button most likely
                    # does nothing; the command belongs on the Button.
                    self.erro_canvas.create_window(150,200,window=self.btn_OK,command=OK)
                    self.erro.mainloop()

            def Cancelar():
                # Abandon the edit without saving.
                self.EditarWindow.destroy()

            self.btn_salvando = Button(self.EditarCanvas,text="SALVAR",font=("Impact",10,"bold"),fg="black",bg="red",activebackground="#B22222",width=10,command=Salvar)
            self.EditarCanvas.create_window(180,450,window=self.btn_salvando)
            self.btn_cancelar = Button(self.EditarCanvas,text="CANCELAR",font=("Impact",10,"bold"),fg="black",bg="red",activebackground="#B22222",width=10,command=Cancelar)
            self.EditarCanvas.create_window(320,450,window=self.btn_cancelar)
            self.EditarWindow.mainloop()

        self.voltar = Button(self.EstoqueCanvas,text="VOLTAR",font=("Arial Black",10,"bold"),fg="black",bg="blue",activebackground="#00008B",width=10,command=Voltar)
        self.EstoqueCanvas.create_window(1100,550,window=self.voltar)
        self.editar = Button(self.EstoqueCanvas,text="EDITAR",font=("Arial Black",10,"bold"),fg="black",bg="blue",activebackground="#00008B",width=10,command=Editar)
        self.EstoqueCanvas.create_window(1100,500,window=self.editar)
        ## DATABASE
        self.connect = sqlite3.connect("database/produtos.db")
        self.cursor = self.connect.cursor()
        self.cursor.execute("SELECT id FROM produtos")
        self.Ids = []
        for i in self.cursor.fetchall():
            self.Ids.append(i)
        self.cursor.execute("SELECT nome FROM produtos")
        self.loop = 0
        self.produtos = []
        # Drain one name row per iteration until every id is covered.
        while self.loop<len(self.Ids):
            for j in self.cursor.fetchone():
                self.produtos.append(j)
            self.loop += 1
        self.select_estoque['values'] = self.produtos
        ## RECORDS AREA SCROLLBAR
        self.scroll = Scrollbar(self.EstoqueCanvas,bg="black")
        self.EstoqueCanvas.create_window(935,415,window=self.scroll,width=15,height=348)
        ## RECORDS AREA
        self.registros = Text(self.EstoqueCanvas,yscrollcommand=self.scroll.set,bg="black",fg="white")
        self.registros.config(width=80,height=20)
        self.scroll.config(command=self.registros.yview)
        self.EstoqueCanvas.create_window(600,415,window=self.registros)
        self.numero_voltas = 0
        self.id_inicial = 1
        self.posicao_inicial = 0
        self.produtos_cadastrados = []
        # One formatted entry per product id.
        # NOTE(review): assumes ids are contiguous starting at 1; a deleted
        # row would make fetchone() return None and crash the loop below.
        while self.numero_voltas<len(self.Ids):
            self.cursor.execute("SELECT * FROM produtos WHERE id=?",(self.id_inicial,))
            for produto11 in self.cursor.fetchone():
                self.produtos_cadastrados.append(produto11)
            self.registros.insert(END, " CÓDIGO - "+str(self.produtos_cadastrados[0])+"\n")
            self.registros.insert(END, " PRODUTO - "+str(self.produtos_cadastrados[1])+"\n")
            self.registros.insert(END, " DESCRIÇÃO - "+str(self.produtos_cadastrados[2])+"\n")
            self.Conversao2 = str(self.produtos_cadastrados[3])
            # Display price with a decimal comma (pt-BR convention).
            self.registros.insert(END, " VALOR - "+str(self.Conversao2.replace(".",","))+"\n")
            self.registros.insert(END, "______________________________________________________________________________""\n\n")
            self.posicao_inicial +=1
            self.id_inicial+=1
            self.numero_voltas+=1
            self.produtos_cadastrados.clear()
        self.connect.close()
        self.EstoqueWindow.mainloop()
#Estoque()
| 9,161 |
src/flake8_absolute_import/core.py
|
bskinn/flake8-absolute-import
| 10 |
2023033
|
r"""*Main implementation file for* ``flake8-absolute-import``.
flake8 plugin to require absolute imports
**Author**
<NAME> (<EMAIL>)
**File Created**
6 Sep 2019
**Copyright**
\(c) <NAME> 2019-2021
**Source Repository**
http://github.com/bskinn/flake8-absolute-import
**License**
The MIT License; see |license_txt|_ for full license terms
**Members**
"""
import ast
from flake8_absolute_import.version import __version__
ABS101 = "ABS101 Relative import found"


class Visitor(ast.NodeVisitor):
    """AST visitor that records every relative ``from ... import``."""

    def __init__(self):
        """Start with an empty list of (line, col, message) errors."""
        self.errors = []

    def visit_ImportFrom(self, node):  # noqa: N802
        """Record an ABS101 error tuple for each relative import node."""
        if node.level:  # level > 0 means a leading-dot (relative) import
            self.errors.append((node.lineno, node.col_offset, ABS101))
        self.generic_visit(node)
class Plugin:
    """Core plugin class for flake8-absolute-import."""

    name = "flake8-absolute-import"
    version = __version__

    def __init__(self, tree):
        """Store the parsed module AST for later traversal."""
        self._tree = tree

    def run(self):
        """Yield one flake8 error tuple per relative import in the tree."""
        visitor = Visitor()
        visitor.visit(self._tree)
        yield from ((line, col, msg, type(self))
                    for line, col, msg in visitor.errors)
| 1,398 |
legal-api/tests/unit/services/filings/validations/annual_report/test_validation.py
|
rstens/lear
| 1 |
2024288
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test suite to ensure the Annual Report is validated correctly."""
import copy
from datetime import date
import datedelta
import pytest
from freezegun import freeze_time
from registry_schemas.example_data import ANNUAL_REPORT
from legal_api.models import Business
from legal_api.services.filings.validations.annual_report import validate
# from tests.unit.models import factory_business, factory_business_mailing_address, factory_filing
@pytest.mark.parametrize(
    'test_name, now, ar_date, agm_date, expected_code, expected_msg',
    [('SUCCESS', date(2020, 9, 17), date(2020, 8, 5), date(2020, 7, 1), None, None),
     ])
def test_validate(session, test_name, now, ar_date, agm_date,
                  expected_code, expected_msg):  # pylint: disable=too-many-arguments
    """Assert that a basic AR can be validated."""
    # setup: a business founded exactly one year before the AR date.
    identifier = 'CP1234567'
    founding_date = ar_date - datedelta.YEAR
    business = Business(identifier=identifier, last_ledger_timestamp=founding_date)
    business.founding_date = founding_date
    # Build the filing from the schema example, pointing at this business.
    ar = copy.deepcopy(ANNUAL_REPORT)
    ar['filing']['business']['identifier'] = identifier
    ar['filing']['annualReport']['annualReportDate'] = ar_date.isoformat()
    ar['filing']['annualReport']['annualGeneralMeetingDate'] = agm_date.isoformat()
    # perform test with "today" frozen to the parametrized date.
    with freeze_time(now):
        err = validate(business, ar)
    # validate outcomes: the SUCCESS case produces no validation error.
    assert not err
| 2,020 |
pyboids/app/simulation.py
|
mancaf/pyboids
| 12 |
2023008
|
"""Simulation classes."""
import pygame
from .flock import Flock
from . import params
from . import gui
from time import time
def callback(*args, **kwargs):
    """Make a no-argument callback from a function.

    Freezes ``args``/``kwargs`` at decoration time, so the returned
    function can be invoked with no arguments later (e.g. as a GUI action).
    """
    def decorate(func):
        def frozen_call():
            return func(*args, **kwargs)
        return frozen_call
    return decorate
class Simulation:
"""Represent a simulation of a flock."""
    def __init__(self, screen):
        """Set up simulation state drawing onto the given pygame screen."""
        self.running = True
        self.screen = screen
        self.clock = pygame.time.Clock()
        self.flock = Flock()
        # Sprites updated each frame vs. sprites drawn each frame.
        self.to_update = pygame.sprite.Group()
        self.to_display = pygame.sprite.Group()
        # At most one transient on-screen message at a time.
        self.temp_message = pygame.sprite.GroupSingle()
        self.fps_message = gui.FPSMessage(pos=(11, 0.5), text="FPS: ...")
    def add_element(self, pos):
        """Add a flock element (boid or obstacle, per ``flock.add_kind``)
        at *pos* and show a transient message with the updated count."""
        self.flock.add_element(pos)
        # Replace any message currently on screen.
        if self.temp_message:
            self.temp_message.sprite.kill()
        msg = "Number of "
        if "boid" in self.flock.add_kind:
            msg += "boids: {}".format(len(self.flock.boids))
        else:
            msg += "obstacles: {}".format(len(self.flock.obstacles))
        self.temp_message.add(
            gui.TempMessage(pos=(6, 1), text=msg))
    def toggle_behaviour(self, behaviour):
        """Flip the on/off flag of one steering behaviour."""
        self.flock.behaviours[behaviour] = not self.flock.behaviours[behaviour]
    def toggle_debug(self):
        """Toggle the global debug-drawing flag."""
        params.DEBUG = not params.DEBUG
    def update(self, motion_event, click_event):
        """Forward this frame's mouse events to every updatable sprite."""
        self.to_update.update(motion_event, click_event)
    def display(self):
        """Draw all displayable sprites; in debug mode also outline the
        simulation bounding box in turquoise."""
        for sprite in self.to_display:
            sprite.display(self.screen)
        if params.DEBUG:
            # 1-pixel rectangle inset by BOX_MARGIN on every side.
            pygame.draw.polygon(
                self.screen, pygame.Color("turquoise"),
                [
                    (params.BOX_MARGIN, params.BOX_MARGIN),
                    (params.SCREEN_WIDTH - params.BOX_MARGIN,
                     params.BOX_MARGIN),
                    (params.SCREEN_WIDTH - params.BOX_MARGIN,
                     params.SCREEN_HEIGHT - params.BOX_MARGIN),
                    (params.BOX_MARGIN,
                     params.SCREEN_HEIGHT - params.BOX_MARGIN),
                ], 1)
def init_run(self):
# add 40 boids to the flock
# for x in range(1, 11):
# for y in range(3, 7):
# self.flock.add_element(utils.grid_to_px((x, y)))
self.temp_message.add(gui.TempMessage(
pos=(6, 4.5),
text="Add entities and get steering !",
font=params.H3_FONT)
)
self.to_update = pygame.sprite.Group(
self.flock,
gui.ToggleButton(
pos=(0.2, 8),
text="Entity : ",
labels=self.flock.kinds,
init_label=self.flock.add_kind,
action=lambda: self.flock.switch_element()),
gui.ToggleButton(
pos=(0.2, 8.5),
text="ADD ENTITY",
action=lambda: self.add_element(params.SCREEN_CENTER)),
gui.ToggleButton(
pos=(8.5, 8.5),
text="Show forces, velocities and frame: ",
labels="Yes No".split(),
init_label="No Yes".split()[params.DEBUG],
action=lambda: self.toggle_debug()),
)
# add behaviour toggle buttons
for k, behaviour in enumerate(self.flock.behaviours):
# v decorate to prevent a bug
@callback(self, behaviour)
def do_action(self, behaviour):
self.toggle_behaviour(behaviour)
# ^
self.to_update.add(gui.ToggleButton(
pos=(0.2, 0.2 + 0.3 * (1 + k)),
text="{}: ".format(behaviour.title()),
labels="off on".split(),
init_label="off on".split()[self.flock.behaviours[behaviour]],
action=do_action)
)
self.to_display = pygame.sprite.Group(
self.to_update,
)
def run(self):
key_to_function = {
pygame.K_ESCAPE:
lambda self, event: setattr(self, "running", False),
}
button_to_function = {
3: lambda self, event: self.add_element(event.pos),
}
self.init_run()
dt = 0
while self.running:
self.clock.tick(params.FPS)
t = time()
motion_event, click_event = None, None
self.screen.fill(params.SIMULATION_BACKGROUND)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
return "PYGAME_QUIT"
elif event.type == pygame.KEYDOWN and \
event.key in key_to_function:
key_to_function[event.key](self, event)
elif event.type == pygame.MOUSEBUTTONDOWN:
click_event = event
if event.button in button_to_function:
button_to_function[event.button](self, event)
elif event.type == pygame.MOUSEMOTION:
motion_event = event
self.update(motion_event, click_event)
self.fps_message.update(dt)
self.temp_message.update(motion_event, click_event)
self.display()
self.fps_message.display(self.screen)
if self.temp_message:
self.temp_message.sprite.display(self.screen)
pygame.display.flip()
dt = time() - t
def quit(self):
self.running = False
| 5,617 |
splendor.py
|
akulakov/learnprogramming
| 0 |
2024018
|
#!/usr/bin/env python
import random
from random import randint
from random import choice as randchoice
from helpers import cjoin, objrepr, sjoin
# The five gem colors used for cards and tokens.
colors = "red cyan green white black".split()
# Token pool: one of each color plus five gold (joker) tokens.
tokens = []
for c in colors:
    tokens.append(c)
tokens.extend(["gold"]*5)
class Card:
    """A development card: a color, a token cost and a point value."""

    def __init__(self, color, cost, points):
        self.color = color
        self.cost = cost
        self.points = points

    def __repr__(self):
        # Compact cost display: first letter of the color + amount, e.g. "r3".
        cost = ''.join("%s%s" % (entry[0][0], entry[1]) for entry in self.cost)
        return objrepr(self.color, self.points, cost)
# deck level -> (target token cost of its cards, victory points per card)
levels = {
    # deck: cost, points
    0: (4, 0),
    1: (7, 2),
    2: (9, 4),
    3: (11, 6),
}
def gen_sample(target_sums):
    """Generate a random cost vector (one value in 0..4 per color) whose
    sum is contained in `target_sums`.

    `target_sums` may be a single int or any container supporting ``in``;
    a bare int is treated as the one-element set, which keeps call sites
    like ``gen_sample(4 + n * 2)`` working (previously a TypeError).

    Sampling is with replacement (`random.choices`): the old
    ``random.sample(range(5), len(colors))`` drew 5 distinct values out of
    0..4, so the sum was always 10 and any other target looped forever.
    """
    if isinstance(target_sums, int):
        target_sums = {target_sums}
    ssum = points = None
    while ssum not in target_sums:
        points = random.choices(range(5), k=len(colors))
        ssum = sum(points)
    return points
# Build 4 decks of 15 cards; card points come from the deck's level entry.
decks = []
for n in range(4):
    deck = []
    for m in range(15):
        cost = gen_sample(4+n*2)
        # Bug fix: `color` was previously an undefined name (NameError);
        # each card gets a random color from the five token colors.
        deck.append(Card(randchoice(colors), cost, levels[n][1]))
    decks.append(deck)
class Player:
    """A named player holding a collection of cards."""

    def __init__(self, name):
        self.name = name
        self.cards = []

    def __repr__(self):
        return "%s %s" % (self.name, cjoin(self.cards))
# Two players and a dump of every generated deck (debug output).
p1, p2 = Player('a'), Player('b')
for deck in decks:
    print(deck)
    print()
| 1,484 |
linreg.py
|
csmsoftware/phnmnl-scalability
| 0 |
2024413
|
# make sure you setup the pythonpath
# export PYTHONPATH=./
# this is how you run several workers in parallel
# time luigi --module linreg LinRegAllDatasets --scheduler-host luigi-service.default --workers 100
import luigi
from luigi.contrib.kubernetes import KubernetesJobTask
import glob
class LinRegTask(KubernetesJobTask):
    """Kubernetes job that runs the `nsadawi/lin-reg` container on one dataset file."""

    # dataset file as a luigi parameter
    datasetFile = luigi.Parameter()
    name = "lin-reg" # give the task a name
    max_retrials = 1 # how many times to retry if it fails

    @property
    def spec_schema(self):
        """Kubernetes pod spec for this job (container, resources, volumes)."""
        return { # container specifications
            "containers": [{
                "name": self.name,
                "image": "nsadawi/lin-reg",# container on docker hub
                "args": [# the input file we pass as an argument
                    self.datasetFile,
                ],
                # resources allocated to each task
                "resources": {
                    "requests": {
                        "memory": "1G",
                        "cpu": "1"
                    },# do not exceed these limits
                    "limits": {
                        "memory": "1G",
                        "cpu": "1"
                    }
                },
                # specifications of volume mounts
                "volumeMounts": [{
                    "mountPath": "/work", # inside the container
                    "name": "shared-volume",
                    "subPath": "jupyter/LinReg" # on host .. i.e. where we run this script
                }]
            }],
            # volume name and specifications under PhenoMeNal
            "volumes": [{
                "name": "shared-volume",
                "persistentVolumeClaim": {
                    "claimName": "galaxy-pvc"
                }
            }]
        }

    # this tells luigi the task is finished when this file is created
    def output(self):
        """Completion marker: `<datasetFile>.out` written by the container."""
        filename = self.datasetFile + ".out"
        return luigi.LocalTarget(filename)
# here we loop through all csv files and create a task for each one
class LinRegAllDatasets(luigi.WrapperTask):
    """Wrapper task fanning out one LinRegTask per randomised CSV dataset."""

    def requires(self):
        # One kubernetes job per dataset file found on disk.
        for path in glob.glob("randomised-datasets/*.csv"):
            yield LinRegTask(datasetFile=path)
| 2,287 |
modern_greek_inflexion/exceptions.py
|
PicusZeus/modern-greek-inflexion
| 0 |
2024698
|
class NotInGreekException(Exception):
    """Raised when the input is written in non-Greek characters."""
class NotLegalAdjectiveException(Exception):
    """Raised when the input is not recognized as a possible adjective."""
class NotLegalVerbException(Exception):
    """Raised when the input is not recognized as a possible verb form."""
| 417 |
base/templatetags/event_tags.py
|
Sylvestre67/wagtail_alsace
| 0 |
2024868
|
from django import template
from events.models import EventPage
register = template.Library()
# https://docs.djangoproject.com/en/1.9/howto/custom-template-tags/
@register.inclusion_tag('tags/events_grid.html', takes_context=True)
def events_grid(context):
    """Inject upcoming events, soonest first, into the template context."""
    context['events'] = EventPage.objects.future().order_by('event__date')
    return context
| 374 |
indel_analysis/microhomology/run_all_collect_mh_frequencies_by_len.py
|
kaskamal/SelfTarget
| 20 |
2022991
|
import io, sys, os
from selftarget.util import runSubdir
def runAllCollectMHFrequenciesByLen(input_dir = 'mh_indel_frequencies', highdir='.', scriptloc='.'):
    """Queue one collect_mh_frequencies_by_len job per sample directory and
    microhomology length (2..15).

    input_dir: directory whose subdirectories each hold per-sample data.
    highdir:   passed through to the worker script as its second extra arg.
    scriptloc: directory containing collect_mh_frequencies_by_len.py.
    """
    idx = 0
    dirnames = [input_dir + '/' + x for x in os.listdir(input_dir)]
    for dirname in dirnames:
        # Hoisted out of the mh_len loop: the directory listing does not
        # depend on mh_len, so list it once instead of 14 times.
        subdirs = [dirname + '/' + x for x in os.listdir(dirname)]
        for mh_len in range(2, 16):
            idx = runSubdir(idx, subdirs, '%s MH Len=%d' % (dirname,mh_len), scriptloc + '/collect_mh_frequencies_by_len.py', 'out_mh_by_len', __file__, extra_args='%d %s ' % (mh_len, highdir))
if __name__ == '__main__':
    # Run directly with the default directories.
    runAllCollectMHFrequenciesByLen()
| 639 |
src/event_group_rule.py
|
hanxuzjuckc/gsyslyzer
| 1 |
2023810
|
""" Module for defining a rule to group together events """
class EventGroupRule:
    """Rule describing which event tags make up an event group.

    Attributes:
        tag: label for the rule.
        trigger_event_tags: LogEvent tags that define the event group.
        context_event_tags: LogEvent tags that supply context to the
            group; defaults to an empty list when not provided.
    """

    def __init__(self, tag, trigger_event_tags, context_event_tags=None):
        self.tag = tag
        self.trigger_event_tags = trigger_event_tags
        self.context_event_tags = [] if context_event_tags is None else context_event_tags
| 901 |
src/pybaum/equality.py
|
OpenSourceEconomics/pybaum
| 10 |
2024893
|
"""Functions to check equality of pytree leaves."""
from pybaum.config import IS_NUMPY_INSTALLED
from pybaum.config import IS_PANDAS_INSTALLED
if IS_NUMPY_INSTALLED:
import numpy as np
if IS_PANDAS_INSTALLED:
import pandas as pd
# Maps a leaf type to a predicate deciding equality of two leaves of that
# type; only populated for libraries that are actually installed.
EQUALITY_CHECKERS = {}
if IS_NUMPY_INSTALLED:
    # Arrays compare elementwise; collapse to a single bool with .all().
    EQUALITY_CHECKERS[np.ndarray] = lambda a, b: bool((a == b).all())
if IS_PANDAS_INSTALLED:
    # pandas .equals also treats NaNs in matching positions as equal.
    EQUALITY_CHECKERS[pd.Series] = lambda a, b: a.equals(b)
    EQUALITY_CHECKERS[pd.DataFrame] = lambda a, b: a.equals(b)
| 510 |
pskgu_bot/utils/working_with_time.py
|
mrgick/pskgu_bot
| 14 |
2024224
|
"""
Файл вспомогательных функций
"""
from pskgu_bot import Config
from datetime import datetime, timedelta
# Weekday index (datetime.weekday(): 0 = Monday) -> Russian day name.
DAYS_NAME = {
    0: "Понедельник",
    1: "Вторник",
    2: "Среда",
    3: "Четверг",
    4: "Пятница",
    5: "Суббота",
    6: "Воскресенье"
}
def date_to_str(date, full=False):
    """Render a datetime as a string without the timezone part.

    With ``full=True`` the whole ``str(date)`` is returned; otherwise only
    the date portion (everything before the first space) is kept.
    """
    text = str(date)
    return text if full else text.split(" ")[0]
def get_today(n=0, full=False):
    """Return today's date as a string, shifted by ``n`` days."""
    shifted = datetime.now() + timedelta(days=n)
    return date_to_str(shifted, full)
def get_week_days(n=0):
    """Return the dates of Monday through Saturday of the current week.

    ``n`` shifts the result by whole weeks (e.g. ``n=1`` is next week).
    """
    now = datetime.now()
    monday = now + timedelta(days=-now.weekday(), weeks=n)
    return [date_to_str(monday + timedelta(days=offset)) for offset in range(6)]
def get_name_of_day(str_date):
    """Return the (Russian) weekday name for an ISO-format date string."""
    weekday = datetime.fromisoformat(str_date).weekday()
    return DAYS_NAME.get(weekday)
def compare_str_date(date1, date2):
    """Compare two ISO-format date strings; True when date1 >= date2."""
    first = datetime.fromisoformat(date1)
    second = datetime.fromisoformat(date2)
    return first >= second
def get_monday(date):
    """Return the Monday of the week containing ISO date string ``date``."""
    parsed = datetime.fromisoformat(date)
    monday = parsed - timedelta(days=parsed.weekday())
    return date_to_str(date=monday)
| 1,629 |
Prog/Python/Architecture_Patterns_with_Python/cap01/xmodel.py
|
unimauro/Courses
| 1 |
2024798
|
def allocate(line: OrderLine, batches: List[Batch]) -> str:
    """Allocate `line` to the earliest batch that can take it; return its reference.

    NOTE(review): raises a bare StopIteration when no batch can allocate.
    The OutOfStock-raising variant later in this file is the finished form.
    """
    batch = next(
        b for b in sorted(batches) if b.can_allocate(line)
    )
    batch.allocate(line)
    return batch.reference
class Batch:
    """Partial Batch showing ordering only: earlier ETA sorts first,
    and a batch with no ETA (already in stock) sorts before any dated one."""

    def __gt__(self, other):
        # In-stock (eta is None) never sorts after another batch.
        if self.eta is None:
            return False
        # A dated batch always follows an in-stock one; otherwise compare ETAs.
        return other.eta is None or self.eta > other.eta
def test_raises_out_of_stock_exception_if_cannot_allocate():
    """Allocating past a batch's capacity must raise OutOfStock.

    NOTE(review): relies on `pytest`, `today`, `OrderLine` and the full
    `Batch` constructor, which are defined elsewhere (book example).
    """
    batch = Batch('batch1', 'SMALL-FORK', 10, eta=today)
    allocate(OrderLine('order1', 'SMALL-FORK', 10), [batch])
    with pytest.raises(OutOfStock, match='SMALL-FORK'):
        allocate(OrderLine('order2', 'SMALL-FORK', 1), [batch])
class OutOfStock(Exception):
    """Raised when no batch can satisfy an order line's SKU."""
def allocate(line: OrderLine, batches: List[Batch]) -> str:
    """Allocate `line` to the earliest suitable batch and return its reference.

    Raises:
        OutOfStock: when no batch can allocate the line.

    The transcription in this file was syntactically broken (a literal
    `...` and an unclosed parenthesis); this is the completed form.
    """
    try:
        batch = next(
            b for b in sorted(batches) if b.can_allocate(line)
        )
    except StopIteration:
        raise OutOfStock(f'Out of stock for sku {line.sku}')
    batch.allocate(line)
    return batch.reference
| 923 |
datasets.py
|
WangFeng18/dino
| 0 |
2024639
|
import numpy as np
import torch
import torch.utils.data as data
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from imagenet_lmdb import ImageNetLMDB as lmdb
from PIL import Image
from PIL import ImageFile
import random
import os
import glob
import torchvision
from torchvision.datasets.folder import default_loader
from collections import defaultdict
ImageFile.LOAD_TRUNCATED_IMAGES = True
class ImageNetLMDB(lmdb):
    """ImageNet LMDB dataset that applies an augmentation callable per image."""

    def __init__(self, root, list_file, aug):
        # aug: callable applied to each decoded image; presumably returns
        # one or several augmented views -- confirm against the training loop.
        super(ImageNetLMDB, self).__init__(root, list_file, ignore_label=False)
        self.aug = aug

    def __getitem__(self, index):
        """Return (augmented image(s), target label) for sample `index`."""
        img, target = super(ImageNetLMDB, self).__getitem__(index)
        imgs = self.aug(img)
        return imgs, target
| 764 |
book_sites/book/migrations/0018_book_photo_url.py
|
Michelle-Hung/Books
| 0 |
2024404
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-07 06:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add `Book.photo_url` (CharField, empty default)."""

    dependencies = [
        ('book', '0017_auto_20180305_1359'),
    ]

    operations = [
        migrations.AddField(
            model_name='book',
            name='photo_url',
            field=models.CharField(default='', max_length=200),
        ),
    ]
| 460 |
feature_extractor/ScatteringFeat1D.py
|
ussaema/CapsGeR
| 1 |
2023154
|
from . import FeatureExtractor
from kymatio import Scattering1D
import torch
import numpy as np
class ScatteringFeat1D(FeatureExtractor):
    """
    Refer to (as well papers citing this paper):
    "<NAME> and <NAME>. Invariant scattering convolution networks.IEEE Transactions on Pattern Analysis
    and Machine Intelligence, 2013."
    Computes scattering transform features, which have useful mathematical properties for classification
    """
    # (1, T) -> (1, P, T/2**J)
    # Where P ~= 1 + J Q + J (J-1) Q / 2.
    #
    J = 5  # Account for translation up to 2^6 samples
    T = 200  # Number of samples per feature vector
    Q = 2  # Resolution per octave

    def __init__(self, dataset_name):
        self.dataset_name = dataset_name
        self.scattering_transform = Scattering1D(self.J, self.T, self.Q)

    def extract_feature_point(self, raw_samples):
        """Return a flat 1-D scattering feature vector for one sample window.

        NOTE(review): np.reshape does not transpose -- this assumes
        `raw_samples` of shape (T, C) is laid out so (1, C, T) is the
        correct view; verify against the callers.
        """
        shape = raw_samples.shape
        raw_samples = np.reshape(raw_samples, (1, shape[1], shape[0]))
        raw_samples = torch.from_numpy(raw_samples).float()
        Sx = self.scattering_transform.forward(raw_samples)
        Sx = Sx.numpy()
        # Flatten (1, P, T') scattering coefficients into one vector.
        Sx = np.reshape(Sx, (Sx.shape[1] * Sx.shape[2] * Sx.shape[3]))
        return Sx

    def global_setup(self, all_raw_samples):
        # No dataset-wide fitting is needed for the scattering transform.
        pass
| 1,313 |
seq_exp/test/test_entrez.py
|
johnbradley/seq_explorer
| 0 |
2024432
|
import os
import seq_exp.seq_exp as seq_exp
import unittest
import tempfile
import ast
class EntrezTestCase(unittest.TestCase):
    """Integration tests for the /entrez endpoints (hit NCBI over the network)."""

    def setUp(self):
        # Fresh in-memory database and Flask test client per test.
        realapp, db = seq_exp.setup_api_and_db('sqlite:///:memory:')
        self.realapp = realapp
        self.db = db
        self.db_fd, realapp.config['DATABASE'] = tempfile.mkstemp()
        realapp.config['TESTING'] = True
        self.app = realapp.test_client()
        seq_exp.PROJECTS = {}

    def tearDown(self):
        self.db.close()
        os.close(self.db_fd)
        os.unlink(self.realapp.config['DATABASE'])

    def literal_eval(self, rv):
        """Parse a test-client response body into a Python object."""
        resp_str = rv.data.decode("utf-8")
        return ast.literal_eval(resp_str)

    def test_fetch_four_human_dna(self):
        #kind of fragile since relies upon external web server
        rv = self.app.get('/entrez/nucleotide', data=dict(term='human', retmax='4'))
        resp = self.literal_eval(rv)
        self.assertEqual(4, resp['count'])

    def test_fetch_five_mouse_protein(self):
        #kind of fragile since relies upon external web server
        rv = self.app.get('/entrez/protein', data=dict(term='mouse', retmax='5'))
        resp = self.literal_eval(rv)
        self.assertEqual(5, resp['count'])
| 1,239 |
awacs/rekognition.py
|
cloudtools/awacs
| 358 |
2022954
|
# Copyright (c) 2012-2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon Rekognition"
prefix = "rekognition"
class Action(BaseAction):
    """An IAM action in the Amazon Rekognition ("rekognition") namespace."""

    def __init__(self, action: str = None) -> None:
        # `action` may be None (wildcard); a stricter annotation would be
        # Optional[str], but that needs a typing import.
        super().__init__(prefix, action)
class ARN(BaseARN):
    """An ARN in the Amazon Rekognition service namespace."""

    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        super().__init__(
            service=prefix, resource=resource, region=region, account=account
        )
# One module-level constant per Rekognition IAM action name.
CompareFaces = Action("CompareFaces")
CreateCollection = Action("CreateCollection")
CreateProject = Action("CreateProject")
CreateProjectVersion = Action("CreateProjectVersion")
CreateStreamProcessor = Action("CreateStreamProcessor")
DeleteCollection = Action("DeleteCollection")
DeleteFaces = Action("DeleteFaces")
DeleteProject = Action("DeleteProject")
DeleteProjectVersion = Action("DeleteProjectVersion")
DeleteStreamProcessor = Action("DeleteStreamProcessor")
DescribeCollection = Action("DescribeCollection")
DescribeProjectVersions = Action("DescribeProjectVersions")
DescribeProjects = Action("DescribeProjects")
DescribeStreamProcessor = Action("DescribeStreamProcessor")
DetectCustomLabels = Action("DetectCustomLabels")
DetectFaces = Action("DetectFaces")
DetectLabels = Action("DetectLabels")
DetectModerationLabels = Action("DetectModerationLabels")
DetectProtectiveEquipment = Action("DetectProtectiveEquipment")
DetectText = Action("DetectText")
GetCelebrityInfo = Action("GetCelebrityInfo")
GetCelebrityRecognition = Action("GetCelebrityRecognition")
GetContentModeration = Action("GetContentModeration")
GetFaceDetection = Action("GetFaceDetection")
GetFaceSearch = Action("GetFaceSearch")
GetLabelDetection = Action("GetLabelDetection")
GetPersonTracking = Action("GetPersonTracking")
GetSegmentDetection = Action("GetSegmentDetection")
GetTextDetection = Action("GetTextDetection")
IndexFaces = Action("IndexFaces")
ListCollections = Action("ListCollections")
ListFaces = Action("ListFaces")
ListStreamProcessors = Action("ListStreamProcessors")
ListTagsForResource = Action("ListTagsForResource")
RecognizeCelebrities = Action("RecognizeCelebrities")
SearchFaces = Action("SearchFaces")
SearchFacesByImage = Action("SearchFacesByImage")
StartCelebrityRecognition = Action("StartCelebrityRecognition")
StartContentModeration = Action("StartContentModeration")
StartFaceDetection = Action("StartFaceDetection")
StartFaceSearch = Action("StartFaceSearch")
StartLabelDetection = Action("StartLabelDetection")
StartPersonTracking = Action("StartPersonTracking")
StartProjectVersion = Action("StartProjectVersion")
StartSegmentDetection = Action("StartSegmentDetection")
StartStreamProcessor = Action("StartStreamProcessor")
StartTextDetection = Action("StartTextDetection")
StopProjectVersion = Action("StopProjectVersion")
StopStreamProcessor = Action("StopStreamProcessor")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
| 3,046 |
flask-example.py
|
kemalcanbora/python-examples
| 1 |
2024158
|
"""
Author: <NAME>
Date: Mon May 23 16:26:36 2016
Date Updated:
What is this code: An example Flask connection
Why?: For me to remember later
"""
| 148 |
pylspm/rebus.py
|
lseman/pyplspm
| 12 |
2024812
|
# <NAME>, “Unobserved Heterogeneity in Structural Equation Models: A
# New Approach to Latent Class Detection in PLS Path Modeling,” 2007.
from multiprocessing import Pool, freeze_support
import numpy as np
from numpy import inf
import pandas as pd
import scipy.stats
from scipy.stats import norm
import matplotlib
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage, ward, distance
from scipy.cluster.hierarchy import fcluster
from .pylspm import PyLSpm
from .boot import PyLSboot
from .mga import mga
from itertools import combinations
def rebus(residuals, data, dataRealoc, lvmodel, mvmodel, scheme, regression):
    """REBUS-PLS: latent-class detection in a PLS path model (Trinchera, 2007).

    Starts from a Ward hierarchical clustering of the residuals, then
    iteratively re-assigns observations to the segment whose local model
    gives them the smallest closeness measure (CM), until fewer than 0.5%
    of observations change segment.  Finally re-estimates each segment and
    runs pairwise multi-group analyses.

    NOTE(review): uses the pandas ``.ix`` indexer (removed in pandas 1.0)
    and relies on loop variables leaking past their ``for`` loops
    (``exoVar``/``endoVar`` below use the last ``i`` of the estimation
    loop) -- confirm both before porting to a recent pandas.
    """
    Z = linkage(residuals, method='ward')
    # plt.figure(figsize=(10, 8))
    # plt.title('Dendograma de Agrupamento Hierárquico')
    plt.xlabel('Amostra')
    plt.ylabel('Distância')
    dendrogram(
        Z,
        leaf_rotation=90.,
        leaf_font_size=8,
    )
    plt.show()
    # Hard-coded dendrogram cut height for the initial segmentation.
    max_d = 18.5
    clusters = fcluster(Z, max_d, criterion='distance')
    print(clusters)
    while True:
        clusters = pd.DataFrame(clusters)
        clusters.columns = ['Split']
        old_clusters = clusters.copy()
        dataSplit = pd.concat([data, clusters], axis=1)
        nk = max(clusters['Split'])
        # Estimate one local PLS model per current segment.
        rebus = []
        for i in range(nk):
            data_ = (dataSplit.loc[dataSplit['Split']
                                   == i + 1]).drop('Split', axis=1)
            data_.index = range(len(data_))
            estima = PyLSpm(data_, lvmodel, mvmodel, scheme,
                            regression, 0, 100, HOC='true')
            rebus.append(estima)
        # CM[obs, segment] = closeness of each observation to each segment.
        CM = pd.DataFrame(0, index=np.arange(len(data)), columns=np.arange(nk))
        # NOTE(review): `i` here is the last index of the loop above.
        exoVar = rebus[i].endoexo()[1]
        endoVar = rebus[i].endoexo()[0]
        for j in range(nk):
            dataRealoc_ = dataRealoc.copy()
            # Novos residuais (recompute residuals under segment j's model)
            mean_ = np.mean(rebus[j].data, 0)
            scale_ = np.std(rebus[j].data, 0) * \
                np.sqrt((len(data_) - 1) / len(data_))
            dataRealoc_ = dataRealoc_ - mean_
            dataRealoc_ = dataRealoc_ / scale_
            outer_residuals = dataRealoc_.copy()
            fscores = pd.DataFrame.dot(dataRealoc_, rebus[0].outer_weights)
            for i in range(len(rebus[j].latent)):
                block = dataRealoc_[rebus[j].Variables['measurement']
                                    [rebus[j].Variables['latent'] == rebus[j].latent[i]]]
                block = block.columns.values
                loadings = rebus[j].outer_loadings.ix[
                    block][rebus[j].latent[i]].values
                outer_ = fscores.ix[:, i].values
                outer_ = outer_.reshape(len(outer_), 1)
                loadings = loadings.reshape(len(loadings), 1)
                outer_ = np.dot(outer_, loadings.T)
                outer_residuals.ix[:, block] = (dataRealoc_.ix[
                    :, block] - outer_)**2
            inner_residuals = fscores[endoVar]
            inner_ = pd.DataFrame.dot(
                fscores, rebus[j].path_matrix.ix[endoVar].T)
            inner_residuals = (fscores[endoVar] - inner_)**2
            # Fim dos novos residuais
            resnum1 = pd.DataFrame.dot(outer_residuals, (np.diag(
                1 / (pd.DataFrame.sum(
                    rebus[j].comunalidades(), axis=1)).values.flatten())))
            supresouter = pd.DataFrame.sum(
                resnum1, axis=1) / (pd.DataFrame.sum(pd.DataFrame.sum(resnum1, axis=1)) / (len(data) - 2))
            resnum2 = pd.DataFrame.dot(inner_residuals, (np.diag(
                1 / rebus[j].r2.ix[endoVar].values.flatten())))
            supresinner = pd.DataFrame.sum(
                resnum2, axis=1) / (pd.DataFrame.sum(pd.DataFrame.sum(resnum2, axis=1)) / (len(data) - 2))
            CM.ix[:, j] = (np.sqrt(supresouter * supresinner))
        # Re-assign each observation to its closest segment.
        clusters = CM.idxmin(axis=1).values
        clusters = clusters + 1
        diff_clusters = clusters - old_clusters.values.flatten()
        changes = diff_clusters.astype(bool).sum()
        print(changes)
        # Converged when fewer than 0.5% of observations changed segment.
        if((changes / len(data)) < 0.005):
            break
        old_clusters = clusters.copy()
    # Estima final (final per-segment estimation and reporting)
    clusters = pd.DataFrame(clusters)
    clusters.columns = ['Split']
    dataSplit = pd.concat([data, clusters], axis=1)
    nk = max(clusters['Split'])
    rebus = []
    f1 = []
    for i in range(nk):
        data_ = (dataSplit.loc[dataSplit['Split']
                               == i + 1]).drop('Split', axis=1)
        data_.index = range(len(data_))
        rebus.append(PyLSpm(data_, lvmodel, mvmodel, scheme,
                            regression, 0, 100, HOC='true'))
        print(np.round(len(data_) / len(data) * 100, 2))
        print(len(data_))
        print(rebus[i].path_matrix)
        print(rebus[i].gof())
        resid = rebus[i].residuals()[3]
        f1.append(resid)
        print(resid)
    print('Final Cost')
    cost = (np.sum(f1))
    print(1 / cost)
    # Automatiza multi-group (pairwise MGA between all final segments)
    allCombs = list(combinations(range(1, nk + 1), 2))
    for i in range(len(allCombs)):
        mga(50, 8, dataSplit, lvmodel,
            mvmodel, scheme, regression, 0, 100, g1=allCombs[i][0], g2=allCombs[i][1],
            segmento='Split')
| 5,490 |
pywatts/core/result_step.py
|
zyxsachin/pyWATTS
| 0 |
2024634
|
from typing import Optional, Dict
import pandas as pd
from pywatts.core.base_step import BaseStep
from pywatts.core.filemanager import FileManager
class ResultStep(BaseStep):
    """
    This step fetches the correct column if the previous step provides data with multiple columns as output
    """

    def __init__(self, input_steps, buffer_element: str):
        super().__init__(input_steps=input_steps)
        # Name of the column/buffer element to select from the input step.
        self.buffer_element = buffer_element

    def get_result(self, start: pd.Timestamp, end: Optional[pd.Timestamp], *args):
        """
        Returns the specified result (buffer element) of the previous step.
        """
        # Exactly one input step is expected; forward the element name to it.
        return list(self.input_steps.values())[0].get_result(start, end, self.buffer_element)

    def get_json(self, fm: FileManager) -> Dict:
        """
        Returns all information for restoring the resultStep.
        """
        json_dict = super().get_json(fm)
        json_dict["buffer_element"] = self.buffer_element
        return json_dict

    @classmethod
    def load(cls, stored_step: dict, inputs, targets, module, file_manager):
        """
        Load a stored ResultStep.
        :param stored_step: Information about the stored step
        :param inputs: The input step of the stored step
        :param targets: The target step of the stored step
        :param module: The module wrapped by this step
        :return: Step
        """
        step = cls(inputs, stored_step["buffer_element"])
        step.id = stored_step["id"]
        step.name = stored_step["name"]
        step.last = stored_step["last"]
        return step
| 1,581 |
Round #535 (Div 3)/B.py
|
julianferres/Codeforces
| 4 |
2024836
|
def B():
n = int(input())
s = [int(x) for x in input().split()]
x = max(s)
for d in range(1,x+1):
if(x%d ==0):
s.remove(d)
y = max(s)
print(x,y)
B()
| 163 |
setup.py
|
sayeghr/clashroyale
| 0 |
2023916
|
from setuptools import setup

# Long description for PyPI comes straight from the README.
with open('README.rst') as f:
    long_description = f.read()

setup(
    name='clashroyale',
    packages=['clashroyale'],
    version='v3.4.0',
    description='An (a)sync wrapper for royaleapi.com',
    long_description=long_description,
    long_description_content_type='text/x-rst',
    author='kyb3r',
    license='MIT',
    url='https://github.com/cgrok/clashroyale',
    keywords=['clashroyale', 'wrapper', 'cr', 'royaleapi'],
    # Pinned/bounded deps: aiohttp 2.x and yarl<1.2 for the async client.
    install_requires=['aiohttp>=2.0.0,<2.3.0', 'python-box==3.1.1', 'requests==2.18.4', 'asynctest==0.12.0', 'yarl<1.2'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Games/Entertainment :: Real Time Strategy',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    python_requires='>=3.5'
)
| 951 |
black_magic/common.py
|
coldfix/black-magic
| 2 |
2023854
|
def param_names(argspec):
    """
    Iterate over all parameter names used in the argspec, in declaration
    order: positional args, *varargs, keyword-only args, then **varkw.

    `argspec` is an `inspect.getfullargspec`-style record.  Note that
    `varargs` and `varkw` are single names (or None) while `args` and
    `kwonlyargs` are lists.
    """
    for argname in argspec.args:
        yield argname
    if argspec.varargs:
        yield argspec.varargs
    if argspec.kwonlyargs:
        for argname in argspec.kwonlyargs:
            yield argname
    if argspec.varkw:
        # Bug fix: varkw is one name, not an iterable of names; the old
        # code iterated the string and yielded individual characters.
        yield argspec.varkw
class Scope:
    """Tracks the set of names already used in a particular scope."""

    def __init__(self, iterable):
        # Snapshot of every name currently taken.
        self.names = set(iterable)

    def reserve(self, name):
        """Return a variant of `name` not yet in the scope, and claim it.

        Underscores are appended until the candidate is free.
        """
        candidate = name
        while candidate in self.names:
            candidate = candidate + '_'
        self.names.add(candidate)
        return candidate
| 752 |
46/swagger_client/models/synset_entity.py
|
apitore/apitore-sdk-python
| 3 |
2022907
|
# coding: utf-8
"""
WordNet APIs
You can access ALL WordNet DB.<BR />[Endpoint] https://api.apitore.com/api/46 # noqa: E501
OpenAPI spec version: 0.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SynsetEntity(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'name': 'str',
        'pos': 'str',
        'src': 'str',
        'synset': 'str'
    }

    attribute_map = {
        'name': 'name',
        'pos': 'pos',
        'src': 'src',
        'synset': 'synset'
    }

    def __init__(self, name=None, pos=None, src=None, synset=None):  # noqa: E501
        """SynsetEntity - a model defined in Swagger"""  # noqa: E501
        self._name = None
        self._pos = None
        self._src = None
        self._synset = None
        self.discriminator = None
        # All four fields are required: each property setter rejects None.
        self.name = name
        self.pos = pos
        self.src = src
        self.synset = synset

    @property
    def name(self):
        """Gets the name of this SynsetEntity.  # noqa: E501

        Name  # noqa: E501

        :return: The name of this SynsetEntity.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this SynsetEntity.

        Name  # noqa: E501

        :param name: The name of this SynsetEntity.  # noqa: E501
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name

    @property
    def pos(self):
        """Gets the pos of this SynsetEntity.  # noqa: E501

        Part-of-speech  # noqa: E501

        :return: The pos of this SynsetEntity.  # noqa: E501
        :rtype: str
        """
        return self._pos

    @pos.setter
    def pos(self, pos):
        """Sets the pos of this SynsetEntity.

        Part-of-speech  # noqa: E501

        :param pos: The pos of this SynsetEntity.  # noqa: E501
        :type: str
        """
        if pos is None:
            raise ValueError("Invalid value for `pos`, must not be `None`")  # noqa: E501
        self._pos = pos

    @property
    def src(self):
        """Gets the src of this SynsetEntity.  # noqa: E501

        Src  # noqa: E501

        :return: The src of this SynsetEntity.  # noqa: E501
        :rtype: str
        """
        return self._src

    @src.setter
    def src(self, src):
        """Sets the src of this SynsetEntity.

        Src  # noqa: E501

        :param src: The src of this SynsetEntity.  # noqa: E501
        :type: str
        """
        if src is None:
            raise ValueError("Invalid value for `src`, must not be `None`")  # noqa: E501
        self._src = src

    @property
    def synset(self):
        """Gets the synset of this SynsetEntity.  # noqa: E501

        Synset  # noqa: E501

        :return: The synset of this SynsetEntity.  # noqa: E501
        :rtype: str
        """
        return self._synset

    @synset.setter
    def synset(self, synset):
        """Sets the synset of this SynsetEntity.

        Synset  # noqa: E501

        :param synset: The synset of this SynsetEntity.  # noqa: E501
        :type: str
        """
        if synset is None:
            raise ValueError("Invalid value for `synset`, must not be `None`")  # noqa: E501
        self._synset = synset

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SynsetEntity):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 5,176 |
CyberProject/CyberUser/migrations/0009_team_teama_teamb_wager.py
|
rzhvn1/CyberBet
| 0 |
2024685
|
# Generated by Django 3.1.6 on 2021-02-24 14:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: create Team, TeamA, TeamB and Wager models."""

    dependencies = [
        ('CyberUser', '0008_auto_20210220_1837'),
    ]

    operations = [
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('win_count', models.PositiveIntegerField(default=0)),
                ('lose_count', models.PositiveIntegerField(default=0)),
                ('tie_count', models.PositiveIntegerField(default=0)),
                ('rank', models.PositiveIntegerField(default=25)),
                ('moral', models.PositiveIntegerField(default=50)),
            ],
        ),
        migrations.CreateModel(
            name='TeamA',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='CyberUser.team')),
            ],
        ),
        migrations.CreateModel(
            name='TeamB',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='CyberUser.team')),
            ],
        ),
        migrations.CreateModel(
            name='Wager',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('match_result', models.CharField(choices=[('W', 'W'), ('T', 'T'), ('L', 'L')], max_length=10)),
                ('teamA', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='CyberUser.teama')),
                ('teamB', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='CyberUser.teamb')),
            ],
        ),
    ]
| 2,116 |
test/unit_test/json/rw_and_reformat_json_test/rw_and_reformat_json_test.py
|
JE-Chen/APITestka
| 0 |
2023715
|
import json
import os
from je_api_testka import read_action_json
from je_api_testka import write_action_json
from je_api_testka import reformat_json
# Two sample API-test actions: a GET with custom headers and a POST with
# params plus an expected-status check.
test_list = \
    [
        ["test_api_method",
         {"http_method": "get", "test_url": "http://httpbin.org/get",
          "headers": {
              'x-requested-with': 'XMLHttpRequest',
              'Content-Type': 'application/x-www-form-urlencoded',
              'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36',
          }
          }
         ],
        ["test_api_method",
         {"http_method": "post", "test_url": "http://httpbin.org/post", "params": {"task": "new task"},
          "result_check_dict": {"status_code": 200}
          }
         ]
    ]
# Round-trip: write the actions to disk, read them back, reformat, print.
write_action_json(os.getcwd() + "/test1.json", test_list)
read_json = reformat_json(read_action_json(os.getcwd() + "/test1.json"))
print(read_json)
| 956 |
store/tests.py
|
lukasz0k/Book_store
| 0 |
2023933
|
from django.test import TestCase
from .models import Category
# Create your tests here.
class TestCategoriesModel(TestCase):
    """Unit tests for the store `Category` model."""

    def setUp(self):
        # One category fixture shared by the tests below.
        self.data1 = Category.objects.create(name='django', slug='django')

    def test_category_model_entry(self):
        """Test Category model data insertion/types/fields attributes"""
        data = self.data1
        self.assertTrue(isinstance(data, Category))

    def test_category_model_entry_name(self):
        """Test the Category model's default string representation"""
        data = self.data1
        self.assertEqual(str(data), 'django')
| 579 |
e2e/pages/__init__.py
|
04n0/jenkins-configuration
| 177 |
2023823
|
from __future__ import absolute_import
import os

# Hostname of the Jenkins instance the e2e pages talk to; overridable via
# the JENKINS_HOST environment variable, defaulting to a local instance.
JENKINS_HOST = os.environ.get('JENKINS_HOST', 'localhost')
| 104 |
graph_search/streetlearn_analysis.py
|
episodeyang/graph_search
| 1 |
2024769
|
from functools import partial
import networkx as nx
import numpy as np
import gym
from params_proto import proto_partial
from params_proto.neo_proto import ParamsProto
from graph_search import methods, short_names
from streetlearn import StreetLearnDataset
class Args(ParamsProto):
    # Experiment hyper-parameters; ParamsProto exposes these as
    # CLI-overridable class attributes.
    env_id = "streetlearn_small"
    neighbor_r = 2.4e-4  # neighbor radius used when wiring graph edges (see maze_graph)
    neighbor_r_min = None  # optional lower bound on neighbor distance (unused in this file)
    h_scale = 1.2  # scale factor for the search heuristic
    # plotting
    visualize_graph = True  # whether to draw the graph (hook is commented out below)
def load_streetlearn(data_path="~/fair/streetlearn/processed-data/manhattan-large", pad=0.1):
    """Load the StreetLearn dataset, pick start/goal nodes, and plot them.

    Start and goal are the panoramas closest to opposite corners of the
    selected bounding box, inset by ``pad`` (fraction of bbox size).
    Returns ``(dataset, start_index, goal_index)``.
    Side effects: shows two matplotlib figures.
    """
    # NOTE(review): StreetLearnDataset is already imported at module level;
    # this local re-import is redundant but harmless.
    from streetlearn import StreetLearnDataset
    import matplotlib.pyplot as plt
    from os.path import expanduser
    path = expanduser(data_path)
    d = StreetLearnDataset(path)
    # Hard-coded Manhattan bounding box: (lng, lat, width, height).
    d.select_bbox(-73.997, 40.726, 0.01, 0.008)
    d.show_blowout("NYC-large", show=True)
    # Opposite corners of the bbox, inset by `pad` on each axis.
    a = d.bbox[0] + d.bbox[2] * pad, d.bbox[1] + d.bbox[3] * pad
    b = d.bbox[0] + d.bbox[2] * (1 - pad), d.bbox[1] + d.bbox[3] * (1 - pad)
    # locate_closest returns (node_index, ...) pairs; keep only the indices.
    (start, _), (goal, _) = d.locate_closest(*a), d.locate_closest(*b)
    fig = plt.figure(figsize=(6, 5))
    plt.scatter(*d.lng_lat[start], marker="o", s=100, linewidth=3,
                edgecolor="black", facecolor='none', label="start")
    plt.scatter(*d.lng_lat[goal], marker="x", s=100, linewidth=3,
                edgecolor="none", facecolor='red', label="end")
    plt.legend(loc="upper left", bbox_to_anchor=(0.95, 0.7), framealpha=1,
               frameon=False, fontsize=12)
    d.show_blowout("NYC-large", fig=fig, box_color='gray', box_alpha=0.1,
                   show=True, set_lim=True)
    return d, start, goal
# 1. get data
# 2. build graph
# 3. get start and goal
# 4. make plans
def plot_graph(graph):
    """Draw *graph* as thin gray edges (nodes hidden) on the current axes.

    Node positions are taken from each node's ``pos`` attribute.
    """
    # Import locally: `plt` is otherwise only bound at module scope by the
    # __main__ block, so calling this from an importing module raised
    # NameError on `plt`.
    import matplotlib.pyplot as plt
    nx.draw(graph, [n['pos'] for n in graph.nodes.values()],
            node_size=0, node_color="gray", alpha=0.7, edge_color="gray")
    plt.gca().set_aspect('equal')
def maze_graph(dataset: StreetLearnDataset):
    """Build an undirected networkx graph over the dataset's panoramas.

    Each panorama becomes a node (attribute ``pos`` = its lng/lat); nodes
    within ``Args.neighbor_r`` of each other are connected with an edge
    whose ``weight`` is their distance.
    """
    from tqdm import tqdm
    all_nodes = dataset.lng_lat
    graph = nx.Graph()
    for node, xy in enumerate(tqdm(all_nodes, desc="build graph")):
        graph.add_node(node, pos=xy)
    for node, a in tqdm(graph.nodes.items(), desc="add edges"):
        # neighbor() is batched: for [node] it yields one tuple each of
        # lng/lat positions, distances, and neighbor indices (presumably —
        # confirm against StreetLearnDataset.neighbor).
        (ll,), (ds,), (ns,) = dataset.neighbor([node], r=Args.neighbor_r)
        for neighbor, d in zip(ns, ds):
            graph.add_edge(node, neighbor, weight=d)
    return graph
# if Args.visualize_graph:
# plot_graph(graph)
# plt.gca().set_aspect(dataset.lat_correction)
# plt.show()
# noinspection PyPep8Naming,PyShadowingNames
def heuristic(a, b, G: nx.Graph, scale=1, lat_correction=1 / 0.74):
    """L1 (Manhattan) distance between node batches *a* and *b*.

    Positions come from the nodes' ``pos`` attributes; the latitude axis is
    rescaled by *lat_correction* before the norm, and the whole result is
    multiplied by *scale*.
    """
    pos_a = np.array([G.nodes[n]['pos'] for n in a])
    pos_b = np.array([G.nodes[n]['pos'] for n in b])
    axis_weights = [1, lat_correction]
    return np.linalg.norm((pos_a - pos_b) * axis_weights, ord=1, axis=-1) * scale
def plot_trajectory_2d(path, color='black', **kwargs):
    """Draw *path* (a sequence of (x, y) points) as a chain of arrows.

    Extra keyword arguments are forwarded to ``plt.arrow``.
    """
    # Import locally: `plt` is otherwise only bound at module scope by the
    # __main__ block, so calling this from an importing module raised
    # NameError on `plt`.
    import matplotlib.pyplot as plt
    for (x, y), (x_, y_) in zip(path[:-1], path[1:]):
        dx = (x_ - x)
        dy = (y_ - y)
        d = np.linalg.norm([dx, dy], ord=2)
        # Arrows are drawn at 80% length, with heads sized relative to the
        # segment, so consecutive arrows read as a connected trajectory.
        plt.arrow(x, y, dx * 0.8, dy * 0.8, **kwargs, head_width=d * 0.3, head_length=d * 0.3,
                  length_includes_head=True, head_starts_at_zero=True, fc=color, ec=color)
def set_fig(dataset: StreetLearnDataset):
    """Strip tick labels and apply the dataset's latitude aspect correction."""
    # Import locally: `plt` is otherwise only bound at module scope by the
    # __main__ block, so calling this from an importing module raised
    # NameError on `plt`.
    import matplotlib.pyplot as plt
    plt.gca().set_yticklabels([])
    plt.gca().set_xticklabels([])
    plt.gca().set_aspect(dataset.lat_correction)
def ind2pos(G, inds, scale=1):
    """Map node indices *inds* to their ``pos`` attributes in *G*, each
    multiplied by *scale*. Returns a list in the same order as *inds*."""
    positions = []
    for node in inds:
        positions.append(G.nodes[node]['pos'] * scale)
    return positions
def patch_graph(G):
    """Wrap ``G.neighbors`` so every neighbor lookup is counted.

    Returns a defaultdict mapping node -> number of times it was returned
    by a ``neighbors()`` call; used below to measure planning cost.
    """
    # Import locally: the original relied on `from collections import
    # defaultdict` executed inside the __main__ guard, so calling this
    # function from an importing module raised NameError on `defaultdict`.
    from collections import defaultdict

    queries = defaultdict(lambda: 0)
    _neighbors = G.neighbors

    def neighbors(n):
        # `queries` is mutable and closed over, so no `global` is needed.
        ns = list(_neighbors(n))
        for m in ns:
            queries[m] += 1
        return ns

    G.neighbors = neighbors
    return queries
if __name__ == '__main__':
    from collections import defaultdict
    from waterbear import DefaultBear
    import matplotlib.pyplot as plt
    from ml_logger import logger
    # Build the search problem: dataset, graph, start/goal, query counter.
    dataset, start, goal = load_streetlearn()
    G = maze_graph(dataset)
    queries = patch_graph(G)
    # goal -= 120 # 10 worked well
    cache = DefaultBear(dict)
    # Run every search method, plotting each plan in a 2x2 grid and
    # recording its cost (# of distance lookups) and plan length.
    fig = plt.figure(figsize=(4, 4), dpi=300)
    for i, (key, search) in enumerate(methods.items()):
        queries.clear()
        name = search.__name__
        # First docstring line is used as the subplot title.
        title, *_ = search.__doc__.split('\n')
        short_name = short_names[key]
        path, ds = search(G, start, goal, partial(heuristic, G=G, scale=1.2))
        cache.cost[short_name] = len(queries.keys())
        cache.len[short_name] = len(ds)
        print(f"{key:>10} len: {len(path)}", f"cost: {len(queries.keys())}")
        plt.subplot(2, 2, i + 1)
        plt.title(title, pad=10)
        # plot_graph(G)
        plot_trajectory_2d(ind2pos(G, path, 100), label=short_name)
        # Gray dots mark every node the search touched.
        plt.scatter(*zip(*ind2pos(G, queries.keys(), 100)), color="gray", s=3, alpha=0.1)
        set_fig(dataset)
    # plt.legend(loc="upper left", bbox_to_anchor=(0.45, 0.8), framealpha=1, frameon=False, fontsize=12)
    plt.tight_layout()
    logger.savefig("../figures/streetlearn_plans.png", dpi=300)
    plt.show()
    plt.close()
    # colors = ['#49b8ff', '#ff7575', '#66c56c', '#f4b247']
    # for i, (k, v) in enumerate(cache.items()):
    #     plt.bar(k, v, color=colors[i])
    # Bar chart: planning cost per method.
    fig = plt.figure(figsize=(3.8, 3), dpi=300)
    plt.title('Planning Cost')
    plt.bar(cache.cost.keys(), cache.cost.values(), color="gray", width=0.8)
    plt.ylim(0, max(cache.cost.values()) * 1.2)
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['right'].set_visible(False)
    plt.tight_layout()
    logger.savefig("../figures/streetlearn_cost.png", dpi=300)
    plt.ylabel('# of distance lookup')
    plt.show()
    # Bar chart: resulting plan length per method.
    fig = plt.figure(figsize=(3.8, 3), dpi=300)
    plt.title('Plan Length')
    plt.bar(cache.len.keys(), cache.len.values(), color="gray", width=0.8)
    plt.ylim(0, max(cache.len.values()) * 1.2)
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['right'].set_visible(False)
    plt.tight_layout()
    logger.savefig("../figures/streetlearn_length.png", dpi=300)
    plt.ylabel('Path Length')
    plt.show()
    logger.print('done', color="green")
| 6,223 |
tensorboard/test.py
|
smit-hinsu/tensorboard
| 0 |
2022686
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorBoard test module.
This module provides a TensorBoard base test class and main function
with some of the niceties of tf.test, while only requiring standard
unittest be installed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import os
import shutil
import six
import tempfile
import unittest
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
# Process-wide temp directory, created lazily by get_temp_dir().
_temp_dir = None

def get_temp_dir():
    """Return a process-wide temporary directory for tests to use.

    The directory is created on first call, reused afterwards, and removed
    by an atexit hook when the interpreter shuts down.
    """
    global _temp_dir
    if _temp_dir:
        return _temp_dir
    # Honor Bazel's TEST_TMPDIR (used as the mkdtemp prefix) when present.
    prefix = os.environ.get('TEST_TMPDIR')
    new_dir = tempfile.mkdtemp(prefix=prefix) if prefix else tempfile.mkdtemp()

    def delete_temp_dir(dirname=new_dir):
        try:
            shutil.rmtree(dirname)
        except OSError as e:
            logger.error('Error removing %s: %s', dirname, e)

    atexit.register(delete_temp_dir)
    _temp_dir = new_dir
    return _temp_dir
class TestCase(unittest.TestCase):
    """TensorBoard base test class.

    Extends unittest.TestCase with a lazily-created per-test temporary
    directory and a few assertion helpers shared with tf.test.
    """

    def __init__(self, methodName='runTest'):
        super(TestCase, self).__init__(methodName)
        # Created on first get_temp_dir() call, then reused for this test.
        self._tempdir = None

    def assertItemsEqual(self, actual, expected, msg=None):
        """Assert that *actual* and *expected* contain the same elements,
        regardless of their order.

        Same as assertCountEqual in Python 3 with unittest.TestCase.
        """
        return six.assertCountEqual(super(TestCase, self), actual, expected, msg)

    def assertStartsWith(self, actual, expected_start, msg=None):
        """Assert that string *actual* starts with *expected_start*."""
        if actual.startswith(expected_start):
            return
        details = '%r does not start with %r' % (actual, expected_start)
        if msg:
            details += ' : %r' % (msg)
        self.fail(details)

    def assertEndsWith(self, actual, expected_end, msg=None):
        """Assert that string *actual* ends with *expected_end*."""
        if actual.endswith(expected_end):
            return
        details = '%r does not end with %r' % (actual, expected_end)
        if msg:
            details += ' : %r' % (msg)
        self.fail(details)

    def get_temp_dir(self):
        """Return a unique temporary directory for this test to use.

        Repeated calls within one test return the same folder, while
        different runs get different directories, so tests cannot pollute
        each other's environment. For multiple unique directories within a
        single test, use ``tempfile.mkdtemp(dir=self.get_temp_dir())``.

        Returns:
          string, the path to the unique temporary directory created for this test.
        """
        if self._tempdir is None:
            self._tempdir = tempfile.mkdtemp(dir=get_temp_dir())
        return self._tempdir
def main(*args, **kwargs):
    """Pass args and kwargs through to unittest main.

    Drop-in stand-in for tf.test.main in tests that only need plain
    unittest discovery and running.
    """
    return unittest.main(*args, **kwargs)
| 3,696 |
setup.py
|
molayac/commit-msg-jira
| 0 |
2022963
|
#!/usr/bin/env python
# Packaging script for the commit-msg-jira git hook.
# NOTE(review): distutils is deprecated and removed in Python 3.12, and
# `install_requires` is a setuptools option that plain distutils ignores —
# consider migrating to setuptools.
from distutils.core import setup
setup(name='commit-msg-jira',
      version='1.0',
      description='Hook commit-msg to send worklog to JIRA',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://www.python.org/sigs/distutils-sig/',
      packages=['jiraworklog'],
      install_requires=['jira', 'configparser', 'tzlocal'],
      scripts=['commit-msg']
      )
| 405 |
bigcommerce/resources/payments.py
|
Anmol-Gulati/bigcommerce-api-python
| 69 |
2024683
|
from .base import *
class PaymentMethods(ListableApiResource):
    """Bigcommerce ``payments/methods`` endpoint (list-only resource)."""
    resource_name = 'payments/methods'
| 104 |
stereo/algorithm/spatial_hotspot.py
|
fuyawangye/stereopy
| 61 |
2024019
|
#!/usr/bin/env python3
# coding: utf-8
"""
@file: get_hotspot.py
@description:
@author: <NAME>
@email: <EMAIL>
@last modified by: <NAME>
change log:
2021/10/14 create file.
"""
import copy
import pandas as pd
import hotspot
def spatial_hotspot(data, model='normal', n_neighbors=30, n_jobs=20, fdr_threshold=0.05,
                    min_gene_threshold=50, outdir=None):
    """
    identifying informative genes (and gene modules)
    :param data: StereoExpData
    :param model: Specifies the null model to use for gene expression.
        Valid choices are:
            - 'danb': Depth-Adjusted Negative Binomial
            - 'bernoulli': Models probability of detection
            - 'normal': Depth-Adjusted Normal
            - 'none': Assumes data has been pre-standardized
    :param n_neighbors: Neighborhood size.
    :param n_jobs: Number of parallel jobs to run.
    :param fdr_threshold: Correlation threshold at which to stop assigning genes to modules
    :param min_gene_threshold: Controls how small modules can be. Increase if there are too many modules being formed.
        Decrease if substructre is not being captured
    :param outdir: directory containing output file(hotspot.pkl). Hotspot object will be totally output here.
        If None, results will not be output to a file.
    :return: Hotspot object.
    """
    # Deep-copy so the caller's StereoExpData is never mutated.
    hit_data = copy.deepcopy(data)
    counts = hit_data.to_df().T  # gene x cell
    pos = pd.DataFrame(hit_data.position, index=counts.columns)  # cell name as index
    num_umi = counts.sum(axis=0)  # total counts per cell
    # Create the Hotspot object and the neighborhood graph
    hs = hotspot.Hotspot(counts, model=model, latent=pos, umi_counts=num_umi)
    hs.create_knn_graph(
        weighted_graph=False, n_neighbors=n_neighbors,
    )
    hs_results = hs.compute_autocorrelations(jobs=n_jobs)
    # select the genes with significant spatial autocorrelation
    hs_genes = hs_results.index[hs_results.FDR < fdr_threshold]
    # The next three calls had their return values bound to unused locals;
    # they are kept for their effect on `hs` (presumably attaching local
    # correlations, modules and module scores to the object — verify
    # against the hotspot package docs).
    hs.compute_local_correlations(hs_genes, jobs=n_jobs)
    hs.create_modules(
        min_gene_threshold=min_gene_threshold, core_only=False, fdr_threshold=fdr_threshold,
    )
    hs.calculate_module_scores()
    if outdir is not None:
        from stereo.io.writer import save_pkl
        save_pkl(hs, output=f"{outdir}/hotspot.pkl")
    return hs
| 2,434 |
optimal_transport_morphometry/core/migrations/0002_initial_models.py
|
girder/otm-server
| 0 |
2024510
|
# Generated by Django 3.1.3 on 2020-11-20 18:49
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import s3_file_field.fields
import optimal_transport_morphometry.core.models.image
class Migration(migrations.Migration):
    # Auto-generated initial schema for the core app: Atlas, Dataset,
    # Patient, UploadBatch, PendingUpload and Image models plus two indexes.
    # NOTE: generated by Django — change the schema with a new migration,
    # not by editing this one.
    initial = True
    dependencies = [
        ('core', '0001_default_site'),
    ]
    operations = [
        migrations.CreateModel(
            name='Atlas',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                (
                    'blob',
                    s3_file_field.fields.S3FileField(
                        max_length=2000,
                        upload_to=s3_file_field.fields.S3FileField.uuid_prefix_filename,
                    ),
                ),
                ('name', models.CharField(max_length=255)),
            ],
            options={
                'verbose_name_plural': 'atlases',
            },
        ),
        migrations.CreateModel(
            name='Dataset',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                (
                    'created',
                    django_extensions.db.fields.CreationDateTimeField(
                        auto_now_add=True, verbose_name='created'
                    ),
                ),
                (
                    'modified',
                    django_extensions.db.fields.ModificationDateTimeField(
                        auto_now=True, verbose_name='modified'
                    ),
                ),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField(default='', max_length=3000)),
            ],
            options={
                'get_latest_by': 'modified',
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Patient',
            fields=[
                ('identifier', models.CharField(max_length=255, primary_key=True, serialize=False)),
            ],
        ),
        migrations.CreateModel(
            name='UploadBatch',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True)),
                (
                    'dataset',
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='upload_batches',
                        to='core.dataset',
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name='PendingUpload',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('name', models.CharField(max_length=255)),
                (
                    'batch',
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='pending_uploads',
                        to='core.uploadbatch',
                    ),
                ),
                (
                    'patient',
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='pending_uploads',
                        to='core.patient',
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('name', models.CharField(max_length=255)),
                (
                    'type',
                    models.CharField(
                        default=optimal_transport_morphometry.core.models.image.ImageType[
                            'structural_mri'
                        ],
                        max_length=100,
                    ),
                ),
                (
                    'blob',
                    s3_file_field.fields.S3FileField(
                        max_length=2000,
                        upload_to=s3_file_field.fields.S3FileField.uuid_prefix_filename,
                    ),
                ),
                (
                    'metadata',
                    optimal_transport_morphometry.core.models.image.MetadataField(
                        blank=True, default=dict
                    ),
                ),
                (
                    'dataset',
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='images',
                        to='core.dataset',
                    ),
                ),
                (
                    'patient',
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='images',
                        to='core.patient',
                    ),
                ),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.AddIndex(
            model_name='pendingupload',
            index=models.Index(fields=['batch', 'name'], name='core_pendin_batch_i_520706_idx'),
        ),
        migrations.AddIndex(
            model_name='image',
            index=models.Index(fields=['dataset'], name='core_image_dataset_9750b7_idx'),
        ),
    ]
| 6,310 |
Python Basic/Convert Number to Binary.py
|
perfect104/python-codes
| 4 |
2023809
|
'''
@author : CodePerfectPlus
@Topic : Change Number TO Binary
'''
# Print the binary representation of `num` as a plain integer of 0/1
# digits, e.g. 23 -> 10111.
num = 23
print(int(format(num, "b")))
| 104 |
client/logger.py
|
MajicGit/boompow
| 54 |
2024841
|
import sys
import logging
from logging.handlers import WatchedFileHandler, TimedRotatingFileHandler
class WatchedTimedRotatingFileHandler(TimedRotatingFileHandler, WatchedFileHandler):
    """Time-rotating log handler that also reopens the log file when it is
    moved or deleted underneath it (combining both stdlib handlers)."""
    def __init__(self, filename, **kwargs):
        super().__init__(filename, **kwargs)
        # WatchedFileHandler bookkeeping: (device, inode) of the open file.
        # Reset and re-stat so reopenIfNeeded() has a valid baseline.
        self.dev, self.ino = -1, -1
        self._statstream()
    def emit(self, record):
        # Reopen if the file changed on disk (WatchedFileHandler behavior),
        # then defer to TimedRotatingFileHandler (first in the MRO) for
        # rollover handling and the actual write.
        self.reopenIfNeeded()
        super().emit(record)
def get_logger():
    """Build and return the shared "bpow" logger.

    INFO+ goes to stdout with a short timestamp format; DEBUG+ goes to a
    daily-rotated file (30 backups kept) that survives external rotation.
    """
    import os
    log_file = "logs/bpow.log"
    # The file handler raises FileNotFoundError if the log directory is
    # missing, so make sure it exists first.
    os.makedirs(os.path.dirname(log_file), exist_ok=True)
    logger = logging.getLogger("bpow")
    logger.setLevel(logging.DEBUG)
    # NOTE(review): calling get_logger() more than once attaches duplicate
    # handlers; callers are assumed to call it a single time.
    stream = logging.StreamHandler(stream=sys.stdout)
    stream.setFormatter(logging.Formatter("%(asctime)s %(levelname)s: %(message)s", "%H:%M:%S"))
    stream.setLevel(logging.INFO)
    logger.addHandler(stream)
    file = WatchedTimedRotatingFileHandler(log_file, when="d", interval=1, backupCount=30)
    file.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(filename)s@%(funcName)s:%(lineno)s\n%(message)s", "%Y-%m-%d %H:%M:%S %z"))
    file.setLevel(logging.DEBUG)
    logger.addHandler(file)
    return logger
| 1,083 |
post_gnome/setup.py
|
dylanrighi/GnomeTools
| 0 |
2023728
|
#!/usr/bin/env python
"""
setup.py for the post_gnome package
"""
# This setup is suitable for "python setup.py develop".
from setuptools import setup
setup(
    name = "post_gnome",
    version = "0.1.0",
    description = "utilities for post processing data for GNOME",
    long_description=open("README.rst").read(),
    packages = ["post_gnome",],
    # Command-line entry points installed alongside the package.
    scripts = ["scripts/gnome_nc2kmz.py",
               "scripts/moss2kmz_series.py",
               "scripts/moss2kmz_simple.py",
               ],
    author = "<NAME>",
    author_email = "<EMAIL>",
    url="http://www.response.restoration.noaa.gov/gnome",
    license = "LICENSE.txt",
    keywords = "GNOME netcdf kmz",
    # Trove classifiers; note the package declares Python 2 only.
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Topic :: Utilities",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Meteorology/Oceanography",
        "License :: Public Domain",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2 :: Only",
        "Programming Language :: Python :: Implementation :: CPython",
        ],
    )
| 1,205 |
utils.py
|
arsham/GoToChar
| 1 |
2024781
|
# Copyright 2016 <NAME> <<EMAIL>>. All rights reserved.
# Use of this source code is governed by the Apache 2.0 license
# License that can be found in the LICENSE file.
from sublime import Region
class Constants:
    # Command modes as referenced from key bindings.
    NEXT_MODE = "next"
    BACK_MODE = "back"
    MODES = (NEXT_MODE, BACK_MODE)
    # Cursor movement deltas: +1 searches rightwards, -1 leftwards.
    RIGHT = 1
    LEFT = -1
class State:
    """
    Defines the current state of the plug-in.

    The single shared instance (see current_state) is the only source of
    truth.
    """
    invoked = False    # a plug-in command has been triggered
    in_plugin = False  # the plug-in is currently active
    select = False     # extend the selection instead of just moving

    def reset(self):
        """Clear the command flags; `select` is intentionally preserved."""
        self.invoked = False
        self.in_plugin = False
# Module-level singleton State instance; created lazily by current_state().
state_object = None
def current_state():
    """Return the shared State instance, creating it on first use."""
    global state_object
    if state_object is None:
        state_object = State()
    return state_object
def remove_last(view):
    """
    Removes the last character entered and returns it because we don't want to display it.
    """
    # Region covering the single character just before the first cursor.
    position = view.sel()[0].begin()
    region = Region(position, position - 1)
    character = view.substr(region)
    view.run_command("left_delete")
    # undoing twice to remove the character and also retain the view's dirty state.
    view.run_command("undo")
    view.run_command("undo")
    return character
def to_next(view, character):
    """
    Moves the cursor(s) to the next occurrence of `character`.
    """
    return _find_and_move(view, character, Constants.RIGHT)
def to_back(view, character):
    """
    Moves the cursor(s) to the previous occurrence of `character`.
    """
    return _find_and_move(view, character, Constants.LEFT)
def _find_and_move(view, character, direction):
    """Move (or extend) every cursor to the nearest `character` in
    `direction` (Constants.RIGHT or Constants.LEFT) within its own line,
    then reset the plug-in state."""
    lines = {} # a dictionary of cursor region tuple to line region
    for sel in view.sel():
        a = min(sel.a, sel.b) # normalising the selection region
        b = max(sel.a, sel.b)
        # in case of going right, the add_cell_check points to the character's position
        add_cell_check = 0
        if direction == Constants.LEFT:
            # otherwise it is the left side of it, which is the previous character's position
            add_cell_check = -1
        if a == b and view.substr(a + add_cell_check) == character:
            # to hop over the character if it is the character already, and look up the next one
            a += direction
            b += direction
            sel = Region(a, b)
        lines[(a, b)] = view.line(sel)
    regions = []
    for sel, line in lines.items():
        regions.append(_get_found_regions(view, character, sel, line, direction))
    if regions:
        # because we don't want to clear the selection if there is no region
        view.sel().clear()
    for region in regions:
        view.sel().add(region)
    current_state().reset()
def _get_found_regions(view, character, sel, line, direction):
    """
    Finds the regions of where the cursor(s) should land. If the command is in select mode,
    the regions are apart, otherwise it is on one character.
    :rtype: Region
    """
    # Slice of the line between the cursor and the relevant line edge.
    if direction == Constants.RIGHT:
        line_portion = Region(sel[0], line.b)
    else:
        line_portion = Region(line.a, sel[1])
    from_sel = view.substr(line_portion)
    if direction == Constants.RIGHT:
        found_pos = from_sel.find(character)
    else:
        found_pos = from_sel.rfind(character)
    # NOTE(review): `> 0` also rejects a legitimate match at offset 0 (e.g.
    # rfind hitting the first column); `>= 0` may have been intended —
    # confirm against the plug-in's expected behavior before changing.
    if found_pos > 0:
        # otherwise we didn't find anything
        if current_state().select:
            # Select mode: keep one end anchored at the current cursor.
            if direction == Constants.RIGHT:
                a = sel[0]
                b = sel[0] + found_pos
            else:
                a = line.a + found_pos
                b = sel[1]
        else:
            # Move mode: collapse the region onto the found character.
            if direction == Constants.RIGHT:
                a = b = sel[0] + found_pos
            else:
                a = b = line.a + found_pos
        return Region(a, b)
    # for clearing only the region that can be advanced, we need to
    # push back the current selection
    return Region(sel[0], sel[1])
| 3,921 |
Task4: Dock station recognition system using ROS and LIDAR/Robot_movement/scripts/move_base_simple.py
|
malwake-git/smartmethods-AI-track
| 1 |
2024863
|
#!/usr/bin/env python
# Publish a single navigation goal to move_base's simple-goal topic.
# NOTE(review): `rospy` is imported twice and `Header` is unused.
import rospy
from std_msgs.msg import Header
import rospy
from geometry_msgs.msg import PoseStamped
rospy.init_node("mynode")
goal_publisher = rospy.Publisher("move_base_simple/goal", PoseStamped, queue_size=5)
# Goal pose in the "map" frame: position (1, 2, 0), identity orientation.
goal = PoseStamped()
goal.header.seq = 1
goal.header.stamp = rospy.Time.now()
goal.header.frame_id = "map"
goal.pose.position.x = 1.0
goal.pose.position.y = 2.0
goal.pose.position.z = 0.0
goal.pose.orientation.x = 0.0
goal.pose.orientation.y = 0.0
goal.pose.orientation.z = 0.0
goal.pose.orientation.w = 1.0
# Give the publisher a moment to register with subscribers before sending.
rospy.sleep(1)
goal_publisher.publish(goal)
rospy.spin()
| 607 |
scheduler/tests/unit/scheduler/actions/transform_machines/test_list_machines.py
|
vdloo/simulacra
| 2 |
2023638
|
from unittest.mock import Mock
from tests.testcase import TestCase
from scheduler.actions.transform_machines import list_machines
class TestListMachines(TestCase):
    """Unit tests for list_machines: it should fetch the Consul node
    catalog over HTTP and return the parsed JSON as a list of dicts."""
    def setUp(self):
        # Patch the HTTP layer so no network access happens.
        self.request = self.set_up_patch(
            'scheduler.actions.transform_machines.Request'
        )
        self.urlopen = self.set_up_patch(
            'scheduler.actions.transform_machines.urlopen'
        )
        # urlopen is used as a context manager; wire __enter__/__exit__ so
        # `with urlopen(...) as handle:` yields our fake request handle.
        self.urlopen.return_value.__exit__ = lambda a, b, c, d: None
        self.req_handle = Mock()
        # Canned Consul /v1/catalog/nodes response (four nodes).
        self.resp = b'[{"ID":"962c903e-f206-aa23-8c8d-0a80db121078",' \
                    b'"Node":"cloud1","Address":' \
                    b'"fc03:cced:19b5:7c78:b7e5:520d:b7e3:1357",' \
                    b'"Datacenter":"raptiformica",' \
                    b'"TaggedAddresses":{"lan":' \
                    b'"fc03:cced:19b5:7c78:b7e5:520d:b7e3:1357",' \
                    b'"wan":"fc03:cced:19b5:7c78:b7e5:520d:b7e3:1357"},' \
                    b'"Meta":{"consul-network-segment":""},' \
                    b'"CreateIndex":29511,"ModifyIndex":29528},' \
                    b'{"ID":"757cf773-6d5d-6500-83a0-1010918b9809",' \
                    b'"Node":"cloud2","Address":' \
                    b'"fcdf:a62d:1b46:b898:761e:d753:4e5:7fd",' \
                    b'"Datacenter":"raptiformica","TaggedAddresses":' \
                    b'{"lan":"fcdf:a62d:1b46:b898:761e:d753:4e5:7fd",' \
                    b'"wan":"fcdf:a62d:1b46:b898:761e:d753:4e5:7fd"},' \
                    b'"Meta":{"consul-network-segment":""},' \
                    b'"CreateIndex":6,"ModifyIndex":35},' \
                    b'{"ID":"765104ce-8c88-9baa-2a29-6f7ee50f1719",' \
                    b'"Node":"host4","Address":' \
                    b'"fc9f:34ec:b491:293e:edb3:a890:b239:b6d6",' \
                    b'"Datacenter":"raptiformica","TaggedAddresses":' \
                    b'{"lan":"fc9f:34ec:b491:293e:edb3:a890:b239:b6d6",' \
                    b'"wan":"fc9f:34ec:b491:293e:edb3:a890:b239:b6d6"},' \
                    b'"Meta":{"consul-network-segment":""},' \
                    b'"CreateIndex":200,"ModifyIndex":202},' \
                    b'{"ID":"f45dae53-0fcc-fb73-37d9-d55816420ab5",' \
                    b'"Node":"retropie","Address":' \
                    b'"fc00:d4e0:31b6:e19:a983:e335:4569:2b26",' \
                    b'"Datacenter":"raptiformica","TaggedAddresses":' \
                    b'{"lan":"fc00:d4e0:31b6:e19:a983:e335:4569:2b26","wan":' \
                    b'"fc00:d4e0:31b6:e19:a983:e335:4569:2b26"},' \
                    b'"Meta":{"consul-network-segment":""},' \
                    b'"CreateIndex":182,"ModifyIndex":185}]'.decode('utf-8')
        self.req_handle.read.return_value = self.resp
        self.urlopen.return_value.__enter__ = lambda x: self.req_handle
    def test_list_machines_instantiates_urllib_request(self):
        """The local Consul catalog endpoint is requested."""
        list_machines()
        self.request.assert_called_once_with(
            'http://localhost:8500/v1/catalog/nodes'
        )
    def test_list_machines_opens_request(self):
        """The constructed Request object is passed to urlopen."""
        list_machines()
        self.urlopen.assert_called_once_with(
            self.request.return_value
        )
    def test_list_machines_reads_request_handle(self):
        """The response handle is read exactly once."""
        list_machines()
        # Fixed: the original called `assert_called_one_with` — a typo that
        # Mock either auto-created (silently passing) or rejected with
        # AttributeError, depending on the mock version.
        self.req_handle.read.assert_called_once_with()
    def test_list_machines_returns_list_of_machines(self):
        """The JSON body is parsed into a list of node dicts."""
        ret = list_machines()
        expected_machines = [
            {
                'ID': '962c903e-f206-aa23-8c8d-0a80db121078',
                'Node': 'cloud1',
                'Address': 'fc03:cced:19b5:7c78:b7e5:520d:b7e3:1357',
                'Datacenter': 'raptiformica',
                'TaggedAddresses': {
                    'lan': 'fc03:cced:19b5:7c78:b7e5:520d:b7e3:1357',
                    'wan': 'fc03:cced:19b5:7c78:b7e5:520d:b7e3:1357'
                },
                'Meta': {'consul-network-segment': ''},
                'CreateIndex': 29511,
                'ModifyIndex': 29528},
            {
                'ID': '757cf773-6d5d-6500-83a0-1010918b9809',
                'Node': 'cloud2',
                'Address': 'fcdf:a62d:1b46:b898:761e:d753:4e5:7fd',
                'Datacenter': 'raptiformica',
                'TaggedAddresses': {
                    'lan': 'fcdf:a62d:1b46:b898:761e:d753:4e5:7fd',
                    'wan': 'fcdf:a62d:1b46:b898:761e:d753:4e5:7fd'
                },
                'Meta': {'consul-network-segment': ''},
                'CreateIndex': 6,
                'ModifyIndex': 35
            },
            {
                'ID': '765104ce-8c88-9baa-2a29-6f7ee50f1719',
                'Node': 'host4',
                'Address': 'fc9f:34ec:b491:293e:edb3:a890:b239:b6d6',
                'Datacenter': 'raptiformica',
                'TaggedAddresses': {
                    'lan': 'fc9f:34ec:b491:293e:edb3:a890:b239:b6d6',
                    'wan': 'fc9f:34ec:b491:293e:edb3:a890:b239:b6d6'
                }, 'Meta': {'consul-network-segment': ''},
                'CreateIndex': 200,
                'ModifyIndex': 202
            },
            {
                'ID': 'f45dae53-0fcc-fb73-37d9-d55816420ab5',
                'Node': 'retropie',
                'Address': 'fc00:d4e0:31b6:e19:a983:e335:4569:2b26',
                'Datacenter': 'raptiformica',
                'TaggedAddresses': {
                    'lan': 'fc00:d4e0:31b6:e19:a983:e335:4569:2b26',
                    'wan': 'fc00:d4e0:31b6:e19:a983:e335:4569:2b26'
                },
                'Meta': {'consul-network-segment': ''},
                'CreateIndex': 182,
                'ModifyIndex': 185
            }
        ]
        self.assertEqual(expected_machines, ret)
| 5,724 |
cfn_guard_test/report.py
|
tiborhercz/cfn-guard-test
| 0 |
2024855
|
from typing import List
from cfn_guard_test.rule import CfnGuardRule
from cfn_guard_test.case import CfnGuardTestCase
from cfn_guard_test.suites import CfnGuardTestSuites
from junit_xml import TestCase, TestSuite, to_xml_report_string
class CfnGuardReport:
    """
    Understands how to create JUnit XML reports from cfn-guard test suites.
    """
    __suites: List[TestSuite]
    def __init__(self, suites: CfnGuardTestSuites):
        self.__suites = []
        for suite in suites.all_suites:
            # NOTE(review): overwritten on every iteration, so only the last
            # suite's duration survives into elapsed_sec; also never set when
            # there are no suites (elapsed_sec would raise AttributeError).
            self.__timestamp = suite.duration
            self.__suites.append(
                TestSuite(
                    name=suite.ruleset, test_cases=self.__get_all_test_cases(suite)
                )
            )
    @property
    def elapsed_sec(self) -> float:
        """
        Because we only have the time for the whole suite we will only return this once to the case.
        """
        # Self-consuming property: returns the stored duration on first read
        # and zeroes it, so only the first generated TestCase carries it.
        timestamp = 0.0
        if self.__timestamp:
            timestamp = self.__timestamp
            self.__timestamp = 0.0
        return timestamp
    def __get_all_test_cases(self, suite):
        # Flatten every (case, rule) pair of the suite into junit TestCases.
        cases = []
        for case in suite.all_test_cases:
            for rule in case.all_rules:
                cases.append(self.__create_test_case(case=case, rule=rule))
        return cases
    def __create_test_case(
        self, case: CfnGuardTestCase, rule: CfnGuardRule
    ) -> TestCase:
        # Map one cfn-guard rule outcome to a junit TestCase, annotating
        # skipped/failed outcomes with a human-readable message.
        test_case = TestCase(
            name=rule.name, classname=case.name, elapsed_sec=self.elapsed_sec
        )
        if rule.skipped:
            test_case.add_skipped_info(
                f'Rule {rule.name} was skipped on case #{case.number} "{case.name}"'
            )
        if rule.failed:
            test_case.add_failure_info(
                f'Rule {rule.name} failed on case #{case.number} "{case.name}"'
            )
        return test_case
    def write(self, path: str) -> None:
        """Write the collected suites as pretty-printed JUnit XML to *path*."""
        with open(path, "w") as f:
            f.write(to_xml_report_string(self.__suites, prettyprint=True))
| 1,991 |
purescripto/installer.py
|
purescript-python/purescripto
| 7 |
2024137
|
import re
import io
import os
import requests
import zipfile
import stat
from pathlib import Path
from distutils.util import get_platform
# Matches git refs such as "refs/tags/v1.2.3", capturing the version.
# Raw string: a plain '\S' literal triggers SyntaxWarning on modern Python.
tag = re.compile(r'refs/tags/v(\S+)')
def make_executable(cmd_path):
    """Set *cmd_path*'s permissions to owner read + execute, dropping any
    other bits (including write)."""
    read_and_exec = stat.S_IREAD | stat.S_IEXEC
    os.chmod(cmd_path, read_and_exec)
def show_tags(url=r"https://github.com/purescript-python/purescript-python"):
    """
    Generator yielding the version part of every "refs/tags/v<version>"
    ref on the remote repository (network access required).

    Use ls-remote in gitPython
    https://stackoverflow.com/questions/35585236/git-ls-remote-in-gitpython
    """
    import git
    g = git.cmd.Git()
    # ls-remote output lines are "<sha>\t<refname>"; keep only tag refs.
    for ref in g.ls_remote(url).split('\n'):
        found = tag.findall(ref.split('\t')[-1].strip())
        if not found:
            continue
        yield found[0]
def mk_tmplt(template):
    """Compile *template* into a predicate ``check(data)``.

    Template forms:
      - dict: data must be a dict containing every template key, with each
        value matching its sub-template (extra keys are allowed).
      - list: data must be a list of identical length, matched element-wise.
      - type: matched via isinstance.
      - the ``any`` builtin: matches everything.
      - an object with a ``match`` method (e.g. a compiled regex): used as
        the predicate directly.
      - anything else: matched by equality.
    """
    if isinstance(template, dict):
        sub_checks = {key: mk_tmplt(value) for key, value in template.items()}

        def check(data):
            if not isinstance(data, dict):
                return False
            return all(key in data and ok(data[key])
                       for key, ok in sub_checks.items())
    elif isinstance(template, list):
        element_checks = [mk_tmplt(item) for item in template]

        def check(data):
            if not isinstance(data, list):
                return False
            if len(data) != len(element_checks):
                return False
            return all(ok(item) for ok, item in zip(element_checks, data))
    elif isinstance(template, type):
        def check(data):
            return isinstance(data, template)
    elif template is any:
        def check(data):
            return True
    elif hasattr(template, 'match'):
        check = template.match
    else:
        def check(data):
            return data == template
    return check
def traverse(f, data):
    """Depth-first generator over *data*, yielding every value for which
    ``f(value)`` is truthy. Dicts are walked by value, lists element-wise,
    and children are yielded before their containing collection."""
    if isinstance(data, dict):
        children = data.values()
    elif isinstance(data, list):
        children = data
    else:
        children = ()
    for child in children:
        yield from traverse(f, child)
    if f(data):
        yield data
def gq(tmp, data):
    """Query helper: yield every value inside *data* that matches the
    template *tmp* (see mk_tmplt for the template mini-language)."""
    return traverse(mk_tmplt(tmp), data)
def get_binary(out_path):
    """Download the platform-specific `pspy-blueprint` executable from the
    purescript-python GitHub releases and install it into *out_path*.

    out_path is the directory that will contain the executable, not the
    path of the executable itself.
    """
    if isinstance(out_path, str):
        out_path = Path(out_path)
    elif not isinstance(out_path, Path):
        raise TypeError(type(out_path))
    from purescripto.version import __blueprint_version__
    # Highest release tag compatible with this package's blueprint version.
    max_fit_tag = max(filter(lambda x: x.startswith(__blueprint_version__), show_tags()))
    print('Downloading binaries from purescript-python/purescript-python...')
    data = requests.get(
        r"https://api.github.com/repos/purescript-python/purescript-python/releases/tags/v{}"
        .format(max_fit_tag)).json()
    print('Binaries downloaded.')
    plat_name = get_platform()
    # Raw string: the original plain '\S+' literal triggers SyntaxWarning
    # on modern Python (pattern value is unchanged).
    matcher = re.compile(r'\S+' + re.escape(plat_name))
    # Find the release asset whose download URL mentions this platform.
    tmplt = {'browser_download_url': matcher}
    try:
        each = next(gq(tmplt, data))
    except StopIteration:
        import sys
        print(
            "It seems that binaries for your platform is not available.\n"
            "Following way must work, but can be quite time-consuming:\n"
            "Firstly, Install Haskell Stack Build Tool: https://docs.haskellstack.org/en/stable/README/\n"
            "Second, Clone https://github.com/purescript-python/purescript-python, then do `stack install .`"
        )
        sys.exit(1)
    url = each['browser_download_url']
    # Unzip the asset in memory and extract only the executable we need.
    zf = zipfile.ZipFile(io.BytesIO(requests.get(url).content))
    exe = "pspy-blueprint"
    if 'win' in url:
        exe += '.exe'
    out_path.mkdir(exist_ok=True, parents=True, mode=0o777)
    zf.extract(exe, path=str(out_path))
    make_executable(str(out_path / exe))
| 3,820 |
code-green/workshop/code/lambda-code.py
|
purcellconsult/amazon-asdi
| 55 |
2024572
|
import time
import boto3
import json
import collections
import operator
import datetime
import os
# athena database name
athenaDatabase = 'ghcn'
# S3 constant
S3_QUERY='query-result'
S3_BUCKET ='YOUR_BUCKET_HERE'
# set defaults
DEFAULT_CITIES = "best" # choices are 'list' (returns all cities) or 'best' (returns city with closest temp to target temp)
DEFAULT_TARGET = 230 # Any int that is represented in tenths of celcius
DEFAULT_DATE_HISTORY = 14 # defaults to 14 days from current day in SQL query
DEFAULT_MIN_LOOKBACK = 5
# number of retries
RETRY_COUNT = 15
## override defaults with Environment variables if available
# Each setting below can be supplied by the Lambda environment; otherwise
# the constant defined above (or a literal default) is used.
if 'GLUE_DATABASE' in os.environ:
    athenaDatabase = os.environ['GLUE_DATABASE']
if 'S3_QUERY_OUTPUT_LOCATION' in os.environ:
    S3_OUTPUT = os.environ['S3_QUERY_OUTPUT_LOCATION']
else:
    # Default Athena output location built from the bucket/key constants.
    S3_OUTPUT = 's3://' + S3_BUCKET + '/' + S3_QUERY
if 'GHCN_TABLE_NAME' in os.environ:
    GHCN_TABLE_NAME = os.environ['GHCN_TABLE_NAME']
else:
    GHCN_TABLE_NAME = 'ghcntable'
if 'STADIUM_TABLE_NAME' in os.environ:
    STADIUM_TABLE_NAME = os.environ['STADIUM_TABLE_NAME']
else:
    STADIUM_TABLE_NAME = 'stadium'
def lambda_handler(event, context):
    """Average recent GHCN temperatures per stadium city via Athena.

    Query-string parameters (all optional, falling back to module defaults):
      - cities: 'list' (return every city) or 'best' (city closest to target)
      - target: target temperature in tenths of a degree Celsius
      - days:   how many days of history to average over
    Returns an API-Gateway-style response dict with a JSON body.
    """
    # --- parse and validate query-string parameters ---------------------
    try:
        city = event['queryStringParameters']['cities']
        if ((city != "list") and (city != "best")):
            city = DEFAULT_CITIES
    except:
        city = DEFAULT_CITIES
    try:
        target = int(event['queryStringParameters']['target'])
    except:
        target = DEFAULT_TARGET
    try:
        lookbackDays = int(event['queryStringParameters']['days'])
    except:
        lookbackDays = DEFAULT_DATE_HISTORY
    if (lookbackDays < DEFAULT_MIN_LOOKBACK):
        lookbackDays = DEFAULT_MIN_LOOKBACK
    # Oldest date included in the average, formatted as the integer
    # YYYYMMDD used by the table's year_date column.
    dateObj = datetime.date.today() - datetime.timedelta(days=lookbackDays)
    queryDate = int(dateObj.strftime('%Y%m%d'))
    # query has hardcoded elements for simplicity of this workshop
    query = f"""SELECT city, avg(CAST(data_value as INTEGER)) as temp FROM "{STADIUM_TABLE_NAME}" as stadium
    INNER JOIN "{GHCN_TABLE_NAME}" as ghcn ON stadium.station_id = ghcn.id
    WHERE ghcn.year_date >= '{queryDate}'
    AND ghcn.element = 'TAVG'
    GROUP BY city"""
    # athena client
    client = boto3.client('athena')
    # Start the (asynchronous) query execution.
    response = client.start_query_execution(
        QueryString=query,
        QueryExecutionContext={
            'Database': athenaDatabase
        },
        ResultConfiguration={
            'OutputLocation': S3_OUTPUT,
        }
    )
    # get query execution id
    query_execution_id = response['QueryExecutionId']
    print(query_execution_id)
    # Poll Athena until the query finishes, sleeping i seconds between
    # attempts (1s, 2s, ... up to RETRY_COUNT); cancel and fail after that.
    for i in range(1, 1 + RETRY_COUNT):
        # get query execution
        query_status = client.get_query_execution(QueryExecutionId=query_execution_id)
        query_execution_status = query_status['QueryExecution']['Status']['State']
        if query_execution_status == 'SUCCEEDED':
            print("STATUS:" + query_execution_status)
            break
        if query_execution_status == 'FAILED':
            raise Exception("STATUS:" + query_execution_status)
        else:
            print("STATUS:" + query_execution_status)
            time.sleep(i)
    else:
        # for/else: reached only when the loop was never broken out of.
        client.stop_query_execution(QueryExecutionId=query_execution_id)
        raise Exception('TIME OVER')
    # get query results
    result = client.get_query_results(QueryExecutionId=query_execution_id)
    # Convert the result set into something a bit easier to manage.
    # Row 0 of the result set is the header row, so data starts at index 1.
    i=1
    stations= {}
    num_cities = len(result['ResultSet']['Rows'])
    while i < num_cities:
        # Pull out the station city and station avg temp from the json returned from query
        station_city = result['ResultSet']['Rows'][i]['Data'][0]['VarCharValue']
        station_temp = int(float(result['ResultSet']['Rows'][i]['Data'][1]['VarCharValue']))
        # the delta from target shows how far (in tenths of a degree) we are from the target temp
        delta_from_target = abs(station_temp - target)
        # save it in a new dict. Station[<City Name>] = [ degree delta from target, avg temp of city]
        stations[station_city] = [ delta_from_target, station_temp ]
        i = i+1
    # Sort by [delta, temp] so the first entry is the closest match to target.
    sorted_stations = sorted(stations.items(), key=operator.itemgetter(1))
    stations_dict = collections.OrderedDict(sorted_stations)
    best_city = list(stations_dict)[0]
    if (city == "list"):
        return {
            'statusCode': 200,
            'headers': { 'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*' },
            'body': json.dumps(stations_dict)
        }
    elif (city == "best"):
        return_val = { }
        return_val[best_city] = stations[best_city]
        return {
            'statusCode': 200,
            'headers': { 'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*' },
            'body': json.dumps(return_val)
        }
    else:
        # Unreachable in practice: city is normalised to 'list' or 'best' above.
        return {
            'statusCode': 200,
            'headers': { 'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*' },
            'body': json.dumps(stations_dict)
        }
| 5,407 |
yc302/1578.py
|
c-yan/yukicoder
| 0 |
2024147
|
# Prime modulus used for the final answer.
m = 1000000007
A, B, C = map(int, input().split())
K = int(input())
# Computes (A*B*C) ** (2**K) mod m.  Since m is prime, the exponent can
# be reduced mod (m - 1) by Fermat's little theorem, so pow(2, K, m - 1)
# keeps the computation fast even for huge K.
# NOTE(review): this reduction assumes A*B*C is not a multiple of m.
print(pow(A * B * C, pow(2, K, m - 1), m))
| 113 |
python_experiments/playground/play/tex_to_normal.py
|
mexuaz/AccTrussDecomposition
| 9 |
2024135
|
if __name__ == '__main__':
    # Flatten the LaTeX snippet in tmp.txt into plain running text:
    # inparaenum \item markers become "(1)" and the environment
    # begin/end wrappers are stripped; comment lines ('%') are dropped.
    # NOTE(review): the patterns rely on '\i' and '\e' not being
    # recognised string escapes; raw strings (r'\item') would be the
    # safer spelling -- confirm before changing.
    with open('tmp.txt') as ifs:
        lines = [
            l.lstrip().replace('\item', '(1)').replace('\\begin{inparaenum}[(1)]', '').
            replace('\end{inparaenum}', '')
            # .replace('$', '')
            for l in ifs.readlines()]
        lines = filter(lambda l: not l.startswith('%'), lines)
        line = '\n'.join(lines)
        new_line = line.replace('\n', ' ')
        # Repeated replacement intended to collapse whitespace runs.
        # NOTE(review): as written this replaces a single space with a
        # single space (a no-op); presumably it was meant to collapse
        # double spaces -- verify against the original file.
        max_space_num = 5
        for i in range(max_space_num):
            new_line = new_line.replace(' ', ' ')
        with open('tmp_out.txt', 'w') as ofs:
            ofs.write(new_line)
| 616 |
tools/base.py
|
YuzeLiao/Column-Oriented-ML-System
| 3 |
2024686
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
class Transpose(object):
    """
    Transpose whitespace-separated rows of tokens.

    Rows shorter than the widest row seen so far are right-padded with
    "0" before transposing, so the result is always rectangular.
    """
    def __init__(self):
        self.data = []            # current matrix, a list of token lists
        self.max_row_length = 0   # width of the widest row seen so far
    def line_process(self, line):
        """
        Split one raw input line into tokens and track the widest row.
        """
        line = line.strip().split()
        if self.max_row_length < len(line):
            self.max_row_length = len(line)
        return line
    def line_extend(self, line):
        """
        Right-pad *line* with "0" tokens up to max_row_length.
        """
        extend_len = self.max_row_length - len(line)
        if extend_len > 0:
            line.extend(["0"] * extend_len)
        return line
    def read_data(self, input_filename):
        """
        Read and tokenise every line of *input_filename*.
        """
        # BUG FIX: the original assigned a lazy map() object and never
        # closed the file.  On Python 3 the laziness meant max_row_length
        # was not final before padding; the list comprehension forces
        # evaluation, and 'with' guarantees the handle is closed.
        with open(input_filename, "r") as input_file:
            self.data = [self.line_process(line) for line in input_file]
    def data_transpose(self, data):
        """
        Pad *data* (or self.data when *data* is None) into a rectangle
        and transpose it, returning the transposed rows as lists.
        """
        if data is not None:
            self.data = data
        # BUG FIX: list comprehensions instead of lazy map() objects, so
        # every row is padded with the final max_row_length and the
        # result supports len()/indexing in later processing.
        self.data = [self.line_extend(line) for line in self.data]
        self.data = [list(row) for row in zip(*self.data)]
        return self.data
    def data_print(self, output_filename):
        """
        Write the current matrix to *output_filename*, space-separated.
        """
        with open(output_filename, "w") as output_file:
            for line in self.data:
                output_file.write(" ".join(line) + "\n")
class ColumnBasedTranspose(object):
    """
    Stream a row-based, libsvm-style ("label idx:val ...") file into a
    column-based layout, processing batch_size rows at a time.
    """
    def __init__(self, batch_size, input_filename, output_filename):
        self.data = []               # current batch of rows
        self.max_length = 0          # widest dense row in the current batch
        self.instance_count = 0      # rows actually read into the batch
        self.batch_size = batch_size
        self.global_max_length = 0   # widest dense row across all batches
        self.transposer = Transpose()
        # NOTE(review): both handles stay open for the object's lifetime
        # and are never closed explicitly -- acceptable for a one-shot
        # tool, but callers must close them to flush the output.
        self.input_file = open(input_filename, "r")
        self.output_file = open(output_filename, "w")
    def read_data(self):
        """
        Read up to batch_size raw lines into self.data.
        """
        self.data = []
        self.instance_count = 0
        while self.instance_count < self.batch_size:
            line = self.input_file.readline()
            if not line:
                break
            self.data.append(line)
            self.instance_count += 1
    def expand_line(self, line):
        """
        Expand one sparse "label idx:val ..." line into a dense vector,
        padding skipped feature indices with "0".
        """
        line = line.strip().split()
        last_idx = 0
        result = [line[0]]  # position 0 holds the label
        for item in line[1:]:
            [idx, val] = item.split(":")
            idx = int(idx)
            distance = idx - int(last_idx)
            if distance > 1:
                # Gap between feature indices: pad the skipped slots.
                result.extend(["0"] * (distance - 1) + [val])
                last_idx = idx
            elif distance == 1:
                result.extend([val])
                last_idx = idx
            else:
                # Out-of-order or duplicate index: overwrite in place.
                result[idx] = val
        if self.max_length < len(result):
            self.max_length = len(result)
        return result
    def expand_data(self):
        """
        Densify, transpose and write the whole input, batch by batch,
        finishing with the global maximum row length.
        """
        while 1:
            self.read_data()
            if self.instance_count == 0:
                break
            self.max_length = 0
            # BUG FIX: the original assigned the lazy map() object here.
            # On Python 3 that deferred expand_line (so max_length was
            # stale when padding happened) and broke len()/del in
            # data_print; the list comprehension forces evaluation.
            self.data = [self.expand_line(line) for line in self.data]
            if self.global_max_length < self.max_length:
                self.global_max_length = self.max_length
            self.transposer.max_row_length = self.max_length
            # list(...) guards against data_transpose returning a lazy
            # iterable, keeping data_print's len()/del working.
            self.data = list(self.transposer.data_transpose(self.data))
            self.data_print()
        self.output_file.write(str(self.global_max_length) + '\n')
    def data_print(self):
        """
        Write the current transposed batch in sparse column format,
        dropping all-zero feature columns.
        """
        i = 0
        sub = 0  # number of all-zero columns dropped so far
        while i < len(self.data):
            all_zero = 1
            for item in self.data[i]:
                if item != '0':
                    all_zero = 0
                    break
            if all_zero == 1:
                del self.data[i]
                sub += 1
            else:
                if i != 0:
                    # Prepend the original feature index, compensating
                    # for the columns deleted before it.
                    self.data[i].insert(0, str(i + sub))
                i += 1
        if self.data:
            self.output_file.write(str(len(self.data)) + '\n')
            # The first row holds the labels and is written densely.
            self.output_file.write(" ".join(self.data[0]) + '\n')
            for line in self.data[1:]:
                idx = 0
                self.output_file.write(line[0] + " ")
                for item in line[1:]:
                    if item != "0":
                        self.output_file.write(str(idx) + ":" + item + " ")
                    idx += 1
                self.output_file.write('\n')
| 4,749 |
opencv/q25.py
|
wuwuwuyuanhang/python
| 1 |
2024900
|
# @Auther : wuwuwu
# @Time : 2020/4/16
# @File : q25.py
# @Description : 最近邻插值
import cv2 as cv
import numpy as np
def nearestNeighborInterpolation(img, ax=1.0, ay=1.0):
    """Scale an image using nearest-neighbor interpolation.

    :param img: source image, an (H, W, C) ndarray
    :param ax: horizontal scale factor
    :param ay: vertical scale factor
    :return: scaled image of shape (int(ay*H), int(ax*W), C), as uint8
    """
    H, W, C = img.shape
    aH = int(ay * H)
    aW = int(ax * W)
    # Destination-sized index grids, both of shape (aH, aW).
    # BUG FIX: the original reshaped to (aW, -1), which only worked when
    # the output was square; non-square inputs raised a broadcast error.
    y = np.arange(aH).repeat(aW).reshape(aH, -1)
    x = np.tile(np.arange(aW), (aH, 1))
    # Map each destination pixel back to its nearest source pixel.
    # np.int was removed in NumPy 1.24; the builtin int is the documented
    # replacement.  Clamp to the valid range because rounding can produce
    # an index equal to H (or W) at the bottom/right edge (IndexError).
    y = np.minimum(np.round(y / ay).astype(int), H - 1)
    x = np.minimum(np.round(x / ax).astype(int), W - 1)
    dst = img[y, x]
    return dst.astype(np.uint8)
if __name__ == '__main__':
    # Demo: upscale the test image 1.5x in both axes and display the
    # original next to the result (press any key to close the windows).
    img = cv.imread('lenna.jpg')
    dst = nearestNeighborInterpolation(img, ax=1.5, ay=1.5)
    cv.imshow('input', img)
    cv.imshow('output', dst)
    cv.waitKey(0)
    cv.destroyAllWindows()
| 786 |
mundo_3/ex102.py
|
tseiiti/curso_em_video
| 0 |
2024911
|
from os import system, name
# Clear the terminal: 'cls' on Windows ('nt'), 'clear' elsewhere.
system('cls' if name == 'nt' else 'clear')
# Challenge statement shown to the reader (kept verbatim, in Portuguese).
dsc = ('''DESAFIO 1002:
Crie um programa que tenha uma função "fatorial" que receba
dois parâmetros: o primeiro que indique o número a calcular e o
outro chamado show, que será um valor lógico (opcional) indicando se
será mostrado ou não na tela o processo de cálculo do fatorial.
''')
def fatorial(num, show = False):
    """Compute the factorial of ``num``.

    Args:
        num (int): The number whose factorial is wanted.
        show (bool, optional): If True, return the spelled-out
            multiplication instead of the number. Defaults to False.

    Returns:
        str | int: The factorial value, or the calculation string.
    """
    # Factors 1..num; an empty range (num < 1) degenerates to [1] so the
    # result is 1 and the worked-out string still reads "1 = 1".
    factors = list(range(1, num + 1)) or [1]
    res = 1
    for factor in factors:
        res *= factor
    if show:
        return ' x '.join(str(factor) for factor in factors) + f' = {res}'
    return res
# Demo calls: plain factorial values first, then the spelled-out versions.
print(fatorial(1))
print(fatorial(2))
print(fatorial(3))
print(fatorial(5))
print(fatorial(1, True))
print(fatorial(2, True))
print(fatorial(3, True))
print(fatorial(5, True))
# Show fatorial's docstring via the built-in help system.
help(fatorial)
| 997 |
fython/instruction/xipruc.py
|
nicolasessisbreton/fython
| 41 |
2022645
|
from ..config import *
from .printruc import printruc
def xipruc(linecod):
    """Dump *linecod* via printruc when its debug flag is set."""
    if linecod.debug:
        printruc(linecod)
| 117 |
assistant/addressbook/queries.py
|
personal-assisntant-2/personal-assistant
| 0 |
2024177
|
from typing import List, Dict
from .models import Abonent, Phone, Email, Note, Tag
from django.db.models import Q
from datetime import date, timedelta
# , date_min: date = None, date_max: date = None
def read_abonents(user, pattern: str = '', tags: list = [], date_start: date = None, date_stop: date = None) -> list:
    """Search the user's Abonent records by pattern, tags and dates.

    The text *pattern* (when non-empty) is matched case-insensitively
    against name, address, notes, phones and e-mails.  Every tag in
    *tags* must be present on the abonent's notes.  Date filtering runs
    over Abonent.birthday and Note.date:
      - both bounds None: no date filtering;
      - only date_start: timestamps >= date_start;
      - only date_stop: timestamps <= date_stop;
      - both set: timestamps between the bounds (inclusive).
    Returns a QuerySet of matching Abonent instances.
    """
    # NOTE(review): the mutable default tags=[] is kept for interface
    # compatibility; it is never mutated here, so it is safe.
    abonents = Abonent.objects.filter(owner=user)
    if pattern == '':
        results_patt = abonents
    else:
        # BUG FIX: the original called results_patt.union(r1, r2, r3, r4)
        # and discarded the return value (QuerySet.union is not in-place),
        # so only the name field was ever searched.  A single OR'ed Q
        # filter searches every text field and, unlike a union()ed
        # queryset, still supports the .filter() calls below.  distinct()
        # reproduces union()'s de-duplication across the joined tables.
        results_patt = abonents.filter(
            Q(name__icontains=pattern)
            | Q(address__icontains=pattern)
            | Q(notes__note__icontains=pattern)
            | Q(phones__phone__icontains=pattern)
            | Q(emails__email__icontains=pattern)
        ).distinct()
    # Every requested tag must match (chained filters, as the original did).
    results_patt_tags = results_patt
    for tag in tags:
        results_patt_tags = results_patt_tags.filter(notes__tags__tag=tag)
    # Date window over birthdays OR note creation dates.
    if date_start is None and date_stop is None:
        return results_patt_tags
    by_birthday = results_patt_tags
    by_notes = results_patt_tags
    if date_start is not None:
        by_birthday = by_birthday.filter(birthday__gte=date_start)
        by_notes = by_notes.filter(notes__date__gte=date_start)
    if date_stop is not None:
        by_birthday = by_birthday.filter(birthday__lte=date_stop)
        by_notes = by_notes.filter(notes__date__lte=date_stop)
    return by_birthday.union(by_notes)
def get_date_month_day(period : int, owner)->List[Dict]:
    ''' Return the owner's abonents whose birthday falls within the next
    *period* days, sorted by (month, day).

    Dates are compared as (month, day) tuples so the birth year is
    ignored.  The original strict (exclusive) comparison is kept, but
    windows that cross New Year (e.g. Dec 28 + 7 days) are now handled
    instead of silently matching nothing.
    '''
    today = date.today()
    window_end = today + timedelta(days=period)
    lo = (today.month, today.day)
    hi = (window_end.month, window_end.day)

    def _in_window(month_day):
        # Strictly between the bounds.  BUG FIX: when the window wraps
        # past December 31 (lo > hi), membership is an OR of both halves;
        # the original `lo < md < hi` was always False in that case.
        if lo <= hi:
            return lo < month_day < hi
        return month_day > lo or month_day < hi

    abonents = Abonent.objects.filter(owner=owner,
                                      birthday__isnull=False)
    abonents_list = []
    # Copy only the matching rows out of the full queryset.
    for abonent in abonents:
        month_day = (abonent.birthday.month, abonent.birthday.day)
        if _in_window(month_day):
            abonents_list.append({'pk': abonent.id,
                                  'name': abonent.name,
                                  'birthday': abonent.birthday,
                                  'short_bd': month_day,
                                  'str_bd': abonent.birthday.strftime('%A %d %B %Y')})
    # Sort by (month, day).
    abonents_list.sort(key=lambda el: el['short_bd'])
    return abonents_list
| 5,327 |
main/models.py
|
hannxiao/autotrade2
| 0 |
2024931
|
from django.db import models
from django.urls import reverse #Used to generate URLs by reversing the URL patterns
import uuid
class Theory(models.Model):
    """
    Model representing a trading theory.
    """
    # Short display name; also used as the string representation.
    name = models.CharField(max_length=200, help_text="Enter the name of theory")
    summary = models.TextField(max_length=1000, help_text="Enter a brief description of the theory")
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # Resolved by the 'theory-detail' URL pattern.
        return reverse('theory-detail', args=[str(self.id)])
class Indicator(models.Model):
    """
    Model representing a indicator.
    """
    name = models.CharField(max_length=200, help_text="Enter the name of indicator")
    summary = models.TextField(max_length=1000, help_text="Enter a brief description of the indicator")
    # Symmetric self-relation linking related indicators to each other.
    related = models.ManyToManyField('self', blank=True)
    def display_related(self):
        # Comma-separated related-indicator names (admin list column).
        return ', '.join([ind.name for ind in self.related.all()])
    display_related.short_description = 'Related indicators'
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('indicator-detail', args=[str(self.id)])
class IndicatorInstance(models.Model):
    """
    A specific indicator.
    """
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text="Unique ID for this particular indicator")
    # SET_NULL keeps the instance row alive when its Indicator is deleted.
    indicator = models.ForeignKey(Indicator, on_delete=models.SET_NULL, null=True)
    kwarg = models.CharField(max_length=200, help_text="Enter the dictionary containing indicator-related parameter",
                             null=True, blank=True)
    def __str__(self):
        if self.indicator:
            return 'Instance of ' + self.indicator.name
        return 'null'
class Strategy(models.Model):
    """
    Model representing a Strategy (may be related to indicator or theory).
    """
    name = models.CharField(max_length=200, help_text="Enter the name of strategy")
    summary = models.TextField(max_length=1000, help_text="Enter a brief description of the strategy")
    # Indicators this strategy builds on.
    indicator = models.ManyToManyField(Indicator, blank=True)
    def display_indicator(self):
        # Comma-separated indicator names (admin list column).
        return ', '.join([ind.name for ind in self.indicator.all()])
    display_indicator.short_description = 'Related indicators'
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('strategy-detail', args=[str(self.id)])
class StrategyInstance(models.Model):
    """
    A specific, parameterised instance of a Strategy.
    """
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text="Unique ID for this particular strategy")
    strategy = models.ForeignKey(Strategy, on_delete=models.SET_NULL, null=True)
    kwarg = models.CharField(max_length=200, help_text="Enter the dictionary containing indicator-related parameter",
                             null=True, blank=True)
    def __str__(self):
        if self.strategy:
            # BUG FIX: the original read self.indicator.name, but this
            # model has no 'indicator' field, so __str__ always raised
            # AttributeError when a strategy was set.
            return 'Instance of ' + self.strategy.name
        return 'null'
class Portfolio(models.Model):
    """
    A combination of many single stocks.
    """
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text="Unique ID for this particular portfolio")
    # NOTE(review): both fields are free-form strings that presumably hold
    # serialised lists -- confirm the expected format with the callers.
    weights = models.CharField(max_length=200, help_text="Enter an array that has sum = 1") # later change it to an array container
    symbols = models.CharField(max_length=200, help_text="Enter a list of symbols")
    def __str__(self):
        return self.symbols
| 3,797 |
cm4/vbox/__init__.py
|
swsachith/cm
| 0 |
2024412
|
from cloudmesh.common.Shell import Shell
def version(verbose=False):
    """Return the installed Vagrant version.

    With verbose=True the full `vagrant version` output is returned;
    otherwise just the version string from the "Installed Version:"
    line, or None when that line is absent.
    """
    output = Shell.execute("vagrant", ["version"])
    if verbose:
        return output
    marker = "Installed Version:"
    for row in output.split("\n"):
        if marker in row:
            return row.replace(marker, "").strip()
    return None
| 366 |
pedido/migrations/0009_remove_pedido_endereco.py
|
borbinhaa/django-pizza-website
| 0 |
2024501
|
# Generated by Django 3.2.7 on 2021-09-30 18:21
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: reverts the 'endereco' field that
    # migration 0008 added to the Pedido model.
    dependencies = [
        ('pedido', '0008_pedido_endereco'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='pedido',
            name='endereco',
        ),
    ]
| 326 |
rebound/python_examples/megno/problem.py
|
rodluger/ttv-devil
| 0 |
2024331
|
#!/usr/bin/python
# This example integrates Jupiter and Saturn in the Solar system for a variety of initial conditions.
# Alongside the normal equations of motions, IAS15 is used to integrate the variational equations.
# These can be used to measure the Mean Exponential Growth of Nearby Orbits (MEGNO), a chaos indicator.
# This example script runs 12^2 simulations and plots the MEGNO value. Values close to <Y>=2 correspond
# to regular quasi-periodic orbits. Higher values of <Y> correspond to chaotic orbits.
# Import matplotlib
import matplotlib; matplotlib.use("pdf")
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# Import the rebound module
import rebound
# Import other modules
import numpy as np
import multiprocessing
import warnings
# Runs one simulation.
def simulation(par):
    """Integrate one Sun-Jupiter-Saturn system and return chaos measures.

    par: (saturn_a, saturn_e) -- Saturn's semi-major axis [AU] and
    eccentricity.  Returns [MEGNO, Lyapunov timescale in years].
    """
    saturn_a, saturn_e = par
    sim = rebound.Simulation()
    sim.integrator = "whfast"
    sim.min_dt = 5.
    sim.dt = 1.
    # These parameters are only approximately those of Jupiter and Saturn.
    sun = rebound.Particle(m=1.)
    sim.add(sun)
    jupiter = sim.add(primary=sun,m=0.000954, a=5.204, M=0.600, omega=0.257, e=0.048)
    saturn = sim.add(primary=sun,m=0.000285, a=saturn_a, M=0.871, omega=1.616, e=saturn_e)
    sim.move_to_com()
    # Enable the variational equations needed for the MEGNO chaos indicator.
    sim.init_megno()
    # Hide warning messages (WHFast timestep too large)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        sim.integrate(1e3*2.*np.pi)
    return [sim.calculate_megno(),1./(sim.calculate_lyapunov()*2.*np.pi)] # returns MEGNO and Lypunov timescale in years
### Setup grid and run many simulations in parallel
N = 100 # Grid size, increase this number to see more detail
a = np.linspace(7.,10.,N) # range of saturn semi-major axis in AU
e = np.linspace(0.,0.5,N) # range of saturn eccentricity
parameters = []
for _e in e:
    for _a in a:
        parameters.append([_a,_e])
# NOTE(review): single throw-away call -- presumably a warm-up/sanity
# check before the parallel map; its result is discarded.
simulation((8,0.))
# Run simulations in parallel
pool = rebound.InterruptiblePool() # Number of threads default to the number of CPUs on the system
print("Running %d simulations on %d threads..." % (len(parameters), pool._processes))
# nan_to_num: failed integrations that produced NaN are mapped to 0.
res = np.nan_to_num(np.array(pool.map(simulation,parameters)))
megno = np.clip(res[:,0].reshape((N,N)),1.8,4.) # clip arrays to plot saturated
lyaptimescale = np.clip(np.absolute(res[:,1].reshape((N,N))),1e1,1e5)
### Create plot and save as pdf
# Setup plots
f, axarr = plt.subplots(2,figsize=(10,10))
extent = [a.min(), a.max(), e.min(), e.max()]
for ax in axarr:
    ax.set_xlim(extent[0],extent[1])
    ax.set_ylim(extent[2],extent[3])
    ax.set_xlabel("$a_{\mathrm{Saturn}}$ [AU]")
    ax.set_ylabel("$e_{\mathrm{Saturn}}$")
# Plot MEGNO
im1 = axarr[0].imshow(megno, vmin=1.8, vmax=4., aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn_r", extent=extent)
cb1 = plt.colorbar(im1, ax=axarr[0])
cb1.solids.set_rasterized(True)
cb1.set_label("MEGNO $\\langle Y \\rangle$")
# Plot Lyapunov timescale
im2 = axarr[1].imshow(lyaptimescale, vmin=1e1, vmax=1e5, norm=LogNorm(), aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn", extent=extent)
cb2 = plt.colorbar(im2, ax=axarr[1])
cb2.solids.set_rasterized(True)
cb2.set_label("Lyapunov timescale [years]")
plt.savefig("megno.pdf")
### Automatically open plot (OSX only)
from sys import platform as _platform
if _platform == "darwin":
    import os
    os.system("open megno.pdf")
| 3,478 |
robot_localisation/robot.py
|
AxelTLarsson/robot-localisation
| 1 |
2024579
|
"""
"""
import numpy as np
from robot_localisation.grid import Heading
from numpy.random import random_sample
from robot_localisation.grid import *
from enum import IntEnum
class Robot:
    """
    Representation of the actual robot.
    """
    def __init__(self, grid, transition_matrix):
        """
        The robot starts at a random position drawn from
        a uniform distribution over the fields on the grid.
        The transition matrix is needed to compute the next
        step for the robot to make.
        """
        # pose = (row, col, heading)
        self.pose = (np.random.randint(0, grid.shape[0]),
                     np.random.randint(0, grid.shape[1]),
                     Heading(np.random.randint(0, 4)))
        self.grid = grid
        self.transition_matrix = transition_matrix
    def get_position(self):
        """
        Return the true position of the robot. I.e. the (x, y) from
        the pose (x, y, h).
        """
        x, y, h = self.pose
        return (x, y)
    def get_pose(self):
        """
        Return the current pose of the robot.
        """
        return self.pose
    def __str__(self):
        return "The robot is at: {}".format(str(self.pose))
    def step(self):
        """
        Move one step on the grid.
        P( keep heading | not encountering a wall) = 0.7
        P( change heading | not encountering a wall) = 0.3
        P( keep heading | encountering a wall) = 0.0
        P( change heading | encountering a wall) = 1.0
        This is all coded in the transition matrix which we
        reuse here for this purpose.
        """
        # Reuse the transition matrix: this row holds the transition
        # probabilities out of the robot's current state.
        probabilities = self.transition_matrix[
            self.grid.pose_to_index(self.pose)]
        # All state indices (cells x 4 headings)...
        values = np.array(
            list(range(0, self.grid.shape[0] * self.grid.shape[1] * 4)))
        # ...sampled via the cumulative distribution: digitize maps one
        # uniform draw into the bin of the matching probability mass.
        bins = np.add.accumulate(probabilities)
        new_index = values[np.digitize(random_sample(1), bins)][0]
        self.pose = self.grid.index_to_pose(new_index)
class Pos(IntEnum):
    """
    The different possible types of positions the sensor can report.
    """
    true = 1  # the robot's actual position
    surrounding = 2  # one of the 8 adjacent fields
    next_surrounding = 3  # one of the 16 fields two steps away
    nothing = 4  # sensor failure: no reading at all
class Sensor:
    """
    The sensor approximates the location of the robot according to:
        - the true location L with probability 0.1
        - any of the 8 surrounding fields L_s with probability 0.05 each
        - any of the next 16 surrounding fields L_s2 with probability 0.025 each
        - nothing with probability 0.1
    """
    def __init__(self):
        # Offsets of the 8 fields immediately around a position.
        self.surr = [
            (-1, -1), (-1, 0), (-1, +1), (0, -1), (0, +1),
            (+1, -1), (+1, 0), (+1, +1)
        ]
        # Offsets of the 16 fields two steps away (the outer ring).
        self.next_surr = [
            (-2, -2), (-2, -1), (-2, 0), (-2, 1), (-2, 2),
            (-1, -2), (-1, +2), ( 0, -2), (+0, 2), (+1, -2),
            (+1, +2), (+2, -2), (+2, -1), (+2, 0), (+2, 1),
            (+2, +2)
        ]
    def get_position(self, robot):
        # todo: maybe rewrite as a generator, on each iteration
        # asking robot to move one step or should that be the
        # responsibility of the caller?
        """
        Return approximate location of the robot.
        """
        real_pos = robot.get_position()
        # Sample which kind of reading to produce.  The aggregate masses
        # (0.1, 8*0.05=0.4, 16*0.025=0.4, 0.1) match the class docstring.
        values = np.array(
            [Pos.true, Pos.surrounding, Pos.next_surrounding, Pos.nothing])
        probabilities = np.array([0.1, 0.4, 0.4, 0.1])
        bins = np.add.accumulate(probabilities)
        pos_type = values[np.digitize(random_sample(1), bins)][0]
        return {
            Pos.true: real_pos,
            Pos.surrounding: Sensor.surrounding(real_pos),
            Pos.next_surrounding: Sensor.next_surrounding(real_pos),
            Pos.nothing: None,
        }[pos_type]
    @staticmethod
    def surrounding(pos):
        """
        Return a random adjacent position to 'pos'.
        """
        # FIX: marked @staticmethod -- the method takes no self and was
        # already invoked as Sensor.surrounding(...); the decorator makes
        # instance calls work too without changing existing callers.
        x, y = pos
        choices = [(x-1, y-1), (x-1, y), (x-1, y+1), (x, y-1), (x, y+1),
                   (x+1, y-1), (x+1, y), (x+1, y+1)]
        return choices[np.random.randint(len(choices))]
    @staticmethod
    def next_surrounding(pos):
        """
        Return a random next-adjacent position to 'pos'.
        """
        x, y = pos
        choices = [(x-2, y-2), (x-2, y-1), (x-2, y), (x-2, y+1), (x-2, y+2),
                   (x-1, y-2), (x-1, y+2), (x, y-2), (x, y+2), (x+1, y-2),
                   (x+1, y+2), (x+2, y-2), (x+2, y-1), (x+2, y), (x+2, y+1),
                   (x+2, y+2)]
        return choices[np.random.randint(len(choices))]
    def get_obs_matrix(self, position, grid_shape):
        """
        Build the diagonal observation matrix for a sensor reading.

        Each grid cell owns 4 consecutive state indices (one per
        heading); all four share the cell's observation probability.
        Returns None when the sensor reported nothing.
        """
        if position is None:
            return None
        n = grid_shape[0] * grid_shape[1] * 4
        mat = np.zeros((n,))
        x, y = position
        # The reported cell itself, when it lies on the grid.
        if 0 <= x < grid_shape[0] and 0 <= y < grid_shape[1]:
            position_index = position_to_north_state(position, grid_shape)
            mat[position_index:position_index+4] = 0.1
        # Outer ring (two steps away): probability 0.025 per cell.
        for o in self.next_surr:
            x_, y_ = x + o[0], y + o[1]
            if 0 <= x_ < grid_shape[0] and 0 <= y_ < grid_shape[1]:
                o_index = position_to_north_state((x_, y_), grid_shape)
                mat[o_index:o_index+4] = 0.025
        # Inner ring (adjacent cells): probability 0.05 per cell.
        for o in self.surr:
            x_, y_ = x + o[0], y + o[1]
            if 0 <= x_ < grid_shape[0] and 0 <= y_ < grid_shape[1]:
                o_index = position_to_north_state((x_, y_), grid_shape)
                mat[o_index:o_index+4] = 0.05
        return np.diag(mat)
def position_to_north_state(position, grid_shape):
    """Return the state index of *position* facing north (heading 0).

    Each grid cell occupies four consecutive state indices, one per
    heading, ordered row-major by (row, col).
    """
    row, col = position
    return (row * grid_shape[1] + col) * 4
if __name__ == '__main__':
    # Quick manual check: print the observation matrix for position (3, 3)
    # on a 2x2 grid.  (3, 3) lies outside that grid, so the matrix is
    # expected to be all zeros.
    np.set_printoptions(linewidth=1000)
    sens = Sensor()
    obs = sens.get_obs_matrix((3, 3), (2, 2))
    print(obs)
| 5,769 |
setup.py
|
kennyjoseph/twitter_geo_preproc
| 0 |
2024614
|
#!/usr/bin/env python
from distutils.core import setup
# Minimal distutils packaging metadata for the tweet_geocode package.
# See install_script.sh for the native libs (geos, spatialindex) that the
# listed Python requirements need.
setup(name='tweet_geocode',
      version='1.0',
      description='Package for geocoding tweets. All code from <NAME>, forked from his repo. ' +
      'Take a look at install_script.sh to install the required c/c++ libs (geos and spatialindex)',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/kennyjoseph/twitter_geo_preproc',
      requires=['shapely', 'simplejson','rtree','yajl'],
      packages=['tweet_geocode'],
      package_data={'tweet_geocode': ['data/*']},
      )
| 580 |
section03/lecture011.py
|
JorijnInEemland/MyPythonProjects
| 0 |
2022835
|
print(7 / 4) # / is true division and always yields a float: 1.75
print(7 % 4) # % is the modulus (remainder) operator: 3
print(50 % 5) # remainder 0 because 50 is divisible by 5
print(20 % 2) # n % 2 == 0 is a good way of checking whether a number is even
print(2 ** 3) # ** is the power operator: 2 cubed is 8
print((2 + 10) * 3) # parentheses override the usual operator precedence
| 241 |
experimental/datasets/utils/converters.py
|
CLARIN-PL/embeddings
| 33 |
2024383
|
from typing import Any, Dict, List
import spacy
from spacy.training import offsets_to_biluo_tags
def convert_spacy_jsonl_to_connl_bilou(
    data: List[Dict[Any, Any]], nlp: spacy.Language, out_path: str
) -> None:
    """Convert spacy jsonl data to connl format.

    Each record in *data* must carry a raw "text" plus a "spans" list of
    {start, end, label} character offsets.  Every record's text is
    tokenised with *nlp*, the spans are converted to BILUO tags, and
    "token \t tag" lines (with a blank line between records) are
    appended to *out_path*.
    """
    # FIX: open the output once for the whole batch instead of re-opening
    # it in append mode for every record (same bytes written, far fewer
    # syscalls), and let the context manager close the handle reliably.
    with open(out_path, "a") as out_file:
        for record in data:
            doc = nlp(record["text"])
            entities = [(span["start"], span["end"], span["label"]) for span in record["spans"]]
            labels = offsets_to_biluo_tags(doc=doc, entities=entities)
            for token, label in zip([tok.text for tok in doc], labels):
                out_file.write(f"{token} \t {label} \n")
            out_file.write("\n")
| 715 |
src/muses/search_index/urls.py
|
Aincient/cleo
| 0 |
2024578
|
from django.conf.urls import url, include
from rest_framework_extensions.routers import ExtendedDefaultRouter
from .viewsets import (
CollectionItemDocumentViewSet,
CollectionItemFacetsOnlyDocumentViewSet,
)
__all__ = ('urlpatterns',)
router = ExtendedDefaultRouter()
# Full search endpoint: collection items with documents and facets.
collection_items = router.register(
    r'collectionitem',
    CollectionItemDocumentViewSet,
    base_name='collectionitem'
)
# Lighter endpoint exposing only the facet aggregations.
# NOTE(review): base_name was renamed to basename in DRF 3.9+ -- confirm
# the pinned rest_framework version before upgrading.
collection_items_facets_only = router.register(
    r'collectionitemfacetsonly',
    CollectionItemFacetsOnlyDocumentViewSet,
    base_name='collectionitemfacetsonly'
)
# Mount every route the router generated at the package root.
urlpatterns = [
    url(r'^', include(router.urls)),
]
| 632 |
tests/test_parscit_extractor.py
|
robodasha/crossref_resolver
| 0 |
2023386
|
import logging
import unittest
import research_papers.logutils as logutils
__author__ = 'robodasha'
__email__ = '<EMAIL>'
class TestParscitExtractor(unittest.TestCase):
    """Scaffold for ParsCit extractor tests; the test bodies are stubs."""
    def __init__(self, *args, **kwargs):
        super(TestParscitExtractor, self).__init__(*args, **kwargs)
        # Configure project-wide logging once per test-case instance.
        logutils.setup_logging()
        self._logger = logging.getLogger(__name__)
    def setUp(self):
        self._logger.info('Setup {0}'.format(self.__class__.__name__))
    def tearDown(self):
        self._logger.info('Tear down {0}'.format(self.__class__.__name__))
    def test_extract_file(self):
        # Intentional placeholder: fails until the test is written.
        self.fail()
    def test_extract_directory(self):
        # Intentional placeholder: fails until the test is written.
        self.fail()
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| 724 |
chess_engine/piece.py
|
rbaltrusch/chess_engine
| 0 |
2024786
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 13:33:57 2021
@author: Korean_Crimson
"""
from chess_engine import move
from chess_engine.util import convert
from chess_engine.consts import PAWN, KNIGHT, BISHOP, ROOK, QUEEN, KING
from chess_engine.move import InitialPawnMove
class Piece:
    """Base chess piece: holds its move generators, position and team metadata."""

    def __init__(self, moves, position, representation):
        self.moves = moves
        self.position = position
        self.representation = representation
        # The last character of the representation encodes the team.
        self.team = representation[-1]
        self.captured = False
        self.position_history = []

    def __repr__(self):
        return f'{self.__class__.__name__}{self.position}'

    def compute_valid_moves(self, board):
        """Collect the de-duplicated squares reachable via any move generator."""
        squares = {
            square
            for generator in self.moves
            for square in generator.compute_valid_moves(self.position, board, self.team)
        }
        return list(squares)

    def move_to(self, position):
        """Relocate the piece, logging the move and recording its history."""
        print(f'{self.team}: {self.representation} from {convert(self.position)} to {convert(position)}')
        self.position = position
        self.position_history.append(position)
class Pawn(Piece):
    """Pawn: forward moves and diagonal captures, with a one-time double step."""
    def __init__(self, direction, position, representation):
        # direction distinguishes the two teams' forward direction.
        moves = [move.InitialPawnMove(direction), move.PawnMove(direction), move.PawnCapture(direction)]
        super().__init__(moves, position, representation)
    def move_to(self, position):
        super().move_to(position)
        # After the pawn's first recorded move the two-square initial
        # move is no longer legal, so drop it from the move generators.
        if len(self.position_history) == 1:
            self.moves = [move_ for move_ in self.moves if not isinstance(move_, InitialPawnMove)]
class Knight(Piece):
    """Knight piece.

    NOTE(review): no move generators are attached, so a Knight can never
    move -- presumably a KnightMove is not implemented yet; confirm
    against the move module.
    """
    def __init__(self, direction, position, representation):
        moves = []
        super().__init__(moves, position, representation)
class Bishop(Piece):
    """Bishop: diagonal slider with board-length (8) range."""

    def __init__(self, direction, position, representation):
        # direction is accepted for signature parity with the other
        # pieces but is irrelevant to a bishop.
        super().__init__([move.BishopMove(8)], position, representation)
class Rook(Piece):
    """Rook: straight-line slider along ranks and files."""

    def __init__(self, direction, position, representation):
        # direction is accepted for signature parity only.
        super().__init__([move.RookMove()], position, representation)
class Queen(Piece):
    """Queen: combines rook and bishop movement."""

    def __init__(self, direction, position, representation):
        # direction is accepted for signature parity only.
        super().__init__([move.RookMove(), move.BishopMove(8)], position, representation)
class King(Queen):
    """King: moves like a queen but only one square in any direction."""
    def __init__(self, direction, position, representation):
        super().__init__(direction, position, representation)
        # Reuse the queen's rook+bishop generators, clamped to range 1.
        for move_ in self.moves:
            move_.range = 1
# Factory map from the piece-code constants to their classes; used to
# instantiate pieces from a board representation.
PIECES = {PAWN: Pawn,
          KNIGHT: Knight,
          BISHOP: Bishop,
          ROOK: Rook,
          QUEEN: Queen,
          KING: King}
| 2,651 |
RPF_count_CDS_nonStranded.py
|
zhengtaoxiao/RPF-count-CDS
| 2 |
2023770
|
import HTSeq
import sys
from collections import Counter
"""
usage: python RPF_counts_CDS.py bamFile gtfFile
change the following parameters for specific instance:
exclude_start_distance = 45 # 15 codons
exclude_stop_distance = 15 # 5 codons
minLen = 26
maxLen = 34
only used for non-strand specific mapping.
"""
# if there are multiple TIS, use the most 5' end start codon and the most 3' end stop codon
# read the gtf file
def readGTF(gtfFile):
    """Parse a GTF annotation file.

    Returns a 3-tuple:
        start_codon_sites: gene_id -> most 5' start-codon coordinate
        stop_codon_sites:  gene_id -> most 3' stop-codon coordinate
        CDS_features: HTSeq.GenomicArrayOfSets mapping CDS intervals to gene_ids
                      (stranded="no": built for non-strand-specific mapping)
    """
    gtf = HTSeq.GFF_Reader(gtfFile)
    start_codon_sites = {}
    stop_codon_sites = {}
    CDS_features = HTSeq.GenomicArrayOfSets("auto", stranded="no")
    i = 0
    for f in gtf:
        i += 1
        if i % 10000 == 0:
            # \r so progress overwrites the same terminal line.
            sys.stderr.write("%d GFF lines processed.\r" % i)
        gname = f.attr['gene_id']
        if f.type == "CDS":
            CDS_features[f.iv] += gname
        if f.type == "start_codon":
            # If a gene has multiple TIS, keep the most 5' start codon
            # (min on + strand, max of start_d on - strand).
            if gname not in start_codon_sites:
                start_codon_sites[gname] = f.iv.start_d
            else:
                if f.iv.strand == "+":
                    start_codon_sites[gname] = min(f.iv.start, start_codon_sites[gname])
                else:
                    start_codon_sites[gname] = max(f.iv.start_d, start_codon_sites[gname])
        if f.type == "stop_codon":
            # Symmetrically, keep the most 3' stop codon.
            if gname not in stop_codon_sites:
                stop_codon_sites[gname] = f.iv.end_d
            else:
                if f.iv.strand == "+":
                    stop_codon_sites[gname] = max(f.iv.end, stop_codon_sites[gname])
                else:
                    stop_codon_sites[gname] = min(f.iv.end_d, stop_codon_sites[gname])
    return start_codon_sites, stop_codon_sites, CDS_features
# --- main script ---
# Define the parameters (change for your specific instance):
bamFile = sys.argv[1]
gtfFile = sys.argv[2]
exclude_start_distance = 45  # 15 codons
exclude_stop_distance = 15  # 5 codons
minLen = 26
maxLen = 34

# Parse the GTF annotation once up front.
start_codon_sites, stop_codon_sites, CDS_features = readGTF(gtfFile)

# Count reads per gene from the BAM file.
# Only uniquely mapped reads are used; intersection-strict mode.
counts = Counter()
empty = 0
ambiguous = 0
lowqual = 0
notaligned = 0
nonunique = 0
bam = HTSeq.BAM_Reader(bamFile)
for r in bam:
    if not r.aligned:
        notaligned += 1
        continue
    if r.optional_field("NH") > 1:  # NH > 1 -> multi-mapper, skip
        nonunique += 1
        continue
    if r.iv.chrom in ["MT", "chrM", "chrMT"]:  # skip the mitochondrial chromosome; change this for your instance.
        continue
    if not (minLen <= len(r.read.seq) <= maxLen):  # keep typical RPF lengths only
        continue
    # Intersection-strict: every aligned segment of the read must fall
    # within the CDS of exactly one gene.
    iv_seq = (co.ref_iv for co in r.cigar if co.type == "M" and co.size > 0)
    fs = None
    for iv in iv_seq:
        for iv2, fs2 in CDS_features[iv].steps():
            if fs is None:
                fs = fs2.copy()
            else:
                fs = fs.intersection(fs2)
    if fs is None or len(fs) == 0:
        empty += 1
    elif len(fs) > 1:
        ambiguous += 1
    else:
        gname = list(fs)[0]
        try:
            # Exclude reads too close to the start or stop codon.
            if min(abs(start_codon_sites[gname] - r.iv.start_d), abs(start_codon_sites[gname] - r.iv.end_d)) < exclude_start_distance:
                continue
            elif min(abs(r.iv.end_d - stop_codon_sites[gname]), abs(r.iv.start_d - stop_codon_sites[gname])) < exclude_stop_distance:
                continue
            else:
                counts[gname] += 1
        except KeyError:
            # Gene without an annotated start/stop codon: count it anyway.
            # (The original used a bare `except:`, which also hid real bugs.)
            counts[gname] += 1

# Report per-gene counts plus HTSeq-style summary lines.
# print() with a single argument works in both Python 2 and 3; the original
# `print "..."` statements were Python-2 only.
for g in sorted(counts):
    print("%s\t%d" % (g, counts[g]))
print("__no_feature\t%d" % empty)
print("__ambiguous\t%d" % ambiguous)
print("__too_low_aQual\t%d" % lowqual)
print("__not_aligned\t%d" % notaligned)
print("__alignment_not_unique\t%d" % nonunique)
| 3,166 |
SirIsaac/transcriptionNetwork.py
|
sidambhire/SirIsaac
| 39 |
2022946
|
# TranscriptionNetwork.py
#
# <NAME>
# 06.09.2009
# 07.05.2009 added input variables
#
# A SloppyCell implementation of small transcription regulation networks.
#
from SloppyCell.ReactionNetworks import *
def TranscriptionNetworkZiv(netid='TranscriptionNetwork'):
    """
    Creates SloppyCell transcription network with 4 species as in
    ZivNemWig07.  Each of three transcription factors (X, Y, and Z)
    is regulated (either activated or inhibited) by exactly one
    other transcription factor.  GFP is always
    down-regulated by transcription factor 3.

    For now, just implements the first circuit from ZivNemWig07
    (number 1 in Figure 1) until I figure out a better way of
    enumerating the possibilities.

    Returns:
        The constructed SloppyCell Network object.
    """
    net = Network(netid, name='Transcription Network')
    net.addCompartment('Comp', name='Compartment')
    net.addParameter('n', 2, isOptimizable=False)  # Hill coefficient
    nameList = ['TFX', 'TFY', 'TFZ', 'GFP']
    initialConditions = [0., 0., 0., 1.]
    suffixList = ['X', 'Y', 'Z', 'G']
    connectionList = ['TFX', 'TFX', 'TFY', 'TFZ']  # which TF regulates each
    signList = [-1, -1, -1, -1]  # +1 activation, -1 inhibition
    # transcription factor species
    for name, IC in zip(nameList, initialConditions):
        net.addSpecies(name, 'Comp', IC)
    # decay rates (rG set to definite number in paper)
    defaultr = 1.
    for suffix in suffixList[:-1]:
        net.addParameter('r' + suffix, defaultr, isOptimizable=True)
    net.addParameter('rG', defaultr, isOptimizable=False)  # <---- need to set this
    # Michaelis constants
    defaultK = 1.
    for suffix in suffixList:
        net.addParameter('K' + suffix, defaultK, isOptimizable=True)
    # range parameters
    defaulta = 1.
    for suffix in suffixList:
        net.addParameter('a' + suffix, defaulta, isOptimizable=True)
    # leak parameter
    defaulta0 = 1.
    net.addParameter('a0', defaulta0, isOptimizable=True)
    # input parameters (scale factors s*, fixed -- not optimizable)
    defaults = 1.
    for suffix in suffixList:
        net.addParameter('s' + suffix, defaults, isOptimizable=False)
    # Reaction rate rules: each species decays as r*X and is produced with a
    # leak a0 plus a Hill term in its (scaled) regulator `connection`.
    for name, suffix, connection, sign in \
            zip(nameList, suffixList, connectionList, signList):
        if sign == +1:  # activated
            net.addRateRule(name,
                '-r'+suffix+'*'+name+' + a0 + a'+suffix+
                '*('+connection+'/s'+suffix+')**n /'+'(K'+suffix+'**n + ('
                + connection+'/s'+suffix+')**n)')
            # Earlier, unscaled form kept for reference:
            # net.addRateRule( name, \
            #     '-r'+suffix+'*'+name+' + a0 + a'+suffix+'*'+connection+'**n /' \
            #     +'(K'+suffix+'**n + '+connection+'**n)' )
        else:  # inhibited
            net.addRateRule(name,
                '-r'+suffix+'*'+name+' + a0 + a'+suffix+'*K'+suffix+'**n /'
                + '(K'+suffix+'**n + ('+connection+'/s'+suffix+')**n)')
            # Earlier, unscaled form kept for reference:
            # net.addRateRule( name, \
            #     '-r'+suffix+'*'+name+' + a0 + a'+suffix+'*K'+suffix+'**n /' \
            #     +'(K'+suffix+'**n + '+connection+'**n)' )
    return net
| 3,341 |
mmt/training/__init__.py
|
jianzhnie/MultimodalTransformer
| 1 |
2023163
|
'''
Author: jianzhnie
Date: 2021-11-18 18:19:57
LastEditTime: 2021-11-18 18:19:57
LastEditors: jianzhnie
Description:
'''
| 123 |
expected_results/page_content/contact_page_content.py
|
ikostan/ParaBankSeleniumAutomation
| 4 |
2024832
|
# Created by <NAME>.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
from expected_results.page_content.base_page_content import BasePageContent
class ContactPageContent(BasePageContent):
    """Expected results/values for the ParaBank 'Customer Care' (contact) page."""

    URL = BasePageContent.URL + 'contact.htm'
    TITLE = BasePageContent.TITLE + 'Customer Care'
    DESCRIPTION = 'Email support is available by filling out the following form.'
    THANK_YOU = 'Thank you '
    SUCCESS_MESSAGE = 'A Customer Care Representative will be contacting you.'

    def _text_field(label):
        # Class-body helper: every text input on the contact form follows the
        # same title/id/name/class/type/error pattern.
        return {'title': label + ':',
                'id': label.lower(),
                'name': label.lower(),
                'class': 'input',
                'type': 'text',
                'error': label + ' is required.'}

    CONTACT_FORM = {
        'name': _text_field('Name'),
        'email': _text_field('Email'),
        'phone': _text_field('Phone'),
        'message': _text_field('Message'),
        'button': {'value': 'Send to Customer Care',
                   'class': 'button',
                   'type': 'submit'},
    }

    # Remove the helper so the public class namespace is unchanged.
    del _text_field
| 1,474 |
examples/layout_rectangles.py
|
kendallreid/popsicle
| 60 |
2024711
|
import sys
sys.path.insert(0, "../")
import math
from popsicle import juce_gui_basics
from popsicle import juce, START_JUCE_COMPONENT
class MainContentComponent(juce.Component):
    """Demo component: header/footer bands, a sidebar, and four coloured rows."""

    header = juce.TextButton()
    sidebar = juce.TextButton()
    limeContent = juce.TextButton()
    grapefruitContent = juce.TextButton()
    lemonContent = juce.TextButton()
    orangeContent = juce.TextButton()
    footer = juce.TextButton()

    def __init__(self):
        super().__init__()

        # Labelled chrome buttons: colour, caption, then attach.
        for button, colour, caption in (
            (self.header, juce.Colours.cornflowerblue, "Header"),
            (self.footer, juce.Colours.cornflowerblue, "Footer"),
            (self.sidebar, juce.Colours.grey, "Sidebar"),
        ):
            button.setColour(juce.TextButton.buttonColourId, colour)
            button.setButtonText(caption)
            self.addAndMakeVisible(button)

        # Unlabelled content rows: colour only, then attach.
        for button, colour in (
            (self.limeContent, juce.Colours.lime),
            (self.grapefruitContent, juce.Colours.yellowgreen),
            (self.lemonContent, juce.Colours.yellow),
            (self.orangeContent, juce.Colours.orange),
        ):
            button.setColour(juce.TextButton.buttonColourId, colour)
            self.addAndMakeVisible(button)

        self.setSize(400, 400)

    def paint(self, g):
        g.fillAll(juce.Colours.darkgrey)

    def resized(self):
        """Carve the local bounds: top/bottom bands, left sidebar, stacked rows."""
        area = self.getLocalBounds()

        headerFooterHeight = 36
        self.header.setBounds(area.removeFromTop(headerFooterHeight))
        self.footer.setBounds(area.removeFromBottom(headerFooterHeight))

        sidebarWidth = 80
        self.sidebar.setBounds(area.removeFromLeft(sidebarWidth))

        contentItemHeight = 24
        for row in (self.limeContent, self.grapefruitContent,
                    self.lemonContent, self.orangeContent):
            row.setBounds(area.removeFromTop(contentItemHeight))


if __name__ == "__main__":
    START_JUCE_COMPONENT(MainContentComponent, name="Advanced GUI layout techniques", width=400, height=400)
| 2,524 |
packages/micropython-official/v1.12/esp32/stubs/select.py
|
TheVinhLuong102/micropy-stubs
| 18 |
2024942
|
"""
Module: 'select' on esp32 1.12.0
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.12.0', version='v1.12 on 2019-12-26', machine='ESP32 module with ESP32')
# Stubber: 1.3.2
POLLERR = 8
POLLHUP = 16
POLLIN = 1
POLLOUT = 4
def poll():
pass
def select():
pass
| 279 |
data_process/change_data_format_excel2tsv.py
|
yuejiaxiang/semEvel2020_task8
| 0 |
2023128
|
# -*- coding: UTF-8 -*-
# @Time : 2021/1/25
# @Author : <EMAIL>
# Apache License
# Copyright©2020-2021 <EMAIL> All Rights Reserved
import json
import os
import math
import pandas as pd
import numpy as np
from data_process.change_data_format_unit import excel2tsv_unit, save_data, clean_pre_tsv_format
def isnan(thing):
    """Return True if *thing* is a floating-point NaN, False for anything else.

    Non-float values (strings, ints, None, ...) are never NaN.  Using
    isinstance() instead of the original `type(thing) != float` also handles
    float subclasses (e.g. numpy.float64) correctly.
    """
    return isinstance(thing, float) and math.isnan(thing)
def get_b(text, item, slice_idx, token):
    """Locate the character offset of *item* within *text*.

    Args:
        text: sentence text to search in.
        item: annotated substring to locate.
        slice_idx: list of candidate search start offsets.
        token: "t1-t2" or "t1$t2" selector -- t1 indexes slice_idx (search
            start), t2 selects the t2-th occurrence of *item* from there.
            Integers may be float-formatted by Excel (e.g. "2.0-1").

    Returns:
        The offset of the selected occurrence, or -1 on a malformed token /
        out-of-range slice index / missing occurrence.
    """
    # Normalise float-formatted integers ("12.0" -> "12").  Two-digit values
    # must be handled before single digits so "12.0" is not mangled by the
    # "2.0" rule; this loop preserves the exact order of the original chain
    # of twenty .replace() calls.
    for value in list(range(10, 20)) + list(range(1, 10)) + [0]:
        token = token.replace(str(value) + '.0', str(value))
    t1 = 0
    t2 = 1
    try:
        if len(token) > 0:
            if '-' in token:
                parts = str(token).split('-')
            else:
                parts = str(token).split('$')
            t1 = int(parts[0])
            if len(parts) > 1:
                t2 = int(parts[1])
    except ValueError:
        # Non-numeric selector: signal "not found" (original caught everything).
        return -1
    if t1 >= len(slice_idx):
        return -1
    end = slice_idx[t1]
    b = -1  # initialised so t2 == 0 returns -1 instead of UnboundLocalError
    for _ in range(t2):
        b = text.find(item, end)
        end = b + len(item)
    return b
def excel2tsv(data, out_path):
    """Convert annotation rows (read from Excel) into per-paragraph .tsv files.

    Rows sharing a paragraph id are accumulated; whenever the paragraph id
    changes, the previous paragraph is flushed via excel2tsv_unit()/save_data().

    NOTE(review): the original indentation was lost; this reconstruction skips
    rows whose first column is NaN entirely -- confirm against the workbook.
    """
    ann_id = 0      # running annotation-set index within the current paragraph
    token_id = 0    # running token index used to build "T<ann>-<tok>" names
    para_id_old = '-1'
    anno_set = {}   # ann_id -> list of [begin, end, ann_id, Tid, label, text]
    rel = {}        # Tid -> [relation name, target Tid]
    um = {}         # quantity Tid -> {'Unit': ..., 'modifier': ...}
    for excel_line, d in enumerate(data):
        if not isnan(d[0]):
            # Column layout: 0 para id, 1 sentence idx, 2 sentence text,
            # 3 slice idx, 5 quantity, 6 quantity line, 7 unit, 8 modifiers,
            # 9 property, 10 property line, 11 entity, 12 entity line.
            para_id = d[0] if not isnan(d[0]) else ''
            sent_idx = json.loads(d[1]) if not isnan(d[1]) else ''
            sent_text = str(d[2]) if not isnan(d[2]) else ''
            slice_idx = json.loads(d[3]) if not isnan(d[3]) else ''
            quantity = str(d[5]) if not isnan(d[5]) else ''
            quantity_line = str(d[6]).strip() if not isnan(d[6]) else ''
            unit = str(d[7]).strip() if not isnan(d[7]) else ''
            mod = str(d[8]).strip().split(' ') if not isnan(d[8]) else ['']
            property = str(d[9]) if not isnan(d[9]) else ''
            property_line = str(d[10]).strip() if not isnan(d[10]) else ''
            entity = str(d[11]) if not isnan(d[11]) else ''
            entity_line = str(d[12]).strip() if not isnan(d[12]) else ''
            quantity_Tid = ''
            property_Tid = ''
            for mo in mod:
                # Validate against the MeasEval modifier vocabulary;
                # excel_line + 2 converts to the 1-based Excel row (header skipped).
                if mo not in ['', 'IsApproximate', 'IsCount', 'IsRange', 'IsList', 'IsMean', 'IsMedian', 'IsMeanHasSD', 'HasTolerance', 'IsRangeHasTolerance']:
                    print('illegal mod {} - {}'.format(excel_line + 2, mo))
            if para_id:
                if para_id != para_id_old:
                    # New paragraph: flush the previous one and reset all state.
                    if len(anno_set) > 0:
                        tsv = excel2tsv_unit(anno_set, rel, um, para_id_old)
                        out_put_file = os.path.join(out_path, para_id_old + '.tsv')
                        save_data(tsv, out_put_file)
                    ann_id = 0
                    token_id = 0
                    anno_set = {}
                    rel = {}
                    um = {}
                    para_id_old = para_id
                anno_set[ann_id] = []
            if quantity:
                b = get_b(sent_text, quantity, slice_idx, quantity_line)
                e = b + len(quantity)
                if sent_text[b:e] != quantity:
                    print('not match {} - {}'.format(excel_line+2, 'quantity'))
                token_name = 'T' + str(ann_id) + '-' + str(token_id)
                quantity_Tid = token_name
                token_id += 1
                um[token_name] = {'Unit': unit, 'modifier': mod}
                # sent_idx[0] converts sentence-local offsets to paragraph offsets.
                anno_set[ann_id].append([b+sent_idx[0], e+sent_idx[0], ann_id, token_name, 'Quantity', quantity])
            if property:
                b = get_b(sent_text, property, slice_idx, property_line)
                e = b + len(property)
                if sent_text[b:e] != property:
                    print('not match {} - {}'.format(excel_line+2, 'property'))
                token_name = 'T' + str(ann_id) + '-' + str(token_id)
                property_Tid = token_name
                token_id += 1
                anno_set[ann_id].append([b+sent_idx[0], e+sent_idx[0], ann_id, token_name, 'MeasuredProperty', property])
                rel[token_name] = ['HasQuantity', quantity_Tid]
            if entity:
                b = get_b(sent_text, entity, slice_idx, entity_line)
                e = b + len(entity)
                if sent_text[b:e] != entity:
                    print('not match {} - {}'.format(excel_line+2, 'entity'))
                token_name = 'T' + str(ann_id) + '-' + str(token_id)
                token_id += 1
                anno_set[ann_id].append([b+sent_idx[0], e+sent_idx[0], ann_id, token_name, 'MeasuredEntity', entity])
                # The entity links to the property when one exists, else directly to the quantity.
                if property_Tid:
                    rel[token_name] = ['HasProperty', property_Tid]
                else:
                    rel[token_name] = ['HasQuantity', quantity_Tid]
            ann_id += 1
    # Flush the final paragraph.
    if len(anno_set) > 0:
        tsv = excel2tsv_unit(anno_set, rel, um, para_id)
        out_put_file = os.path.join(out_path, para_id_old + '.tsv')
        save_data(tsv, out_put_file)
    return
def generate_tsv(file, out_path):
    """Convert the annotation Excel workbook at *file* into .tsv files under *out_path*."""
    # Clear any previously generated output first.
    clean_pre_tsv_format(path=out_path)
    # header=None / skiprows=1: the first workbook row is a header we discard.
    frame = pd.read_excel(file, header=None, skiprows=1)
    rows = np.array(frame).tolist()
    excel2tsv(rows, out_path)


if __name__ == '__main__':
    # generate_tsv('human/test_anno1_20210126.xlsx', '../MeasEval/data/human_eval_anno1')
    # generate_tsv('human/test_anno2.xlsx', '../MeasEval/data/human_eval_anno2')
    # generate_tsv('human/test_anno3.xlsx', '../MeasEval/data/human_eval_anno3')
    generate_tsv('data_enhancement/NER_union_roberta_quantity_with_roberta_joint_ERE_isoQ_MOD1.xlsx',
                 '../ner_process/pre_tsv_format')
| 6,043 |
Alphabetic Patterns/alphabeticpattern58.py
|
vaidehisinha1/Python-PatternHouse
| 0 |
2024742
|
alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
print("Enter the no of rows: ")
n = int(input())
for i in range(0, n):
for j in range(0, i+1):
print(alpha[i], end=" ")
for k in range(0,n-i-1):
print("*",end=" ")
print()
# Enter the no of rows:
# 5
# A * * * *
# B B * * *
# C C C * *
# D D D D *
# E E E E E
| 345 |
vision/module/__init__.py
|
pieperm/IARC-2020
| 12 |
2024511
|
"""
module __init__.
Identifies "module" as a package in order to import submodules.
"""
import sys
import os
sys.path.append(os.path.dirname(__file__))
try:
from location import ModuleLocation
from region_of_interest import get_region_of_interest
from module_bounding import get_module_bounds
from module_orientation import get_module_orientation, get_module_roll
from module_depth import get_module_depth
except ImportError as e:
print(f"module/__init__.py failed: {e}")
| 502 |
Chapter15/DigitsClassification/DigitsClassification-ConvNet_Keras.py
|
sim42/SCML
| 0 |
2024044
|
"""
Handwritten Digit Classification using Convolutional Neural Network and Keras
MNIST
"""
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
import time
# Load MNIST (downloaded/cached as "mnist.npz" on first use).
(X_train, y_train), (X_test, y_test) = mnist.load_data("mnist.npz")

# Scale images to the [0, 1] range
X_train = X_train.astype("float32") / 255
X_test = X_test.astype("float32") / 255

# Make sure images have shape (28, 28, 1)
X_train = np.expand_dims(X_train, -1)
X_test = np.expand_dims(X_test, -1)

num_classes = 10
# convert class vectors to binary (one-hot) class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

input_shape = (28, 28, 1)
# Small two-stage ConvNet: two conv+pool stages, then dropout and softmax.
model = keras.Sequential(
    [
        keras.Input(shape=input_shape),
        layers.Conv2D(8, kernel_size=(5, 5), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(8, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dropout(0.6),
        # layers.Dense(100, activation="sigmoid"),
        layers.Dense(num_classes, activation="softmax"),
    ]
)

# Optimizer: SGD with momentum.  (The original comment claimed "Adaptive
# Moment Estimation", i.e. Adam, which did not match the code.)  `lr` is
# deprecated in TF2 Keras; `learning_rate` is the supported name.
model.compile(loss="categorical_crossentropy",
              optimizer=keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
              metrics=["accuracy"])

batch_size = 20
epochs = 10
# Train on a 10k subset to keep runtime modest; 10% held out for validation.
time_start = time.time()
model.fit(X_train[:10000], y_train[:10000], batch_size=batch_size, epochs=epochs, validation_split=0.1)
time_end = time.time()
print("Times Used %.2f S" % (time_end - time_start))

score = model.evaluate(X_test[:10000], y_test[:10000], verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
| 1,729 |
src/opcua_webhmi_bridge/frontend_messaging.py
|
renovate-tests/opcua-webhmi-bridge
| 0 |
2024564
|
"""Management of messaging changes to frontend application."""
import asyncio
import logging
from json.decoder import JSONDecodeError
from typing import Dict, Union
from aiohttp import ClientError, ClientSession, ClientTimeout, web
from .config import CentrifugoSettings
from .library import AsyncTask, MessageConsumer
from .messages import (
HeartBeatMessage,
LinkStatus,
MessageType,
OPCDataChangeMessage,
OPCStatusMessage,
)
# Union of the OPC-UA message types forwarded to the frontend.
OPCMessage = Union[OPCDataChangeMessage, OPCStatusMessage]

# Seconds to wait for a queued message before publishing a heartbeat instead.
HEARTBEAT_TIMEOUT = 5

_logger = logging.getLogger(__name__)


class FrontendMessagingWriter(MessageConsumer[OPCMessage]):
    """Handles signalization to frontend."""

    logger = _logger
    purpose = "Frontend messaging publisher"

    def __init__(self, config: CentrifugoSettings):
        """Initializes frontend signalization.

        Args:
            config: Centrifugo related configuration options.
        """
        super().__init__()
        self._config = config

    async def task(self) -> None:
        """Implements frontend signalization asynchronous task.

        Drains the consumer queue forever, publishing each message to the
        Centrifugo HTTP API; when the queue is idle for HEARTBEAT_TIMEOUT
        seconds a heartbeat message is published instead.
        """
        api_key = self._config.api_key.get_secret_value()
        headers = {"Authorization": f"apikey {api_key}"}
        async with ClientSession(
            headers=headers, timeout=ClientTimeout(total=10)
        ) as session:
            while True:
                message: Union[OPCMessage, HeartBeatMessage]
                try:
                    message = await asyncio.wait_for(
                        self._queue.get(), timeout=HEARTBEAT_TIMEOUT
                    )
                except asyncio.TimeoutError:
                    # No traffic: let clients detect liveness via heartbeat.
                    message = HeartBeatMessage()
                # Centrifugo server HTTP API "publish" command payload.
                command = {
                    "method": "publish",
                    "params": {
                        "channel": message.message_type.value,
                        "data": message.frontend_data,
                    },
                }
                try:
                    async with session.post(self._config.api_url, json=command) as resp:
                        resp.raise_for_status()
                        resp_data = await resp.json()
                        # Centrifugo reports API-level errors in the JSON body
                        # even on HTTP 200.
                        if (error := resp_data.get("error")) is not None:
                            _logger.error(
                                "%s - Centrifugo API error: %s %s",
                                self.purpose,
                                error["code"],
                                error["message"],
                            )
                except ClientError as err:
                    # Log and keep looping; a transient HTTP failure must not
                    # kill the publisher task.
                    _logger.error("%s error: %s", self.purpose, err)
class CentrifugoProxyServer(AsyncTask):
    """Centrifugo HTTP proxy server.

    Serves the subscribe-proxy endpoint Centrifugo calls when a client
    subscribes, and replays the last known OPC-UA state to new subscribers.
    """

    logger = _logger
    purpose = "Centrifugo proxy server"

    def __init__(
        self, config: CentrifugoSettings, messaging_writer: FrontendMessagingWriter
    ) -> None:
        """Initialize Centrifugo proxy server instance.

        Args:
            config: Centrifugo related configuration options.
            messaging_writer: Instance of frontend signalization task.
        """
        self._config = config
        self._messaging_writer = messaging_writer
        # Last data-change message per OPC-UA node id, replayed on subscribe.
        self._last_opc_data: Dict[str, OPCDataChangeMessage] = {}
        self.last_opc_status = OPCStatusMessage(LinkStatus.Down)

    def clear_last_opc_data(self) -> None:
        """Clears the record of last OPC-UA data received."""
        self._last_opc_data = {}

    def record_last_opc_data(self, message: OPCDataChangeMessage) -> None:
        """Records the last OPC-UA data received for each node ID.

        Args:
            message: The message to add to the record.
        """
        self._last_opc_data[message.node_id] = message

    async def centrifugo_subscribe(self, request: web.Request) -> web.Response:
        """Handle Centrifugo subscription requests."""

        def _error(code: int, message: str) -> web.Response:
            # Centrifugo proxy protocol: errors travel in the JSON body,
            # not as HTTP status codes.
            return web.json_response({"error": {"code": code, "message": message}})

        try:
            context = await request.json()
            channel = context.get("channel")
        except JSONDecodeError:
            raise web.HTTPInternalServerError(reason="JSON decode error")
        except AttributeError:
            # Body decoded to something without .get (e.g. a bare JSON scalar).
            raise web.HTTPBadRequest(reason="Bad request format")
        if channel is None:
            return _error(1000, "Missing channel field")
        elif channel == MessageType.OPC_DATA_CHANGE:
            # Replay the latest known value of every node to the new subscriber.
            for message in self._last_opc_data.values():
                self._messaging_writer.put(message)
        elif channel == MessageType.OPC_STATUS:
            self._messaging_writer.put(self.last_opc_status)
        # Reject channels that are not valid MessageType values.
        try:
            MessageType(channel)
        except ValueError:
            return _error(1001, "Unknown channel")
        return web.json_response({"result": {}})

    async def task(self) -> None:
        """Implements Centrifugo proxy asynchronous task."""
        app = web.Application()
        app.router.add_post("/centrifugo/subscribe", self.centrifugo_subscribe)
        runner = web.AppRunner(app)
        await runner.setup()
        try:
            # host=None binds all interfaces on the configured proxy port.
            site = web.TCPSite(runner, None, self._config.proxy_port)
            await site.start()
            _logger.info("Centrifugo proxy server started")
            while True:
                # Keep the task alive; the web server runs in the background.
                await asyncio.sleep(3600)
        finally:
            await runner.cleanup()
| 5,404 |
src/client/client.py
|
uncle-lv/mini-redis-python
| 0 |
2023671
|
import socket
from loguru import logger
class Client:
    """Minimal blocking TCP client used to talk to the mini-redis server."""

    def __init__(self, host: str = '127.0.0.1', port: int = 6379) -> None:
        # NOTE: connects eagerly -- constructing a Client opens the socket.
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((host, port))
        logger.info('The client has connected at {}:{}'.format(host, port))

    def send(self, msg: bytes) -> int:
        """Send raw bytes; returns the number of bytes actually sent."""
        logger.info('Send message: {}'.format(msg))
        return self.sock.send(msg)

    def recv(self, buf_size: int = 1024) -> bytes:
        """Receive at most *buf_size* bytes (may return fewer)."""
        return self.sock.recv(buf_size)

    def close(self) -> None:
        """Close the underlying socket."""
        self.sock.close()
        logger.info('Client has closed.')


# Demo (runs at import time): sends a RESP-encoded `SET Hello World` command
# and logs the server's reply.
client = Client()
client.send(b'*3\r\n$3\r\nSET\r\n$5\r\nHello\r\n$5\r\nWorld\r\n')
logger.info('Receive: {}'.format(client.recv()))
client.close()
| 874 |
LeetCodeBook/array/252-canAttendMeetings.py
|
samprasgit/PythonAlgorithmBasis
| 0 |
2024411
|
class Solution(object):
    """LeetCode 252 -- Meeting Rooms."""

    def canAttendMeetings(self, nums):
        """Return True if no two meetings in *nums* overlap.

        Accepts intervals either as [start, end] pairs (as this file's demo
        passes) or as objects with .start/.end attributes (the classic
        LeetCode Interval type).  The original only handled the attribute
        form and raised AttributeError on the list data below.
        """
        def _bounds(interval):
            # Normalise both supported interval representations.
            if isinstance(interval, (list, tuple)):
                return interval[0], interval[1]
            return interval.start, interval.end

        # sorted() avoids mutating the caller's list (the original sorted in place).
        ordered = sorted(nums, key=lambda iv: _bounds(iv)[0])
        for prev, cur in zip(ordered, ordered[1:]):
            if _bounds(prev)[1] > _bounds(cur)[0]:
                return False
        return True
if __package__ == "__main__":
nums = [[0, 30], [5, 10], [15, 20]]
nums1 = [[7, 10], [2, 4]]
s = Solution()
res = s.canAttendMeetings(nums1)
| 418 |
configs/pspnet_wsss/pspnet_scalenet101_40kx32_coco_urn.py
|
XMed-Lab/URN
| 28 |
2023441
|
# PSPNet with a ScaleNet-101 backbone, trained on URN pseudo-labels for COCO.
_base_ = './pspnet_r50-d8_40kx32_coco.py'
model = dict(
    pretrained='data/models/scalenet/weights/scalenet101.pth',
    backbone=dict(
        type='ScaleNet',
        layers=[3, 4, 23, 3],  # ResNet-101-style stage depths
        structure='data/models/scalenet/structures/scalenet101.json',
        out_indices=(0, 1, 2, 3),
        # Dilated (d8) backbone: stages 3/4 keep stride 1 and use dilation instead.
        strides=(1, 2, 1, 1),
        dilations=(1, 1, 2, 4),
        norm_eval=False),
    decode_head=dict(
        in_channels=2048,
        # NOTE(review): weight_thresh presumably filters low-reliability
        # pseudo-label pixels from the loss -- confirm in the loss implementation.
        loss_decode=dict(weight_thresh=0.05)),
    auxiliary_head=dict(
        in_channels=1024,
        loss_decode=dict(weight_thresh=0.05))
)
# 512 out of memory, so reduce to 448
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 448), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=(448, 448), cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    # ImageNet mean/std normalisation.
    dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True),
    dict(type='Pad', size=(448, 448), pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
data = dict(
    train=dict(pipeline=train_pipeline,
               # Pseudo-label annotations produced by URN with this backbone.
               ann_dir='voc_format/urn_s101_coco')
)
| 1,308 |
tests/resource_test.py
|
bkbarry/swagger-py
| 0 |
2023747
|
#!/usr/bin/env python
#
# Copyright (c) 2014, Yelp, Inc.
#
"""Swagger client tests to validate 'resource' declarations
A sample 'resource' is listed below.
{
"apiVersion": "1.0.0",
"swaggerVersion": "1.2",
"basePath": "http://petstore.swagger.wordnik.com/api",
"produces": [
"application/json"
],
"apis": [...]
}
"""
import json
import unittest
import httpretty
from swaggerpy.client import SwaggerClient, Resource, Operation
from swaggerpy.processors import SwaggerError
class ResourceTest(unittest.TestCase):
    """Validate SwaggerClient handling of 'resource' declarations.

    Each test serves a minimal API declaration via httpretty and checks how
    SwaggerClient resolves required fields, versions and base paths.
    """

    def setUp(self):
        """Build a minimal one-operation API declaration fixture."""
        parameter = {
            "paramType": "query",
            "name": "test_param",
            "type": "string"
        }
        operation = {
            "method": "GET",
            "nickname": "testHTTP",
            "type": "void",
            "parameters": [parameter]
        }
        api = {
            "path": "/test_http",
            "operations": [operation]
        }
        self.response = {
            "swaggerVersion": "1.2",
            "basePath": "/",
            "apis": [api]
        }

    def register_urls(self):
        """Serve the resource listing and the api_test declaration."""
        httpretty.register_uri(
            httpretty.GET, "http://localhost/api-docs",
            body=json.dumps(
                {"swaggerVersion": "1.2", "apis": [{"path": "/api_test"}]}))
        httpretty.register_uri(
            httpretty.GET, "http://localhost/api-docs/api_test",
            body=json.dumps(self.response))

    @httpretty.activate
    def test_error_on_wrong_swagger_version(self):
        self.response["swaggerVersion"] = "XYZ"
        self.register_urls()
        self.assertRaises(SwaggerError, SwaggerClient,
                          u'http://localhost/api-docs')

    @httpretty.activate
    def test_error_on_missing_attr(self):
        # Remove the required fields one after another (cumulatively) and
        # expect a SwaggerError each time.  A plain for-loop replaces the
        # original list comprehension, which was used only for its side
        # effects.
        for field in ('swaggerVersion', 'basePath', 'apis'):
            self.response.pop(field)
            self.register_urls()
            self.assertRaises(SwaggerError, SwaggerClient,
                              u'http://localhost/api-docs')

    # Use basePath as api domain if it is '/' in the API declaration
    @httpretty.activate
    def test_correct_route_with_basePath_as_slash(self):
        httpretty.register_uri(
            httpretty.GET, "http://localhost/test_http?query=foo",
            body='[]')
        self.register_urls()
        resource = SwaggerClient(u'http://localhost/api-docs').api_test
        resp = resource.testHTTP(test_param="foo").result()
        self.assertEqual([], resp)

    @httpretty.activate
    def test_append_base_path_if_base_path_isnt_absolute(self):
        self.response["basePath"] = "/append"
        httpretty.register_uri(
            httpretty.GET, "http://localhost/append/test_http?",
            body='[]')
        self.register_urls()
        resource = SwaggerClient(u'http://localhost/api-docs').api_test
        resource.testHTTP(test_param="foo").result()
        self.assertEqual(["foo"],
                         httpretty.last_request().querystring['test_param'])

    @httpretty.activate
    def test_setattrs_on_client_and_resource(self):
        self.register_urls()
        client = SwaggerClient(u'http://localhost/api-docs')
        self.assertTrue(isinstance(client.api_test, Resource))
        self.assertTrue(isinstance(client.api_test.testHTTP, Operation))

    @httpretty.activate
    def test_api_base_path_if_passed_is_always_used_as_base_path(self):
        httpretty.register_uri(
            httpretty.GET, "http://foo/test_http?", body='')
        self.response["basePath"] = "http://localhost"
        self.register_urls()
        resource = SwaggerClient(u'http://localhost/api-docs',
                                 api_base_path='http://foo').api_test
        resource.testHTTP(test_param="foo").result()
        self.assertEqual(["foo"],
                         httpretty.last_request().querystring['test_param'])

    # Use basePath mentioned in the API declaration only if
    # it does not start with '/' & no api_base_path is provided in the params
    @httpretty.activate
    def test_correct_route_with_basePath_no_slash(self):
        httpretty.register_uri(
            httpretty.GET, "http://localhost/lame/test/test_http?query=foo",
            body=u'""')
        self.response["basePath"] = "http://localhost/lame/test"
        self.register_urls()
        resource = SwaggerClient(u'http://localhost/api-docs').api_test
        resp = resource.testHTTP(test_param="foo").result()
        self.assertEqual('', resp)


if __name__ == '__main__':
    unittest.main()
| 4,630 |
jupyterfs/extension.py
|
ceball/jupyter-fs
| 0 |
2024498
|
# *****************************************************************************
#
# Copyright (c) 2019, the jupyter-fs authors.
#
# This file is part of the jupyter-fs library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from __future__ import print_function
import json
from notebook.utils import url_path_join
from notebook.base.handlers import IPythonHandler
from .meta_contents_manager import MetaContentsManager
class GetHandler(IPythonHandler):
    """Handler that lists the configured contents-manager prefixes."""

    def initialize(self, keys=None):
        # Don't append a colon for the default manager.
        self.keys = keys if keys is not None else []

    def get(self):
        '''Returns all the available contents manager prefixes.

        For a contents manager configuration such as::

            {
                "file": LargeFileContentsManager,
                "s3": S3ContentsManager,
                "samba": SambaContentsManager
            }

        the result here will be ``["file", "s3", "samba"]``, which allows the
        frontend to instantiate one new filetree per available contents
        manager.
        '''
        self.finish(json.dumps(self.keys))
def load_jupyter_server_extension(nb_server_app):
    """Called when the extension is loaded.

    Wires the ``multicontents/get`` handler into the notebook web app when
    the server's contents manager is a :class:`MetaContentsManager`.

    Args:
        nb_server_app (NotebookWebApplication): handle to the Notebook webserver instance.
    """
    web_app = nb_server_app.web_app
    base_url = web_app.settings['base_url']
    host_pattern = '.*$'
    managers = nb_server_app.config.get('JupyterFS', {}).get('contents_managers', {})
    contents_manager = nb_server_app.contents_manager
    if not isinstance(contents_manager, MetaContentsManager):
        # Some other contents manager is configured; do nothing.
        print('Not using jupyter-fs')
        return
    contents_manager.init(managers)
    manager_keys = list(contents_manager._contents_managers.keys())
    print('Jupyter-fs active with {} managers'.format(len(manager_keys)))
    print('Installing jupyter-fs handler on path %s' % url_path_join(base_url, 'multicontents'))
    web_app.add_handlers(
        host_pattern,
        [(url_path_join(base_url, 'multicontents/get'), GetHandler, {'keys': manager_keys})],
    )
| 2,151 |
tdameritrade/urls.py
|
t-triobox/tdameritrade
| 528 |
2023954
|
# TD Ameritrade REST API endpoint URL templates, plus the query-string
# argument names and enumerated values each endpoint accepts.
# Full reference: https://developer.tdameritrade.com/apis
BASE = "https://api.tdameritrade.com/v1/"

########################
# Accounts and Trading #
########################
# https://developer.tdameritrade.com/account-access/apis

# ORDERS
CANCEL_ORDER = BASE + "accounts/{accountId}/orders/{orderId}"  # DELETE
GET_ORDER = BASE + "accounts/{accountId}/orders/{orderId}"  # GET
GET_ORDERS_BY_PATH = BASE + "accounts/{accountId}/orders"  # GET
GET_ORDER_BY_QUERY = BASE + "orders"  # GET
PLACE_ORDER = BASE + "accounts/{accountId}/orders"  # POST
REPLACE_ORDER = BASE + "accounts/{accountId}/orders/{orderId}"  # PUT
GET_ORDER_BY_PATH_ARGS = ("maxResults", "fromEnteredTime", "toEnteredTime", "status")
GET_ORDER_BY_QUERY_ARGS = (
    "accountId",
    "maxResults",
    "fromEnteredTime",
    "toEnteredTime",
    "status",
)
STATUS_VALUES = (
    "AWAITING_PARENT_ORDER",
    "AWAITING_CONDITION",
    "AWAITING_MANUAL_REVIEW",
    "ACCEPTED",
    "AWAITING_UR_OUT",
    "PENDING_ACTIVATION",
    "QUEUED",
    "WORKING",
    "REJECTED",
    "PENDING_CANCEL",
    "CANCELED",
    "PENDING_REPLACE",
    "REPLACED",
    "FILLED",
    "EXPIRED",
)
# maxResults: int
# fromEnteredTime: yyyy-MM-dd
# toEnteredTime: yyyy-MM-dd
# status: STATUS_VALUES

# SAVED ORDERS
CREATE_SAVED_ORDER = BASE + "accounts/{accountId}/savedorders"  # POST
DELETE_SAVED_ORDER = BASE + "accounts/{accountId}/savedorders/{savedOrderId}"  # DELETE
GET_SAVED_ORDER = BASE + "accounts/{accountId}/savedorders/{savedOrderId}"  # GET
GET_SAVED_ORDER_BY_PATH = BASE + "accounts/{accountId}/savedorders"  # GET
REPLACE_SAVED_ORDER = BASE + "accounts/{accountId}/savedorders/{savedOrderId}"  # PUT

# ACCOUNTS
GET_ACCOUNT = BASE + "accounts/{accountId}"  # GET
GET_ACCOUNTS = BASE + "accounts"  # GET

##################
# AUTHENTICATION #
##################
# https://developer.tdameritrade.com/authentication/apis
ACCESS_TOKEN = BASE + "oauth2/token"  # POST
ACCESS_TOKEN_ARGS = (
    "grant_type",
    "refresh_token",
    "access_type",
    "code",
    "client_id",
    "redirect_uri",
)

###############
# INSTRUMENTS #
###############
# https://developer.tdameritrade.com/instruments/apis
SEARCH_INSTRUMENTS = BASE + "instruments"  # GET
SEARCH_INSTRUMENTS_ARGS = ("symbol", "projection")
SEARCH_INSTRUMENT_PROJECTION = (
    "symbol-search",
    "symbol-regex",
    "desc-search",
    "desc-regex",
    "fundamental",
)
GET_INSTRUMENT = BASE + "instruments/{cusip}"  # GET

################
# MARKET HOURS #
################
# https://developer.tdameritrade.com/market-hours/apis
GET_HOURS_FOR_MULTIPLE_MARKETS = BASE + "marketdata/hours"  # GET
GET_HOURS_FOR_MULTIPLE_MARKETS_ARGS = ("markets", "date")
MARKETS_VALUES = ("EQUITY", "OPTION", "FUTURE", "BOND", "FOREX")
GET_HOURS_FOR_SINGLE_MARKET = BASE + "marketdata/{market}/hours"  # GET
# BUGFIX: this was the bare string "date" while every other *_ARGS constant
# is a tuple; iterating a string yields single characters, so any generic
# "for arg in ..._ARGS" consumer silently misbehaved. Normalized to a 1-tuple.
GET_HOURS_FOR_SINGLE_MARKET_ARGS = ("date",)
# date: yyyy-MM-dd or yyyy-MM-dd'T'HH:mm::ssz

##########
# MOVERS #
##########
# https://developer.tdameritrade.com/movers/apis
MOVERS = BASE + "marketdata/{index}/movers"  # GET
MOVERS_ARGS = ("direction", "change")
DIRECTION_VALUES = ("up", "down")
CHANGE_VALUES = ("value", "percent")

#################
# OPTION CHAINS #
#################
# https://developer.tdameritrade.com/option-chains/apis
GET_OPTION_CHAIN = BASE + "marketdata/chains"  # GET
OPTION_CHAIN_ARGS = (
    "symbol",
    "contractType",
    "strikeCount",
    "includeQuotes",
    "strategy",
    "interval",
    "strike",
    "range",
    "fromDate",
    "toDate",
    "volatility",
    "underlyingPrice",
    "interestRate",
    "daysToExpiration",
    "expMonth",
    "optionType",
)
CONTRACT_TYPE_VALUES = ("CALL", "PUT", "ALL")
STRATEGY_VALUES = (
    "SINGLE",
    "ANALYTICAL",
    "COVERED",
    "VERTICAL",
    "CALENDAR",
    "STRANGLE",
    "STRADDLE",
    "BUTTERFLY",
    "CONDOR",
    "DIAGONAL",
    "COLLAR",
    "ROLL",
)
RANGE_VALUES = ("ITM", "NTM", "OTM", "SAK", "SBK", "SNK", "ALL")
OPTION_TYPE_VALUES = ("S", "NS", "ALL")
OPTION_EXPMONTH_VALUES = (
    "JAN",
    "FEB",
    "MAR",
    "APR",
    "MAY",
    "JUN",
    "JUL",
    "AUG",
    "SEP",
    "OCT",
    "NOV",
    "DEC",
    "ALL",
)

#################
# PRICE HISTORY #
#################
# https://developer.tdameritrade.com/price-history/apis
GET_PRICE_HISTORY = BASE + "marketdata/{symbol}/pricehistory"  # GET
GET_PRICE_HISTORY_ARGS = (
    "periodType",
    "period",
    "frequencyType",
    "frequency",
    "endDate",
    "startDate",
    "needExtendedHoursData",
)
PERIOD_TYPE_VALUES = ("day", "month", "year", "ytd")
FREQUENCY_TYPE_VALUES = ("minute", "daily", "weekly", "monthly")

##########
# QUOTES #
##########
# https://developer.tdameritrade.com/quotes/apis
GET_QUOTE = BASE + "marketdata/{symbol}/quotes"  # GET
GET_QUOTES = BASE + "marketdata/quotes"  # GET
GET_QUOTES_ARGS = ("symbol",)

#######################
# TRANSACTION HISTORY #
#######################
# https://developer.tdameritrade.com/transaction-history/apis
GET_TRANSACTION = BASE + "accounts/{accountId}/transactions/{transactionId}"  # GET
GET_TRANSACTIONS = BASE + "accounts/{accountId}/transactions"  # GET
GET_TRANSACTIONS_ARGS = ("type", "symbol", "startDate", "endDate")
GET_TRANSCATION_TYPE_VALUES = (
    "ALL",
    "TRADE",
    "BUY_ONLY",
    "SELL_ONLY",
    "CASH_IN_OR_CASH_OUT",
    "CHECKING",
    "DIVIDEND",
    "INTEREST",
    "OTHER",
    "ADVISOR_FEES",
)
# Correctly spelled alias; GET_TRANSCATION_TYPE_VALUES (note the typo) is
# kept above for backward compatibility with existing callers.
GET_TRANSACTION_TYPE_VALUES = GET_TRANSCATION_TYPE_VALUES

###################
# User Info/Prefs #
###################
# https://developer.tdameritrade.com/user-principal/apis
GET_PREFERENCES = BASE + "accounts/{accountId}/preferences"  # GET
GET_STREAMER_SUBSCRIPTION_KEYS = BASE + "userprincipals/streamersubscriptionkeys"  # GET
GET_STREAMER_SUBSCRIPTION_KEYS_ARGS = ("accountIds",)
GET_USER_PRINCIPALS = BASE + "userprincipals"  # GET
GET_USER_PRINCIPALS_ARGS = ("fields",)
USER_PRINCIPALS_FIELDS_VALUES = (
    "streamerSubscriptionKeys",
    "streamerConnectionInfo",
    "preferences",
    "surrogateIds",
)
UPDATE_PREFERENCES = BASE + "accounts/{accountId}/preferences"  # PUT

#############
# WATCHLIST #
#############
# https://developer.tdameritrade.com/watchlist/apis
CREATE_WATCHLIST = BASE + "accounts/{accountId}/watchlists"  # POST
DELETE_WATCHLIST = BASE + "accounts/{accountId}/watchlists/{watchlistId}"  # DELETE
GET_WATCHLIST = BASE + "accounts/{accountId}/watchlists/{watchlistId}"  # GET
GET_WATCHLISTS_MULTIPLE_ACCOUNTS = BASE + "accounts/watchlists"  # GET
GET_WATCHLISTS = BASE + "accounts/{accountId}/watchlists"  # GET
REPLACE_WATCHLIST = BASE + "accounts/{accountId}/watchlists/{watchlistId}"  # PUT
UPDATE_WATCHLIST = BASE + "accounts/{accountId}/watchlists/{watchlistId}"  # PATCH
| 6,569 |
pyapdl/sections.py
|
JorgeDeLosSantos/pyapdl
| 16 |
2024436
|
# -*- coding: utf-8 -*-
class Section(object):
    """Base class for all beam cross-section definitions."""

    def __init__(self):
        pass
class RectangleSection(Section):
    """
    Rectangle section

    Parameters
    ----------
    b : int, float
        Rectangle base
    h : int, float
        Rectangle height
    """

    def __init__(self, b, h):
        Section.__init__(self)
        self.b = b
        self.h = h
        self.type = "RECT"

    def get_data(self):
        """Return the SECDATA payload: base, height, and a 2x2 cell mesh."""
        self.data = (self.b, self.h, 2, 2)
        return "".join("%s," % value for value in self.data)
class ISection(Section):
    """
    Creates a ISection

    Parameters
    ----------
    w1 : int,float
        Width 1
    w2 : int,float
        Width 2
    w3 : int,float
        Width 3
    t1 : int,float
        Thickness 1
    t2 : int,float
        Thickness 2
    t3 : int,float
        Thickness 3
    """

    def __init__(self, w1, w2, w3, t1, t2, t3):
        Section.__init__(self)
        self.w1 = w1
        self.w2 = w2
        self.w3 = w3
        self.t1 = t1
        self.t2 = t2
        self.t3 = t3
        self.type = "I"

    def get_data(self):
        """Return the SECDATA payload: three widths then three thicknesses."""
        self.data = (self.w1, self.w2, self.w3, self.t1, self.t2, self.t3)
        return "".join("%s," % value for value in self.data)
def create_beam_section(sid, sname, stype):
    """Build the APDL SECTYPE/SECDATA command pair for a beam section.

    sid    -- numeric section id
    sname  -- section name
    stype  -- a Section subclass instance providing .type and .get_data()
    """
    sectype_cmd = "SECTYPE,%s,BEAM,%s,%s,0" % (sid, stype.type, sname)  # Beam section
    secdata_cmd = "SECDATA,%s" % stype.get_data()
    return sectype_cmd + "\n" + secdata_cmd
if __name__ == '__main__':
    # BUGFIX: was a Python 2 print *statement*; the call form below is valid
    # under both Python 2 (parenthesized single expression) and Python 3.
    print(create_beam_section(1, "_b1", RectangleSection(0.2, 0.2)))
| 1,528 |
handlers.py
|
BourneXu/ChrisBot
| 0 |
2022768
|
import os
import json
import requests
import telegram
import configparser
import fake_useragent
from loguru import logger
from bs4 import BeautifulSoup
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
class Handlers:
    """Static Telegram command/message handlers for the bot."""

    @staticmethod
    def help(update, context):
        """Send the static help text listing the supported commands."""
        logger.debug(update.message.chat_id)
        context.bot.send_message(
            chat_id=update.message.chat_id,
            text="""
我是蛋蛋小分队专属订制 Bot, please talk to me!\n#######################\n回复我: \n1. /help 查看帮助,支持的命令\n2. /cs 查看计算机类热门信息, 来自 Hacker News等\n3. /shop 查看购物类热门信息, 来自 北美烧钱快报 等
...\n#######################\n已接入图灵机器人, 直接回复, 可与我聊天哦~
""",
            parse_mode=telegram.ParseMode.MARKDOWN,
        )

    @staticmethod
    def echo(update, context):
        """Echo the incoming message text back to the same chat."""
        logger.debug(update.message.chat_id)
        context.bot.send_message(chat_id=update.message.chat_id, text=update.message.text)

    @staticmethod
    def tuling(update, context):
        """Forward the message to the Tuling chatbot API and relay its reply."""
        # Try to read config to get apiKey and userId
        try:
            config = configparser.ConfigParser()
            config.read("./config.ini")
            apiKey = config["tuling"]["apiKey"]
            userId = config["tuling"]["userId"]
        except (KeyError, configparser.Error) as err:
            # BUGFIX: was a bare `except:` that swallowed every error class;
            # a missing file/section/key now raises with the cause chained.
            raise ValueError("Config file `config.ini` is missing.") from err
        url = "http://openapi.tuling123.com/openapi/api/v2"
        payload = {
            "reqType": 0,
            "perception": {
                "inputText": {"text": update.message.text},
                "inputImage": {"url": "imageUrl"},
                "selfInfo": {"location": {"city": "上海", "province": "上海", "street": "南京东路"}},
            },
            "userInfo": {"apiKey": apiKey, "userId": userId},
        }
        headers = {"Content-Type": "text/json"}
        response = requests.post(url, data=json.dumps(payload), headers=headers)
        res = response.json()
        try:
            restext = res["results"][0]["values"]["text"]
        except (KeyError, IndexError, TypeError):
            # BUGFIX: narrowed from a bare `except:`; any malformed API
            # response falls back to the canned apology text.
            restext = "我貌似挂了..."
        logger.debug(update.message.chat_id)
        context.bot.send_message(chat_id=update.message.chat_id, text=restext)

    @staticmethod
    def get_hackernews(update, context):
        """Send the top 5 Hacker News front-page links as a Markdown list."""
        url = "https://news.ycombinator.com/"
        res = requests.get(url)
        html = res.text
        soup = BeautifulSoup(html, "html.parser")
        newslist = soup.select('td[class="title"]')
        newsletter = ""
        # Title cells come in pairs (rank cell, story cell); use the story cell.
        for i in range(1, min(6, len(newslist) // 2 + 1)):
            link = (newslist[i * 2 - 1]).a.get("href")
            title = (newslist[i * 2 - 1]).text
            newsletter += "{}. [{}]({})\n".format(i, title, link)
        logger.debug(update.message.chat_id)
        context.bot.send_message(
            chat_id=update.message.chat_id, text=newsletter, parse_mode=telegram.ParseMode.MARKDOWN
        )

    @staticmethod
    def get_dealmoon(update, context):
        """Send up to 8 hot deals scraped from dealmoon.cn as a Markdown list."""
        url = "https://www.dealmoon.cn/"
        headers = {"user-agent": Handlers.get_fake_useragent(), "authority": "www.dealmoon.cn"}
        res = requests.get(url, headers=headers)
        html = res.text
        soup = BeautifulSoup(html, "html.parser")
        dealhot = soup.select('div[class="box_item box_item_new"]')
        dealletter = ""
        for i in range(1, min(9, len(dealhot) + 1)):
            link = dealhot[i - 1].a.get("href")
            title = dealhot[i - 1].text.strip()
            dealletter += "{}. [{}]({})\n".format(i, title, link)
        logger.debug(update.message.chat_id)
        context.bot.send_message(
            chat_id=update.message.chat_id, text=dealletter, parse_mode=telegram.ParseMode.MARKDOWN
        )

    @staticmethod
    def get_fake_useragent():
        """Return a random User-Agent string backed by the local cache file."""
        path = os.getcwd() + "/fake_useragent.json"
        ua = fake_useragent.UserAgent(path=path)
        return ua.random
| 3,928 |
solutions/ctci/solutions/2_Linked_Lists/2-1_Remove_Dups/solution.py
|
zwliew/ctci
| 4 |
2024343
|
# # Uses a set to store the data that has been found
# # O(N) time, O(N) space
# # N == len(head)
# # 19.475s
# def solve(head):
# s = set()
# cur = head
# prv = None
# while cur:
# if cur.val in s:
# prv.nxt = cur.nxt
# else:
# s.add(cur.val)
# prv = cur
# cur = cur.nxt
# return head
# Similar to solution 1, but tracks the current node differently
# O(N) time, O(N) space
# N == len(head)
# 19.992s
def solve(head):
    """Remove duplicate values from an unsorted linked list, in place.

    Keeps the first occurrence of each value. O(N) time, O(N) extra space,
    where N == len(head). Returns the (unchanged) head node.
    """
    seen = {head.val}
    node = head
    while node.nxt:
        candidate = node.nxt
        if candidate.val in seen:
            # Splice the duplicate out; `node` stays put so the new
            # successor is examined on the next iteration.
            node.nxt = candidate.nxt
        else:
            seen.add(candidate.val)
            node = candidate
    return head
# # Iterate through the linked list with 2 pointers and check if
# # each node is different from every subsequent node
# # O(N^2) time, O(1) space
# # N == len(head)
# # 19.363s
# def solve(head):
# cur = head
# while cur:
# run = cur
# while run.nxt:
# if cur.val == run.nxt.val:
# run.nxt = run.nxt.nxt
# else:
# run = run.nxt
# cur = cur.nxt
# return head
| 1,160 |
autoajax/sites.py
|
furious-luke/django-autoajax
| 1 |
2024163
|
from django.conf.urls import url
from .forms import DependentSelect
class AutoAjaxSite(object):
    """Registry of dependent-select form fields and their AJAX endpoints."""

    def __init__(self):
        self._registry = []
        self._urls = None  # built lazily by make_urls()

    def make_urls(self):
        """Build one URL pattern per registered field and wire widget URLs."""
        patterns = []
        for app, form_cls, field_name, field in self._registry:
            form_name = form_cls.__name__.lower()
            path = 'autoajax/' + '/'.join([app.name, form_name, field_name])
            patterns.append(url('^%s$' % path, field.as_view()))
            # DependentSelect widgets fetch options from this endpoint.
            if isinstance(field.widget, DependentSelect):
                field.widget.attrs['data-url'] = '/' + path
        self._urls = patterns

    @property
    def urls(self):
        """(urlpatterns, app_namespace, instance_namespace) for include()."""
        if self._urls is None:
            self.make_urls()
        return self._urls, 'autoajax', 'autoajax'
# Module-level singleton; importers register fields against this instance.
site = AutoAjaxSite()
| 782 |
Check/Mark glyphs with no contours.py
|
huertatipografica/huertatipografica-scripts
| 19 |
2024807
|
#MenuTitle: Mark glyphs with no contours
# -*- coding: utf-8 -*-
__doc__="""
Mark glyphs with no contours in the current Layer
"""
import GlyphsApp
# Mark every selected non-space glyph whose current layer has fewer than
# two path nodes (i.e. effectively no contours) with color label 6, and
# report them as a slash-separated glyph-name string.
Doc = Glyphs.currentDocument
Font = Glyphs.font
selectedLayers = Font.selectedLayers
outputString = ''
for thisLayer in selectedLayers:
    count = 0
    if thisLayer.parent.subCategory != 'Space':
        for thisPath in thisLayer.paths:
            count += len(thisPath.nodes)
        if count < 2:
            thisLayer.parent.color = 6
            outputString += '/' + thisLayer.parent.name
# BUGFIX: was a Python 2 print *statement*; the call form is valid in both
# Python 2 and 3 (Glyphs 3 runs scripts under Python 3).
print("Glyphs with no contours in master: " + outputString)
| 558 |
01/data_types.py
|
tousekjan/fit-ctu-edw
| 0 |
2023107
|
import pandas as pd
import numpy as np
import math
def isNaN(num):
    """True only for float NaN: NaN is the sole value unequal to itself."""
    return not (num == num)
def getDataTypes(filename, columnNames=None):
    """Print an inferred SQL-ish type for every column of a delimited file.

    Object (string) columns are reported as varchar(<max string length>);
    other columns report their pandas dtype. Columns containing an
    empty/NaN cell get a ' NULL' suffix.

    filename    -- path of the file, parsed with pandas.read_csv
    columnNames -- optional list of replacement header names
    """
    df = pd.read_csv(filename)
    if columnNames is not None:
        df.columns = columnNames
    dataTypeDict = dict(df.dtypes)
    print('\n' + filename + '\n')
    for column in dataTypeDict:
        is_text = dataTypeDict[column] == object  # varchar candidate?
        maxLength = 0
        nullable = False
        for value in df[column]:
            # NOTE(review): falsy values (0, '', False) are treated as NULL
            # here, matching the original behavior — confirm that is intended.
            if not value or isNaN(value):
                nullable = True
                continue
            # Only string columns need a length; skip the str() conversion
            # for numeric columns (hoisted dtype check).
            if is_text:
                length = len(str(value))
                if length > maxLength:
                    maxLength = length
        if is_text:
            res = 'varchar(' + str(maxLength) + ')'
        else:
            res = str(dataTypeDict[column])
        if nullable:  # idiom: was `nullable == True`
            res += ' NULL'
        print(column + ': ' + res)
# Report inferred column types for each OpenFlights/OurAirports data file.
# The .dat files ship without headers, so explicit column names are supplied.
getDataTypes('data\\airports\\countries_20190227.csv')
columnNamesAirline = ['id', 'name', 'alternative_name', 'IATA', 'ICAO', 'callsign', 'country', 'active']
getDataTypes('data\\airports\\airlines_20190227.dat', columnNamesAirline)
columnNamesAirport = ['Id', 'name', 'city', 'country', 'IATA', 'ICAO',
                      'latitude', 'longtitude', 'altitude', 'timezone', 'DST', 'Tz database time zone', 'type', 'source']
getDataTypes('data\\airports\\airports_20190227.dat', columnNamesAirport)
columnNamesPlane = ['name', 'IATA', 'ICAO']
getDataTypes('data\\airports\\planes_20180123.dat', columnNamesPlane)
getDataTypes('data\\airports\\regions_20190227.csv')
columnNamesRoute = ['airline_code', 'airline_id', 'source_airport_code',
                    'source_airport_id', 'destination_airport_code', 'destination_airport_id', 'codeshare', 'number_of_stops', 'equipment']
getDataTypes('data\\airports\\routes_20190227.dat', columnNamesRoute)
getDataTypes('data\\airports\\airport-frequencies_20190227.csv')
getDataTypes('data\\airports\\runways_20190227.csv')
getDataTypes('data\\airports\\navaids_20190227.csv')
| 2,047 |
main.py
|
abhint/Telegram_URL_Image_UploadBot
| 2 |
2024924
|
#!/usr/bin/env python3
# This is bot coded by <NAME> and used for educational purposes only
# https://github.com/AbhijithNT
# (c) <NAME>
# Thank you https://github.com/pyrogram/pyrogram
from pyrogram import Client
from config import config

# Plugin loader config: pyrogram auto-discovers handlers under ./plugins.
plugins = dict(
    root="plugins"
)

# Credentials come from config.py (BOT_TOKEN / API_ID / API_HASH).
bot = Client(
    "Image upload bot",
    bot_token = config.BOT_TOKEN,
    api_id = config.API_ID,
    api_hash = config.API_HASH,
    plugins = plugins
)

# Blocks until the bot is stopped.
bot.run()
| 453 |
eval/accuracy.py
|
ricsi98/pytorch-cifar
| 0 |
2024506
|
import torch
import argparse
from . import Evaluator, EvaluationFunction
# Module-level defaults; all four are overwritten from CLI flags in main().
LOGS = False  # verbose evaluator logging (--verbose)
IS_2N = False  # True when the model has 20 outputs, i.e. nClasses == 20
DEVICE = "cpu"  # torch device string; "cuda" when --gpu is set and available
BS = 64  # evaluation batch size (--batchSize)
class Accuracy(EvaluationFunction):
    """Running top-1 classification accuracy over evaluation batches."""

    def __init__(self):
        super().__init__()
        self.name = "Accuracy"
        self.correct = 0
        self.all = 0
        self.needs_plain_output = False
        self.needs_plain_adv_output = False

    def process(self, mdl_output, adv_output, plain_output, plain_adv_output, target, epsilon):
        """Accumulate correct/total counts for one batch of model outputs."""
        with torch.no_grad():
            predictions = self._probs_to_labels(mdl_output)
            hits = torch.sum((predictions == target).double()).item()
            self.correct += int(hits)
            self.all += target.shape[0]

    def result(self):
        """Fraction of correctly classified samples seen so far."""
        return self.correct / self.all

    def peek_result(self):
        """Non-destructive view of the current accuracy."""
        return self.result()
def simple_2n_classifier(y):
    """Probability mass a 2N-output model assigns to its second half of classes.

    The softmax is taken over columns 10: only, so each row's result sums
    that renormalized distribution (debug print of the first row included).
    """
    print(y[0], torch.softmax(y[0], dim=0))
    half_probs = torch.softmax(y[:, 10:], dim=1)
    return half_probs.sum(dim=1)
class BinaryAccuracy(Accuracy):
    """Accuracy of an adversarial-vs-clean binary classifier over the outputs.

    `classification_function` maps model outputs to a probability in [0, 1];
    adversarial batches are labeled 1, clean batches 0.
    """

    def __init__(self, classification_function):
        super().__init__()
        self.name = "Binary Accuracy"
        self.needs_plain_adv_output = False
        self.needs_attack_adv_output = False
        self.needs_plain_output = True
        self.clf = classification_function

    def process(self, mdl_output, adv_output, plain_output, plain_adv_output, target, epsilon):
        """Score both the attacked and the clean outputs for one batch."""
        with torch.no_grad():
            post_y = self.clf(mdl_output)
            pre_y = self.clf(plain_output)
            probs = torch.cat((post_y, pre_y), dim=0)
            predicted = torch.round(probs)
            # First half of the batch is adversarial (label 1), second is clean.
            labels = torch.cat((torch.ones_like(post_y), torch.zeros_like(pre_y)), dim=0)
            self.all += labels.shape[0]
            print(probs)
            self.correct += (predicted == labels).int().sum().item()
            print(self.all, self.correct)
def accuracy(model_path, n_classes, adv_model_path, epsilon):
    """Evaluate top-1 accuracy of the model against the given adversary/epsilon."""
    runner = Evaluator(adv_model_path, verbose=LOGS, device=DEVICE, batch_size=BS)
    runner.load_model(model_path, n_classes)
    return runner.evaluate(Accuracy(), epsilon)
def main():
    """CLI entry point: parse args, set the module globals, print accuracy."""
    arg_parser = argparse.ArgumentParser(description="Calculate accuracy score")
    arg_parser.add_argument("--model", type=str, help="model path")
    arg_parser.add_argument("--nClasses", type=int, help="model number of classes, either 10, 11 or 20")
    arg_parser.add_argument("--advModel", type=str, help="adversary model path")
    arg_parser.add_argument("--epsilon", type=float, help="fgsm epsilon")
    arg_parser.add_argument("--verbose", "-v", action='store_true')
    arg_parser.add_argument("--gpu", action="store_true")
    arg_parser.add_argument("--batchSize", type=int, default=64)
    parsed = arg_parser.parse_args()

    # Evaluation settings are shared through module globals.
    global LOGS, IS_2N, DEVICE, BS
    LOGS = parsed.verbose
    IS_2N = parsed.nClasses == 20
    if torch.cuda.is_available() and parsed.gpu:
        DEVICE = "cuda"
    else:
        DEVICE = "cpu"
    print("DEVICE", DEVICE)
    BS = parsed.batchSize
    print("BATCH SIZE", BS)
    print("ACCURACY", accuracy(parsed.model, parsed.nClasses, parsed.advModel, parsed.epsilon))


if __name__ == '__main__':
    main()
| 3,160 |
features/steps/scene_parser_steps.py
|
cloose/ray-tracer-challenge
| 0 |
2024724
|
from behave import when, then # pylint: disable=no-name-in-module
from asserts import assert_float, assert_tuple, assert_matrix
from core import color
from core import multiply_matrix
from core import translation, scaling
from scene import scene_from_yaml
@when(u'w <- scene_from_yaml(data)')
def step_parse_scene_from_yaml_into_world_w(context):
    # scene_from_yaml returns (camera, world); keep only the world here.
    _, context.w = scene_from_yaml(context.data)
@when(u'c <- scene_from_yaml(data)')
def step_parse_scne_from_yaml_into_camera_c(context):
    # scene_from_yaml returns (camera, world); keep only the camera here.
    # (Function name typo "scne" is harmless: behave matches by decorator.)
    context.c, _ = scene_from_yaml(context.data)
@then(u'w.objects.count = {expected:d}')
def step_assert_world_objects_count(context, expected):
    # Number of shapes parsed into the world must match the scenario.
    actual = len(context.w.objects)
    assert actual == expected, \
        f"count: {actual} is not equal to {expected}"
@then(
    u'w.objects[{index:d}].material.color = color({red:g}, {green:g},{blue:g})'
)
def step_assert_material_color_of_object_at_index(context, index, red, green,
                                                  blue):
    # Compare the parsed object's material color against the expected RGB.
    assert_tuple(context.w.objects[index].material.color,
                 color(red, green, blue))
@then(u'w.objects[{index:d}].material.ambient = {expected:g}')
def step_impl(context, index, expected):
    # NOTE(review): generic auto-generated name; behave dispatches on the
    # decorator pattern, so renaming is cosmetic only.
    assert_float(context.w.objects[index].material.ambient, expected)
@then(
    u'w.objects[{index:d}].transform = scaling({sx:g}, {sy:g}, {sz:g}) * translation({tx:g}, {ty:g}, {tz:g})'
)
def step_assert_transformation_matrix_of_object_at_index(
        context, index, tx, ty, tz, sx, sy, sz):
    # behave passes captured groups by name, so the parameter order here
    # (t* before s*) need not match the pattern order. The expected matrix
    # is scaling * translation, mirroring the step text.
    assert_matrix(
        context.w.objects[index].transform(),
        multiply_matrix(scaling(sx, sy, sz), translation(tx, ty, tz)))
saferpay/migrations/0006_saferpaytransaction_notify_token.py
|
89grad/django-saferpay
| 0 |
2024945
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-09-11 22:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``notify_token`` to ``SaferpayTransaction``.

    NOTE(review): presumably a per-transaction secret used to validate
    Saferpay notify callbacks — confirm against the notify view code.
    """

    dependencies = [
        ('saferpay', '0005_saferpaytransaction_has_notify'),
    ]

    operations = [
        migrations.AddField(
            model_name='saferpaytransaction',
            name='notify_token',
            # blank=True: token is optional until the notify flow sets it.
            field=models.CharField(blank=True, max_length=32),
        ),
    ]
| 494 |
src/main.py
|
elimarmacena/genetic_algorithm_minimize
| 0 |
2022889
|
from entities.lab import Lab
from utils import constants as const
from utils import common
def main():
    """Run the GA experiment for each generation lifetime and write reports.

    For every lifetime in `generation_life_list`, the experiment is repeated
    const.EXECUTIONS times; best-fitness and full-population histories are
    recorded per (execution, iteration) and written out via utils.common.
    """
    # number of iterations in a population
    generation_life_list = [10, 20]
    for lifetime in generation_life_list:
        generation_best_hist = dict()  # (execution, iteration) -> best fitness
        generation_hist = dict()       # (execution, iteration) -> population
        print(f'THE LIFETIME {lifetime} GENERATION STARTED')
        # Every different lifetime generation must be executed EXECUTIONS times.
        for i in range(const.EXECUTIONS):
            print(f'* EXECUTION {i+1} STARTED')
            # Starting a population; fitness is computed during creation.
            population = Lab()
            # NOTE: an unused per-individual fitness summary string was
            # built here in the original; dropped as dead code.
            # Iteration key -1 records the initial population.
            generation_best_hist[(i, -1)] = population.get_best_fitness()
            generation_hist[(i, -1)] = population.get_init_population()
            current_lifetime = 0
            while current_lifetime < lifetime:
                population.championship()  # selection
                population.crossover()     # crossover
                population.mutation()      # mutation
                population.create_new_gen()  # promote the new generation
                generation_best_hist[(i, current_lifetime)] = population.get_best_fitness()
                generation_hist[(i, current_lifetime)] = population.get_current_population()
                current_lifetime += 1
            print('=\t=\t=\t=\t=\t=\t=\t=\t=')
        common.write_mean_bests(generation_best_hist, lifetime)
        common.write_best(generation_best_hist, lifetime)
        common.write_history(generation_hist, lifetime)
    print('=\t\t\t\tFINISHED\t\t\t=')
    print('Please Check The Output Folder')
# Guard the entry point so importing this module does not run the experiment.
if __name__ == '__main__':
    main()
| 1,827 |
Chapter 6/lenet_filter_visualization.py
|
Thimira/Build-Deeper
| 1 |
2024730
|
'''Visualization of the filters of VGG16, via gradient ascent in input space.
This script can run on CPU in a few minutes (with the TensorFlow backend).
Results example: http://i.imgur.com/4nj4KjN.jpg
'''
from __future__ import print_function
from scipy.misc import imsave
import numpy as np
import time
from keras.applications import vgg16
from keras import backend as K
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dense
from keras.optimizers import SGD
# dimensions of the generated pictures for each filter.
img_width = 28
img_height = 28
# the name of the layer we want to visualize
# (check the model.summary())
# NOTE(review): 'conv2d_2' is presumably the second Conv2D in build_lenet;
# Keras auto-numbering depends on session state — confirm via model.summary().
layer_name = 'conv2d_2'
def build_lenet(width, height, depth, classes, weightsPath=None):
    """Assemble the LeNet architecture and optionally load trained weights.

    width, height, depth -- input image dimensions (channels-last)
    classes              -- number of softmax output classes
    weightsPath          -- optional HDF5 weights file to load
    """
    model = Sequential()

    # Stage 1: CONV => RELU => POOL
    model.add(Conv2D(20, (5, 5), padding="same",
                     input_shape=(height, width, depth)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Stage 2: CONV => RELU => POOL
    model.add(Conv2D(50, (5, 5), padding="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Fully-connected stage: FC => RELU
    model.add(Flatten())
    model.add(Dense(500))
    model.add(Activation("relu"))

    # Softmax classifier head.
    model.add(Dense(classes))
    model.add(Activation("softmax"))

    if weightsPath is not None:
        model.load_weights(weightsPath)
    return model
# util function to convert a tensor into a valid image
def deprocess_image(x):
    """Convert an arbitrary float tensor into a displayable uint8 image array."""
    # Zero-center and rescale so the values have std 0.1.
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1
    # Shift into [0, 1] and clip outliers.
    x += 0.5
    x = np.clip(x, 0, 1)
    # Expand to the 0-255 pixel range.
    x *= 255
    # Move channels last if the backend stores them first.
    if K.image_data_format() == 'channels_first':
        x = x.transpose((1, 2, 0))
    return np.clip(x, 0, 255).astype('uint8')
# Load the trained LeNet and strip the classifier head before visualizing.
model = build_lenet(width=28, height=28, depth=1, classes=10, weightsPath="data/lenet_weights.hdf5")
print('Model loaded.')
# get the summary of the model
model.summary()
# we remove the fully-connected layers from the model
# NOTE(review): on Keras 2.x, model.layers.pop() alone does not rewire the
# graph outputs — confirm the target conv layer is unaffected by these pops.
model.layers.pop()
model.layers.pop()
model.layers.pop()
model.layers.pop()
model.layers.pop()
opt = SGD(lr=0.01)
# Recompile so the truncated model is usable for symbolic gradient queries.
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# get the summary of the model again, in order to get the layer names
model.summary()
# this is the placeholder for the input images
input_img = model.input
# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
# utility function to normalize a tensor by its L2 norm
def normalize(x):
    """Scale a tensor by its L2 norm (epsilon guards against divide-by-zero)."""
    l2 = K.sqrt(K.mean(K.square(x))) + 1e-5
    return x / l2
# Gradient-ascent loop: synthesize one input image per filter that maximally
# activates that filter in `layer_name`.
kept_filters = []
for filter_index in range(0, 50):
    # scan through all 50 filters of the chosen conv layer
    # (comment previously said "first 200 of 512" — stale copy from the
    # original VGG16 script)
    print('Processing filter %d' % filter_index)
    start_time = time.time()
    # we build a loss function that maximizes the activation
    # of the nth filter of the layer considered
    layer_output = layer_dict[layer_name].output
    if K.image_data_format() == 'channels_first':
        loss = K.mean(layer_output[:, filter_index, :, :])
    else:
        loss = K.mean(layer_output[:, :, :, filter_index])
    # we compute the gradient of the input picture wrt this loss
    grads = K.gradients(loss, input_img)[0]
    # normalization trick: we normalize the gradient
    grads = normalize(grads)
    # this function returns the loss and grads given the input picture
    iterate = K.function([input_img], [loss, grads])
    # step size for gradient ascent
    step = 1.
    # we start from a gray image with some random noise
    # NOTE(review): values end up roughly in [118, 138]; presumably the model
    # was trained on unscaled 0-255 pixels — confirm against the training code.
    input_img_data = np.random.random((1, img_width, img_height, 1))
    input_img_data = (input_img_data - 0.5) * 20 + 128
    # we run gradient ascent for 20 steps
    for i in range(20):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step
        print('Current loss value:', loss_value)
        if loss_value <= 0.:
            # some filters get stuck to 0, we can skip them
            break
    # decode the resulting input image
    if loss_value > 0:
        img = deprocess_image(input_img_data[0])
        kept_filters.append((img, loss_value))
    end_time = time.time()
    print('Filter %d processed in %ds' % (filter_index, end_time - start_time))
# we will stich the best 36 filters on a 6 x 6 grid.
n = 6
# the filters that have the highest loss are assumed to be better-looking.
# we will only keep the top 36 filters.
kept_filters.sort(key=lambda x: x[1], reverse=True)
kept_filters = kept_filters[:n * n]
# build a black picture with enough space for
# our 6 x 6 filters of size 28 x 28, with a 5px margin in between
# (comment previously said "8 x 8" — stale)
margin = 5
width = n * img_width + (n - 1) * margin
height = n * img_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))
# fill the picture with our saved filters
for i in range(n):
    for j in range(n):
        img, loss = kept_filters[i * n + j]
        stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,
                         (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img
# save the result to disk
# NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 — this script
# presumably pins an older SciPy; confirm before upgrading.
imsave('lenet_filters_%dx%d.png' % (n, n), stitched_filters)
| 5,767 |
profiles_api/serializers.py
|
aakratij/profiles-rest-api
| 0 |
2024804
|
from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
    """Serializes a name field for testing our APIView."""

    name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
    """Serializes a user profile object."""

    class Meta:
        model = models.UserProfile
        fields = ('id', 'name', 'email', 'password')
        extra_kwargs = {
            'password': {
                'write_only': True,
                'style': {'input_type': 'password'},
            }
        }

    def create(self, validated_data):
        """Create and return a new user with a properly hashed password."""
        # BUGFIX: the password was read via a corrupted name
        # ("<PASSWORD>_data['password']", a syntax error); restore the
        # lookup on the validated payload so create_user can hash it.
        user = models.UserProfile.objects.create_user(
            email=validated_data['email'],
            name=validated_data['name'],
            password=validated_data['password'],
        )
        return user
| 910 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.