max_stars_repo_path (string, length 4-182) | max_stars_repo_name (string, length 6-116) | max_stars_count (int64, 0-191k) | id (string, length 7) | content (string, length 100-10k) | size (int64, 100-10k) |
---|---|---|---|---|---|
examples/shape/cylinder.py
|
yp920505/classy_blocks
| 0 |
2022767
|
import os
from classes.primitives import Edge
from classes.mesh import Mesh
from shapes.shapes import Cylinder
def create():
axis_point_1 = [0, 0, 0]
axis_point_2 = [5, 5, 0]
radius_point_1 = [0, 0, 2]
cylinder = Cylinder(axis_point_1, axis_point_2, radius_point_1)
cylinder.set_bottom_patch('inlet')
cylinder.set_top_patch('outlet')
cylinder.set_outer_patch('walls')
cylinder.set_axial_cell_count(30)
cylinder.set_radial_cell_count(20)
cylinder.set_tangential_cell_count(15)
cylinder.set_axial_cell_size(-0.05)
cylinder.set_outer_cell_size(0.03)
mesh = Mesh()
mesh.add_shape(cylinder)
mesh.write('util/blockMeshDict.template', 'examples/meshCase/system/blockMeshDict')
# run blockMesh
os.system("blockMesh -case examples/meshCase")
| 812 |
problems/062.py
|
6112/project-euler
| 0 |
2022973
|
# encoding=utf-8
## SOLVED 2014/11/18
## 127035954683
# The cube, 41063625 (345^3), can be permuted to produce two other cubes:
# 56623104 (384^3) and 66430125 (405^3). In fact, 41063625 is the smallest cube
# which has exactly three permutations of its digits which are also cube.
# Find the smallest cube for which exactly five permutations of its digits are
# cube.
from helpers.sequence import is_permutation
from math import ceil
def euler():
for n in range(346, 6000):
# number of cubes that are permutations of n^3
cube_permutations = 0
digits = str(n * n * n)
maximum = maximum_for(digits)
# for each number from n to maximum (see maximum_for()), check if its
# cube is a permutation of n
for m in range(n, maximum):
cube = m * m * m
if is_permutation(str(cube), digits):
cube_permutations += 1
# return it if it has the right number of permutations
if cube_permutations == 5:
return n ** 3
# calculate the highest possible value for the cube root of a permutation of
# the given digits
def maximum_for(digits):
xs = reversed(sorted(digits))
return ceil(int("".join(xs)) ** (1 / 3))
| 1,229 |
week_1/calculator.py
|
Ashutosh781/QSTP-Kratos_electronics-Solutions
| 0 |
2024014
|
#! /usr/bin/env python
s = 'Y'
while s != 'n':
try:
x = input("Enter the first number : ")
s = input("Enter the operation : ")
y = input("Enter the second number : ")
ans = eval(x+s+y)
print(f'{x}{s}{y} = {ans}')
except:
print("Division by zero is not allowed")
while True:
s = input("Do you want to use the calculator again (Y/n) : ")
if (s == 'Y') or (s == 'n'):
break
print("\nEnd")
| 494 |
expert_system/config/Cmd.py
|
mffdsp/Sistema_Especialista_IA
| 3 |
2024119
|
import argparse
import sys
class Cmd:
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mode", choices=['shell', 'interactive', 'interface'], default='mode_shell', help="Interface mode")
parser.add_argument("-r", "--rules", action='store_true', help="Displays the rules")
parser.add_argument("-v", "--verbose", action='store_true', help="Displays the steps of the resolution")
parser.add_argument("input", nargs='?', help="The file containing rules, facts and queries")
args = parser.parse_args()
| 538 |
BFS/773_sliding_puzzle.py
|
MartinMa28/Algorithms_review
| 0 |
2024264
|
from collections import deque
from copy import deepcopy
from typing import List
class Solution:
def __init__(self):
self.directions = ((1, 0), (-1, 0), (0, 1), (0, -1))
def _on_board(self, row, col):
return row >= 0 and row < 2 and col >= 0 and col < 3
def slidingPuzzle(self, board: List[List[int]]) -> int:
for i in range(2):
for j in range(3):
if board[i][j] == 0:
row = i
col = j
break
visited = set()
t_b = tuple([tuple(r) for r in board])
visited.add(t_b)
queue = deque([(row, col, board, 0)])
while len(queue) > 0:
row, col, board, move = queue.popleft()
print(board)
if board == [[1,2,3],[4,5,0]]:
return move
for d in self.directions:
new_row = row + d[0]
new_col = col + d[1]
if self._on_board(new_row, new_col):
board[row][col], board[new_row][new_col] = \
board[new_row][new_col], board[row][col]
t_n_b = tuple([tuple(r) for r in board])
if t_n_b not in visited:
visited.add(t_n_b)
queue.append((new_row, new_col, deepcopy(board), move + 1))
board[row][col], board[new_row][new_col] = \
board[new_row][new_col], board[row][col]
return -1
| 1,653 |
ttwitter_py/views/authentic.py
|
tongfeifan/ttwitter_py
| 1 |
2023521
|
from django.contrib.auth import authenticate, login, logout
from django.views.generic import View, TemplateView
from django.http import HttpResponseRedirect, JsonResponse
from ..models import UserProfile
class Login(TemplateView):
template_name = "login.html"
def post(self, request):
username = request.POST.get("username")
password = request.POST.get("password")
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect("/u/{}/".format(username))
else:
form = {
'error': True,
}
return JsonResponse(data=form)
else:
form = {
'error': True
}
return JsonResponse(data=form)
def logout_view(request):
logout(request)
class Register(View):
def post(self, request):
username = request.POST.get("username")
password = request.POST.get("password")
| 1,088 |
ev3dev2simulator/robotpart/UltrasonicSensorBottom.py
|
NielsOkker/ev3dev2simulator
| 0 |
2022751
|
from ev3dev2simulator.config.config import get_simulation_settings
from ev3dev2simulator.robotpart.BodyPart import BodyPart
class UltrasonicSensorBottom(BodyPart):
"""
Class representing an UltrasonicSensor of the simulated robot, mounted facing the ground.
"""
def __init__(self, config, robot):
dims = get_simulation_settings()['body_part_sizes']['ultrasonic_sensor_bottom']
super(UltrasonicSensorBottom, self).__init__(config, robot, dims['width'], dims['height'], 'ultrasonic_sensor',
driver_name='lego-ev3-us')
def setup_visuals(self, scale):
img_cfg = get_simulation_settings()['image_paths']
self.init_sprite(img_cfg['ultrasonic_sensor_bottom'], scale)
def get_latest_value(self):
return self.distance()
def distance(self) -> float:
"""
Get the distance in pixels between this ultrasonic sensor and the ground.
:return: a floating point value representing the distance.
"""
for o in self.sensible_obstacles:
if o.collided_with(self.sprite.center_x, self.sprite.center_y):
return self.get_default_value()
return 20
def get_default_value(self):
"""
1 pixel == 1 mm, so the measurement values this sensor returns map one-to-one to millimeters.
The maximum distance a real-world ultrasonic sensor returns is 2550 mm.
:return: default value in pixels.
"""
return 2550
| 1,534 |
src/db_models_base.py
|
fp12/sfv-bot
| 0 |
2024236
|
metaattr = 'metaattr'
class DBModel(type):
def __new__(cls, name, bases, namespace, **kargs):
# don't propagate kargs but add them to namespace
namespace[metaattr] = kargs.get(metaattr)
return super().__new__(cls, name, bases, namespace)
def __init__(cls, name, bases, namespace, **kargs):
# don't propagate kargs
super().__init__(name, bases, namespace)
def __call__(cls, *args, **kwds):
# create instance but don't propagate arguments
obj = type.__call__(cls)
# create attributes according to definition
if metaattr in cls.__dict__:
valid_args = []
# allow plain args, tuples and lists
if args and isinstance(args[0], (tuple, list)):
valid_args = list(args[0])
elif len(cls.__dict__[metaattr]) == len(args):
valid_args = args
if len(valid_args) == len(cls.__dict__[metaattr]):
for i, f in enumerate(cls.__dict__[metaattr]):
setattr(obj, f, valid_args[i])
else:
for f in cls.__dict__[metaattr]:
setattr(obj, f, None)
return obj
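# Usage sketch (class and field names are illustrative, not taken from this project):
#   class Score(metaclass=DBModel, metaattr=('player', 'points')): pass
# Score('alice', 3) and Score(('alice', 3)) both set obj.player and obj.points, while
# Score() leaves every declared attribute as None.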
| 1,213 |
open_cli3/main.py
|
privcloud-com/open-cli
| 0 |
2023986
|
import os
import logging
import argparse
from distutils.util import strtobool
from . import cli
from . import formatter
HISTORY_PATH = os.path.join(os.path.expanduser("~"), ".open-cli3")
def main():
"""Open-CLI entry point."""
args_parser = argparse.ArgumentParser(description="Open-CLI.")
args_parser.add_argument("-s", "--source", type=str, default=None, help="Open API spec source")
args_parser.add_argument(
"-v", "--verbose", action="store_true", help="If set, set log level to debug"
)
args_parser.add_argument(
"-t", "--history", type=str, default=HISTORY_PATH, help="History file path"
)
args_parser.add_argument(
"-c", "--command", type=str, help="Command (request) to execute", nargs='+', required=False
)
args_parser.add_argument(
"-f",
"--format",
type=str,
choices=formatter.FORMATTERS.keys(),
default=formatter.JSON,
help="Set the CLI output format",
)
args_parser.add_argument(
"--header",
nargs="+",
default=[],
help="Requests headers, usage: --header x-header-1:val-1 x-header-2:val2",
)
args_parser.add_argument(
"--print-request-time",
type=strtobool,
default=False,
help="Show time of each request if this flag set to true",
)
args_parser.add_argument(
"--profile",
type=str,
default=None,
help="Open API profile name that point out some settings you can apply to a open-cli3 command and "
"which are located in open-cli3 config file")
args = args_parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.ERROR)
open_cli = cli.OpenCLI(
source=args.source,
history_path=args.history,
output_format=args.format,
headers=args.header,
print_request_time=args.print_request_time,
profile_name=args.profile,
)
if args.command:
return open_cli.execute(command=args.command[0])
open_cli.run_loop()
if __name__ == "__main__":
main()
| 2,116 |
event/migrations/0003_auto_20200419_0040.py
|
Aleccc/gtcrew
| 0 |
2023330
|
# Generated by Django 3.0.4 on 2020-04-19 04:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('asset', '0002_auto_20200419_0013'),
('event', '0002_auto_20200418_2310'),
]
operations = [
migrations.AlterField(
model_name='result',
name='shell',
field=models.ForeignKey(blank=True, limit_choices_to={'type': 'shell'}, null=True, on_delete=django.db.models.deletion.SET_NULL, to='asset.Asset'),
),
]
| 571 |
yc97/182.py
|
c-yan/yukicoder
| 0 |
2023026
|
N, *A = map(int, open(0).read().split())
t = {}
for a in A:
t.setdefault(a, 0)
t[a] += 1
print(list(t.values()).count(1))
| 132 |
lib/checks.py
|
mhaberler/weather-stations
| 27 |
2023516
|
"""
Generic Helpers
The code is licensed under the MIT license.
"""
from meteostat import Stations
from stations import merge_dicts, station_template
def find_duplicate(station: dict):
"""
Check if a (similar) station already exists
"""
# Merge station data with template
station = merge_dicts(station, station_template)
# Get all weather stations as a DataFrame
stations = Stations()
df = stations.fetch()
# Get key fields
wmo = station['identifiers']['wmo']
icao = station['identifiers']['icao']
lat = station['location']['latitude']
lon = station['location']['longitude']
# First, check for Meteostat ID
if station['id'] and station['id'] in df.index:
return df.loc[[station['id']]].reset_index().to_dict('records')[0]
# Now, check for WMO ID
if wmo and (df['wmo'] == wmo).any():
return df[df['wmo'] == wmo].reset_index().to_dict('records')[0]
# Now, check for ICAO ID
if icao and (df['icao'] == icao).any():
return df[df['icao'] == icao].reset_index().to_dict('records')[0]
# Last, check for proximity
stations = stations.nearby(lat, lon, 500)
if stations.count() > 0:
return stations.fetch(1).reset_index().to_dict('records')[0]
# No duplicates
return None
| 1,280 |
src/utils/dedup_csv.py
|
lhf-labs/finance-news-analysis-bert
| 20 |
2022956
|
import pandas as pd
if __name__ == '__main__':
FILE_NAME = '../data/2020-08-11to2021-02-11'
toclean = pd.read_csv(f'{FILE_NAME}.csv')
deduped = toclean.drop_duplicates('RP_STORY_ID')
deduped = deduped.drop_duplicates('EVENT_TEXT')
deduped = deduped.drop_duplicates('HEADLINE')
deduped = deduped[~deduped['EVENT_SENTIMENT_SCORE'].isna()]
deduped = deduped[pd.to_numeric(deduped['EVENT_SENTIMENT_SCORE'], errors='coerce').notnull()]
deduped.to_csv(f'{FILE_NAME}_deduped.csv', index=False)
| 521 |
NLP/Transformer/odd_numbers/model.py
|
gemessss/models
| 1 |
2023744
|
from transformer import Transformer
import numpy as np
import math
import sys
import oneflow as flow
import oneflow.nn as nn
sys.path.append("../")
TO_CUDA = True
def to_cuda(tensor, flag=TO_CUDA, where="cuda"):
if flag:
return tensor.to(where)
else:
return tensor
class Embeddings(nn.Module):
def __init__(self, vocab, d_model):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
def forward(self, x):
return self.lut(x) * math.sqrt(self.d_model)
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = flow.zeros((max_len, d_model))
position = flow.arange(0, max_len, dtype=flow.float).unsqueeze(1)
div_term = flow.exp(
flow.arange(0, d_model, 2).to(flow.float) * (-math.log(10000.0) / d_model)
).unsqueeze(0)
pe[:, 0::2] = flow.sin(position * div_term)
pe[:, 1::2] = flow.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.pe = flow.nn.Parameter(pe, requires_grad=False)
def forward(self, x):
x = x + self.pe[: x.size(0), :]
return self.dropout(x)
class TransformerModel(nn.Module):
def __init__(
self,
input_sz,
output_sz,
d_model,
nhead,
num_encoder_layers,
num_decoder_layers,
dim_feedforward,
dropout,
):
super(TransformerModel, self).__init__()
self.transformer = Transformer(
d_model=d_model,
nhead=nhead,
num_encoder_layers=num_encoder_layers,
num_decoder_layers=num_decoder_layers,
dim_feedforward=dim_feedforward,
dropout=dropout,
batch_first=False,
)
self.softmax = nn.Softmax(dim=2)
self.linear = nn.Linear(d_model, output_sz)
self.pos_encoder = PositionalEncoding(d_model, dropout)
self.pos_decoder = PositionalEncoding(d_model, dropout)
self.src_embedding = Embeddings(input_sz, d_model)
self.tgt_embedding = Embeddings(output_sz, d_model)
@staticmethod
def generate_subsequent_mask(tgt_len, src_len):
mask = flow.triu(flow.ones((tgt_len, src_len)), 1)
mask = mask.masked_fill(mask.to(flow.int32), float("-inf"))
return mask
@staticmethod
def make_len_mask(inp):
inp_mask = (inp.numpy() == 0).astype(np.int32)
inp_mask = flow.tensor(inp_mask, dtype=flow.int32)
return inp_mask.transpose(0, 1)
def forward(
self,
src,
tgt,
src_mask=None,
tgt_mask=None,
memory_mask=None,
src_key_padding_mask=None,
tgt_key_padding_mask=None,
memory_key_padding_mask=None,
):
if tgt_mask is None:
tgt_mask = self.generate_subsequent_mask(tgt.shape[0], tgt.shape[0])
tgt_mask = to_cuda(tgt_mask, where=tgt.device)
src_key_padding_mask = self.make_len_mask(src)
src_key_padding_mask = to_cuda(src_key_padding_mask, where=tgt.device)
tgt_key_padding_mask = None
src = self.src_embedding(src)
src = self.pos_encoder(src)
tgt = self.tgt_embedding(tgt)
tgt = self.pos_decoder(tgt)
out = self.transformer(
src,
tgt,
src_mask,
tgt_mask,
memory_mask,
src_key_padding_mask,
tgt_key_padding_mask,
memory_key_padding_mask,
)
out = self.linear(out)
return out
| 3,727 |
interview_prep_day3/spiral_matrix_2.py
|
gengannie/solving_questions
| 0 |
2022853
|
# https://leetcode.com/problems/spiral-matrix-ii/
# QID: 59
from typing import List
class Solution:
def generateMatrix(self, n: int) -> List[List[int]]:
ans_matrix = [[0 for i in range (n)] for j in range (n)]
dirs = [(0,1), (1,0), (0,-1), (-1,0)]
past = []
max_i = n*n
x, y = 0,0
d = 0
for i in range (0, max_i):
ans_matrix[x][y] = i + 1
past.append((x,y))
future_x = x + dirs[d][0]
future_y = y + dirs[d][1]
if (future_x >= n or future_x < 0 or future_y >= n or future_y < 0 or (future_x, future_y) in past):
d += 1
if (d > 3):
d = 0
x += dirs[d][0]
y += dirs[d][1]
return ans_matrix
| 784 |
djangocms_frontend/contrib/navigation/frameworks/bootstrap5.py
|
fsbraun/djangocms-bootstrap5
| 7 |
2024164
|
class NavigationRenderMixin:
def render(self, context, instance, placeholder):
instance.add_classes(
"navbar",
f"navbar-{instance.config.get('navbar_design', '')}",
f"navbar-expand-{instance.config.get('navbar_breakpoint', '')}",
)
return super().render(context, instance, placeholder)
class PageTreeRenderMixin:
def render(self, context, instance, placeholder):
instance.add_classes("navbar-nav")
return super().render(context, instance, placeholder)
class NavBrandRenderMixin:
def render(self, context, instance, placeholder):
instance.add_classes("navbar-brand")
return super().render(context, instance, placeholder)
class NavLinkRenderMixin:
def render(self, context, instance, placeholder):
instance.add_classes("nav-link")
if instance.child_plugin_instances:
instance.add_classes("dropdown-toggle")
return super().render(context, instance, placeholder)
| 1,009 |
GNS3/Nodes/cisco7200_node.py
|
nsg-ethz/Metha
| 9 |
2022910
|
import os
import json
from GNS3 import send_request, gns3_parser
from GNS3.Nodes.netmiko_node import NetmikoNode
from settings import GNS_ROUTER_PATH, GNS_METHA_SAME_SYSTEM
class Cisco7200Node(NetmikoNode):
def __init__(self, name, gp, config=None):
with open(f'{GNS_ROUTER_PATH}/cisco-7200.json') as f:
router_conf = json.load(f)
router_conf['name'] = name
if config is not None and GNS_METHA_SAME_SYSTEM:
router_conf['properties']['startup_config'] = os.path.abspath(config)
param = json.dumps(router_conf)
jdata = send_request('POST', f'/v2/projects/{gp.pid}/nodes', param, True)
super().__init__(jdata['node_id'], jdata['console'], name, 'show run', 'show ip route', 'clear ip route *')
if config is not None and not GNS_METHA_SAME_SYSTEM:
self.initial_config = config
self.init_commands = ['no logging console', 'no logging monitor']
self.netmiko_node['device_type'] = 'cisco_ios_telnet'
def parse_routing_table(self, srt, adj):
return gns3_parser.textfsm_to_pd(srt, self.name, adj)
| 1,121 |
visualize-examples.py
|
iSTB/concepts
| 1 |
2023199
|
#!/usr/bin/env python
# visualize-examples.py
import concepts.visualize
DIRECTORY = 'visualize-output'
FORMAT = 'pdf'
concepts.visualize.render_all('examples/*.cxt', directory=DIRECTORY, out_format=FORMAT)
| 209 |
examples/GraphLM_walk_AIDS.py
|
dtylor/WalkAsString
| 5 |
2023493
|
# -*- coding: utf-8 -*-
"""GraphLM_walk_AIDS.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1MOA8mc8b__Fb1Nk8yu3iMNdm_vUhL2Hi
"""
# Commented out IPython magic to ensure Python compatibility.
!git clone https://github.com/dtylor/WalkRNN.git
# %cd WalkRNN
!ls
!curl https://course-v3.fast.ai/setup/colab | bash
import fastai
import pandas as pd
from fastai.utils.show_install import *
show_install()
str(fastai.__dict__['version'])
!more /usr/local/lib/python3.6/dist-packages/fastai/version.py
!ls
from google.colab import drive
drive.mount("/content/drive")
from utilities import load_graph_kernel_graph, load_graph_kernel_labels
node_mappings = [{
0: "C",
1: "O",
2: "N",
3: "Cl",
4: "F",
5: "S",
6: "Se",
7: "P",
8: "Na",
9: "I",
10: "Co",
11: "Br",
12: "Li",
13: "Si",
14: "Mg",
15: "Cu",
16: "As",
17: "B",
18: "Pt",
19: "Ru",
20: "K",
21: "Pd",
22: "Au",
23: "Te",
24: "W",
25: "Rh",
26: "Zn",
27: "Bi",
28: "Pb",
29: "Ge",
30: "Sb",
31: "Sn",
32: "Ga",
33: "Hg",
34: "Ho",
35: "Tl",
36: "Ni",
37: "Tb"
}]
label_maps={"node_labels": node_mappings}
G = load_graph_kernel_graph("./AIDS", mappings=label_maps)
y = load_graph_kernel_labels("./AIDS")
from module import get_structural_signatures, walk_as_string
newGraph, pca, kmeans = get_structural_signatures(G)
walks = walk_as_string(newGraph, componentLabels = y)
from fastai.text import *
from sklearn.model_selection import train_test_split
import numpy
walks.head()
walks.shape
from sklearn.model_selection import train_test_split
import numpy
data = list(set(walks.component))
x_traina ,x_test = train_test_split(data,test_size=0.1)
x_train ,x_val = train_test_split(x_traina,test_size=0.2)
train_tmp = pd.DataFrame(x_train)
train_tmp.columns = ['component']
df_train = pd.merge(walks, train_tmp, on='component', sort=False)
df_train.shape
test_tmp = pd.DataFrame(x_test)
test_tmp.columns = ['component']
df_test = pd.merge(walks, test_tmp, on='component', sort=False)
df_test.shape
val_tmp = pd.DataFrame(x_val)
val_tmp.columns = ['component']
df_val = pd.merge(walks, val_tmp, on='component', sort=False)
df_val.shape
!mkdir result
mypath = './result'
data_lm = TextLMDataBunch.from_df(train_df=df_train[['walk', 'label']], valid_df=df_val[[
'walk', 'label']], path=mypath, text_cols='walk', label_cols='label')
data_lm.save('data_lm.pkl')
!ls -l ./result
bs = 32
# load the data (can be used in the future as well to prevent reprocessing)
data_lm = load_data(mypath, 'data_lm.pkl', bs=bs)
data_lm.show_batch() # take a look at the batch fed into the GPU
awd_lstm_lm_config = dict(emb_sz=400, n_hid=400, n_layers=1, pad_token=1, qrnn=False, bidir=False, output_p=0.1, hidden_p=0.15, input_p=0.25, embed_p=0.02, weight_p=0.2, tie_weights=True, out_bias=True)
awd_lstm_clas_config = dict(emb_sz=400, n_hid=400, n_layers=1, pad_token=1, qrnn=False, bidir=False, output_p=0.4, hidden_p=0.3, input_p=0.4, embed_p=0.05, weight_p=0.5)
learn = language_model_learner(data_lm,arch=AWD_LSTM,config= awd_lstm_lm_config,drop_mult=1.8, callback_fns=ShowGraph,pretrained=False)
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(1, 1e-2, moms=(0.8,0.7))
learn.recorder.plot_losses()
learn.save('fit-head')
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(3, .05, moms=(0.8,0.7))
learn.save('fit-head')
learn.save_encoder('fine_tuned_enc3')
df = walks.sample(frac=1).reset_index(drop=True)
df['index1']=df.index
g = df.groupby('component')
df['RN'] = g['index1'].rank(method='min')
df[df['component']==1].head()
"""Choose a path per node and concatenate for entire component"""
df.groupby('component')['walk'].apply(lambda x: ', '.join(x))
def f(x):
return Series(dict(label = x['label'].min(), text = ', '.join(x['walk'])))
df_text_comp = df[(df['RN']<=6.0)].groupby('component').apply(f)
df_text_comp.head()
df_text_comp['component']= df_text_comp.index
df_text_comp.index.names = ['comp']
df_text_comp.head()
train = pd.merge(df_text_comp, train_tmp, on='component', sort=False)
test = pd.merge(df_text_comp, test_tmp, on='component', sort=False)
val = pd.merge(df_text_comp, val_tmp, on='component', sort=False)
(train.shape,val.shape, test.shape, train.shape[0]/df_text_comp.shape[0])
bs=32#48
data_clas = TextClasDataBunch.from_df(train_df=train[['text','label']],valid_df=val[['text','label']], path=mypath, text_cols='text',label_cols = 'label', vocab=data_lm.vocab)
data_clas.save('tmp_clas')
data_clas = load_data(mypath, 'tmp_clas', bs=bs)
data_clas.show_batch()
learn = text_classifier_learner(data_clas,arch=AWD_LSTM,config = awd_lstm_clas_config, drop_mult=1.7,pretrained=False)
learn.load_encoder('fine_tuned_enc3')
learn.freeze()
gc.collect();
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(1, 5e-02, moms=(0.8,0.7))
def predict(test,learn):
predictions=[]
for index, row in test.iterrows():
p=learn.predict(row['text'])
#print((row['label'],str(p[0])))
predictions.append((row['text'],str(row['label']),str(p[0])))
dfpred = pd.DataFrame(predictions)
dfpred.columns=['text','label','prediction']
match=dfpred[(dfpred['label']==dfpred['prediction'])]
#match.head()
print((dfpred.shape[0], match.shape[0],match.shape[0]/dfpred.shape[0]))
learn.fit_one_cycle(1, 5e-02, moms=(0.8,0.7))
learn.save('first')
learn.load('first');
learn.freeze_to(-2)
learn.fit_one_cycle(1, slice(1e-2/(2.6**4),1e-2), moms=(0.8,0.7))
predict(test,learn)
learn.save('second')
learn.load('second');
learn.freeze_to(-3)
learn.fit_one_cycle(1, slice(5e-3/(2.6**4),5e-3), moms=(0.8,0.7))
predict(test,learn)
learn.save('third')
learn.load('third');
learn.unfreeze()
learn.fit_one_cycle(4, slice(5e-3/(2.6**4),5e-3), moms=(0.8,0.7))
predict(test,learn)
learn.model
| 6,360 |
mods/plot_rtneat.py
|
thermalpilot/opennero
| 215 |
2024200
|
import sys
import os
import re
import time
from pylab import *
# timestamp format
timestamp_fmt = r'%Y-%b-%d %H:%M:%S'
# timestamp for file
file_timestamp_fmt = r'%Y-%m-%d-%H-%M-%S'
# general prefix for OpenNERO log lines (date and time with msec resolution)
log_prefix = r'(?P<date>[^\[]*)\.(?P<msec>[0-9]+) \(.\) '
# ----------------------------[ ai.rtneat log line regular expression ]-----------------------------------------
# ai.rtneat lines get printed during rtneat evaluations and can be used to keep track of rtneat progress
# an example line has the format
# 2011-Mar-02 12:52:55.167396 (!) [ai.rtneat] z-min: -0.0898853 z-max: 0.250617 r-min: [ -2.00352e+08 0 -4.97067e+08 -1.64796e+08 0 0 ] r-max: [ -2.59186e+06 0 -7.22071e+07 -6.11801e+06 8 0 ] w: [ 0 0 -1 1 0 0 ] mean: [ -5.74247e+07 0 -1.30798e+08 -2.47332e+07 5.71875 0 ] stdev: [ 6.33428e+07 0 1.22863e+08 4.67422e+07 2.75642 0 ]
# The fields are:
# * z-min: weighted Z-score minimum (based on population average and standard deviation)
# * z-max: weighted Z-score maximum (based on population average and standard deviation)
# * r-min: raw score minimum (D values)
# * r-max: raw score maximum (D values)
# * w: user-assigned weights (D values in [-1,1])
# * mean: average over mature population (D values)
# * stdev: standard deviation over mature population (D values)
ai_rtneat_pattern = re.compile(log_prefix + r'\[ai\.rtneat\] z-min: (?P<zmin>\S+) z-max: (?P<zmax>\S+) r-min: \[(?P<rmin>[^\]]+)\] r-max: \[(?P<rmax>[^\]]+)\] w: \[(?P<w>[^\]]+)\] mean: \[(?P<mean>[^\]]+)\] stdev: \[(?P<stdev>[^\]]+)\]')
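# For the sample ai.rtneat line quoted above, the named groups come out as, e.g.,
# m.group('zmin') == '-0.0898853', m.group('zmax') == '0.250617', and
# m.group('w') == ' 0 0 -1 1 0 0 ', which process_line() below splits into floats.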
# this is the format for the equivalent line in NERO 2.0 files
# opennero evaluateAll z-min: -2.16926 z-max: 0 r-min: [ 0 0 0 0 0 352.409 0 2230.01 1e+10 3.40282e+38 21502.5 5970.86 ] r-max: [ 0 134 22 0 0 1465.59 0 18089.8 1.499e+10 -3.40282e+38 95074.5 170507 ] w: [0.00 0.00 0.00 -0.00 0.00 -0.00 -0.00 -0.00 -0.00 -1.00 0.00] mean: [ 0 17.44 4.12 0 0 769.721 0 9097.14 1.12429e+10 0 57245 87640.1 ] stdev: [ 0 35.9222 6.43938 0 0 275.282 0 4604.4 1.76394e+09 0 17438.9 53606.8 ]
nero_pattern = re.compile(r'opennero evaluateall z-min: (?P<zmin>\S+) z-max: (?P<zmax>\S+) r-min: \[(?P<rmin>[^\]]+)\] r-max: \[(?P<rmax>[^\]]+)\] w: \[(?P<w>[^\]]+)\] mean: \[(?P<mean>[^\]]+)\] stdev: \[(?P<stdev>[^\]]+)\]')
FITNESS_STAND_GROUND = "Stand ground"
FITNESS_STICK_TOGETHER = "Stick together"
FITNESS_APPROACH_ENEMY = "Approach enemy"
FITNESS_APPROACH_FLAG = "Approach flag"
FITNESS_HIT_TARGET = "Hit target"
FITNESS_AVOID_FIRE = "Avoid fire"
FITNESS_DIMENSIONS = [FITNESS_STAND_GROUND, FITNESS_STICK_TOGETHER,
FITNESS_APPROACH_ENEMY, FITNESS_APPROACH_FLAG, FITNESS_HIT_TARGET,
FITNESS_AVOID_FIRE]
zmin, zmax, rmin, rmax, w, mean, stdev = [], [], [], [], [], [], []
def process_line(line):
"""
Process a line of the log file and record the information in it in the LearningCurve
"""
global zmin, zmax, rmin, rmax, w, mean, stdev
line = line.strip().lower()
m = ai_rtneat_pattern.search(line)
if not m:
m = nero_pattern.search(line)
if m:
#t = time.strptime(m.group('date'), timestamp_fmt) # time of the record
#ms = int(m.group('msec')) / 1000000.0 # the micro-second part in seconds
#base = time.mktime(t) + ms # seconds since the epoch
zmin.append(float(m.group('zmin')))
zmax.append(float(m.group('zmax')))
rmin.append([float(x) for x in m.group('rmin').strip().split()])
rmax.append([float(x) for x in m.group('rmax').strip().split()])
w.append([float(x) for x in m.group('w').strip().split()])
mean.append([float(x) for x in m.group('mean').strip().split()])
stdev.append([float(x) for x in m.group('stdev').strip().split()])
def process_file(f):
line = f.readline()
while line:
process_line(line.strip())
line = f.readline()
def main():
global zmin, zmax, rmin, rmax, w, mean, stdev
fname = "nero_log.txt"
if len(sys.argv) > 1:
fname = sys.argv[1]
with open(fname) as f:
process_file(f)
zmin, zmax, rmin, rmax, w, mean, stdev = np.array(zmin), np.array(zmax), np.array(rmin), np.array(rmax), np.array(w), np.array(mean), np.array(stdev)
print np.shape(zmin), np.shape(zmax), np.shape(rmin), np.shape(rmax), np.shape(w), np.shape(mean), np.shape(stdev)
# plot each dimension in a separate subplot
dd = np.size(mean,1)
for d in range(dd):
figure()
ax = subplot('211')
#ax.errorbar(np.arange(np.size(mean,0)), mean[:,d], yerr=stdev[:,d])
ax.hold(True)
maxline = ax.plot(rmax[:,d], label='max')
avgline = ax.plot(mean[:,d], label='avg')
minline = ax.plot(rmin[:,d], label='min')
ax.legend()
#ax.ylabel('Raw Fitness')
if len(FITNESS_DIMENSIONS) == dd:
ax.set_title(FITNESS_DIMENSIONS[d])
if d < np.size(w,1):
ax = subplot('212')
ax.plot(w[:,d])
ax.set_title('Slider Weight')
#ax.xlabel('Tick')
show()
if __name__ == "__main__":
main()
| 5,094 |
coursera/semana5/lista_exerc/exerc3_vogal.py
|
pedrobenevides2/Ciencia-de-Dados
| 0 |
2023745
|
def vogal(k):
n=k.lower()
#print(n)
vogal =('a','e','i','o','u')
#print(vogal[0:5])
b=0
for letra in vogal:
if letra == n:
b=b+1
else:
b=b
if b==0:
b = False
else:
b =True
return b
# Execution
x = input("vogal ")
while x.isdigit():
x = input( "vogal ")
p=True
p= vogal(x)
| 374 |
witmotion/protocol.py
|
kenreider/witmotion
| 0 |
2022871
|
import math
from math import *
import struct
from datetime import datetime, timezone
from enum import Enum
G = 9.8
class ReceiveMessage:
payload_length = 8
@classmethod
def compute_checksum(cls, body):
assert len(body) == cls.payload_length
checksum = 0x55 + cls.code
for b in body:
checksum += b
checksum &= 0xFF
return checksum
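# Example (illustrative values): for a TimeMessage (code 0x50) whose 8 payload bytes
# are all zero, compute_checksum returns (0x55 + 0x50) & 0xFF == 0xA5.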
class TimeMessage(ReceiveMessage):
code = 0x50
def __init__(self, timestamp):
self.timestamp = timestamp
def __str__(self):
return "time message - timestamp:%s" % self.timestamp
@classmethod
def parse(cls, body):
(year2, month, day, hour, minute, second, millisecond) = struct.unpack(
"<BBBBBBH", body
)
year4 = year2 + 1970
d = datetime(
year=year4,
month=month + 1,
day=day + 1,
hour=hour,
minute=minute,
second=second,
microsecond=millisecond * 1000,
)
d = d.replace(tzinfo=timezone.utc)
return cls(timestamp=d.timestamp())
class AccelerationMessage(ReceiveMessage):
code = 0x51
def __init__(self, a, temp_celsius):
self.a = a
self.temp_celsius = temp_celsius
def __str__(self):
return "acceleration message - vec:%s temp_celsius:%s" % (
self.a,
self.temp_celsius,
)
@classmethod
def parse(cls, body):
(axr, ayr, azr, tempr) = struct.unpack("<hhhh", body)
a = (
(axr / 32768) * 16 * G,
(ayr / 32768) * 16 * G,
(azr / 32768) * 16 * G,
)
temp_celsius = tempr / 100
return cls(
a=a,
temp_celsius=temp_celsius,
)
class AngularVelocityMessage(ReceiveMessage):
code = 0x52
def __init__(self, w, temp_celsius):
self.w = w
self.temp_celsius = temp_celsius
def __str__(self):
return "angular velocity message - w:%s temp_celsius:%s" % (
self.w,
self.temp_celsius,
)
@classmethod
def parse(cls, body):
(wxr, wyr, wzr, tempr) = struct.unpack("<hhhh", body)
w = (
(wxr / 32768) * 2000,
(wyr / 32768) * 2000,
(wzr / 32768) * 2000,
)
temp_celsius = tempr / 100
return cls(
w=w,
temp_celsius=temp_celsius,
)
class AngleMessage(ReceiveMessage):
code = 0x53
def __init__(self, roll, pitch, yaw, version):
self.roll = roll
self.pitch = pitch
self.yaw = yaw
self.version = version
def __str__(self):
return (
"angle message - roll:%0.1f pitch:%0.1f yaw:%0.1f version:%s"
% (
self.roll,
self.pitch,
self.yaw,
self.version,
)
)
@classmethod
def parse(cls, body):
(rollr, pitchr, yawr, version) = struct.unpack("<hhhh", body)
roll = (rollr / 32768) * 180
pitch = (pitchr / 32768) * 180
yaw = (yawr / 32768) * 180
return cls(
roll=roll,
pitch=pitch,
yaw=yaw,
version=version,
)
class MagneticMessage(ReceiveMessage):
code = 0x54
def __init__(self, bearing, mag, temp_celsius):
self.bearing = bearing
self.mag = mag
self.temp_celsius = temp_celsius
def __str__(self):
return "magnetic message - vec:%s bearing:%s temp_celsius:%s" % (
self.mag,
self.bearing,
self.temp_celsius,
)
@classmethod
def parse(cls, body):
x, y, z, tempr = struct.unpack("<hhhh", body)
mag = (x, y, z)
temp_celsius = tempr / 100
bearing = math.atan2(y,x)/math.pi*180
if bearing<0:
bearing = bearing+360
return cls(
mag=mag,
bearing=bearing,
temp_celsius=temp_celsius,
)
class QuaternionMessage(ReceiveMessage):
code = 0x59
def __init__(self, q):
self.q = q
def __str__(self):
return "quaternion message - q:%s %s %s %s" % self.q
@classmethod
def parse(cls, body):
qr = struct.unpack("<hhhh", body)
q = tuple(el / 32768 for el in qr)
return cls(q=q)
receive_messages = {
cls.code: cls
for cls in (
TimeMessage,
AccelerationMessage,
AngularVelocityMessage,
AngleMessage,
MagneticMessage,
QuaternionMessage,
)
}
class CalibrationMode(Enum):
"""
Available sensor calibration modes.
"""
none = 0
"No calibration mode enabled."
gyro_accel = 1
"Enable gyroscope and accelerometer calibration."
magnetic = 2
"Enable magnetic calibration."
class InstallationDirection(Enum):
"""
Available installation directions.
"""
horizontal = 0x00
"Device installed horizontally (default)."
vertical = 0x01
"Device installed vertically."
class ReturnRateSelect(Enum):
rate_0_2hz = 0x01
rate_0_5hz = 0x02
rate_1hz = 0x03
rate_2hz = 0x04
rate_5hz = 0x05
rate_10hz = 0x06
rate_20hz = 0x07
rate_50hz = 0x08
rate_100hz = 0x09
rate_125hz = 0x0A
rate_200hz = 0x0B
rate_single = 0x0C
rate_not_output = 0x0D
class BaudRateSelect(Enum):
baud_4800 = 0x01
baud_9600 = 0x02
baud_19200 = 0x03
baud_38400 = 0x04
baud_57600 = 0x05
baud_115200 = 0x06
baud_230400 = 0x07
baud_460800 = 0x08
baud_921600 = 0x09
class Register(Enum):
save = 0x00
calsw = 0x01
rsw = 0x02
rate = 0x03
baud = 0x04
axoffset = 0x05
ayoffset = 0x06
azoffset = 0x07
gxoffset = 0x08
gyoffset = 0x09
gzoffset = 0x0A
hxoffset = 0x0B
hyoffset = 0x0C
hzoffset = 0x0D
sleep = 0x22
direction = 0x23
alg = 0x24
mmyy = 0x30
hhdd = 0x31
ssmm = 0x32
ms = 0x33
ax = 0x34
ay = 0x35
az = 0x36
gx = 0x37
gy = 0x38
gz = 0x39
hx = 0x3A
hy = 0x3B
hz = 0x3C
roll = 0x3D
pitch = 0x3E
yaw = 0x3F
temp = 0x40
q0 = 0x51
q1 = 0x52
q2 = 0x53
q3 = 0x54
gyro = 0x63
unknown_config_cmd = 0x69
class ConfigCommand:
def __init__(self, register, data):
self.register = register
self.data = data
def __str__(self):
return "config command - register %s -> data %s" % (
self.register.name,
self.data,
)
def serialize(self):
return struct.pack(
"<BBBH",
0xFF,
0xAA,
self.register.value,
self.data,
)
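# Serialization example (values chosen for illustration): setting the return rate to
# 10 Hz with ConfigCommand(Register.rate, ReturnRateSelect.rate_10hz.value) yields the
# five bytes ff aa 03 06 00: the 0xFF 0xAA header, the register, then little-endian data.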
| 6,803 |
makecafe.py
|
gurbieta/palm-os-lifedrive-installer
| 5 |
2024142
|
# Generate an acecafe header suitable for the LD ROM partition
#
import getopt, os, sys
from struct import pack
from sys import stdout, stderr
HEADER = 0xacecafe0
DEF_OFFSET = 134080 # in sectors (1 sector = 512 bytes)
DEF_LENGTH = 20479778 # in bytes
BLOCKSIZE = 512 # for concatenate option
def usage():
stderr.write('Usage: %s [-o offset] [-l length] [romfile]\n' % sys.argv[0])
stderr.write('Generate a LD rom partition header.\n\n')
stderr.write(' -o [offset] image offset in sectors (defaults to %d)\n' % DEF_OFFSET)
stderr.write(' -l [length] image length in bytes (defaults to size of given filename)\n')
stderr.write(' -c concatenate header with rom image\n')
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'o:l:c')
except getopt.GetoptError:
usage()
sys.exit(2)
offset = DEF_OFFSET
length = DEF_LENGTH
concat = 0
if args:
filename = args[0]
length = os.path.getsize(filename)
for o, a in opts:
if o == '-o':
offset = int(a)
elif o == '-l':
length = int(a)
elif o == '-c':
concat = 1
# first sector
# image offset stuff
stdout.write( pack('<IIII', HEADER, 4, 0x10000, 0) )
stdout.write( pack('<IIII', 0, offset, 0xb000, 0) )
stdout.write('\0' * 0x1e0)
# second sector
# image length
stdout.write( pack('<IIII', length, 0, 0, 0) )
stdout.write('\0' * 0x1f0)
if concat and filename:
f = file(filename, 'rb')
while 1:
data = f.read(BLOCKSIZE)
if data == '': break
stdout.write(data)
del data
if __name__ == '__main__': main()
| 1,554 |
Backend/Socket/server.py
|
beckjing/AppLab
| 0 |
2024030
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-08-07 16:35
# @Author : <NAME>
# @Site : www.nanosparrow.com
# @File : server
# @Software: PyCharm
import socket
HOST = '0.0.0.0'
PORT = 65432
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
conn, addr = s.accept()
with conn:
print('Connected by', addr)
while True:
data = conn.recv(1024)
if not data:
break
conn.sendall(data)
| 531 |
admin.py
|
chris-skud/madison-transit-api
| 0 |
2023246
|
import os
import wsgiref.handlers
import logging
import webapp2 as webapp
import json
from operator import itemgetter
from datetime import datetime
from datetime import date
from datetime import timedelta
from google.appengine.api import channel
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.api.labs.taskqueue import Task
from google.appengine.ext import db
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.runtime import apiproxy_errors
import gdata.docs.service
import gdata.spreadsheet.service
import gdata.spreadsheet.text_db
import config
from data_model import DeveloperKeys
#
# Every so often persist the API counters to the datastore
#
class PersistCounterHandler(webapp.RequestHandler):
def get(self):
logging.debug('persisting API counters to the datastore')
devkeys_to_save = []
devkeys = db.GqlQuery("SELECT * FROM DeveloperKeys").fetch(100)
for dk in devkeys:
counter_key = dk.developerKey + ':counter'
count = memcache.get(counter_key)
if count is not None:
logging.debug('persisting %s at %s' % (counter_key,str(count)))
dk.requestCounter += int(count)
memcache.set(counter_key,0)
devkeys_to_save.append(dk)
if len(devkeys_to_save) > 0:
db.put(devkeys_to_save)
logging.debug('... done persisting %s counters' % str(len(devkeys_to_save)))
## end
#
# Daily reporting email
#
class DailyReportHandler(webapp.RequestHandler):
def get(self):
devkeys_to_save = []
msg_body = '\n'
# right now we're only reporting on the API counters
devkeys = db.GqlQuery("SELECT * FROM DeveloperKeys").fetch(100)
for dk in devkeys:
msg_body += dk.developerName + '(%s) : ' % dk.developerKey
msg_body += str(dk.requestCounter)
msg_body += '\n'
# post counter to google doc
updateField(dk.developerKey,dk.requestCounter)
# reset the daily counter
if dk.requestCounter > 0:
dk.requestCounter = 0
devkeys_to_save.append(dk)
# save the modified developer keys
if len(devkeys_to_save) > 0:
db.put(devkeys_to_save)
## end
class GDocHandler(webapp.RequestHandler):
def get(self):
devkeys = db.GqlQuery("SELECT * FROM DeveloperKeys").fetch(100)
for dk in devkeys:
logging.debug('updating gdoc for %s with %s' % (dk.developerKey,str(dk.requestCounter)))
updateField(dk.developerKey,dk.requestCounter)
## end
class ResetChannelsHandler(webapp.RequestHandler):
def get(self):
now = datetime.now()
channels = json.loads(memcache.get('channels') or '{}')
for channel_id, created in channels.items():
dt = datetime.strptime(created.split(".")[0], "%Y-%m-%d %H:%M:%S")
# NOTE: normally this would be 60 minutes; set it lower to expose the refresh behavior
if (now - dt) > timedelta(minutes=1):
del channels[channel_id]
channel.send_message(channel_id, json.dumps({'function':'reload'}))
if channels:
memcache.set('channels', json.dumps(channels))
logging.debug('channels not empty')
else:
memcache.delete('channels')
logging.error('empty. delete it.')
## end
def updateField(category,value):
member = 'Sheet1'
# get a connection to the db/spreadsheet
client = gdata.spreadsheet.text_db.DatabaseClient(config.GOOGLE_DOC_EMAIL,config.GOOGLE_DOC_PASSWORD)
today = date.today() + timedelta(hours=-6)
dateString = str(today.month)+ "/" + str(today.day) + "/" + str(today.year)
logging.info('adding %s to %s for %s on %s' % (value,category,member,dateString))
databases = client.GetDatabases(config.GOOGLE_DOC_KEY,
config.GOOGLE_DOC_TITLE)
if len(databases) != 1:
logging.error("database query is broken!?! can't find the document")
for db in databases:
tables = db.GetTables(name=member)
for t in tables:
if t:
records = t.FindRecords('date == %s' % dateString)
for r in records:
if r:
if category not in r.content:
logging.error('could not find %s - 0' % category)
else:
r.content[category] = str(value)
r.Push()
else:
logging.error("unable to find the contents for this record!?!")
else:
logging.error("couldn't find the table!?!")
return
## end
class CreateDeveloperKeysHandler(webapp.RequestHandler):
def get(self):
key = DeveloperKeys()
key.developerName = '<NAME>'
key.developerKey = 'fixme'
key.developerEmail = '<EMAIL>'
key.requestCounter = 0
key.errorCounter = 0
key.put()
## end
class APIUserDumpHandler(webapp.RequestHandler):
def get(self):
devs = db.GqlQuery("SELECT * FROM DeveloperKeys").fetch(limit=None)
template_values = {
'devs' : devs
}
path = os.path.join(os.path.dirname(__file__), 'views/devlist.html')
self.response.out.write(template.render(path,template_values))
application = webapp.WSGIApplication([('/admin/persistcounters', PersistCounterHandler),
('/admin/dailyreport', DailyReportHandler),
('/admin/gdoctest', GDocHandler),
('/admin/resetchannels', ResetChannelsHandler),
('/admin/apidump', APIUserDumpHandler),
('/admin/api/create', CreateDeveloperKeysHandler)
],
debug=True)
def main():
logging.getLogger().setLevel(logging.DEBUG)
run_wsgi_app(application)
if __name__ == '__main__':
main()
| 6,151 |
liacs_calendar/read_schedule.py
|
maximjanssen/liacs-create-calendar
| 1 |
2022632
|
"""
The functions that are needed to read the Excel-file and process it into
Python understandable objects.
"""
from os import path
from datetime import timezone, datetime, timedelta, time, date
try:
import xlrd
except ImportError:
print("Please install all dependencies using `pip install -r requirements.txt`")
exit(-1)
__raw_data__ = []
def read_excel(f: str) -> list:
"""Read an Excel file (xls) and process all the content into a list.
:param f: Filename of the file to process. It helps if this is an
absolute path.
:type f: str
:returns: A list containing all entries of the xls
:rtype: list
"""
#Do some sanity checks first
if type(f) is not str:
raise TypeError("Parameter file of function read_excel must be str, but was {}".format(str(type(f))))
if not path.exists(f) or not path.isfile(f):
raise ValueError("Given file ({}) does not exist, or is not a file.".format(f))
#Open the wb
wb = xlrd.open_workbook(filename=f, on_demand=True, formatting_info=True)
xf = wb.xf_list
font = wb.font_list
sheet = wb.sheet_by_index(0)
do_add = True
return_list = []
for r in range(sheet.nrows):
#Add new row in list
row_result_list = []
do_add = True
for c in range(sheet.row_len(r)):
if not do_add:
break
cell = sheet.cell(r,c)
curr_xf = xf[cell.xf_index]
curr_font = font[curr_xf.font_index]
#skip cells with text struck out
if curr_font.struck_out:
do_add = False
if cell.ctype == xlrd.XL_CELL_DATE:
cell_val = xlrd.xldate.xldate_as_tuple(cell.value, 0)
else:
cell_val = cell.value
row_result_list.append(cell_val)
if do_add:
return_list.append(row_result_list)
global __raw_data__
__raw_data__ = return_list
return return_list
def get_course_entries(course_names: list, exclude_names: list = [], data: list = []) -> list:
if not course_names or type(course_names) is not list:
raise TypeError("Parameter course_name must be a list and may not be an empty list!")
if type(exclude_names) is not list:
raise TypeError("Parameter exclude_names must be a list!")
global __raw_data__
if not __raw_data__ and not data:
raise RuntimeError("Please either run read_excel, or provide data in the data parameter")
if data:
d = data
else:
d = __raw_data__
header = d[0]
#I'm only interested in these entries
entry_date = None
entry_sttime = None
entry_endtime = None
entry_building = None
entry_room = None
entry_activity = None
#Find which column corresponds to what
i = 0
for h in header:
#switch stmt would be nice here
curr_entry = h.lower()
if curr_entry == "date":
entry_date = i
elif curr_entry == "starttime":
entry_sttime = i
elif curr_entry == "endtime":
entry_endtime = i
elif curr_entry == "building":
entry_building = i
elif curr_entry == "room":
entry_room = i
elif curr_entry == "activity":
entry_activity = i
i = i + 1
if entry_activity is None:
raise ValueError("Could not find the column containing the activity. This is necessary.")
tzone = timezone(timedelta(hours=1), name="Europe/Amsterdam")
result = []
last_entry = None
i = 0
for entry in d[1:]:
act_date = None
d = None
starttime = None
endtime = None
dstart = None
dend = None
location = ""
activity = entry[entry_activity].lower()
#skip all processing if course is not in list
do_process = False
for course in course_names:
if activity.startswith(course):
do_process = True
break
for exclude in exclude_names:
#skip all checks if course is excluded by user
if activity.startswith(exclude):
do_process = False
break
if not do_process:
continue
i = i + 1
if entry_date:
act_date = entry[entry_date]
d = date(act_date[0], act_date[1], act_date[2])
if entry_sttime:
st = entry[entry_sttime]
if type(st) is tuple:
starttime = time(st[3], st[4])
else:
try:
st = st.split(":")
starttime = time(int(st[0]), int(st[1]))
except:
print(entry[entry_sttime])
# starttime = time.fromisoformat(entry[entry_sttime])
if entry_endtime:
et = entry[entry_endtime]
if type(et) is tuple:
endtime = time(et[3], et[4])
else:
try:
et = et.split(":")
endtime = time(int(et[0]), int(et[1]))
except:
print(entry[entry_endtime])
# endtime = time.fromisoformat(entry[entry_endtime])
if d and starttime:
dstart = datetime.combine(d, starttime, tzinfo=tzone)
if d and endtime:
dend = datetime.combine(d, endtime, tzinfo=tzone)
if entry_building:
location = str(entry[entry_building])
if entry_room:
location = location + " " + str(entry[entry_room])
#Check if this entry is the same as previous, but only with a different location
#if so, merge these entries
if last_entry and (last_entry[0] == activity and last_entry[1] == dstart and last_entry[2] == dend):
last_entry[3] += " " + location
result[-1] = last_entry
else:
result.append([activity, dstart, dend, location])
last_entry = [activity, dstart, dend, location]
print("Found {} entries.".format(i), end="\r")
print("Found {} entries. Done.".format(i))
return result
def get_course_list(data: list = []) -> list:
global __raw_data__
if not __raw_data__ and not data:
raise RuntimeError("Please either run read_excel, or provide data in the data parameter")
#Here we can safely assume either data or raw_data is used
if data:
d = data
else:
d = __raw_data__
#Find the column in which the activity name is stored
#Most likely in 9th column, so try that first
col_activity = -1
if str(d[0][9]).lower() == "activity":
col_activity = 9
else:
for i in range(len(d[0])):
if str(d[0][i]).lower() == "activity":
#Found it, stop
col_activity = i
break
course_list = []
#Skip first row, for it is the header row
for row in range(1, len(d)):
if d[row][col_activity] not in course_list:
course_list.append(d[row][col_activity])
return list(course_list)
| 7,203 |
pyfilter/proposals/linear.py
|
merz9b/pyfilter
| 0 |
2024263
|
from .base import Proposal
from torch.distributions import Normal, MultivariateNormal
from ..timeseries import LinearGaussianObservations as LGO
import torch
from ..utils import construct_diag
class LinearGaussianObservations(Proposal):
"""
Proposal designed for cases when the observation density is a linear combination of the states, and has a Gaussian
density. Note that in order for this to work for multi-dimensional models you must use matrices to form the
combination.
"""
def __init__(self):
super().__init__()
self._mat = None
def _get_mat_and_fix_y(self, x, y):
return self._model.observable._theta_vals[0], y
def set_model(self, model):
if not isinstance(model, LGO):
raise ValueError('Model must be of instance {}'.format(LGO.__name__))
self._model = model
return self
@staticmethod
def _kernel_1d(y, loc, h_var_inv, o_var_inv, c):
cov = 1 / (h_var_inv + c ** 2 * o_var_inv)
m = cov * (h_var_inv * loc + c * o_var_inv * y)
kernel = Normal(m, cov.sqrt())
return kernel
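# Note: this is the exact Gaussian conditional for the linear 1D case. With prior
# precision h_var_inv on the state and an observation y ~ N(c * x, 1 / o_var_inv),
# the posterior precision is the sum h_var_inv + c**2 * o_var_inv and the posterior
# mean is the precision-weighted combination computed above.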
def _kernel_2d(self, y, loc, h_var_inv, o_var_inv, c):
tc = c if self._model.obs_ndim > 1 else c.unsqueeze(-2)
# ===== Define covariance ===== #
ttc = tc.transpose(-2, -1)
diag_o_var_inv = construct_diag(o_var_inv if self._model.observable.ndim > 1 else o_var_inv.unsqueeze(-1))
t2 = torch.matmul(ttc, torch.matmul(diag_o_var_inv, tc))
cov = (construct_diag(h_var_inv) + t2).inverse()
# ===== Get mean ===== #
t1 = h_var_inv * loc
t2 = torch.matmul(diag_o_var_inv, y if y.dim() > 0 else y.unsqueeze(-1))
t3 = torch.matmul(ttc, t2.unsqueeze(-1))[..., 0]
m = torch.matmul(cov, (t1 + t3).unsqueeze(-1))[..., 0]
return MultivariateNormal(m, scale_tril=torch.cholesky(cov))
def construct(self, y, x):
# ===== Hidden ===== #
loc = self._model.hidden.mean(x)
h_var_inv = 1 / self._model.hidden.scale(x) ** 2
# ===== Observable ===== #
c, y = self._get_mat_and_fix_y(x, y)
o_var_inv = 1 / self._model.observable.scale(x) ** 2
if self._model.hidden_ndim < 2:
self._kernel = self._kernel_1d(y, loc, h_var_inv, o_var_inv, c)
else:
self._kernel = self._kernel_2d(y, loc, h_var_inv, o_var_inv, c)
return self
| 2,431 |
zmsavings/utils/converter.py
|
vleseg/zmsavings
| 1 |
2024149
|
from datetime import datetime
# Third-party imports
from money import Money
class Converter(object):
def __init__(self, model_field_name, convert_method):
self.model_field_name = model_field_name
self._convert = convert_method
def __call__(self, value):
return self._convert(value)
@classmethod
def to_datetime(cls, model_field_name, fmt):
def _convert(value):
return datetime.strptime(value, fmt)
return cls(model_field_name, _convert)
@classmethod
def to_rubles(cls, model_field_name):
def _convert(value):
if value == '':
value = 0
else:
value = value.replace(',', '.')
return Money(amount=value, currency='RUR')
return cls(model_field_name, _convert)
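# Usage sketch (the field name is illustrative): Converter.to_rubles('goal')('1234,56')
# swaps the decimal comma and returns Money('1234.56', 'RUR'); an empty string maps to
# zero rubles.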
| 822 |
desktopdraw_use_dxlib.py
|
Kazuhito00/desktopdraw_use_dxlib
| 1 |
2023760
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from ctypes import cdll, create_string_buffer
if __name__ == '__main__':
# Load the DX Library DLL
dxlib = cdll.DxLib_x64
# Set the window title
dxlib.dx_SetMainWindowText(" ")
# Set the window style to borderless
dxlib.dx_SetWindowStyleMode(2)
# Make the window background transparent
dxlib.dx_SetUseBackBufferTransColorFlag(1)
# Switch to windowed mode (TRUE: 1)
dxlib.dx_ChangeWindowMode(1)
# Change the window size
dxlib.dx_SetWindowSizeChangeEnableFlag(0, 0)
dxlib.dx_SetGraphMode(1920, 1080, 32)
dxlib.dx_SetWindowSize(1920, 1080)
# Keep running even while the window is inactive
dxlib.dx_SetAlwaysRunFlag(1)
# Keep the window on top (DX_WIN_ZTYPE_TOPMOST: 3)
dxlib.dx_SetWindowZOrder(3)
# Set the font
dxlib.dx_SetFontSize(30)
dxlib.dx_ChangeFontType(2)
# Initialize the DX Library
if dxlib.dx_DxLib_Init() == -1:
sys.exit()
# Main loop
dxlib.dx_SetDrawScreen(-2) # DX_SCREEN_BACK:-2
while dxlib.dx_ProcessMessage() == 0:
dxlib.dx_ClearDrawScreen()
draw_string = 'DESKTOP DRAW TEST'
encode_string = draw_string.encode('utf-8')
draw_string = create_string_buffer(encode_string)
dxlib.dx_DrawString(300, 300, draw_string, dxlib.dx_GetColor(
0, 255, 0))
dxlib.dx_ScreenFlip()
dxlib.dx_DxLib_End()
| 1,303 |
2nd_week/src/nlp.py
|
ChoiJunsik/relay_08
| 3 |
2024181
|
import pandas as pd
import re
from krwordrank.word import KRWordRank
import os, sys
cur_dir = os.path.dirname(os.path.abspath( __file__ ))
os.chdir(cur_dir)
os.chdir('..')
os.chdir('./public')
sentence_pattern = re.compile('\n+|[.?!]')
df = pd.read_csv('./contents.csv')
data = df[['title', 'body']].agg('\n'.join, axis=1)
split_data = [sentence_pattern.split(row) for row in data]
min_count = 4 # minimum word frequency (when building the graph)
max_length = 10 # maximum word length
wordrank_extractor = KRWordRank(min_count=min_count, max_length=max_length)
beta = 0.85 # decaying factor beta for PageRank
max_iter = 10
verbose = True
df.tagList = df.tagList.astype(str)
for i, row in enumerate(split_data):
try:
keywords, rank, graph = wordrank_extractor.extract(row, beta, max_iter)
print(f'[success] index: {i}, len: {len(row)}, keywords: {tuple(keywords.keys())}')
df._set_value(i, 'tagList', ' '.join(list(keywords.keys())))
except:
print(f'[fail] index: {i}, len: {len(row)}')
df._set_value(i, 'tagList', '')
df.to_csv('./tag_contents.csv', encoding='utf8', index=False)
| 1,138 |
src/processor/raw/processor.py
|
lcwong0928/hitlads
| 0 |
2024280
|
from abc import abstractmethod
import pandas as pd
from src.configuration.constants import RAW_DATA_DIRECTORY, INTERIM_DATA_DIRECTORY
class RawProcessor:
@classmethod
@abstractmethod
def process(cls, input_directory: str = RAW_DATA_DIRECTORY, out_directory: str = INTERIM_DATA_DIRECTORY):
pass
@classmethod
def create_index_labels(cls, index: pd.DataFrame, anomalies: pd.DataFrame) -> list:
labels = []
for i in index:
label = 0
for start, end in zip(anomalies.start, anomalies.end):
if start <= i <= end:
label = 1
break
labels.append(label)
return labels
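# Illustration (made-up values): with index = [1, 2, 3, 4] and a single anomaly window
# with start = 2 and end = 3, create_index_labels returns [0, 1, 1, 0].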
| 709 |
liveClassify_v2.py
|
Healbadbad/InternetOfThingsClass
| 0 |
2023590
|
import numpy as np
import pdb
from iotdata_simpleMostlyStatic import IOTData_simple
from ShimmerBluetooth import ShimmerBluetooth
from peakdetect2 import locatePeaks
import pickle as pkl
from sklearn.ensemble import RandomForestClassifier
def countSteps(data):
# remove the mean
data = data - np.mean(data)
#disregard end samples
data = data[0:(len(data)-len(data)%8)]
# break the time domain signal into 8-long windows
data = np.reshape(data,(-1,8))
# calculate energy in the windows
energy = np.sum(np.square(data),axis=1)
# calculate an energy threshold
enThreshold = 1.5*np.mean(energy)
pkInds = locatePeaks(energy,enThreshold*1,5,minamp = enThreshold)
# for i in range(len(energy)):
# if energy[i]<enThreshold:
# energy[i] = 0
# pkInds,o2 = peakdetect.peakdet(energy,enThreshold)
return len(pkInds)
# devices = ["Ground_Truth_Treadmill1", "Ground_Truth_Treadmill2", "Ground_Truth_Treadmill3", "Ground_Truth_Treadmill5"]
# device = IOTData(devices, 'Ground_Truth_data\\03_27_16\\')
# devices = IOTData(None, comport = "COM5")
featuregenerator = IOTData_simple(windowsize = 256)
print "loading classifier"
pkledClassifier = open('randomforestclassifier2.pkl','rb')
classifier = pkl.load(pkledClassifier)
pkledClassifier.close()
print "initializing bluetooth connection"
sensors = [ShimmerBluetooth("COM9", 256)]
predictions = [None]*len(sensors)
stepCount = [0]*len(sensors)
try:
print "entering main loop"
while True:
for i in range(len(sensors)):
# aquire data
df = np.array(sensors[i].getFrame())
featureVector = featuregenerator.generateWindowFeatures(df)
print(len(featureVector))
featureVector = np.reshape(featureVector,(1,-1))
# classify
pred = classifier.predict(featureVector)
predictions[i] = pred[0]
# estimate step count
# i dunno?
accelMagnitude = np.sqrt(np.square(df[:,0])+np.square(df[:,1])+np.square(df[:,2]))
if predictions[i] != "not_walking":
stepCount[i] += countSteps(accelMagnitude)
# save prediciton
predictions[i] = pred
# output predictions
print '-'*20
for i in range(len(sensors)):
print 'Sensor #', i+1, ':',predictions[i], 'and', stepCount[i], 'total steps'
except KeyboardInterrupt:
for sensor in sensors:
sensor.closeDevice()
| 2,501 |
app.py
|
atlefren/mineturer2
| 8 |
2023947
|
# -*- coding: utf-8 -*-
import os
from flask import Flask
from webassets.loaders import PythonLoader
from flask.ext.assets import Environment
from views import create_views
from database import init_db
def create_bundles(app):
assets = Environment(app)
assets.debug = True if app.debug == 'True' else False
bundles = PythonLoader('assetbundle').load_bundles()
for name, bundle in bundles.iteritems():
assets.register(name, bundle)
def create_app(debug, database_url):
app = Flask(__name__)
app.secret_key = os.environ.get('SECRET_KEY', 'development_fallback')
app.debug = debug
(app.db_session, app.db_metadata, app.db_engine) = init_db(database_url)
create_bundles(app)
create_views(app)
return app
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
db = os.environ.get('DATABASE_URL', 'sqlite:////tmp/mineturer.db')
app = create_app(os.environ.get('DEBUG', False), db)
app.run(host='0.0.0.0', port=port, debug=True)
| 1,013 |
flint/optim/adam.py
|
Renovamen/tinyark
| 15 |
2022722
|
import numpy as np
from typing import Tuple
from .optimizer import Optimizer
class Adam(Optimizer):
"""
Implementation of Adam algorithm proposed in [1].
.. math::
v_t = \\beta_1 v_{t-1} + (1 - \\beta_1) g_t
.. math::
h_t = \\beta_2 h_{t-1} + (1 - \\beta_2) g_t^2
Bias correction:
.. math::
\\hat{v}_t = \\frac{v_t}{1 - \\beta_1^t}
.. math::
\\hat{h}_t = \\frac{h_t}{1 - \\beta_2^t}
Update parameters:
.. math::
\\theta_t = \\theta_{t-1} - \\text{lr} \\cdot \\frac{\\hat{v}_t}{\\sqrt{\\hat{h}_t} + \\epsilon}
Parameters
----------
params : iterable
An iterable of Tensor
lr : float, optional, default=1e-3
Learning rate
betas : Tuple[float, float], optional, default=(0.9, 0.999)
Coefficients used for computing running averages of gradient and its
square
eps : float, optional, default=1e-8
Term added to the denominator to improve numerical stability
weight_decay : float, optional, default=0
Weight decay (L2 penalty)
References
----------
1. "`Adam: A Method for Stochastic Optimization. <https://arxiv.org/abs/1412.6980>`_" <NAME> and <NAME>. ICLR 2015.
"""
def __init__(
self,
params = None,
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 0.
):
super(Adam, self).__init__(params, lr, weight_decay)
self.eps = eps
self.beta1, self.beta2 = betas
self.v = [np.zeros_like(p.data) for p in self.params]
self.h = [np.zeros_like(p.data) for p in self.params]
def step(self):
super(Adam, self).step()
for i, (v, h, p) in enumerate(zip(self.v, self.h, self.params)):
if p.requires_grad:
# l2 penalty
p_grad = p.grad + self.weight_decay * p.data
# moving average of gradients
                v = self.beta1 * v + (1 - self.beta1) * p_grad
                self.v[i] = v
                # moving average of squared gradients
                h = self.beta2 * h + (1 - self.beta2) * (p_grad ** 2)
self.h[i] = h
# bias correction
v_correction = 1 - (self.beta1 ** self.iterations)
h_correction = 1 - (self.beta2 ** self.iterations)
# update parameters
p.data -= (self.lr / v_correction * v) / (np.sqrt(h) / np.sqrt(h_correction) + self.eps)
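# Illustrative sketch (not part of flint; all values below are made up): one
# Adam update on a scalar parameter, mirroring the equations in the docstring.
#
#   lr, beta1, beta2, eps = 1e-3, 0.9, 0.999, 1e-8
#   theta, v, h, t = 0.5, 0.0, 0.0, 1             # parameter, moments, step count
#   g = 2.0 * theta                               # gradient of f(x) = x^2 at theta
#   v = beta1 * v + (1 - beta1) * g               # first moment estimate
#   h = beta2 * h + (1 - beta2) * g ** 2          # second moment estimate
#   v_hat = v / (1 - beta1 ** t)                  # bias-corrected first moment
#   h_hat = h / (1 - beta2 ** t)                  # bias-corrected second moment
#   theta -= lr * v_hat / (np.sqrt(h_hat) + eps)  # 0.5 -> ~0.499 after one step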
| 2,535 |
src/ml/net/pt/__init__.py
|
iN1k1/deep-pyramidal-representations-peron-re-identification
| 13 |
2024240
|
from .models import PTModel
from .utils import *
from .dense import DenseNet, get_densenet_backbone
from .resnet import ResNet, get_resnet_backbone
from .spp import SpatialPyramidPooling
__all__ = ['PTModel', 'init_weights_classifier_module', 'init_weights_module_kaiming',
'init_weights_classifier_model', 'init_weights_model_kaiming',
'init_weights_normal_model', 'init_weights_normal_module',
'init_weights_orthogonal_model', 'init_weights_orthogonal_module',
'DenseNet', 'get_densenet_backbone',
'ResNet', 'get_resnet_backbone',
'utils', 'SpatialPyramidPooling']
| 633 |
distributed/protocol/tests/test_protocol_utils.py
|
crusaderky/distributed
| 1,358 |
2024199
|
from __future__ import annotations
import pytest
from distributed.protocol.utils import merge_memoryviews, pack_frames, unpack_frames
def test_pack_frames():
frames = [b"123", b"asdf"]
b = pack_frames(frames)
assert isinstance(b, bytes)
frames2 = unpack_frames(b)
assert frames == frames2
class TestMergeMemoryviews:
def test_empty(self):
empty = merge_memoryviews([])
assert isinstance(empty, memoryview) and len(empty) == 0
def test_one(self):
base = bytearray(range(10))
base_mv = memoryview(base)
assert merge_memoryviews([base_mv]) is base_mv
@pytest.mark.parametrize(
"slices",
[
[slice(None, 3), slice(3, None)],
[slice(1, 3), slice(3, None)],
[slice(1, 3), slice(3, -1)],
[slice(0, 0), slice(None)],
[slice(None), slice(-1, -1)],
[slice(0, 0), slice(0, 0)],
[slice(None, 3), slice(3, 7), slice(7, None)],
[slice(2, 3), slice(3, 7), slice(7, 9)],
[slice(2, 3), slice(3, 7), slice(7, 9), slice(9, 9)],
[slice(1, 2), slice(2, 5), slice(5, 8), slice(8, None)],
],
)
def test_parts(self, slices: list[slice]):
base = bytearray(range(10))
base_mv = memoryview(base)
equiv_start = min(s.indices(10)[0] for s in slices)
equiv_stop = max(s.indices(10)[1] for s in slices)
equiv = base_mv[equiv_start:equiv_stop]
parts = [base_mv[s] for s in slices]
result = merge_memoryviews(parts)
assert result.obj is base
assert len(result) == len(equiv)
assert result == equiv
def test_readonly_buffer(self):
pytest.importorskip(
"numpy", reason="Read-only buffer zero-copy merging requires NumPy"
)
base = bytes(range(10))
base_mv = memoryview(base)
result = merge_memoryviews([base_mv[:4], base_mv[4:]])
assert result.obj is base
assert len(result) == len(base)
assert result == base
def test_catch_non_memoryview(self):
with pytest.raises(TypeError, match="Expected memoryview"):
merge_memoryviews([b"1234", memoryview(b"4567")])
with pytest.raises(TypeError, match="expected memoryview"):
merge_memoryviews([memoryview(b"123"), b"1234"])
@pytest.mark.parametrize(
"slices",
[
[slice(None, 3), slice(4, None)],
[slice(None, 3), slice(2, None)],
[slice(1, 3), slice(3, 6), slice(9, None)],
],
)
def test_catch_gaps(self, slices: list[slice]):
base = bytearray(range(10))
base_mv = memoryview(base)
parts = [base_mv[s] for s in slices]
with pytest.raises(ValueError, match="does not start where the previous ends"):
merge_memoryviews(parts)
def test_catch_different_buffer(self):
base = bytearray(range(8))
base_mv = memoryview(base)
with pytest.raises(ValueError, match="different buffer"):
merge_memoryviews([base_mv, memoryview(base.copy())])
def test_catch_different_non_contiguous(self):
base = bytearray(range(8))
base_mv = memoryview(base)[::-1]
with pytest.raises(ValueError, match="non-contiguous"):
merge_memoryviews([base_mv[:3], base_mv[3:]])
def test_catch_multidimensional(self):
base = bytearray(range(6))
base_mv = memoryview(base).cast("B", [3, 2])
with pytest.raises(ValueError, match="has 2 dimensions, not 1"):
merge_memoryviews([base_mv[:1], base_mv[1:]])
def test_catch_different_formats(self):
base = bytearray(range(8))
base_mv = memoryview(base)
with pytest.raises(ValueError, match="inconsistent format: I vs B"):
merge_memoryviews([base_mv[:4], base_mv[4:].cast("I")])
| 3,909 |
stock/models.py
|
aveetron/cafe3_resturant_management
| 0 |
2023080
|
from django.db import models
# Create your models here.
class Item(models.Model):
name = models.CharField(max_length=50, unique=True, null=False, blank=False)
price = models.IntegerField(null=False, blank=False)
def __str__(self):
return self.name
| 270 |
porcupine/plugins/statusbar.py
|
ThePhilgrim/porcupine
| 0 |
2024312
|
"""Display a status bar in each file tab."""
import tkinter
from tkinter import ttk
from porcupine import get_tab_manager, tabs, textwidget, utils
class StatusBar(ttk.Frame):
def __init__(self, tab: tabs.FileTab):
super().__init__(tab.bottom_frame)
self.tab = tab
self.left_label = ttk.Label(self)
self.right_label = ttk.Label(self)
self.left_label.pack(side="left")
self.right_label.pack(side="right")
def show_path(self, junk: object = None) -> None:
self.left_label.config(text=("New file" if self.tab.path is None else str(self.tab.path)))
def show_cursor_or_selection(self, junk: object = None) -> None:
try:
            # For the line count, if the cursor is at the beginning of a line, don't count that as another line.
chars = textwidget.count(self.tab.textwidget, "sel.first", "sel.last")
lines = textwidget.count(
self.tab.textwidget, "sel.first", "sel.last - 1 char", option="-lines"
)
except tkinter.TclError:
# no text selected
line, column = self.tab.textwidget.index("insert").split(".")
self.right_label.config(text=f"Line {line}, column {column}")
else:
if lines == 0:
self.right_label.config(text=f"{chars} characters selected")
else:
self.right_label.config(text=f"{chars} characters on {lines+1} lines selected")
def show_reload_warning(self, event: utils.EventWithData) -> None:
if event.data_class(tabs.ReloadInfo).was_modified:
oops = utils.get_binding("<<Undo>>")
self.left_label.config(
foreground="red",
text=(
f"File was reloaded with unsaved changes. Press {oops} to get your changes"
" back."
),
)
def clear_reload_warning(self, junk: object) -> None:
if self.left_label["foreground"]:
self.left_label.config(foreground="")
self.show_path()
def on_new_tab(tab: tabs.Tab) -> None:
if isinstance(tab, tabs.FileTab):
statusbar = StatusBar(tab)
statusbar.pack(side="bottom", fill="x")
tab.bind("<<PathChanged>>", statusbar.show_path, add=True)
utils.bind_with_data(tab, "<<Reloaded>>", statusbar.show_reload_warning, add=True)
tab.textwidget.bind("<<CursorMoved>>", statusbar.show_cursor_or_selection, add=True)
tab.textwidget.bind("<<Selection>>", statusbar.show_cursor_or_selection, add=True)
tab.textwidget.bind("<<ContentChanged>>", statusbar.clear_reload_warning, add=True)
statusbar.show_path()
statusbar.show_cursor_or_selection()
def setup() -> None:
get_tab_manager().add_tab_callback(on_new_tab)
| 2,821 |
iscr/utils.py
|
iammrhelo/iscr-searchengine
| 0 |
2023451
|
import math
import pickle
def normalize(d, inplace=False):
total = sum(d.values())
if inplace is True:
for k, v in d.items():
d[k] = v / total
else:
norm_d = {}
for k, v in d.items():
norm_d[k] = v / total
return norm_d
def load_from_pickle(filename):
with open(filename, 'rb') as fin:
return pickle.load(fin)
def save_to_pickle(filename, obj):
with open(filename, 'wb') as fout:
pickle.dump(obj, fout)
def convert_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
| 744 |
pyt_binary_classification.py
|
mjbhobe/dl-pytorch
| 5 |
2023818
|
"""
pyt_binary_classification.py: binary classification of 2D data
@author: <NAME>
My experiments with Python, Machine Learning & Deep Learning.
This code is meant for education purposes only & is not intended for commercial/production use!
Use at your own risk!! I am not responsible if your CPU or GPU gets fried :D
"""
import warnings
warnings.filterwarnings('ignore')
import os
import sys
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, r2_score
from sklearn.utils import shuffle
# tweaks for libraries
np.set_printoptions(precision=6, linewidth=1024, suppress=True)
plt.style.use('seaborn')
sns.set(style='whitegrid', font_scale=1.1, palette='muted')
# Pytorch imports
import torch
print('Using Pytorch version: ', torch.__version__)
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torchsummary import summary
# My helper functions for training/evaluating etc.
import pytorch_toolkit as pytk
seed = 42
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.enabled = False
NUM_EPOCHS = 2500
BATCH_SIZE = 1024 * 4
LR = 0.001
DATA_FILE = os.path.join('.', 'csv_files', 'weatherAUS.csv')
print(f"Data file: {DATA_FILE}")
MODEL_SAVE_PATH = os.path.join('.', 'model_states', 'weather_model.pt')
# ---------------------------------------------------------------------------
# load data, select fields & apply scaling
# ---------------------------------------------------------------------------
def get_data(test_split=0.20, shuffle_it=True, balance=False, sampling_strategy=0.85,
debug=False):
from imblearn.over_sampling import SMOTE
df = pd.read_csv(DATA_FILE)
if shuffle_it:
df = shuffle(df)
cols = ['Rainfall', 'Humidity3pm',
'Pressure9am', 'RainToday', 'RainTomorrow']
df = df[cols]
# convert categorical cols - RainToday & RainTomorrow to numeric
df['RainToday'].replace({"No": 0, "Yes": 1}, inplace=True)
df['RainTomorrow'].replace({"No": 0, "Yes": 1}, inplace=True)
# drop all rows where any cols == Null
df = df.dropna(how='any')
# display plot of target
sns.countplot(df.RainTomorrow)
plt.title("RainTomorrow: existing counts")
plt.show()
X = df.drop(['RainTomorrow'], axis=1).values
y = df['RainTomorrow'].values
if debug:
print(f"{'Before balancing ' if balance else ''} X.shape = {X.shape}, "
f"y.shape = {y.shape}, y-count = {np.bincount(y)}")
if balance:
ros = SMOTE(sampling_strategy=sampling_strategy, random_state=seed)
X, y = ros.fit_resample(X, y)
if debug:
print(f"Resampled -> X.shape = {X.shape}, y.shape = {y.shape}, "
f"y-count = {np.bincount(y)}")
# display plot of target
sns.countplot(y)
plt.title("RainTomorrow: after re-balancing")
plt.show()
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=test_split, random_state=seed)
if debug:
print(
f"Split data -> X_train.shape = {X_train.shape}, y_train.shape = {y_train.shape}, "
f"X_test.shape = {X_test.shape}, y_test.shape = {y_test.shape}")
ss = StandardScaler()
X_train = ss.fit_transform(X_train)
X_test = ss.transform(X_test)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# y_train = np.expand_dims(y_train, axis=1)
# y_test = np.expand_dims(y_test, axis=1)
# NOTE: BCELoss() expects labels to be floats - why???
y_train = y_train.astype('float32')
y_test = y_test.astype('float32')
y_train = y_train[:, np.newaxis]
y_test = y_test[:, np.newaxis]
return (X_train, y_train), (X_test, y_test)
# our binary classification model
# class Net(pytk.PytkModule):
# def __init__(self, features):
# super(Net, self).__init__()
# self.fc1 = pytk.Linear(features, 10)
# self.fc2 = pytk.Linear(10, 5)
# self.out = pytk.Linear(5, 1)
#
# def forward(self, x):
# x = F.relu(self.fc1(x))
# x = F.relu(self.fc2(x))
# x = F.sigmoid(self.out(x))
# return x
class Net(pytk.PytkModule):
def __init__(self, features):
super(Net, self).__init__()
self.fc1 = pytk.Linear(features, 32)
self.fc2 = pytk.Linear(32, 16)
self.fc3 = pytk.Linear(16, 8)
self.out = pytk.Linear(8, 1)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.sigmoid(self.out(x))
return x
DO_TRAINING = True
DO_PREDICTION = True
def main():
# load & preprocess data
(X_train, y_train), (X_test, y_test) = get_data(balance=True, sampling_strategy=0.90,
debug=True)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
if DO_TRAINING:
# build model
model = Net(X_train.shape[1])
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=LR)
# optimizer = optim.SGD(model.parameters(), lr=LR)
model.compile(loss=criterion, optimizer=optimizer, metrics=['accuracy'])
print(model)
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=200, gamma=0.2)
hist = model.fit(X_train, y_train, validation_split=0.2, epochs=NUM_EPOCHS,
batch_size=-1,
lr_scheduler=scheduler,
report_interval=50, verbose=2)
pytk.show_plots(hist, metric='accuracy')
# evaluate performance
print('Evaluating performance...')
loss, acc = model.evaluate(X_train, y_train, batch_size=2048)
print(f' - Train dataset -> loss: {loss:.3f} acc: {acc:.3f}')
loss, acc = model.evaluate(X_test, y_test)
print(f' - Test dataset -> loss: {loss:.3f} acc: {acc:.3f}')
model.save(MODEL_SAVE_PATH)
del model
if DO_PREDICTION:
if not os.path.exists(MODEL_SAVE_PATH):
raise ValueError(f"Could not find saved model at {MODEL_SAVE_PATH}. Did you train model?")
# run predictions
# model = pytk.load_model(MODEL_SAVE_PATH)
model = Net(X_train.shape[1])
model.load(MODEL_SAVE_PATH)
print(model)
y_pred = (model.predict(X_test) >= 0.5).astype('int32').ravel()
y_test = y_test.astype('int32').reshape(-1)
print(classification_report(y_test, y_pred))
pytk.plot_confusion_matrix(confusion_matrix(y_test, y_pred), ["No Rain", "Rain"],
title="Rain Prediction for Tomorrow")
del model
if __name__ == '__main__':
main()
# Results:
# Training (1000 epochs)
# - loss: 0.377 acc: 84.0%
# Training (1000 epochs)
# - loss: 0.377 acc: 84.1%
# Conclusion: No overfitting, but accuracy is low. Possibly due to very imbalanced data
#
# Training (1000 epochs) with re-sampling
# - loss: 0.377 acc: 84.0%
# Training (1000 epochs)
# - loss: 0.377 acc: 84.1%
# Conclusion: No overfitting, but accuracy is low. Possibly due to very imbalanced data
| 7,653 |
report.py
|
bakink/oracle-imagecopy-backup
| 20 |
2024338
|
#!/usr/bin/python2
import os, sys, json
from backupcommon import scriptpath, Configuration, BackupLogger, BackupTemplate, info, error, debug, exception, create_snapshot_class
from tempfile import mkstemp
from oraexec import OracleExec
def printhelp():
print "Usage: report.py [comma separated list of databases]"
sys.exit(2)
if len(sys.argv) not in [1,2]:
printhelp()
# Directory where the executable script is located
scriptpath = scriptpath()
# Read configuration
logf = mkstemp(prefix='backupreport-', suffix='.log')
os.close(logf[0])
Configuration.init('generic')
BackupLogger.init(logf[1], 'reporting')
Configuration.substitutions.update( {'logfile': BackupLogger.logfile, 'autorestorecatalog': Configuration.get('autorestorecatalog', 'autorestore')} )
reporttemplate = BackupTemplate('reporttemplate.cfg')
def exec_sqlplus(oraexec, script, header = 'sqlplusheader'):
finalscript = "%s\n%s\n%s" % (reporttemplate.get(header), script, reporttemplate.get('sqlplusfooter'))
output = oraexec.sqlplus(finalscript, silent=True)
for line in output.splitlines():
if line.startswith('OUTLOG: '):
yield(line.strip()[8:])
def process_database(dbname):
Configuration.defaultsection = dbname
Configuration.substitutions.update({'dbname': dbname})
oraexec = OracleExec(oraclehome=Configuration.get('oraclehome', 'generic'), tnspath=os.path.join(scriptpath, Configuration.get('tnsadmin', 'generic')))
# Read job status information from the database
jobinfo = {}
for line in exec_sqlplus(oraexec, reporttemplate.get('jobstatus')):
j = json.loads(line)
if j["type"] == "job":
if j["job_name"] == "ARCHLOGBACKUP_JOB":
jobinfo["archlog"] = j
elif j["job_name"] == "IMAGECOPY_JOB":
jobinfo["imagecopy"] = j
elif j["type"] == "exec":
if j["job_name"] == "ARCHLOGBACKUP_JOB":
jobinfo["archlogexec"] = j
elif j["job_name"] == "IMAGECOPY_JOB":
jobinfo["imagecopyexec"] = j
# Read snapshot information
zfs = create_snapshot_class(dbname)
snaps = zfs.listsnapshots(True, True)
# Autorestore information
autorestoreinfo = None
try:
for line in exec_sqlplus(oraexec, reporttemplate.get('autorestorestatus'), 'sqlplusautorestoreheader'):
autorestoreinfo = json.loads(line)
except:
pass
# Print output
print "%s:" % dbname
try:
print " Backup job: %s, last: %s, duration: %s, last failure: %s" % (jobinfo['imagecopy']['state'], jobinfo['imagecopy']['last_start_date'], jobinfo['imagecopy']['last_run_duration'], jobinfo['imagecopyexec']['last_failed'])
print " Archivelog job: %s, last: %s, duration: %s, last failure: %s" % (jobinfo['archlog']['state'], jobinfo['archlog']['last_start_date'], jobinfo['archlog']['last_run_duration'], jobinfo['archlogexec']['last_failed'])
if len(snaps) > 0:
firstsnap = zfs.getsnapinfo(snaps[0])
lastsnap = zfs.getsnapinfo(snaps[-1])
print " Snapshots: %d, latest: %s, oldest: %s" % (len(snaps), firstsnap["creation"], lastsnap["creation"])
else:
print " Snapshots: none"
if autorestoreinfo is not None:
print " Last successful restore: %s, last restore failure: %s, last successful validation: %s, avg difference from target (s): %d, avg restore time (min): %d" % (autorestoreinfo["last_success"], autorestoreinfo["last_fail"], autorestoreinfo["last_validated"], autorestoreinfo["avgdiff"], autorestoreinfo["avgrestoremin"])
except:
print " Error getting information."
excludelist = ['generic','rman','zfssa','autorestore']
includelist = []
if len(sys.argv) == 2:
includelist = sys.argv[1].split(",")
# Loop through all sections
for dbname in Configuration.sections():
if dbname not in excludelist and (len(includelist) == 0 or dbname in includelist):
process_database(dbname)
| 4,003 |
mk-trees-01.py
|
mykespb/pythoner
| 1 |
2023974
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mk-trees-01.py 2018-04-27 0.2
# <NAME>, 2018-04
# tests for trees etc
# Just adding old tests and problems for students of Python.
# ---------------------------
import random
# ---------------------------
# test 1
# ---------------------------
VALFROM = 0
VALTO = 50
VALNUM = 20
class Tree1:
def __init__(self, val=0):
self.val = val
self.left = None
self.right = None
def add(self, val):
"""add a unique value to tree"""
if val < self.val:
if self.left:
self.left.add(val)
else:
self.left = Tree1(val)
elif val > self.val:
if self.right:
self.right.add(val)
else:
self.right = Tree1(val)
def print(self):
"""ordered output"""
if self.left:
self.left.print()
print(self.val, end=", ")
if self.right:
self.right.print()
def exists(self, val):
"""test if value is in tree"""
if val == self.val:
return True
if val < self.val and self.left:
return self.left.exists(val)
if val > self.val and self.right:
return self.right.exists(val)
return False
def test1():
"""create a tree and find a value"""
tree = t1make(VALNUM)
tree.print()
t1find(tree)
def t1make(valnum):
"""make a tree"""
r = random.randint(VALFROM, VALTO)
print (f"adding {r}, ")
t = Tree1(r)
for i in range(1, valnum):
r = random.randint(VALFROM, VALTO)
print (f"adding {r}, ")
t.add (r)
return t
def t1find(tree):
"""find a value"""
print()
for i in range(VALNUM):
r = random.randint(VALFROM, VALTO)
print(f"Looking for {r}, result is {tree.exists(r)}")
# ---------------------------
def main(args):
test1()
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| 2,029 |
table/county.py
|
lovenery/life-prediction-model
| 5 |
2024175
|
import pandas as pd
import os.path as path
df = pd.read_csv(path.join(path.dirname(__file__), './county.csv'))
# print df
# .js
for index, row in df.iterrows():
print("{\n\tid: %d,\n\tname: \"%s\"\n}," % (index+1, row.chinese))
| 235 |
utils/load_weights.py
|
advsail/Tensorflow-quantization-test
| 74 |
2024283
|
import h5py
import sys
import numpy as np
sys.path.append('../')
def weight_loader(weight_file):
weights = {}
f = h5py.File(weight_file, mode='r')
# f = f['model_weights']
try:
layers = f.attrs['layer_names']
except:
        raise ValueError("weights file must contain attribute: 'layer_names'")
for layer_name in layers:
g = f[layer_name]
for weight_name in g.attrs['weight_names']:
weight_value = g[weight_name].value
name = str(weight_name).split("'")[1]
weights[name] = weight_value
return weights
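# Illustrative usage (sketch only; 'model_weights.h5' is a hypothetical
# Keras-style HDF5 file carrying a 'layer_names' attribute):
#   weights = weight_loader('model_weights.h5')
#   print(sorted(weights.keys())[:5])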
| 595 |
component_tests/cpackaga_a/test_a.py
|
crazeejeeves/trisuite
| 0 |
2022733
|
from unittest import TestCase
from ddt import *
from basic_math.accumulate import add
from framework.tags import tag, ProductTag
class TestComponentA(TestCase):
def test_one_param(self):
self.assertRaises(TypeError, lambda: add(1))
@tag("Nightly", "Long-running", priority=1)
def test_two_params(self):
self.assertEquals(3, add(1, 2), "Add result did not produce 3")
@tag("Nightly", priority=1, product=ProductTag.BME)
def test_three_params(self):
self.assertEquals(6, add(1, 2, 3), "Add result did not produce 6")
@tag("Nightly", priority=1, product=ProductTag.BME)
def test_parameterized_params(self, a, b, c, result):
self.assertEquals(result, add(a, b, c), "Add result did not produce {}".format(result))
| 782 |
tests/builtins/test_filter.py
|
katharosada/voc
| 0 |
2024042
|
from .. utils import TranspileTestCase, BuiltinTwoargFunctionTestCase
class FilterTests(TranspileTestCase):
def test_bool(self):
self.assertCodeExecution('print(list(filter(bool, [True, False, True])))')
self.assertCodeExecution('print(list(filter(bool, [1, 0, 3, -1])))')
self.assertCodeExecution('print(list(filter(bool, [])))')
def test_none(self):
self.assertCodeExecution('print(list(filter(None, [True, False, True])))')
self.assertCodeExecution('print(list(filter(None, [])))')
def test_lambda(self):
self.assertCodeExecution('print(list(filter(lambda x: x > 1, [3, 4, 56, 1, -11])))')
def test_wrong_argument(self):
self.assertCodeExecution('print(list(filter(None, None)))', exits_early=True)
class BuiltinFilterFunctionTests(BuiltinTwoargFunctionTestCase, TranspileTestCase):
functions = ["filter"]
not_implemented = [
'test_bool_bytearray',
'test_bool_bytes',
'test_bool_dict',
'test_bool_frozenset',
'test_bool_list',
'test_bool_range',
'test_bool_set',
'test_bool_str',
'test_bool_tuple',
'test_bytearray_bytearray',
'test_bytearray_bytes',
'test_bytearray_dict',
'test_bytearray_frozenset',
'test_bytearray_list',
'test_bytearray_range',
'test_bytearray_set',
'test_bytearray_str',
'test_bytearray_tuple',
'test_bytes_bytearray',
'test_bytes_bytes',
'test_bytes_dict',
'test_bytes_frozenset',
'test_bytes_list',
'test_bytes_range',
'test_bytes_set',
'test_bytes_str',
'test_bytes_tuple',
'test_class_bytearray',
'test_complex_bytearray',
'test_complex_bytes',
'test_complex_dict',
'test_complex_frozenset',
'test_complex_list',
'test_complex_range',
'test_complex_set',
'test_complex_str',
'test_complex_tuple',
'test_dict_bytearray',
'test_dict_bytes',
'test_dict_dict',
'test_dict_frozenset',
'test_dict_list',
'test_dict_range',
'test_dict_set',
'test_dict_str',
'test_dict_tuple',
'test_float_bytearray',
'test_float_bytes',
'test_float_dict',
'test_float_frozenset',
'test_float_list',
'test_float_range',
'test_float_set',
'test_float_str',
'test_float_tuple',
'test_frozenset_bytearray',
'test_frozenset_bytes',
'test_frozenset_dict',
'test_frozenset_frozenset',
'test_frozenset_list',
'test_frozenset_range',
'test_frozenset_set',
'test_frozenset_str',
'test_frozenset_tuple',
'test_int_bytearray',
'test_int_bytes',
'test_int_dict',
'test_int_frozenset',
'test_int_list',
'test_int_range',
'test_int_set',
'test_int_str',
'test_int_tuple',
'test_list_bytearray',
'test_list_bytes',
'test_list_dict',
'test_list_frozenset',
'test_list_list',
'test_list_range',
'test_list_set',
'test_list_str',
'test_list_tuple',
'test_None_bytearray',
'test_NotImplemented_bytearray',
'test_NotImplemented_bytes',
'test_NotImplemented_dict',
'test_NotImplemented_frozenset',
'test_NotImplemented_list',
'test_NotImplemented_range',
'test_NotImplemented_set',
'test_NotImplemented_str',
'test_NotImplemented_tuple',
'test_range_bytearray',
'test_range_bytes',
'test_range_dict',
'test_range_frozenset',
'test_range_list',
'test_range_range',
'test_range_set',
'test_range_str',
'test_range_tuple',
'test_set_bytearray',
'test_set_bytes',
'test_set_dict',
'test_set_frozenset',
'test_set_list',
'test_set_range',
'test_set_set',
'test_set_str',
'test_set_tuple',
'test_slice_bytearray',
'test_slice_bytes',
'test_slice_dict',
'test_slice_frozenset',
'test_slice_list',
'test_slice_range',
'test_slice_set',
'test_slice_str',
'test_slice_tuple',
'test_str_bytearray',
'test_str_bytes',
'test_str_dict',
'test_str_frozenset',
'test_str_list',
'test_str_range',
'test_str_set',
'test_str_str',
'test_str_tuple',
'test_tuple_bytearray',
'test_tuple_bytes',
'test_tuple_dict',
'test_tuple_frozenset',
'test_tuple_list',
'test_tuple_range',
'test_tuple_set',
'test_tuple_str',
'test_tuple_tuple',
]
| 4,903 |
genchartbl.py
|
bryancall/nghttp3
| 474 |
2022918
|
#!/usr/bin/env python3
import sys
import string
def name(i):
if i < 0x21:
return \
['NUL ', 'SOH ', 'STX ', 'ETX ', 'EOT ', 'ENQ ', 'ACK ', 'BEL ',
'BS ', 'HT ', 'LF ', 'VT ', 'FF ', 'CR ', 'SO ', 'SI ',
'DLE ', 'DC1 ', 'DC2 ', 'DC3 ', 'DC4 ', 'NAK ', 'SYN ', 'ETB ',
'CAN ', 'EM ', 'SUB ', 'ESC ', 'FS ', 'GS ', 'RS ', 'US ',
'SPC '][i]
if i == 0x7f:
return 'DEL '
def gentbl(tblname, pred):
sys.stdout.write('''\
/* Generated by genchartbl.py */
static const int {}[] = {{
'''.format(tblname))
for i in range(256):
if pred(chr(i)):
v = 1
else:
v = 0
if 0x21 <= i and i < 0x7f:
sys.stdout.write('{} /* {} */, '.format(v, chr(i)))
elif 0x80 <= i:
sys.stdout.write('{} /* {} */, '.format(v, hex(i)))
else:
sys.stdout.write('{} /* {} */, '.format(v, name(i)))
if (i + 1)%4 == 0:
sys.stdout.write('\n')
sys.stdout.write('};\n')
def sf_key():
gentbl('SF_KEY_CHARS', lambda c: c in string.ascii_lowercase or
c in string.digits or c in '_-.*')
def sf_dquote():
gentbl('SF_DQUOTE_CHARS', lambda c: (0x20 <= ord(c) and ord(c) <= 0x21) or
(0x23 <= ord(c) and ord(c) <= 0x5b) or
(0x5d <= ord(c) and ord(c) <= 0x7e))
def sf_token():
gentbl('SF_TOKEN_CHARS', lambda c: c in "!#$%&'*+-.^_`|~:/" or
c in string.digits or c in string.ascii_letters)
def sf_byteseq():
gentbl('SF_BYTESEQ_CHARS', lambda c: c in string.ascii_letters or
c in string.digits or c in '+/=')
sf_key()
sys.stdout.write('\n')
sf_dquote()
sys.stdout.write('\n')
sf_token()
sys.stdout.write('\n')
sf_byteseq()
sys.stdout.write('\n')
| 1,809 |
cc150/PermutationOfString.py
|
JulyKikuAkita/PythonPrac
| 1 |
2023895
|
__author__ = 'July'
# write a method to compute all permutations of a string
# write a method to compute all combinations of a string
import re
class Solution:
# no ordering
def getPermutation(self, str):
result = self.getPermutationRecu(str)
for i in xrange(len(result)):
result[i] = result[i].translate(None, '.')
return result
def getPermutationRecu(self, str):
result = []
if str == None:
return
elif len(str) == 0: #base case
result.append(".") # if use "", result is null
return result
first_char = str[0]
remain_char = str[1:]
words = self.getPermutationRecu(remain_char)
for word in words:
for j in xrange(len(word)):
result.append(self.insertCharAt(word, j, first_char))
return result
def insertCharAt(self, word, index, char):
#print word
first_half = str(word[:index])
last_half = str(word[index:])
return first_half + char + last_half
def getCombinateion(self, str):
result = []
space = 1 << len(str)
for i in xrange(space):
k = i
index = 0
substr = []
while k:
if k & 1 > 0:
substr.append(str[index])
k >>= 1
index += 1
result.append(substr)
return result
if __name__ == "__main__":
print Solution().getPermutation("ab")
print Solution().getCombinateion("ab")
| 1,563 |
data_processing/main.py
|
bioengstrom/master-thesis
| 0 |
2024337
|
"""
Starting point for the pose extraction, using OpenPose.
"""
import os
import time
import numpy as np
from helpers import SHOULD_LIMIT, lower_lim_check, upper_lim_check, read_from_json
from helpers.display_helper import display_session
from helpers.json_helper import combine_json_files
from helpers.paths import DATASET_PATH, EXTR_PATH, EXTR_PATH_SSD
from pose_extraction.extraction_config import TRIMMED_SESSION_FLAG, SHOULD_USE_TRIMMED
from pose_extraction.foi_extraction import extract_session
from pre_processing.cc_sync_sessions import cc_session_sync
from pre_processing.post_extraction_processing import process_extracted_files
def loop_over_session(session_dir, subject_idx, session_idx, action):
"""
Extracts, processes and saves the poses from a session. A session can consist of many videos covering different
views.
:param session_dir:
:param subject_idx:
:param session_idx:
:param action:
:return:
"""
if not os.path.exists(session_dir):
return
# Get the view names (child file names of a session)
_, _, views = next(os.walk(session_dir))
action(session_dir, subject_idx, session_idx, views)
def loop_over_subject(subject_dir, subject_idx, action=None):
"""
Extract the poses from a single subject
:param subject_dir:
:param subject_idx:
:param action:
:return:
"""
if not os.path.exists(subject_dir):
return
# Get the session names (child folder names of a subject)
_, sess_names, _ = next(os.walk(subject_dir))
if SHOULD_USE_TRIMMED:
# Remove un-trimmed sessions (sess) before extraction if they have trimmed counterparts
# Some sessions are trimmed in the beginning and end to remove frames containing more than one individual
for sess_name in sess_names:
if TRIMMED_SESSION_FLAG in sess_name:
deprecate_session_name = sess_name.replace(TRIMMED_SESSION_FLAG, "")
sess_names.remove(deprecate_session_name)
else:
for sess_name in sess_names:
if TRIMMED_SESSION_FLAG in sess_name:
sess_names.remove(sess_name)
for sess_idx in range(len(sorted(sess_names))):
if SHOULD_LIMIT and lower_lim_check(sess_idx, "sess"):
continue
if SHOULD_LIMIT and upper_lim_check(sess_idx, "sess"):
break
sess_dir = os.path.join(subject_dir, sess_names[sess_idx])
loop_over_session(sess_dir, subject_idx, sess_idx, action)
def loop_over_foi_dataset(root_dir, action=None):
"""
Extract the poses from all the subjects
:param root_dir:
:param action:
:return:
"""
# Loop through the dir containing all the subjects
_, subject_names, _ = next(os.walk(root_dir))
for subject_idx in range(len(sorted(subject_names))):
if SHOULD_LIMIT and lower_lim_check(subject_idx, "sub"):
continue
if SHOULD_LIMIT and upper_lim_check(subject_idx, "sub"):
break
subject_dir = os.path.join(root_dir, subject_names[subject_idx])
loop_over_subject(subject_dir, subject_idx, action)
if __name__ == "__main__":
start_time = time.time()
"""""""""""
For extraction of the FOI dataset
"""""""""""
#loop_over_foi_dataset(root_dir=DATASET_PATH, action=extract_session)
"""""""""""
For syncing the sessions
"""""""""""
#loop_over_foi_dataset(root_dir=DATASET_PATH, action=cc_session_sync)
"""""""""""
For viewing session, synced or not
"""""""""""
#loop_over_foi_dataset(root_dir=DATASET_PATH, action=display_session)
"""""""""""
other
"""""""""""
#process_extracted_files()
#data_info = read_from_json(EXTR_PATH + "final_data_info.json")
#print(data_info)
#combine_json_files(EXTR_PATH + "final/")
#data = read_from_json(EXTR_PATH_SSD + "final/combined/combined.json")
#print(np.array(data["SUB5_SESS0_VIEW3.json"]).shape)
#print(f"Main finished in {time.time()-start_time:0.1f}s")
| 4,045 |
hftools/dataset/tests/test_arrayobj_funcs.py
|
extrakteon/hftools-1
| 3 |
2022967
|
#-----------------------------------------------------------------------------
# Copyright (c) 2014, HFTools Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
import pdb
import numpy as np
from numpy import newaxis
import hftools.dataset.arrayobj as aobj
import hftools.dataset.dim as dim
import hftools.dataset as ds
from hftools.testing import TestCase, skip, make_load_tests
from hftools.testing import random_value_array, random_complex_value_array,\
SKIP
basepath = os.path.split(__file__)[0]
class Test_get_new_anonymous(TestCase):
def test_get_new_anon_1(self):
dims = (dim.DimSweep("a", 3),)
anon = aobj.get_new_anonymous_dim(dims, [1, 2, 3])
self.assertIsInstance(anon, aobj.DimAnonymous)
self.assertEqual(anon.name, "ANON1")
def test_get_new_anon_2(self):
dims = (dim.DimSweep("ANON1", 3), dim.DimSweep("ANON2", 3), )
anon = aobj.get_new_anonymous_dim(dims, [1, 2, 3])
self.assertIsInstance(anon, aobj.DimAnonymous)
self.assertEqual(anon.name, "ANON1")
def test_get_new_anon_3(self):
dims = aobj.hfarray([1, 2, 3])
anon = aobj.get_new_anonymous_dim(dims, [1, 2, 3])
self.assertIsInstance(anon, aobj.DimAnonymous)
self.assertEqual(anon.name, "ANON1")
class Test_axis_handler(TestCase):
def setUp(self):
(ai, bi, ci) = (dim.DimSweep("ai", 2),
dim.DimRep("bi", 3),
dim.DimSweep("ci", 4))
self.ai = ai
self.bi = bi
self.ci = ci
self.a = aobj.hfarray(ai)
self.b = aobj.hfarray(bi) * 10
self.c = aobj.hfarray(ci) * 100
self.abc = self.a + self.c + self.b
def test_1(self):
self.assertIsNone(aobj.axis_handler(self.a, None))
def test_2(self):
res = aobj.axis_handler(self.a, "ai")
self.assertEqual(res, self.ai)
def test_3(self):
res = aobj.axis_handler(self.abc, 2)
self.assertEqual(res, self.bi)
def test_4(self):
res = aobj.axis_handler(self.abc, dim.DimRep)
self.assertEqual(res, self.bi)
def test_5(self):
res = aobj.axis_handler(self.abc, self.bi)
self.assertEqual(res, self.bi)
def test_error_1(self):
self.assertRaises(IndexError, aobj.axis_handler,
self.abc, dim.DimAnonymous)
def test_error_2(self):
self.assertRaises(IndexError, aobj.axis_handler,
self.abc, dim.DimSweep)
def test_error_3(self):
self.assertRaises(IndexError, aobj.axis_handler,
self.a, self.ci)
class Test_multiple_axis_handler(TestCase):
def setUp(self):
(ai, bi, ci) = (dim.DimSweep("ai", 2),
dim.DimRep("bi", 3),
dim.DimSweep("ci", 4))
self.ai = ai
self.bi = bi
self.ci = ci
self.a = aobj.hfarray(ai)
self.b = aobj.hfarray(bi) * 10
self.c = aobj.hfarray(ci) * 100
self.abc = self.a + self.b + self.c
def test_1(self):
self.assertEqual(aobj.multiple_axis_handler(self.a, None),
(None, None))
def test_2(self):
self.assertEqual(aobj.multiple_axis_handler(self.a, self.ai),
((self.ai,), (0,)))
def test_3(self):
self.assertEqual(aobj.multiple_axis_handler(self.a, (self.ai, )),
((self.ai,), (0,)))
def test_4(self):
self.assertEqual(aobj.multiple_axis_handler(self.a, (0, )),
((self.ai,), (0,)))
def test_5(self):
self.assertEqual(aobj.multiple_axis_handler(self.abc, (0, )),
((self.ai,), (0,)))
def test_6(self):
self.assertEqual(aobj.multiple_axis_handler(self.abc, ("ai", 1)),
((self.ai, self.ci), (0, 1)))
def test_7(self):
self.assertEqual(aobj.multiple_axis_handler(self.abc, dim.DimSweep),
((self.ai, self.ci), (0, 1)))
def test_8(self):
self.assertEqual(aobj.multiple_axis_handler(self.abc,
(self.ai, self.ci)),
((self.ai, self.ci), (0, 1)))
def test_erro_1(self):
self.assertRaises(IndexError, aobj.multiple_axis_handler,
self.a, self.ci)
| 4,594 |
examples/idioms/programs/059.0668-write-to-standard-error-stream.py
|
laowantong/paroxython
| 31 |
2023259
|
"""Write to standard error stream.
Print the message "_x is negative" to standard error (stderr), with integer _x value substitution (e.g. "-2 is negative").
Source: programming-idioms.org
"""
# Implementation author: cym13
# Created on 2015-11-30T12:37:29.950568Z
# Last modified on 2015-11-30T12:37:29.950568Z
# Version 1
# Python3
import sys
print(x, "is negative", file=sys.stderr)
| 392 |
task/models.py
|
cavidanhasanli/TaskManager
| 0 |
2023134
|
import peewee
from app.database import db
from user.models import User
class BaseModel(peewee.Model):
class Meta:
database = db
class UserIpDetail(BaseModel):
id = peewee.AutoField()
ip = peewee.CharField(null=False, max_length=255)
details = peewee.CharField(null=False, max_length=255)
user_id = peewee.ForeignKeyField(User, backref="useripdetail", on_delete="CASCADE")
| 406 |
examples/0230-navdate-navdate_collection.py
|
dnoneill/pyIIIFpres
| 12 |
2023501
|
# https://iiif.io/api/cookbook/recipe/0230-navdate/navdate-collection.json
from IIIFpres import iiifpapi3
iiifpapi3.BASE_URL = r"https://iiif.io/api/cookbook/recipe/0230-navdate/"
manifest_1 = iiifpapi3.Manifest()
manifest_1.set_id(extendbase_url="navdate_map_1-manifest.json")
manifest_1.add_label("en","1987 Chesapeake and Ohio Canal, Washington, D.C., Maryland, West Virginia, official map and guide")
manifest_1.set_navDate("1987-01-01T00:00:00+00:00")
canvas = manifest_1.add_canvas_to_items()
canvas.set_id(extendbase_url="canvas/p1")
canvas.set_height(7072)
canvas.set_width(5212)
canvas.add_label("en","1987 Map, recto and verso, with a date of publication")
annopage = canvas.add_annotationpage_to_items()
annopage.set_id(extendbase_url="page/p1/1")
annotation = annopage.add_annotation_to_items(target=canvas.id)
annotation.set_motivation("painting")
annotation.set_id(extendbase_url="annotation/p0001-image")
annotation.body.set_height(7072)
annotation.body.set_width(5212)
annotation.body.set_id("https://iiif.io/api/image/3.0/example/reference/43153e2ec7531f14dd1c9b2fc401678a-88695674/full/max/0/default.jpg")
annotation.body.set_format("image/jpeg")
annotation.body.set_type("Image")
s = annotation.body.add_service()
s.set_id("https://iiif.io/api/image/3.0/example/reference/43153e2ec7531f14dd1c9b2fc401678a-88695674/")
s.set_type("ImageService3")
s.set_profile("level1")
manifest_2 = iiifpapi3.Manifest()
manifest_2.set_id(extendbase_url="navdate_map_2-manifest.json")
manifest_2.add_label("en","1986 Chesapeake and Ohio Canal, Washington, D.C., Maryland, West Virginia, official map and guide")
manifest_2.set_navDate("1986-01-01T00:00:00+00:00")
canvas = manifest_2.add_canvas_to_items()
canvas.set_id(extendbase_url="canvas/p1")
canvas.set_height(1765)
canvas.set_width(1286)
canvas.add_label("en","1986 Map, recto and verso, with a date of publication" )
annopage = canvas.add_annotationpage_to_items()
annopage.set_id(extendbase_url="page/p1/1")
annotation = annopage.add_annotation_to_items(target=canvas.id)
annotation.set_motivation("painting")
annotation.set_id(extendbase_url="annotation/p0001-image")
annotation.body.set_height(1765)
annotation.body.set_width(1286)
annotation.body.set_id("https://iiif.io/api/image/3.0/example/reference/43153e2ec7531f14dd1c9b2fc401678a-87691274-1986/full/max/0/default.jpg")
annotation.body.set_format("image/jpeg")
annotation.body.set_type("Image")
s = annotation.body.add_service()
s.set_id("https://iiif.io/api/image/3.0/example/reference/43153e2ec7531f14dd1c9b2fc401678a-87691274-1986/")
s.set_type("ImageService3")
s.set_profile("level1")
collection = iiifpapi3.Collection()
collection.set_id("https://iiif.io/api/cookbook/recipe/0230-navdate/navdate-collection.json")
collection.add_label(language='en',text="Chesapeake and Ohio Canal map and guide pamphlets")
tbn = collection.add_thumbnail()
tbn.set_id("https://iiif.io/api/image/3.0/example/reference/43153e2ec7531f14dd1c9b2fc401678a-88695674/full/max/0/default.jpg")
tbn.set_type('Image')
tbn.set_format('image/jpeg')
tbn.set_height(300)
tbn.set_width(221)
srv = tbn.add_service()
srv.set_id("https://iiif.io/api/image/3.0/example/reference/43153e2ec7531f14dd1c9b2fc401678a-88695674")
srv.set_profile('level1')
srv.set_type('ImageService3')
collection.add_manifest_to_items(manifest_2)
collection.add_manifest_to_items(manifest_1)
| 3,361 |
external/openglcts/scripts/verify_kc_cts_rev.py
|
iabernikhin/VK-GL-CTS
| 354 |
2024220
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# Khronos OpenGL CTS
# ------------------
#
# Copyright (c) 2016 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import sys
import shutil
import argparse
import subprocess
import hashlib
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from fetch_kc_cts import SHA1
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "..", "scripts"))
from build.common import *
EXTERNAL_DIR = os.path.realpath(os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..")))
def computeChecksum (data):
return hashlib.sha256(data).hexdigest()
class Source:
def __init__(self, baseDir, extractDir):
self.baseDir = baseDir
self.extractDir = extractDir
def clean (self):
fullDstPath = os.path.join(EXTERNAL_DIR, self.baseDir, self.extractDir)
if os.path.exists(fullDstPath):
shutil.rmtree(fullDstPath, ignore_errors=False)
class GitRepo (Source):
def __init__(self, url, revision, baseDir, extractDir = "src"):
Source.__init__(self, baseDir, extractDir)
self.url = url
self.revision = revision
def update (self):
fullDstPath = os.path.join(EXTERNAL_DIR, self.baseDir, self.extractDir)
if not os.path.exists(fullDstPath):
execute(["git", "clone", "--no-checkout", self.url, fullDstPath])
pushWorkingDir(fullDstPath)
try:
execute(["git", "fetch", self.url, "+refs/heads/*:refs/remotes/origin/*"])
execute(["git", "checkout", self.revision])
finally:
popWorkingDir()
def compare_rev(self):
fullDstPath = os.path.join(EXTERNAL_DIR, self.baseDir, self.extractDir)
pushWorkingDir(fullDstPath)
try:
out = subprocess.check_output(["git", "rev-parse", "HEAD"])
if out.replace('\n', '') != SHA1:
raise Exception ("KC CTS checkout revision %s in external/fetch_kc_cts.py doesn't match KC CTS master HEAD revision %s" % (SHA1, out))
finally:
popWorkingDir()
PACKAGES = [
GitRepo(
"<EMAIL>:opengl/kc-cts.git",
"HEAD",
"kc-cts"),
]
if __name__ == "__main__":
for pkg in PACKAGES:
pkg.update()
pkg.compare_rev()
| 2,713 |
vunit/test/unit/test_test_suites.py
|
bjacobs1/vunit
| 1 |
2024118
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2018, <NAME> <EMAIL>
"""
Test the test suites
"""
from os.path import join
from unittest import TestCase
from vunit.test_suites import (TestRun)
from vunit.test_report import (PASSED, SKIPPED, FAILED)
from vunit.test.common import create_tempdir
class TestTestSuites(TestCase):
"""
Test the test suites
"""
def test_missing_results_fails_all(self):
self.assertEqual(
self._read_test_results(contents=None,
expected_test_cases=["test1", "test2"]),
{"test1": FAILED, "test2": FAILED})
def test_read_results_all_passed(self):
self.assertEqual(
self._read_test_results(contents="""\
test_start:test1
test_start:test2
test_suite_done
""",
expected_test_cases=["test1", "test2"]),
{"test1": PASSED, "test2": PASSED})
def test_read_results_suite_not_done(self):
self.assertEqual(
self._read_test_results(contents="""\
test_start:test1
test_start:test2
""",
expected_test_cases=["test1", "test2"]),
{"test1": PASSED, "test2": FAILED})
self.assertEqual(
self._read_test_results(contents="""\
test_start:test2
test_start:test1
""",
expected_test_cases=["test1", "test2"]),
{"test1": FAILED, "test2": PASSED})
def test_read_results_skipped_test(self):
self.assertEqual(
self._read_test_results(contents="""\
test_start:test1
test_suite_done
""",
expected_test_cases=["test1", "test2", "test3"]),
{"test1": PASSED, "test2": SKIPPED, "test3": SKIPPED})
    def test_read_results_anonymous_test_pass(self):
self.assertEqual(
self._read_test_results(contents="""\
test_suite_done
""",
expected_test_cases=[None]),
{None: PASSED})
    def test_read_results_anonymous_test_fail(self):
self.assertEqual(
self._read_test_results(contents="""\
""",
expected_test_cases=[None]),
{None: FAILED})
def test_read_results_unknown_test(self):
try:
self._read_test_results(
contents="""\
test_start:test1
test_start:test3
test_suite_done""",
expected_test_cases=["test1"])
except RuntimeError as exc:
self.assertIn("unknown test case test3", str(exc))
else:
assert False, "RuntimeError not raised"
@staticmethod
def _read_test_results(contents, expected_test_cases):
"""
Helper method to test the read_test_results function
"""
with create_tempdir() as path:
file_name = join(path, "vunit_results")
if contents is not None:
with open(file_name, "w") as fptr:
fptr.write(contents)
run = TestRun(simulator_if=None,
config=None,
elaborate_only=False,
test_suite_name=None,
test_cases=expected_test_cases)
return run._read_test_results(file_name=file_name) # pylint: disable=protected-access
| 3,527 |
LeetCode/2019-10-03-407-Trapping-Rain-Water-II.py
|
HeRuivio/-Algorithm
| 5 |
2023996
|
# -*- coding: utf-8 -*-
# @Author: 何睿
# @Create Date: 2019-10-03 08:11:17
# @Last Modified by: 何睿
# @Last Modified time: 2019-10-03 11:57:48
import heapq
from typing import List
class Solution:
def trapRainWater(self, heightMap: List[List[int]]) -> int:
if not heightMap or not heightMap[0]:
return 0
queue, visited, max_heigth, water = [], set(), 0, 0
row, col = len(heightMap), len(heightMap[0])
count = row * col
self.fill_border(queue, heightMap)
visited = {(row, col) for _, row, col in queue}
while queue:
if len(visited) == count:
return water
height, i, j = heapq.heappop(queue)
max_heigth = max(max_heigth, height)
water += self.visit_neighbors(queue, visited, heightMap, max_heigth, row, col, i, j)
return water
def visit_neighbors(self, queue, visited, heightMap, max_heigth, row, col, i, j):
water = 0
for x, y in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
nrow, ncol = i + x, j + y
if (nrow, ncol) not in visited and self.is_in_border(row, col, nrow, ncol):
num = heightMap[nrow][ncol]
water += max(0, max_heigth - num)
visited.add((nrow, ncol))
heapq.heappush(queue, (num, nrow, ncol))
return water
def fill_border(self, queue, heightMap):
row, col = len(heightMap), len(heightMap[0])
for i in range(row):
heapq.heappush(queue, (heightMap[i][0], i, 0))
heapq.heappush(queue, (heightMap[i][-1], i, col - 1))
for i in range(1, col - 1):
heapq.heappush(queue, (heightMap[0][i], 0, i))
heapq.heappush(queue, (heightMap[-1][i], row - 1, i))
def is_in_border(self, row, col, i, j):
return 0 <= i < row and 0 <= j < col
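# Quick check against the example from the LeetCode problem statement
# (expected amount of trapped water is 4):
# >>> Solution().trapRainWater([[1, 4, 3, 1, 3, 2], [3, 2, 1, 3, 2, 4], [2, 3, 3, 2, 3, 1]])
# 4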
| 1,899 |
ampel/ztf/base/CatalogMatchUnit.py
|
AmpelProject/Ampel-ZTF
| 1 |
2022997
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-ZTF/ampel/ztf/base/CatalogMatchUnit.py
# License: BSD-3-Clause
# Author: <NAME> <<EMAIL>>
# Date: 10.03.2021
# Last Modified Date: 10.03.2021
# Last Modified By: <NAME> <<EMAIL>>
from functools import cached_property
from typing import (
Sequence,
Dict,
Any,
Literal,
TypedDict,
Optional,
List,
Union,
overload,
)
import backoff
import requests
from requests_toolbelt.sessions import BaseUrlSession
from ampel.base.LogicalUnit import LogicalUnit
from ampel.core.ContextUnit import ContextUnit
class BaseConeSearchRequest(TypedDict):
"""
:param use: either extcats or catsHTM, depending on how the catalog is set up.
:param rs_arcsec: search radius for the cone search, in arcseconds
In case 'use' is set to 'extcats', 'catq_kwargs' can (or MUST?) contain the names of the ra and dec
keys in the catalog (see example below), all valid arguments to extcats.CatalogQuert.findclosest
can be given, such as pre- and post cone-search query filters can be passed.
    In case 'use' is set to 'catsHTM', 'catq_kwargs' SHOULD contain the names of the ra and dec
    keys in the catalog if those are different from 'ra' and 'dec'. The 'keys_to_append' parameter
    is OPTIONAL and specifies which fields from the catalog should be returned in case of a positional match:
if not present: all the fields in the given catalog will be returned.
if `list`: just take this subset of fields.
Example (SDSS_spec):
{
'use': 'extcats',
'catq_kwargs': {
'ra_key': 'ra',
'dec_key': 'dec'
},
'rs_arcsec': 3,
'keys_to_append': ['z', 'bptclass', 'subclass']
}
Example (NED):
{
'use': 'catsHTM',
'rs_arcsec': 20,
'keys_to_append': ['fuffa1', 'fuffa2', ..],
}
"""
name: str
use: Literal["extcats", "catsHTM"]
rs_arcsec: float
class ConeSearchRequest(BaseConeSearchRequest, total=False):
keys_to_append: None | Sequence[str]
pre_filter: None | dict[str, Any]
post_filter: None | dict[str, Any]
class CatalogItem(TypedDict):
body: dict[str, Any]
dist_arcsec: float
class CatalogMatchUnitBase:
"""
A mixin providing catalog matching with catalogmatch-service
"""
@cached_property
def session(self) -> BaseUrlSession:
"""
A session bound to the base URL of the catalogmatch service
"""
raise NotImplementedError
@overload
def _cone_search(
self,
method: Literal["any"],
ra: float,
dec: float,
catalogs: Sequence[ConeSearchRequest],
) -> list[bool]:
...
@overload
def _cone_search(
self,
method: Literal["nearest"],
ra: float,
dec: float,
catalogs: Sequence[ConeSearchRequest],
) -> list[None | CatalogItem]:
...
@overload
def _cone_search(
self,
method: Literal["all"],
ra: float,
dec: float,
catalogs: Sequence[ConeSearchRequest],
) -> list[None | list[CatalogItem]]:
...
@backoff.on_exception(
backoff.expo,
requests.ConnectionError,
max_tries=5,
factor=10,
)
@backoff.on_exception(
backoff.expo,
requests.HTTPError,
giveup=lambda e: e.response.status_code not in {503, 504, 429, 408},
max_time=60,
)
def _cone_search(
self,
method: Literal["any", "nearest", "all"],
ra: float,
dec: float,
catalogs: Sequence[ConeSearchRequest],
) -> list[bool] | list[None | CatalogItem] | list[None | list[CatalogItem]]:
response = self.session.post(
f"cone_search/{method}",
json={
"ra_deg": ra,
"dec_deg": dec,
"catalogs": catalogs,
},
)
response.raise_for_status()
return response.json()
def cone_search_any(
self, ra: float, dec: float, catalogs: Sequence[ConeSearchRequest]
) -> list[bool]:
return self._cone_search("any", ra, dec, catalogs)
def cone_search_nearest(
self, ra: float, dec: float, catalogs: Sequence[ConeSearchRequest]
) -> list[None | CatalogItem]:
return self._cone_search("nearest", ra, dec, catalogs)
def cone_search_all(
self, ra: float, dec: float, catalogs: Sequence[ConeSearchRequest]
) -> list[None | list[CatalogItem]]:
return self._cone_search("all", ra, dec, catalogs)
class CatalogMatchUnit(CatalogMatchUnitBase, LogicalUnit):
"""
Catalog matching for LogicalUnits
"""
require = ("ampel-ztf/catalogmatch",)
@cached_property
def session(self) -> BaseUrlSession:
assert self.resource is not None
return BaseUrlSession(base_url=self.resource["ampel-ztf/catalogmatch"])
class CatalogMatchContextUnit(CatalogMatchUnitBase, ContextUnit):
"""
Catalog matching for ContextUnits
"""
@cached_property
def session(self) -> BaseUrlSession:
return BaseUrlSession(
base_url=self.context.config.get(
"resource.ampel-ztf/catalogmatch", str, raise_exc=True
)
)
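# Illustrative request (sketch only; the catalog name, radius and coordinates
# are made up, the shape follows the ConeSearchRequest docstring above).
# From inside a LogicalUnit subclass one might write:
#   matches = self.cone_search_nearest(
#       ra=150.0, dec=2.2,
#       catalogs=[{"name": "SDSS_spec", "use": "extcats", "rs_arcsec": 3}],
#   )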
| 5,367 |
tests/test_classify_comments.py
|
drivet/mf2util
| 0 |
2023767
|
import copy
import mf2util
TEST_BLOB = {
"alternates": [
],
"items": [
{
"properties": {
"name": ["Author"],
"photo": ["http://example.com/author_img.jpg"],
"url": ["http://example.com"]
},
"type": ["h-card"],
"value": "<NAME>"
},
{
"properties": {
"content": [
{
"html": "some content",
"value": "some content"
}
],
"name": ["some title"],
"published": ["2014-05-07T17:15:44+00:00"],
"url": ["http://example.com/reply/2014/05/07/1"]
},
"type": [
"h-entry"
]
}
],
"rels": {
}
}
def test_no_reference():
blob = copy.deepcopy(TEST_BLOB)
assert mf2util.classify_comment(blob, ('http://example.com',)) == []
# add some irrelevant references
blob['items'][1]['in-reply-to'] = [
"http://werd.io/2014/homebrew-website-club-4",
"https://www.facebook.com/events/1430990723825351/"
]
assert mf2util.classify_comment(blob, ('http://example.com',)) == []
# no target url
assert mf2util.classify_comment(blob, ()) == []
def test_rsvps():
blob = copy.deepcopy(TEST_BLOB)
blob['items'][1]['properties'].update({
'in-reply-to': ['http://mydomain.com/my-post'],
'rsvp': ['yes'],
})
assert mf2util.classify_comment(
blob, ('http://mydoma.in/short', 'http://mydomain.com/my-post')) \
== ['rsvp', 'reply']
def test_invites():
blob = copy.deepcopy(TEST_BLOB)
blob['items'][1]['properties'].update({
'in-reply-to': ['http://mydomain.com/my-post'],
'invitee': [{
'name': '<NAME>',
'url': 'https://kylewm.com',
}],
})
assert mf2util.classify_comment(
blob, ('http://mydoma.in/short', 'http://mydomain.com/my-post')) \
== ['invite', 'reply']
def test_likes():
"""make sure we find likes"""
blob = copy.deepcopy(TEST_BLOB)
# add some references
blob['items'][1]['properties'].update({
'in-reply-to': ['http://someoneelse.com/post'],
'like-of': ['http://mydomain.com/my-post'],
})
assert mf2util.classify_comment(
blob, ('http://mydoma.in/short', 'http://mydomain.com/my-post')) \
== ['like']
def test_reposts():
"""make sure we find reposts"""
blob = copy.deepcopy(TEST_BLOB)
# add some references
blob['items'][1]['properties'].update({
'repost-of': ['http://mydomain.com/my-post'],
'like-of': ['http://someoneelse.com/post'],
})
assert mf2util.classify_comment(
blob, ('http://mydoma.in/short', 'http://mydomain.com/my-post')) \
== ['repost']
def test_multireply():
"""check behavior if our post is one among several posts
in a multireply"""
blob = copy.deepcopy(TEST_BLOB)
# add some references
blob['items'][1]['properties'].update({
'in-reply-to': [
'http://someoneelse.com/post',
'http://mydomain.com/my-post',
'http://athirddomain.org/permalink',
],
})
assert mf2util.classify_comment(blob, ('http://mydomain.com/my-post')) \
== ['reply']
def test_multimodal():
"""a mention can have more than one classification, make sure we find
all of them. also tests some of the alternate/historical classnames"""
blob = copy.deepcopy(TEST_BLOB)
# add some references
blob['items'][1]['properties'].update({
'reply-to': ['http://noone.im/'],
'repost-of': [
'http://someoneelse.com',
'http://mydomain.com/my-post',
],
'like': [
'http://mydoma.in/short',
'http://someoneelse.com/post',
],
})
assert sorted(
mf2util.classify_comment(
blob, ('http://mydoma.in/short', 'http://mydomain.com/my-post')))\
== ['like', 'repost']
def test_h_cite():
"""Test object references (e.g., class="p-in-reply-to h-cite")"""
blob = copy.deepcopy(TEST_BLOB)
# add some references
blob['items'][1]['properties'].update({
'in-reply-to': [{
'type': 'h-cite',
'properties': {
'url': ['http://mydomain.com/my-post'],
},
}],
})
assert mf2util.classify_comment(blob, ('http://mydomain.com/my-post',))\
== ['reply']
| 4,594 |
orchestra/contrib/settings/admin.py
|
RubenPX/django-orchestra
| 68 |
2024053
|
from django.contrib import admin, messages
from django.shortcuts import render_to_response
from django.views import generic
from django.utils.translation import ngettext, ugettext_lazy as _
from orchestra.contrib.settings import Setting
from orchestra.utils import sys
from . import parser
from .forms import SettingFormSet
class SettingView(generic.edit.FormView):
template_name = 'admin/settings/change_form.html'
reload_template_name = 'admin/settings/reload.html'
form_class = SettingFormSet
success_url = '.'
def get_context_data(self, **kwargs):
context = super(SettingView, self).get_context_data(**kwargs)
context.update({
'title': _("Change settings"),
'settings_file': parser.get_settings_file(),
})
return context
def get_initial(self):
initial_data = []
prev_app = None
account = 0
for name, setting in Setting.settings.items():
app = name.split('_')[0]
initial = {
'name': setting.name,
'help_text': setting.help_text,
'default': setting.default,
'type': type(setting.default),
'value': setting.value,
'setting': setting,
'app': app,
}
if app == 'ORCHESTRA':
initial_data.insert(account, initial)
account += 1
else:
initial_data.append(initial)
return initial_data
def form_valid(self, form):
settings = Setting.settings
changes = {}
for data in form.cleaned_data:
setting = settings[data['name']]
if not isinstance(data['value'], parser.NotSupported) and setting.editable:
if setting.value != data['value']:
# Ignore differences between lists and tuples
if (type(setting.value) != type(data['value']) and
isinstance(data['value'], list) and
tuple(data['value']) == setting.value):
continue
if setting.default == data['value']:
changes[setting.name] = parser.Remove()
else:
changes[setting.name] = data['value']
if changes:
# Display confirmation
if not self.request.POST.get('confirmation'):
settings_file = parser.get_settings_file()
new_content = parser.apply(changes)
cmd = "cat <<EOF | diff %s -\n%s\nEOF" % (settings_file, new_content)
diff = sys.run(cmd, valid_codes=(1, 0)).stdout
context = self.get_context_data(form=form)
context['diff'] = diff
if not diff:
messages.warning(self.request, _("Changes detected but no diff %s.") % changes)
return self.render_to_response(context)
n = len(changes)
# Save changes
parser.save(changes)
sys.touch_wsgi()
context = {
'message': ngettext(
_("One change successfully applied, orchestra is being restarted."),
_("%s changes successfully applied, orchestra is being restarted.") % n,
n),
}
return render_to_response(self.reload_template_name, context)
else:
messages.success(self.request, _("No changes have been detected."))
return super(SettingView, self).form_valid(form)
class SettingFileView(generic.TemplateView):
template_name = 'admin/settings/view.html'
def get_context_data(self, **kwargs):
context = super(SettingFileView, self).get_context_data(**kwargs)
settings_file = parser.get_settings_file()
with open(settings_file, 'r') as handler:
content = handler.read()
context.update({
'title': _("Settings file content"),
'settings_file': settings_file,
'content': content,
})
return context
admin.site.register_url(r'^settings/setting/view/$', SettingFileView.as_view(), 'settings_setting_view')
admin.site.register_url(r'^settings/setting/$', SettingView.as_view(), 'settings_setting_change')
| 4,378 |
src/sima/riflex/responseamount.py
|
SINTEF/simapy
| 0 |
2022777
|
# Generated with ResponseAmount
#
from enum import Enum
from enum import auto
class ResponseAmount(Enum):
""""""
MIN = auto()
MED = auto()
MAX = auto()
def label(self):
if self == ResponseAmount.MIN:
return "Minimum"
if self == ResponseAmount.MED:
return "Medium"
if self == ResponseAmount.MAX:
return "Maximum"
| 394 |
tests/utils.py
|
notnotcamscott/portraiture
| 0 |
2023023
|
import json
from runway import RunwayModel
def get_test_client(rw_model):
assert isinstance(rw_model, RunwayModel)
rw_model.app.config['TESTING'] = True
return rw_model.app.test_client()
def get_manifest(client):
response = client.get('/meta')
return json.loads(response.data)
| 299 |
examples/squareworld_example.py
|
omardrwch/rl_exploration_benchmark
| 1 |
2023467
|
from rlxp.envs import SquareWorld
from rlxp.rendering import render_env2d
env = SquareWorld()
env.enable_rendering()
for tt in range(10):
env.step(env.action_space.sample())
render_env2d(env)
| 199 |
decision-trees/python/objective_functions.py
|
rjgpacheco/decision-trees
| 0 |
2024238
|
"""
Implement decision functions for decision trees
"""
import numpy as np
from utils import is_left, is_right, to_array
def gini_impurity(y):
categories, counts = np.unique(y, return_counts=True)
p = counts / counts.sum() # Class probabilities
return 1 - np.multiply(p, p).sum() # Final Gini calculation
def gini_impurity_split(x, y, boundary):
y = to_array(y)
left = is_left(x, boundary)
right = is_right(x, boundary)
gini_l = gini_impurity(y[left])
gini_r = gini_impurity(y[right])
gini_split = gini_l * left.sum() + gini_r * right.sum()
gini_split = gini_split / len(y)
return gini_split
def information_gain():
    # Placeholder in the original module: an entropy-based information gain
    # split criterion has not been implemented yet.
    return None
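# Worked example (illustrative, assuming the functions above):
#   gini_impurity([0, 0, 1, 1])  -> 1 - (0.5**2 + 0.5**2) == 0.5  (maximally mixed node)
#   gini_impurity([1, 1, 1, 1])  -> 0.0                           (pure node)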
| 687 |
deployment/pulumi_aws_fargate/pulumi_redata/pulumi_redata/service.py
|
jwarlander/redata
| 0 |
2022657
|
from typing import List
import pulumi_aws as aws
from pulumi import ComponentResource, Output, Resource, ResourceOptions
class BackendService(ComponentResource):
def __init__(self,
name,
cluster,
subnets,
task_definition,
load_balancers: List[aws.ecs.ServiceLoadBalancerArgs] = None,
namespace_id = None,
security_groups = None,
opts: ResourceOptions = None):
super().__init__("redata:service:BackendService", name, {}, opts)
svc_registries_args = None
if namespace_id is not None:
sd_svc = aws.servicediscovery.Service(f"{name}-sd-svc",
name=name,
dns_config=aws.servicediscovery.ServiceDnsConfigArgs(
namespace_id=namespace_id,
dns_records=[aws.servicediscovery.ServiceDnsConfigDnsRecordArgs(ttl=10, type="A")],
routing_policy="MULTIVALUE",
),
health_check_custom_config=aws.servicediscovery.ServiceHealthCheckCustomConfigArgs(
failure_threshold=1,
),
opts=ResourceOptions(parent=self, delete_before_replace=True)
)
svc_registries_args = aws.ecs.ServiceServiceRegistriesArgs(registry_arn=sd_svc.arn)
self.service = aws.ecs.Service(f"{name}-svc",
cluster=cluster,
desired_count=1,
launch_type='FARGATE',
platform_version='1.4.0',
service_registries=svc_registries_args,
task_definition=task_definition,
network_configuration=aws.ecs.ServiceNetworkConfigurationArgs(
subnets=subnets,
security_groups=security_groups,
),
load_balancers=load_balancers,
opts=ResourceOptions(parent=self)
)
self.register_outputs({})
class WebService(ComponentResource):
def __init__(self,
name,
cluster,
health_check_path,
listener_arn,
security_groups,
service_path,
service_port,
subnets,
task_definition,
vpc_id,
namespace_id = None,
opts: ResourceOptions = None):
super().__init__("redata:service:WebService", name, {}, opts)
tg = aws.lb.TargetGroup(f"{name}-tg",
health_check=aws.lb.TargetGroupHealthCheckArgs(
path=health_check_path
),
port=service_port,
protocol='HTTP',
target_type='ip',
vpc_id=vpc_id,
opts=ResourceOptions(parent=self)
)
lr = aws.lb.ListenerRule(f"{name}-listener-rule",
listener_arn=listener_arn,
actions=[aws.lb.ListenerRuleActionArgs(
type="forward",
target_group_arn=tg.arn,
)],
conditions=[
aws.lb.ListenerRuleConditionArgs(
path_pattern=aws.lb.ListenerRuleConditionPathPatternArgs(
values=[f"{service_path}*"],
),
),
],
opts=ResourceOptions(parent=self)
)
self.service = BackendService(name,
cluster=cluster,
subnets=subnets,
task_definition=task_definition,
namespace_id=namespace_id,
security_groups=security_groups,
load_balancers=[aws.ecs.ServiceLoadBalancerArgs(
target_group_arn=tg.arn,
container_name=f"redata-{name}",
container_port=service_port,
)],
opts=ResourceOptions(parent=self),
)
self.register_outputs({})
| 3,918 |
regexlib/2021-5-15/python_re2_test_file/regexlib_1114.py
|
yetingli/ReDoS-Benchmarks
| 1 |
2023081
|
# 1114
# ^p(ost)?[ |\.]*o(ffice)?[ |\.]*(box)?[ 0-9]*[^[a-z ]]*
# POLYNOMIAL
# nums:5
# POLYNOMIAL AttackString:"po"+" "*5000+"! _1_POA(i)"
import re2 as re
from time import perf_counter
regex = """^p(ost)?[ |\.]*o(ffice)?[ |\.]*(box)?[ 0-9]*[^[a-z ]]*"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "po" + " " * i * 10000 + "! _1_POA(i)"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!")
| 559 |
scripts/profile_cpu.py
|
hyenal/tensorflow-image-models
| 154 |
2023439
|
"""
Script to measure inference speed on CPU.
Copyright 2021 <NAME>
"""
from pathlib import Path
import click
import pandas as pd
import tensorflow as tf
import tfimm
from tfimm.utils.profile import time_model
@click.command()
@click.option("--results-file", help="Where to save results")
@click.option("--name-filter", type=str, default="", help="Regex to include models")
@click.option("--module", type=str, default="", help="Filter models by module")
@click.option("--exclude-filters", type=str, default="", help="Regex to exclude models")
@click.option("--input-size", type=int, default=None, help="Model input resolution")
@click.option("--nb-classes", type=int, default=None, help="Number of classes")
@click.option("--ignore-results/--no-ignore-results", default=False)
def main(
results_file,
name_filter,
module,
exclude_filters,
input_size,
nb_classes,
ignore_results,
):
"""
Main function to do the work.
The parameters `name_filter`, `module` and `exclude_filters` are passed directly to
`tfimm.list_models` to find which models to profile.
If `--ignore-results` is set, we ignore any results already existing in the results
file and rerun profiling for all models. Otherwise (default) we run profiling only
on models not already in the results file.
"""
model_names = tfimm.list_models(
name_filter=name_filter, module=module, exclude_filters=exclude_filters
)
results_file = Path(results_file)
if results_file.exists() and not ignore_results:
results_df = pd.read_csv(results_file, index_col=0)
else:
results_df = pd.DataFrame(
columns=[
"inference_time",
"inference_img_per_sec",
]
)
results_df.index.name = "model"
model_names = [name for name in model_names if name not in results_df.index]
for model_name in model_names:
print(f"Model: {model_name}. ", end="")
try:
img_per_sec = time_model(
model_name,
target="inference",
input_size=input_size,
nb_classes=nb_classes,
batch_size=1,
float_policy="float32",
nb_batches=5,
)
duration = 1.0 / img_per_sec
except tf.errors.InvalidArgumentError:
img_per_sec = 0
duration = 0
results_df.loc[model_name, "inference_time"] = duration
results_df.loc[model_name, "inference_img_per_sec"] = img_per_sec
print(f"Time: {duration:.3f}.")
results_df.to_csv(results_file)
# Some final massaging of results
results_df.sort_index(inplace=True)
results_df.to_csv(results_file)
if __name__ == "__main__":
main()
| 2,805 |
moodle/mod/assign/assignment.py
|
Hardikris/moodlepy
| 0 |
2022773
|
from typing import List, Optional
from moodle import ResponsesFactory, MoodleWarning
from moodle.attr import dataclass, field
@dataclass
class File:
"""File
Args:
filename (Optional[str]): File name.
filepath (Optional[str]): File path.
filesize (Optional[int]): File size.
fileurl (Optional[str]): Downloadable file url.
timemodified (Optional[int]): Time modified.
mimetype (Optional[str]): File mime type.
isexternalfile (Optional[int]): Whether is an external file.
repositorytype (Optional[str]): The repository type for external files.
"""
filename: Optional[str]
filepath: Optional[str]
filesize: Optional[int]
fileurl: Optional[str]
timemodified: Optional[int]
mimetype: Optional[str]
isexternalfile: Optional[int]
repositorytype: Optional[str]
@dataclass
class Config:
"""Config
Args
id (Optional[int]): assign_plugin_config id
assignment (Optional[int]): assignment id
plugin (str): plugin
subtype (str): subtype
name (str): name
value (str): value
"""
id: Optional[int]
assignment: Optional[int]
plugin: str
subtype: str
name: str
value: str
@dataclass
class Assignment:
"""Assigment
Args:
id (int): assignment id
cmid (int): course module id
course (int): course id
name (str): assignment name
nosubmissions (int): no submissions
submissiondrafts (int): submissions drafts
sendnotifications (int): send notifications
sendlatenotifications (int): send notifications
sendstudentnotifications (int): send student notifications (default)
duedate (int): assignment due date
allowsubmissionsfromdate (int): allow submissions from date
grade (int): grade type
timemodified (int): last time assignment was modified
completionsubmit (int): if enabled, set activity as complete following submission
cutoffdate (int): date after which submission is not accepted without an extension
gradingduedate (int): the expected date for marking the submissions
teamsubmission (int): if enabled, students submit as a team
requireallteammemberssubmit (int): if enabled, all team members must submit
teamsubmissiongroupingid (int): the grouping id for the team submission groups
blindmarking (int): if enabled, hide identities until reveal identities actioned
hidegrader (int): If enabled, hide grader to student
revealidentities (int): show identities for a blind marking assignment
attemptreopenmethod (str): method used to control opening new attempts
maxattempts (int): maximum number of attempts allowed
markingworkflow (int): enable marking workflow
markingallocation (int): enable marking allocation
requiresubmissionstatement (int): student must accept submission statement
preventsubmissionnotingroup (Optional[int]): Prevent submission not in group
submissionstatement (Optional[str]): Submission statement formatted.
submissionstatementformat (int): submissionstatement format (1 = HTML, 0 = MOODLE, 2 = PLAIN or 4 = MARKDOWN)
configs (Config): configuration settings
intro (Optional[str]): assignment intro, not allways returned because it deppends on the activity configuration
introformat (Optional[int]): intro format (1 = HTML, 0 = MOODLE, 2 = PLAIN or 4 = MARKDOWN)
introfiles (List[File]): Files in the introduction text
introattachments (List[File]): intro attachments files
"""
id: int
cmid: int
course: int
name: str
nosubmissions: int
submissiondrafts: int
sendnotifications: int
sendlatenotifications: int
sendstudentnotifications: int
duedate: int
allowsubmissionsfromdate: int
grade: int
timemodified: int
completionsubmit: int
cutoffdate: int
gradingduedate: int
teamsubmission: int
requireallteammemberssubmit: int
teamsubmissiongroupingid: int
blindmarking: int
hidegrader: int
revealidentities: int
attemptreopenmethod: str
maxattempts: int
markingworkflow: int
markingallocation: int
requiresubmissionstatement: int
preventsubmissionnotingroup: Optional[int]
submissionstatement: Optional[str]
submissionstatementformat: int
configs: Config
intro: Optional[str]
introformat: Optional[int]
introfiles: List[File] = field(factory=list)
introattachments: List[File] = field(factory=list)
@dataclass
class AssignmentCourse(ResponsesFactory[Assignment]):
"""Courses with assignment
Args:
id (int): course id
fullname (str): course full name
shortname (str): course short name
timemodified (int): last time modified
assignments (List[Assignment]): assignment info
"""
id: int
fullname: str
shortname: str
timemodified: int
assignments: List[Assignment] = field(factory=list)
@property
def items(self) -> List[Assignment]:
return self.assignments
@dataclass
class Assignments(ResponsesFactory[AssignmentCourse]):
"""Assigments from get assignments
Args:
courses (List[AssignmentCourse]): List of course with assigments
warnings (List[Warning]): List of warnings
"""
courses: List[AssignmentCourse] = field(factory=list)
warnings: List[MoodleWarning] = field(factory=list)
@property
def items(self) -> List[AssignmentCourse]:
return self.courses
| 5,638 |
package/crud/cli/commands/check.py
|
derekmerck/pycrud
| 0 |
2023878
|
import click
from crud.abc import Endpoint
from crud.cli.utils import CLICK_ENDPOINT
@click.command(short_help="Check endpoint status")
@click.argument("endpoint", type=CLICK_ENDPOINT)
@click.pass_context
def check(ctx, endpoint: Endpoint):
"""Check endpoint status
\b
$ crud-cli check redis
"""
click.echo(click.style('Check Endpoint Status', underline=True, bold=True))
avail = endpoint.check()
s = "{}: {}".format(endpoint.name, "Ready" if avail else "Unavailable")
if avail:
click.echo(click.style(s, fg="green"))
else:
click.echo(click.style(s, fg="red"))
| 617 |
pathways/urls.py
|
CodeForBuffalo/affordable_water
| 2 |
2024359
|
from django.urls import path
from pathways import views
urlpatterns = [
path('', views.HomeView.as_view(), name='pathways-home'),
path('about/', views.AboutView.as_view(),name='pathways-about'),
path('nondiscrimination/', views.NondiscriminationView.as_view(), name='pathways-nondiscrimination'),
path('privacy/', views.PrivacyView.as_view(), name='pathways-privacy'),
path('metrics/', views.ProgramMetricsView.as_view(), name='pathways-metrics'),
path('apply/', views.ApplyOverviewAssistanceView.as_view(),
name='pathways-apply'),
path('apply/discount-overview/', views.ApplyDiscountView.as_view(),
name='pathways-apply-discount-overview'),
# Amnesty Debt Forgiveness
path('forgive/overview/', views.ForgiveOverviewView.as_view(),
name='pathways-forgive-overview'),
path('forgive/city-resident/', views.ForgiveCityResidentView.as_view(),
name='pathways-forgive-city-resident'),
path('forgive/additional-questions/', views.ForgiveAdditionalQuestionsView.as_view(),
name='pathways-forgive-additional-questions'),
path('forgive/resident-info/', views.ForgiveResidentInfoView.as_view(),
name='pathways-forgive-resident-info'),
path('forgive/refer/', views.ForgiveReferralView.as_view(),
name='pathways-forgive-refer'),
path('forgive/review-application/', views.ForgiveReviewApplicationView.as_view(),
name='pathways-forgive-review-application'),
path('forgive/confirmation/', views.ForgiveConfirmationView.as_view(),
name='pathways-forgive-confirmation'),
path('apply/city-resident/', views.CityResidentView.as_view(),
name='pathways-apply-city-resident'),
path('apply/non-resident/', views.NonResidentView.as_view(),
name='pathways-apply-non-resident'),
# Household
path('apply/household-size/', views.HouseholdSizeView.as_view(),
name='pathways-apply-household-size'),
path('apply/household-benefits/', views.HouseholdBenefitsView.as_view(),
name='pathways-apply-household-benefits'),
# Income
path('apply/household-contributors/', views.HouseholdContributorsView.as_view(),
name='pathways-apply-household-contributors'),
path('apply/job-status/', views.JobStatusView.as_view(),
name='pathways-apply-job-status'),
path('apply/self-employment/', views.SelfEmploymentView.as_view(),
name='pathways-apply-self-employment'),
path('apply/number-of-jobs/', views.NumberOfJobsView.as_view(),
name='pathways-apply-number-of-jobs'),
path('apply/income-methods/', views.IncomeMethodsView.as_view(),
name='pathways-apply-income-methods'),
path('apply/income/', views.IncomeView.as_view(),
name='pathways-apply-income'),
path('apply/other-income-sources/', views.OtherIncomeSourcesView.as_view(),
name='pathways-apply-other-income-sources'),
path('apply/non-job-income/', views.NonJobIncomeView.as_view(),
name='pathways-apply-non-job-income'),
# Eligibility
path('apply/review-eligibility/', views.ReviewEligibilityView.as_view(),
name='pathways-apply-review-eligibility'),
path('apply/eligibility/', views.EligibilityView.as_view(),
name='pathways-apply-eligibility'),
# Additional Info
path('apply/additional-questions/', views.AdditionalQuestionsView.as_view(),
name='pathways-apply-additional-questions'),
path('apply/resident-info/', views.ResidentInfoView.as_view(),
name='pathways-apply-resident-info'),
path('apply/account-holder/', views.AccountHolderView.as_view(),
name='pathways-apply-account-holder'),
path('apply/address/', views.AddressView.as_view(),
name='pathways-apply-address'),
path('apply/contact-info/', views.ContactInfoView.as_view(),
name='pathways-apply-contact-info'),
path('apply/account-number/', views.AccountNumberView.as_view(),
name='pathways-apply-account-number'),
# Review and sign
path('apply/review-application/', views.ReviewApplicationView.as_view(),
name='pathways-apply-review-application'),
path('apply/legal/', views.LegalView.as_view(),
name='pathways-apply-legal'),
path('apply/refer/', views.ReferralView.as_view(),
name='pathways-apply-refer'),
path('apply/signature/', views.SignatureView.as_view(),
name='pathways-apply-signature'),
# Documents
path('apply/documents-overview/', views.DocumentOverviewView.as_view(),
name='pathways-apply-documents-overview'),
path('apply/documents-income/', views.DocumentIncomeView.as_view(),
name='pathways-apply-documents-income'),
path('apply/documents-residence/', views.DocumentResidenceView.as_view(),
name='pathways-apply-documents-residence'),
path('apply/confirmation/', views.ConfirmationView.as_view(),
name='pathways-apply-confirmation'),
path('later-documents/', views.LaterDocumentsView.as_view(),
name='pathways-later-documents'),
path('later-documents/no-match-found/', views.NoDocumentFoundView.as_view(),
name='pathways-later-documents-not-found'),
path('later-documents/more-info-needed/', views.MoreDocumentInfoRequiredView.as_view(),
name='pathways-later-documents-more-info')
]
| 5,307 |
Chapter 04/Chap04_Example4.33.py
|
Anancha/Programming-Techniques-using-Python
| 0 |
2023844
|
from functools import reduce
myseq_list = [0,1,2,3,4,7,8,9]
mysum = reduce(lambda a, b: a+b, myseq_list)
print(mysum)
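# reduce folds the list left to right: ((((((0+1)+2)+3)+4)+7)+8)+9 == 34, so this prints 34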
| 119 |
schttp/utils.py
|
h0nde/schttp
| 9 |
2023469
|
from .models import URL, Response
from .structures import CaseInsensitiveDict
from .exceptions import *
from functools import lru_cache
from socket import socket, gethostbyname
from base64 import b64encode
from gzip import decompress as gzip_decompress
from zlib import decompress as zlib_decompress
from brotli import decompress as brotli_decompress
default_ports = {
"http": 80,
"https": 443
}
def parse_url(url):
scheme, _, url = url.partition(":")
url, _, path = url[2:].partition("/")
auth, _, url = url.rpartition("@")
hostname, _, port = url.partition(":")
scheme = scheme.lower()
auth = auth if auth else None
hostname = hostname.lower()
port = port and int(port) or default_ports.get(scheme)
path = "/" + path.partition("#")[0]
return URL(
scheme=scheme,
auth=auth,
hostname=hostname,
port=port,
path=path
)
@lru_cache()
def ip_from_hostname(hostname):
return gethostbyname(hostname)
def tunnel_connect(conn, proxy_url, address):
conn.connect((proxy_url.hostname, proxy_url.port))
if proxy_url.scheme in ("http", "https"):
# HTTP proxy
proxy_headers = CaseInsensitiveDict()
if proxy_url.auth:
proxy_headers["Proxy-Authorization"] = "Basic " + b64encode(proxy_url.auth.encode()).decode()
# Send initial CONNECT request to proxy server.
send_request(conn, "CONNECT", f"{address[0]}:{address[1]}", tuple(proxy_headers.items()))
if not (resp := conn.recv(4096)).partition(b" ")[2].startswith(b"200"):
# Proxy server did not return status 200 for initial CONNECT request.
raise ProxyError(
f"Malformed CONNECT response: {resp.splitlines()[0]}")
return True
raise SchemeNotImplemented(
f"'{proxy_url.scheme}' is not a supported proxy scheme")
def send_request(conn, method, path, headers, body=None):
conn.sendall(b"".join((
# Status line
(method + " " + path + " HTTP/1.1\r\n").encode(),
# Headers
"".join([
name + ": " + str(value) + "\r\n"
for name, value in headers
if value is not None
]).encode(),
# Body separator
b"\r\n",
# Body
body is not None and body or b""
)))
def get_response(conn, chunk_size):
resp, _, resp_body = conn.recv(49152).partition(b"\r\n\r\n")
if not resp:
raise EmptyResponse("Empty response received")
status_line, _, resp_headers = resp.decode().partition("\r\n")
status, message = status_line.split(" ", 2)[1:]
status = int(status)
resp_headers = CaseInsensitiveDict(
line.split(": ", 1)
for line in resp_headers.splitlines()
)
resp_body = stream_body(
conn, resp_headers,
initial_body=resp_body,
chunk_size=chunk_size)
if (encoding := resp_headers.get("Content-Encoding")):
resp_body = decode_content(resp_body, encoding)
return Response(
status=status,
message=message,
headers=resp_headers,
body=resp_body)
def stream_body(conn, headers, chunk_size, initial_body=None):
body = initial_body or b""
if (exp_length := headers.get("content-length")):
# Content-Length
exp_length = int(exp_length)
while exp_length > len(body):
body += conn.recv(chunk_size)
return body
elif headers.get("transfer-encoding") == "chunked":
# Transfer-Encoding: chunked
while not body.endswith(b"0\r\n\r\n"):
body += conn.recv(chunk_size)
temp_body = b""
index = 0
while True:
new_index = body.find(b"\r\n", index)
length = int(body[index : new_index], 16)
if not length:
break
index = new_index + 2 + length + 2
temp_body += body[new_index + 2 : index - 2]
return temp_body
else:
# No transfer header specified.
# Stream chunks until an empty one is received.
while True:
if (chunk := conn.recv(chunk_size)):
body += chunk
else:
break
return body
def decode_content(data, encoding):
if encoding == "gzip":
return gzip_decompress(data)
elif encoding == "deflate":
return zlib_decompress(data, -15)
elif encoding == "br":
return brotli_decompress(data)
return data
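# Illustrative round-trip of the URL parser above (not part of the original module):
if __name__ == "__main__":
    _demo = parse_url("https://user:pw@example.com:8443/path?q=1#frag")
    # Expected fields: scheme="https", auth="user:pw", hostname="example.com",
    # port=8443, path="/path?q=1" (the fragment is dropped; when no port is
    # given, the default from default_ports is used).
    print(_demo)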
| 4,517 |
Projects/cgi-app/start.py
|
tonysulfaro/MI-250
| 0 |
2023306
|
from http.server import CGIHTTPRequestHandler, HTTPServer
def runCgi():
print('starting server...')
# Server settings
server_address = ('127.0.0.1', 8989)
handler = CGIHTTPRequestHandler
# I can configure the directories I want to use here
handler.cgi_directories = ['/cgi']
httpd = HTTPServer(server_address, handler)
print('running server...')
print('now listening on port: ' + str(server_address[1]))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
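# Note (illustrative): with cgi_directories = ['/cgi'], a script saved as
# ./cgi/hello.py would be executed when requesting
# http://127.0.0.1:8989/cgi/hello.py while the server is running.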
def main():
runCgi()
if __name__ == "__main__":
main()
| 620 |
python/tests/test_hashtable.py
|
Yonatan1P/data-structures-and-algorithms
| 1 |
2024358
|
from challenges.hashtable.hashtable import Hashtable
def test_create_hashtable():
assert Hashtable()
def test_add_key():
hashtable = Hashtable()
hashtable.add('hi','my name is WHO')
assert hashtable._buckets[hashtable._hash('hi')]
def test_get_value():
table = Hashtable()
table.add('hey','my name is WHERE')
actual = table.get('hey')
expected = 'my name is WHERE'
assert actual == expected
def test_get_null_key():
table = Hashtable()
actual = table.get('hey')
expected = 'Null'
assert actual == expected
def test_handle_collision():
table = Hashtable()
table.add('hi', 'my name is WHAT')
table.add('Ñ','my name is WHO')
assert table._hash('Ñ') == table._hash('hi')
def test_get_second_in_bueket():
table = Hashtable()
table.add('hi', 'my name is WHAT')
table.add('Ñ','my name is WHO')
actual = table.get('Ñ')
expected = 'my name is WHO'
assert actual == expected
def test_contains():
table = Hashtable()
table.add('hi', 'my name is WHAT')
assert table.contains('hi')
def test_add_key_in_range():
    table = Hashtable()
    table.add('hi', 'my name is WHAT')
    # Plausible completion of this test: the hashed index must fall inside the bucket list.
    assert table._hash('hi') in range(len(table._buckets))
| 1,177 |
exercicios/06. estrutura de controle/057.py
|
augustolimads/Exercicios_Python
| 0 |
2024052
|
sexo = str(input('Digite o seu sexo[M/F]: ')).strip().upper()[0]
while sexo not in 'MmFf':
print('Digite novamente...')
sexo = str(input('Digite o seu sexo[M/F]: ')).strip().upper()[0]
| 192 |
site_scons/site_tools/chromium_builders.py
|
rwatson/chromium-capsicum
| 11 |
2024290
|
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tool module for adding, to a construction environment, Chromium-specific
wrappers around SCons builders. This gives us a central place for any
customization we need to make to the different things we build.
"""
import sys
from SCons.Script import *
class Null(object):
def __new__(cls, *args, **kwargs):
if '_inst' not in vars(cls):
cls._inst = super(type, cls).__new__(cls, *args, **kwargs)
return cls._inst
def __init__(self, *args, **kwargs): pass
def __call__(self, *args, **kwargs): return self
def __repr__(self): return "Null()"
def __nonzero__(self): return False
def __getattr__(self, name): return self
def __setattr__(self, name, val): return self
def __delattr__(self, name): return self
def __getitem__(self, name): return self
def generate(env):
# Add the grit tool to the base environment because we use this a lot.
sys.path.append(env.Dir('$SRC_DIR/tools/grit').abspath)
env.Tool('scons', toolpath=[env.Dir('$SRC_DIR/tools/grit/grit')])
# Add the repack python script tool that we use in multiple places.
sys.path.append(env.Dir('$SRC_DIR/tools/data_pack').abspath)
env.Tool('scons', toolpath=[env.Dir('$SRC_DIR/tools/data_pack/')])
def exists(env):
return True
| 1,408 |
binarize/utilDataGenerator.py
|
caroacostatovany/hackathon_RIIAA2021
| 0 |
2023917
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import random
import math
import cv2
import numpy as np
from keras import backend as K
# ----------------------------------------------------------------------------
def load_files(array_x_files, x_sufix, y_sufix):
x_data = []
y_data = []
for fname_x in array_x_files:
fname_y = fname_x.replace(x_sufix, y_sufix)
img_x = cv2.imread(fname_x, cv2.IMREAD_GRAYSCALE)
img_y = cv2.imread(fname_y, cv2.IMREAD_GRAYSCALE)
x_data.append(img_x)
y_data.append(img_y)
x_data = np.asarray(x_data).astype('float32')
x_data = 255. - x_data
y_data = np.asarray(y_data).astype('float32') / 255.
y_data = 1. - y_data
return x_data, y_data
# ----------------------------------------------------------------------------
def generate_chunks(array_x_files, x_sufix, y_sufix, window_size, step_size):
x_data = []
y_data = []
for fname_x in array_x_files:
fname_y = fname_x.replace(x_sufix, y_sufix)
img_x = cv2.imread(fname_x, cv2.IMREAD_GRAYSCALE)
img_y = cv2.imread(fname_y, cv2.IMREAD_GRAYSCALE)
if img_x.shape[0] < window_size or img_x.shape[1] < window_size: # Scale approach
new_rows = window_size if img_x.shape[0] < window_size else img_x.shape[0]
new_cols = window_size if img_x.shape[1] < window_size else img_x.shape[1]
img_x = cv2.resize(img_x, (new_cols, new_rows), interpolation = cv2.INTER_CUBIC)
img_y = cv2.resize(img_y, (new_cols, new_rows), interpolation = cv2.INTER_CUBIC)
for (x, y, window) in sliding_window(img_x, stepSize=step_size, windowSize=(window_size, window_size)):
if window.shape[0] != window_size or window.shape[1] != window_size: # if the window does not meet our desired window size, ignore it
continue
x_data.append( window.copy() )
for (x, y, window) in sliding_window(img_y, stepSize=step_size, windowSize=(window_size, window_size)):
if window.shape[0] != window_size or window.shape[1] != window_size: # if the window does not meet our desired window size, ignore it
continue
y_data.append( window.copy() )
x_data = np.asarray(x_data).astype('float32')
x_data = 255. - x_data
y_data = np.asarray(y_data).astype('float32') / 255.
y_data = 1. - y_data
print('x_data min:', np.min(x_data), ' - mean:', np.mean(x_data), ' - max:', np.max(x_data))
print('y_data min:', np.min(y_data), ' - mean:', np.mean(y_data), ' - max:', np.max(y_data))
if K.image_data_format() == 'channels_first':
x_data = x_data.reshape(x_data.shape[0], 1, x_data.shape[1], x_data.shape[2]) # channel_first
y_data = y_data.reshape(y_data.shape[0], 1, y_data.shape[1], y_data.shape[2]) # channel_first
else:
x_data = x_data.reshape(x_data.shape[0], x_data.shape[1], x_data.shape[2], 1)
y_data = y_data.reshape(y_data.shape[0], y_data.shape[1], y_data.shape[2], 1)
return x_data, y_data
# ----------------------------------------------------------------------------
# slide a window across the image
def sliding_window(img, stepSize, windowSize):
n_steps_y = int( math.ceil( img.shape[0] / float(stepSize) ) )
n_steps_x = int( math.ceil( img.shape[1] / float(stepSize) ) )
for y in range(n_steps_y):
for x in range(n_steps_x):
posX = x * stepSize
posY = y * stepSize
posToX = posX + windowSize[0]
posToY = posY + windowSize[1]
if posToX > img.shape[1]:
posToX = img.shape[1] - 1
posX = posToX - windowSize[0]
if posToY > img.shape[0]:
posToY = img.shape[0] - 1
posY = posToY - windowSize[1]
yield (posX, posY, img[posY:posToY, posX:posToX]) # yield the current window
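# Example (illustrative): for a 100x100 image with stepSize=50 and windowSize=(60, 60),
# the generator clamps windows to the image border, yielding top-left corners
# (0, 0), (39, 0), (0, 39) and (39, 39) -- every chunk stays exactly 60x60.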
# ----------------------------------------------------------------------------
class LazyFileLoader:
def __init__(self, array_x_files, x_sufix, y_sufix, page_size):
self.array_x_files = array_x_files
self.x_sufix = x_sufix
self.y_sufix = y_sufix
self.pos = 0
if page_size <= 0:
self.page_size = len(array_x_files)
else:
self.page_size = page_size
def __len__(self):
return len(self.array_x_files)
def __iter__(self):
return self
def __next__(self):
return self.next()
def truncate_to_size(self, truncate_to):
self.array_x_files = self.array_x_files[0:truncate_to]
def set_x_files(self, array_x_files):
self.array_x_files = array_x_files
def reset(self):
self.pos = 0
def get_pos(self):
return self.pos
def set_pos(self, pos):
self.pos = pos
def shuffle(self):
random.shuffle(self.array_x_files)
def next(self):
psize = self.page_size
if self.pos + psize >= len(self.array_x_files): # last page?
if self.pos >= len(self.array_x_files):
raise StopIteration
else:
psize = len(self.array_x_files) - self.pos
print('> Loading page from', self.pos, 'to', self.pos + psize, '...')
X_data, Y_data = load_files(self.array_x_files[self.pos:self.pos + psize], self.x_sufix, self.y_sufix)
self.pos += self.page_size
return X_data, Y_data
# ----------------------------------------------------------------------------
class LazyChunkGenerator(LazyFileLoader):
def __init__(self, array_x_files, x_sufix, y_sufix, page_size, window_size, step_size):
LazyFileLoader.__init__(self, array_x_files, x_sufix, y_sufix, page_size)
self.window_size = window_size
self.step_size = step_size
def next(self):
psize = self.page_size
if self.pos + psize >= len(self.array_x_files): # last page?
if self.pos >= len(self.array_x_files):
raise StopIteration
else:
psize = len(self.array_x_files) - self.pos
print('> Loading page from', self.pos, 'to', self.pos + psize, '...')
X_data, Y_data = generate_chunks(self.array_x_files[self.pos:self.pos + psize], self.x_sufix, self.y_sufix, self.window_size, self.step_size)
self.pos += self.page_size
return X_data, Y_data
| 6,388 |
neutron_fwaas/services/firewall/drivers/dptech/driver_client.py
|
Woody89/neutron-fwaas
| 0 |
2022794
|
from neutron.i18n import _
from oslo_config import cfg
from zeep import Client as ZeepClient
from requests import Session
from zeep.transports import Transport
from requests.auth import HTTPBasicAuth
import logging
logging.basicConfig(level=logging.INFO)
logging.getLogger('zeep.transports').setLevel(logging.DEBUG)
device_opts = [
cfg.StrOpt('host',
default='localhost',
help=_('The server hostname/ip to connect to.')),
cfg.StrOpt('username',
default='admin',
help=_('The username which use for connect backend '
'firewall device')),
cfg.StrOpt('password',
default='<PASSWORD>',
help=_('The password which use to connect backend '
'firewall device')),
cfg.StrOpt('protocol',
default='https',
help=_("The protocol of request 'http|https'"))
]
device_group = cfg.OptGroup(
name="device",
title="device info in the Group"
)
CONF = cfg.CONF
CONF.register_group(device_group)
CONF.register_opts(device_opts, device_group)
ZEEP_CLIENT = None
username = None
password = <PASSWORD>
class Client():
def __init__(self):
self.host = CONF.device.host
self.username = CONF.device.username
self.password = CONF.device.password
@classmethod
def get_instance(cls):
global ZEEP_CLIENT
if not ZEEP_CLIENT:
ZEEP_CLIENT = cls()
return ZEEP_CLIENT
def get_client(self, url_dir, host_ip, username=None, password=None):
try:
if username:
self.username = username
if password:
self.password = password
if not host_ip:
host_ip = CONF.device.host
protocol = CONF.device.protocol
# host_ip = "192.168.1.234"
ip_link = protocol + '://%s' % host_ip
full_url = "%s%s" % (ip_link, url_dir)
session = Session()
session.verify = False
session.auth = HTTPBasicAuth(self.username, self.password)
transport = Transport(session=session)
client = ZeepClient(full_url, transport=transport)
service = client.service
except Exception as e:
raise
return service
| 2,336 |
tests/unit/test_output_utils.py
|
dav-pascual/mrack
| 1 |
2024113
|
# Copyright 2021 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mrack.outputs.utils"""
from unittest.mock import patch
from mrack.outputs.utils import get_external_id
@patch("mrack.outputs.utils.resolve_hostname")
def test_get_external_id(mock_resolve, provisioning_config, host1_aws, metahost1):
"""
Test that resolve_hostname is not called when it is not supposed to be.
"""
dns = "my.dns.name"
mock_resolve.return_value = dns
# By default, it resolves DNS
ext_id = get_external_id(host1_aws, metahost1, provisioning_config)
assert ext_id == dns
# Disable in host metadata
metahost1["resolve_host"] = False
ext_id = get_external_id(host1_aws, metahost1, provisioning_config)
assert ext_id == host1_aws.ip_addr
# Disable in provider
del metahost1["resolve_host"]
provisioning_config["aws"]["resolve_host"] = False
ext_id = get_external_id(host1_aws, metahost1, provisioning_config)
assert ext_id == host1_aws.ip_addr
# Explicitly enabled in provider
provisioning_config["aws"]["resolve_host"] = True
ext_id = get_external_id(host1_aws, metahost1, provisioning_config)
assert ext_id == dns
# Resolution enabled, but nothing is resolved
mock_resolve.return_value = None
ext_id = get_external_id(host1_aws, metahost1, provisioning_config)
assert ext_id == host1_aws.ip_addr
| 1,907 |
examples/example-2.py
|
itzmanish/python-loggable-decorator
| 0 |
2023925
|
# -*- coding: utf-8 -*-
import logging
import sys
# this is just an example config
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG
)
from loggable import loggable_class as loggable
@loggable
class Example2(object):
def decorated_method_one(self):
self.logger.debug("Debug decorated_method_with_name")
print("This is a decorated method")
def decorated_method_two(self):
self.logger.debug("Debug decorated_method_two")
print("This is a decorated method")
if __name__ == "__main__":
Example2().decorated_method_one()
Example2().decorated_method_two()
| 621 |
tests/test_config_loader.py
|
atten/django-docker-helpers
| 8 |
2023976
|
# noinspection PyPackageRequirements
import pytest
import os
from django_docker_helpers.config import ConfigLoader, exceptions
from django_docker_helpers.config.backends import *
from django_docker_helpers.utils import mp_serialize_dict
pytestmark = pytest.mark.config_loader
REDIS_HOST = os.getenv('REDIS_HOST', '127.0.0.1')
REDIS_PORT = os.getenv('REDIS_PORT', 6379)
CONSUL_HOST = os.getenv('CONSUL_HOST', '127.0.0.1')
CONSUL_PORT = os.getenv('CONSUL_PORT', 8500)
@pytest.fixture
def store_mpt_consul_config():
import consul
sample = {
'project': {
'variable': 2
}
}
c = consul.Consul(host=CONSUL_HOST, port=CONSUL_PORT)
for path, value in mp_serialize_dict(sample, separator='/'):
c.kv.put(path, value)
return c
@pytest.fixture
def store_consul_config():
import consul
from yaml import dump
sample = {
'some': {
'variable': 2,
}
}
c = consul.Consul(host=CONSUL_HOST, port=CONSUL_PORT)
data = dump(sample).encode()
c.kv.put('my/service/config.yml', data)
return c
@pytest.fixture
def store_mpt_redis_config():
import redis
sample = {
'project': {
'i': {
'am': {
'redis': True
}
}
}
}
c = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)
for path, value in mp_serialize_dict(sample, separator='.'):
c.set(path, value)
c.set('my-prefix:%s' % path, value)
return c
@pytest.fixture
def store_redis_config():
from redis import Redis
from yaml import dump
sample = {
'some': {
'variable': 44,
'brutal': 666
}
}
c = Redis(host=REDIS_HOST, port=REDIS_PORT)
data = dump(sample).encode()
c.set('my/conf/service/config.yml', data)
return c
@pytest.fixture
def loader():
env = {
'PROJECT__DEBUG': 'false'
}
parsers = [
EnvironmentParser(scope='project', env=env),
MPTConsulParser(host=CONSUL_HOST, port=CONSUL_PORT, scope='project'),
ConsulParser('my/service/config.yml', host=CONSUL_HOST, port=CONSUL_PORT),
MPTRedisParser(host=REDIS_HOST, port=REDIS_PORT, scope='project'),
RedisParser('my/conf/service/config.yml', host=REDIS_HOST, port=REDIS_PORT),
YamlParser(config='./tests/data/config.yml', scope='project'),
]
return ConfigLoader(parsers=parsers)
# noinspection PyMethodMayBeStatic,PyShadowingNames,PyUnusedLocal
class ConfigLoaderTest:
def test__priority(self,
loader: ConfigLoader,
store_mpt_consul_config,
store_mpt_redis_config,
store_consul_config,
store_redis_config):
assert loader.get('debug') == 'false', 'Ensure value is taken from env'
assert loader.get('debug', coerce_type=bool) is False, 'Ensure value is coercing properly for env'
assert loader.get('variable', coerce_type=int) == 2, 'Ensure consul MPT backend attached'
assert loader.get('i.am.redis', coerce_type=bool) is True, 'Ensure redis MPT backend attached'
assert loader.get('some.variable', coerce_type=int) == 2, 'Ensure consul backend attached'
assert loader.get('some.brutal', coerce_type=int) == 666, 'Ensure redis backend attached'
def test__availability(self, loader: ConfigLoader):
assert loader.get('name') == 'wroom-wroom'
def test__default(self, loader: ConfigLoader):
sentinel = object()
assert loader.get('nonexi', default=sentinel) is sentinel
def test__from_env__raises_on_empty_values(self):
with pytest.raises(ValueError):
ConfigLoader.from_env([], {})
def test__from_env(self):
env = {
'CONFIG__PARSERS': 'EnvironmentParser,RedisParser,YamlParser',
'ENVIRONMENTPARSER__SCOPE': 'nested',
'YAMLPARSER__CONFIG': './tests/data/config.yml',
'REDISPARSER__HOST': 'wtf.test',
'NESTED__VARIABLE': 'i_am_here',
}
loader = ConfigLoader.from_env(env=env)
assert [type(p) for p in loader.parsers] == [EnvironmentParser, RedisParser, YamlParser]
assert loader.get('variable') == 'i_am_here', 'Ensure env copied from ConfigLoader'
with pytest.raises(Exception):
loader.get('nothing.here')
loader = ConfigLoader.from_env(env=env, silent=True)
assert loader.get('nothing.here', True) is True
loader = ConfigLoader.from_env(parser_modules=['EnvironmentParser'], env={})
assert loader.parsers
def test__import_parsers(self):
parsers = list(ConfigLoader.import_parsers([
'EnvironmentParser',
'django_docker_helpers.config.backends.YamlParser'
]))
assert parsers == [EnvironmentParser, YamlParser]
def test__load_parser_options_from_env(self):
env = {
'REDISPARSER__ENDPOINT': 'go.deep',
'REDISPARSER__HOST': 'my-host',
'REDISPARSER__PORT': '66',
}
res = ConfigLoader.load_parser_options_from_env(RedisParser, env)
assert res == {'endpoint': 'go.deep', 'host': 'my-host', 'port': 66}
env = {
'ENVIRONMENTPARSER__SCOPE': 'deep',
}
res = ConfigLoader.load_parser_options_from_env(EnvironmentParser, env)
assert res == {'scope': 'deep'}
def test__config_read_queue(self,
loader: ConfigLoader,
store_mpt_consul_config,
store_mpt_redis_config,
store_consul_config,
store_redis_config):
loader.get('some.variable')
loader.get('some.brutal')
loader.get('debug', coerce_type=bool)
loader.get('i.am.redis')
loader.get('variable')
loader.get('name')
loader.get('nothing.here', 'very long string lol')
loader.get('secret')
loader.get('something.long', list(range(100)))
loader.get('something.long.q', '=' * 80)
assert loader.config_read_queue
assert '\033[0m' in loader.format_config_read_queue(use_color=True)
assert '\033[0m' not in loader.format_config_read_queue(use_color=False)
loader.print_config_read_queue(use_color=True)
def test__shortcut_for_get_method(self, loader: ConfigLoader):
assert loader('some.variable')
def test__get_with_required(self, loader: ConfigLoader):
assert loader.get('some.variable', required=True)
with pytest.raises(exceptions.RequiredValueIsEmpty):
loader.get('some.nonexistent_var', required=True)
| 6,801 |
tests/qctests/test_qc_woa_normbias.py
|
jmetteUni/CoTeDe-modified
| 35 |
2023766
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
"""
from datetime import datetime
import numpy as np
from numpy import ma
from cotede.qctests import WOA_NormBias, woa_normbias
from cotede.qc import ProfileQC
from ..data import DummyData
def test_woa_normbias_standard_dataset():
profile = DummyData()
features = woa_normbias(profile, "TEMP")
for v in [
"woa_mean",
"woa_std",
"woa_nsamples",
"woa_se",
"woa_bias",
"woa_normbias",
]:
assert v in features
def test_woa_normbias_invalid_position():
profile = DummyData()
assert "LONGITUDE" in profile.attrs
profile.attrs["LONGITUDE"] = 38
features = woa_normbias(profile, "TEMP")
for v in [
"woa_mean",
"woa_std",
"woa_se",
"woa_bias",
"woa_normbias",
]:
assert np.isnan(features[v]).all()
def test_woa_normbias_invalid_position_track():
alongtrack = {
"time": ["2000-01-01", "2000-01-02", "2000-01-03"],
"DEPTH": np.array([0, 0, 0]),
"latitude": [14.9, 15, 15.1],
"longitude": [38, 38.1, 38],
"TEMP": [25, 25, 25],
}
features = woa_normbias(alongtrack, "TEMP")
for v in [
"woa_mean",
"woa_std",
"woa_se",
"woa_bias",
"woa_normbias",
]:
assert np.isnan(features[v]).all()
def test_woa_track():
alongtrack = {
"time": ["2000-01-01", "2000-01-02", "2000-01-03"],
"DEPTH": [0, 0, 0],
"latitude": [14.9, 15, 15.1],
"longitude": [-38, -38.1, -38],
"TEMP": [25, 25, 25],
}
features = woa_normbias(alongtrack, "TEMP")
for v in [
"woa_mean",
"woa_std",
"woa_nsamples",
"woa_se",
"woa_bias",
"woa_normbias",
]:
assert v in features
def test_standard_dataset():
"""Test WOA_NormBias with a standard dataset
"""
flags = {
"woa_normbias": np.array(
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 9], dtype="i1"
)
}
profile = DummyData()
cfg = {"threshold": 3}
y = WOA_NormBias(profile, "TEMP", cfg, autoflag=True)
assert len(y.features) > 0
for f in y.flags:
assert np.allclose(y.flags[f], flags[f], equal_nan=True)
def test_features_non_masked_array():
"""WOA_NormBias should return features type array
Originally I used masked arrays, which is the standard for OceansDB but
recently I moved to regular arrays using NaN.
"""
profile = DummyData()
cfg = {"threshold": 3}
y = WOA_NormBias(profile, "TEMP", cfg, autoflag=True)
assert len(y.features) > 0
for v in y.features:
assert not isinstance(y.features[v], ma.MaskedArray)
def test_basic():
"""
"""
profile = DummyData()
cfg = {
"TEMP": {"woa_normbias": {"threshold": 3, "flag_bad": 3}},
"PSAL": {"woa_normbias": {"threshold": 3, "flag_bad": 3}},
}
pqc = ProfileQC(profile, cfg=cfg)
assert "woa_normbias" in pqc.flags["TEMP"]
assert sorted(np.unique(pqc.flags["TEMP"]["woa_normbias"])) == [1, 3, 9]
assert sorted(np.unique(pqc.flags["PSAL"]["woa_normbias"])) == [1, 9]
def test_attribute():
profile = DummyData()
cfg = {"TEMP": {"woa_normbias": {"threshold": 3}}}
pqc = ProfileQC(profile, cfg=cfg)
assert "woa_normbias" in pqc.flags["TEMP"]
assert pqc.flags["TEMP"]["woa_normbias"].shape == profile.data["TEMP"].shape
assert np.unique(pqc.features["TEMP"]["woa_mean"]).size > 1
assert np.allclose(
pqc.flags["TEMP"]["woa_normbias"],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 9],
equal_nan=True,
)
def test_track():
profile = DummyData()
N = profile["TEMP"].size
profile.data["LATITUDE"] = np.linspace(4, 25, N)
profile.data["LONGITUDE"] = np.linspace(-30, -38, N)
profile.data["PRES"] *= 0
# Location in data, one per measurement, has precedence on attrs
profile.attrs["LATITUDE"] = None
profile.attrs["LONGITUDE"] = None
cfg = {"TEMP": {"woa_normbias": {"threshold": 3}}}
pqc = ProfileQC(profile, cfg=cfg)
assert "woa_normbias" in pqc.flags["TEMP"]
assert pqc.flags["TEMP"]["woa_normbias"].shape == profile.data["TEMP"].shape
assert np.unique(pqc.features["TEMP"]["woa_mean"]).size > 1
assert np.allclose(
pqc.flags["TEMP"]["woa_normbias"],
[1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 9],
equal_nan=True,
)
def test_standard_error():
"""I need to improve this!!
"""
profile = DummyData()
profile.attrs["datetime"] = datetime(2016, 6, 4)
profile.attrs["LATITUDE"] = -30.0
profile.attrs["LONGITUDE"] = 15
profile.data["PRES"] = np.array([2.0, 5.0, 6.0, 21.0, 44.0, 79.0, 1000, 5000])
profile.data["TEMP"] = np.array([16.0, 15.6, 15.9, 15.7, 15.2, 14.1, 8.6, 2.0])
cfg = {"TEMP": {"woa_normbias": {"threshold": 10}}}
pqc = ProfileQC(profile, cfg=cfg)
assert "woa_normbias" in pqc.flags["TEMP"]
assert pqc.flags["TEMP"]["woa_normbias"].shape == profile.data["TEMP"].shape
assert (pqc.flags["TEMP"]["woa_normbias"] == [1, 1, 1, 1, 1, 1, 3, 0]).all()
cfg = {"TEMP": {"woa_normbias": {"threshold": 10, "use_standard_error": False}}}
pqc_noSE = ProfileQC(profile, cfg=cfg)
assert "woa_normbias" in pqc.flags["TEMP"]
assert pqc.flags["TEMP"]["woa_normbias"].shape == profile.data["TEMP"].shape
assert (pqc.flags["TEMP"]["woa_normbias"] == [1, 1, 1, 1, 1, 1, 3, 0]).all()
cfg = {"TEMP": {"woa_normbias": {"threshold": 10, "use_standard_error": True}}}
pqc_SE = ProfileQC(profile, cfg=cfg)
assert "woa_normbias" in pqc.flags["TEMP"]
assert pqc.flags["TEMP"]["woa_normbias"].shape == profile.data["TEMP"].shape
assert (pqc.flags["TEMP"]["woa_normbias"] == [1, 1, 1, 1, 1, 1, 3, 0]).all()
| 5,909 |
test.py
|
Samadarshi-Maity/Casimir-programming
| 0 |
2024409
|
print('hello world')
"""
<NAME>
<NAME>
"""
import numpy as np
'''
Samadarshi: Circumference of this beautiful circle
'''
def circumference_of_circle(r):
''' Returns circumference of a circle'''
    return 2*np.pi*r
def surface_area_of_circle(r):
    ''' Returns surface area of a circle'''
    return np.pi*r**2
| 280 |
mwclientcli/commands/__init__.py
|
adundovi/mwclientcli
| 2 |
2023011
|
__all__ = ['Edit', 'Show', 'List', 'Search', 'Move', 'Remove', 'Upload', 'Open']
from edit import Edit
from show import Show
from list import List
from search import Search
from move import Move
from remove import Remove
from upload import Upload
from open import Open
| 270 |
samples/network/nat/manage_nat_gateway.py
|
leigharubin/azure-samples-python-management
| 47 |
2023534
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource import ResourceManagementClient
def main():
SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
GROUP_NAME = "testgroupx"
NAT_GATEWAY = "nat_gatewayxxyyzz"
PUBLIC_IP_ADDRESS = "publicipaddress"
PUBLIC_IP_PREFIX = "publicipprefix"
# Create client
# For other authentication approaches, please see: https://pypi.org/project/azure-identity/
resource_client = ResourceManagementClient(
credential=DefaultAzureCredential(),
subscription_id=SUBSCRIPTION_ID
)
network_client = NetworkManagementClient(
credential=DefaultAzureCredential(),
subscription_id=SUBSCRIPTION_ID
)
# Create resource group
resource_client.resource_groups.create_or_update(
GROUP_NAME,
{"location": "eastus"}
)
# - init depended resources -
# Create public ip address
network_client.public_ip_addresses.begin_create_or_update(
GROUP_NAME,
PUBLIC_IP_ADDRESS,
{
'location': "eastus",
'public_ip_allocation_method': 'Static',
'idle_timeout_in_minutes': 4,
'sku': {
'name': 'Standard'
}
}
).result()
# Create public ip prefix
network_client.public_ip_prefixes.begin_create_or_update(
GROUP_NAME,
PUBLIC_IP_PREFIX,
{
"location": "eastus",
"prefix_length": "30",
"sku": {
"name": "Standard"
}
}
).result()
# - end -
# Create nat gateway
nat_gateway = network_client.nat_gateways.begin_create_or_update(
GROUP_NAME,
NAT_GATEWAY,
{
"location": "eastus",
"sku": {
"name": "Standard"
},
"public_ip_addresses": [
{
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + GROUP_NAME + "/providers/Microsoft.Network/publicIPAddresses/" + PUBLIC_IP_ADDRESS
}
],
"public_ip_prefixes": [
{
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + GROUP_NAME + "/providers/Microsoft.Network/publicIPPrefixes/" + PUBLIC_IP_PREFIX
}
]
}
).result()
print("Create nat gateway:\n{}".format(nat_gateway))
# Get nat gateway
nat_gateway = network_client.nat_gateways.get(
GROUP_NAME,
NAT_GATEWAY
)
print("Get nat gateway:\n{}".format(nat_gateway))
# Update nat gateway
nat_gateway = network_client.nat_gateways.update_tags(
GROUP_NAME,
NAT_GATEWAY,
{
"tags": {
"tag1": "value1",
"tag2": "value2"
}
}
)
print("Update nat gateway:\n{}".format(nat_gateway))
# Delete nat gateway
nat_gateway = network_client.nat_gateways.begin_delete(
GROUP_NAME,
NAT_GATEWAY
).result()
print("Delete nat gateway.\n")
# Delete Group
resource_client.resource_groups.begin_delete(
GROUP_NAME
).result()
if __name__ == "__main__":
main()
| 3,561 |
visorspass/visor/migrations/0023_alter_indicador_nombre.py
|
RobertoMarroquin/spass
| 0 |
2022817
|
# Generated by Django 3.2.5 on 2021-11-30 15:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('visor', '0022_auto_20211111_2017'),
]
operations = [
migrations.AlterField(
model_name='indicador',
name='nombre',
field=models.CharField(blank=True, max_length=300, null=True, verbose_name='Nombre'),
),
]
| 434 |
studygroups/signals.py
|
p2pu/learning-circles
| 10 |
2022957
|
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from studygroups.utils import render_to_string_ctx
from django.core.mail import EmailMultiAlternatives, send_mail
from django.conf import settings
from django.utils import timezone
from studygroups.email_helper import render_html_with_css
from .models import Application
from .models import StudyGroup
from .models import Course
from .models import get_study_group_organizers
from .utils import html_body_to_text
from .utils import use_language
from advice.models import Advice
import pytz
@receiver(post_save, sender=Application)
def handle_new_application(sender, instance, created, **kwargs):
""" Send welcome message to learner introducing them to their facilitator """
if not created:
return
application = instance
# get a random piece of advice
# TODO only supported in English atm
advice = None
if application.study_group.language == 'en':
advice = Advice.objects.order_by('?').first()
# activate language and timezone for message reminder
with use_language(application.study_group.language), timezone.override(pytz.timezone(application.study_group.timezone)):
# Send welcome message to learner
learner_signup_subject = render_to_string_ctx(
'studygroups/email/learner_signup-subject.txt', {
'application': application,
'advice': advice,
}
).strip('\n')
learner_signup_html = render_html_with_css(
'studygroups/email/learner_signup.html', {
'application': application,
'advice': advice,
}
)
learner_signup_body = html_body_to_text(learner_signup_html)
to = [application.email]
# CC facilitator and put in reply-to
welcome_message = EmailMultiAlternatives(
learner_signup_subject,
learner_signup_body,
settings.DEFAULT_FROM_EMAIL,
to,
cc=[application.study_group.facilitator.email],
reply_to=[application.study_group.facilitator.email]
)
welcome_message.attach_alternative(learner_signup_html, 'text/html')
welcome_message.send()
@receiver(post_save, sender=StudyGroup)
def handle_new_study_group_creation(sender, instance, created, **kwargs):
if not created:
return
study_group = instance
context = {
'study_group': study_group,
}
subject = render_to_string_ctx('studygroups/email/learning_circle_created-subject.txt', context).strip(' \n')
html_body = render_html_with_css('studygroups/email/learning_circle_created.html', context)
text_body = html_body_to_text(html_body)
# on all learning circles, CC p2pu
cc = [settings.TEAM_EMAIL]
# if the user is part of a team, send to the organizer(s)
cc += [ o.email for o in get_study_group_organizers(study_group)]
    # if there is a question, send to the welcoming committee
if study_group.facilitator_concerns:
cc += [settings.COMMUNITY_MANAGER]
notification = EmailMultiAlternatives(
subject,
text_body,
settings.DEFAULT_FROM_EMAIL,
[study_group.facilitator.email],
cc=cc,
reply_to=[study_group.facilitator.email] + cc
)
notification.attach_alternative(html_body, 'text/html')
notification.send()
| 3,381 |
Attendance_Management_System/vcet/student/migrations/0016_auto_20180402_1511.py
|
jainritik153/ATTENDANCE-MANAGEMENT-SYSTEM
| 0 |
2023552
|
# Generated by Django 2.0.2 on 2018-04-02 15:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0015_auto_20180402_1016'),
]
operations = [
migrations.AddField(
model_name='aoa',
name='_4',
field=models.CharField(default='Not', max_length=10),
),
migrations.AddField(
model_name='aoa',
name='_5',
field=models.CharField(default='Not', max_length=10),
),
migrations.AddField(
model_name='aoa',
name='_6',
field=models.CharField(default='Not', max_length=10),
),
migrations.AddField(
model_name='aoa',
name='_7',
field=models.CharField(default='Not', max_length=10),
),
]
| 869 |
Exercício feitos pela primeira vez/ex092.py
|
Claayton/pythonExerciciosLinux
| 1 |
2023599
|
# Ex092
from datetime import date
ano_atual = date.today().year
cadastro = {}
cadastro['Nome'] = str(input('Nome: ')).title().strip()
ano_de_nascimento = int(input('Ano de nascimento: '))
cadastro['Idade'] = ano_atual - ano_de_nascimento
cadastro['CTPS'] = int(input('Carteira de Trabalho [digite "0" caso não tenha]: '))
if cadastro['CTPS'] != 0:
while True:
cadastro['Contratação'] = int(input('Ano de contratação: '))
if cadastro['Contratação'] > ano_de_nascimento:
break
else:
print('\033[31mSegundo os dados cadastrados')
print('essa pessoa não teria nascido ainda nessa data')
print('POR FAVOR TENTE NOVAMENTE\033[m')
    # salary and retirement only make sense when the person has a work card (CTPS != 0)
    cadastro['Salário'] = float(input('Salário: '))
    cadastro['Aposentadoria'] = (cadastro['Contratação'] + 35) - ano_de_nascimento
print('-' * 30)
for c, j in cadastro.items():
print(f'\033[36m{c}\033[m tem o valor \033[34m{j}\033[m')
print('-' * 30)
| 963 |
ntype.py
|
accept8605/OOT-Rando-with-working-Navi
| 2 |
2022709
|
# Written by mzxrules
import struct
class uint16:
_struct = struct.Struct('>H')
def write(buffer, address, value):
struct.pack_into('>H', buffer, address, value)
def read(buffer, address=0):
return uint16._struct.unpack_from(buffer, address)[0]
def bytes(value):
value = value & 0xFFFF
return [(value >> 8) & 0xFF, value & 0xFF]
def value(values):
return (values[0] << 8) | values[1]
class uint32:
_struct = struct.Struct('>I')
def write(buffer, address, value):
struct.pack_into('>I', buffer, address, value)
def read(buffer, address=0):
return uint32._struct.unpack_from(buffer, address)[0]
def bytes(value):
value = value & 0xFFFFFFFF
return [(value >> 24) & 0xFF, (value >> 16) & 0xFF, (value >> 8) & 0xFF, value & 0xFF]
def value(values):
return (values[0] << 24) | (values[1] << 16) | (values[2] << 8) | values[3]
class int32:
_struct = struct.Struct('>i')
def write(buffer, address, value):
struct.pack_into('>i', buffer, address, value)
def read(buffer, address=0):
return int32._struct.unpack_from(buffer, address)[0]
def bytes(value):
value = value & 0xFFFFFFFF
return [(value >> 24) & 0xFF, (value >> 16) & 0xFF, (value >> 8) & 0xFF, value & 0xFF]
def value(values):
value = (values[0] << 24) | (values[1] << 16) | (values[2] << 8) | values[3]
if value >= 0x80000000:
value ^= 0xFFFFFFFF
value += 1
return value
class uint24:
def write(buffer, address, value):
byte_arr = bytes(value)
buffer[address:address + 3] = byte_arr[0:3]
def read(buffer, address=0):
return (buffer[address+0] << 16) | (buffer[address+1] << 8) | buffer[address+2]
def bytes(value):
value = value & 0xFFFFFF
return [(value >> 16) & 0xFF, (value >> 8) & 0xFF, value & 0xFF]
def value(values):
return (values[0] << 16) | (values[1] << 8) | values[2]
class BigStream(object):
def __init__(self, buffer:bytearray):
self.last_address = 0
self.buffer = buffer
def seek_address(self, address=None, delta=None):
if delta is None:
self.last_address = address
else:
self.last_address += delta
def eof(self):
return self.last_address >= len(self.buffer)
def read_byte(self, address=None):
if address == None:
address = self.last_address
self.last_address = address + 1
return self.buffer[address]
def read_bytes(self, address=None, length=1):
if address == None:
address = self.last_address
self.last_address = address + length
return self.buffer[address : address + length]
def read_int16(self, address=None):
if address == None:
address = self.last_address
return uint16.value(self.read_bytes(address, 2))
def read_int24(self, address=None):
if address == None:
address = self.last_address
return uint24.value(self.read_bytes(address, 3))
def read_int32(self, address=None):
if address == None:
address = self.last_address
return uint32.value(self.read_bytes(address, 4))
def write_byte(self, address, value):
if address == None:
address = self.last_address
self.buffer[address] = value
self.last_address = address + 1
def write_sbyte(self, address, value):
if address == None:
address = self.last_address
self.write_bytes(address, struct.pack('b', value))
def write_int16(self, address, value):
if address == None:
address = self.last_address
self.write_bytes(address, uint16.bytes(value))
def write_int24(self, address, value):
if address == None:
address = self.last_address
self.write_bytes(address, uint24.bytes(value))
def write_int32(self, address, value):
if address == None:
address = self.last_address
self.write_bytes(address, uint32.bytes(value))
def write_f32(self, address, value:float):
if address == None:
address = self.last_address
self.write_bytes(address, struct.pack('>f', value))
def write_bytes(self, startaddress, values):
if startaddress == None:
startaddress = self.last_address
for i, value in enumerate(values):
self.write_byte(startaddress + i, value)
def write_int16s(self, startaddress, values):
if startaddress == None:
startaddress = self.last_address
for i, value in enumerate(values):
self.write_int16(startaddress + (i * 2), value)
def write_int24s(self, startaddress, values):
if startaddress == None:
startaddress = self.last_address
for i, value in enumerate(values):
self.write_int24(startaddress + (i * 3), value)
def write_int32s(self, startaddress, values):
if startaddress == None:
startaddress = self.last_address
for i, value in enumerate(values):
self.write_int32(startaddress + (i * 4), value)
def append_byte(self, value):
self.buffer.append(value)
def append_sbyte(self, value):
self.append_bytes(struct.pack('b', value))
def append_int16(self, value):
self.append_bytes(uint16.bytes(value))
def append_int24(self, value):
self.append_bytes(uint24.bytes(value))
def append_int32(self, value):
self.append_bytes(uint32.bytes(value))
def append_f32(self, value:float):
self.append_bytes(struct.pack('>f', value))
def append_bytes(self, values):
for value in values:
self.append_byte(value)
def append_int16s(self, values):
for value in values:
self.append_int16(value)
def append_int24s(self, values):
for value in values:
self.append_int24(value)
def append_int32s(self, values):
for value in values:
self.append_int32(value)
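# --- Illustrative usage sketch (not part of the original module) ---
# Demonstrates the big-endian round-trip behaviour of the helpers above on an
# in-memory bytearray; the values and addresses are made up for the example.
if __name__ == '__main__':
    stream = BigStream(bytearray(8))
    stream.write_int16(0, 0x1234)        # bytes 0-1 become 12 34
    stream.write_int32(2, 0xDEADBEEF)    # bytes 2-5 become DE AD BE EF
    assert stream.read_int16(0) == 0x1234
    assert stream.read_int32(2) == 0xDEADBEEF
    assert uint16.bytes(0x1234) == [0x12, 0x34]
    assert uint24.value([0x01, 0x02, 0x03]) == 0x010203
    print('round-trip checks passed')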
| 6,216 |
26-50/test1.py
|
tonyyzy/ProjectEuler
| 0 |
2023902
|
b_list = [2, 2, 2, 2, 2, 2, 5, 13, 5, 19, 2, 7, 7]
b_list.sort()
b_dict = {}
token = 0
counter = 0
for i in b_list:
if token == 0:
token = i
counter += 1
elif token == i:
counter += 1
else:
b_dict[token] = counter
token = i
counter = 1
b_dict[token] = counter
print(b_dict)
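# Aside (not part of the original script): collections.Counter builds the same
# frequency map without the manual token/counter bookkeeping above.
from collections import Counter
assert dict(Counter(b_list)) == b_dict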
| 355 |
src/suffix_trees/test/test_simple.py
|
udimaha/jump_model
| 0 |
2022619
|
import pytest
from src.suffix_trees.STree import starts_with, STree
def test_lcs():
a = [
[1, 2, 5, 3, 5, 4, 1], [1, 2, 5, 3, 5, 4, 14, 9, 11], [1, 2, 5, 1, 2, 5, 3, 5, 4, 1, 2, 5, 3, 5, 4],
[1, 2, 5, 3, 5, 4, 1, 1, 1, 1],
[1, 1, 1, 2, 2, 2, 5, 5, 5, 3, 3, 5, 5, 5, 4, 4, 1, 1, 1, 1, 1, 2, 5, 3, 5, 4, 1]]
st = STree(a)
assert st.lcs() == [1, 2, 5, 3, 5, 4]
def test_missing():
text = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
stree = STree(text)
assert stree.find((4, 3, 2)) == -1
assert stree.find((9, 9, 9)) == -1
assert stree.find(tuple(text) + (20,)) == -1
def test_find():
data = list(range(1, 9)) + list(range(1, 3))
st = STree(data)
assert st.find((1, 2, 3)) == 0
assert st.find_all((1, 2)) == {0, 8}
@pytest.mark.parametrize("is_tuple", (True, False))
@pytest.mark.parametrize("to_check,prefix,expected", (
([1, 2, 3, 4], [1, 2, 3], True),
([1, 2, 3, 4], [1, 2], True),
([1, 2, 3, 4], [1], True),
([1, 2, 3, 4], [], True),
([1, 2, 3, 4], [2], False),
([1, 2, 3, 4], [4, 3, 2, 1], False)
))
def test_starts_with(is_tuple: bool, to_check, prefix, expected):
if is_tuple:
prefix = tuple(prefix)
assert expected == starts_with(to_check, prefix)
| 1,284 |
lib/ftp_sync.py
|
dlf412/comscore
| 1 |
2023692
|
#!/usr/bin/env python
# encoding: utf-8
import os
import gzip
import shutil
import traceback
import pysftp
from functools import partial
from os.path import dirname, exists, basename
from utils import md5sum
class FTPSyncError(Exception):
pass
class FileNotExists(FTPSyncError):
pass
class CompressError(FTPSyncError):
pass
class InvalidFilelist(FTPSyncError):
pass
class MD5CheckError(FTPSyncError):
pass
class DownloadError(FTPSyncError):
pass
class UploadError(FTPSyncError):
pass
class ConnectionError(FTPSyncError):
pass
class FTPSync(object):
conn = None
def __init__(self, host, username=None, password=<PASSWORD>, port=22):
cnopts = pysftp.CnOpts()
cnopts.hostkeys = None
try:
self.conn = pysftp.Connection(host,
username=username,
password=password,
port=port, cnopts=cnopts)
except:
raise ConnectionError("connection failed, %s" %
traceback.format_exc())
def download_d(self, remotedir, localdir):
try:
self.conn.get_d(remotedir, localdir)
except:
raise DownloadError("download error, error: %s" %
traceback.format_exc())
def download(self, remotefile, localfile, files):
try:
if type(files) == str:
files = [files]
for f in files:
self.conn.get(os.path.join(remotefile, basename(f)),
os.path.join(localfile, basename(f)))
except:
raise DownloadError("download error, error: %s" %
traceback.format_exc())
def upload(self, localdir, remotedir, files):
try:
if type(files) == str:
files = [files]
if not self.conn.exists(remotedir):
self.conn.makedirs(remotedir)
for f in files:
self.conn.put(os.path.join(localdir, basename(f)),
os.path.join(remotedir, basename(f)))
except:
raise UploadError("upload error, error:%s" %
traceback.format_exc())
def exists(self, src):
return self.conn.exists(src)
def __del__(self):
if self.conn:
self.conn.close()
def _remote_path(root_path, day):
_1st = root_path
_2nd = day[:4] + '_' + day[4:6]
_3rd = day
return os.path.join(_1st, _2nd, _3rd)
def _output_files(dailydir, suffix='.csv'):
    # NOTE: this directory lives on cloudfs, which only syncs its contents from
    # the OSS server after the first access, so a single `listdir` call can miss
    # files; the directory is therefore listed twice on purpose.
    csvs = os.listdir(dailydir)
    csvs = os.listdir(dailydir)
    csvs = [c for c in csvs if c.endswith(suffix)]
return [os.path.join(dailydir, c) for c in csvs]
def _filelist(dailydir, files):
fname = os.path.join(dailydir, "filelist.txt")
with open(fname, 'w') as fd:
for f in files:
fd.write("%s %s\n" % (basename(f), md5sum(f)))
return fname
def gz(src, files, dst=None):
if dst is None:
dst = src
for f in files:
with open(os.path.join(src, f), 'r') as f_in, \
gzip.open(os.path.join(dst, f + ".gz"), 'wb') as f_out:
f_out.writelines(f_in)
return [os.path.join(dst, f + ".gz") for f in files]
def ungz(src, files, dst=None):
if dst is None:
dst = src
for f in files:
with gzip.open(os.path.join(src, f), 'r') as f_in, \
open(os.path.join(dst, f[:-3]), 'wb') as f_out:
f_out.writelines(f_in)
return [os.path.join(dst, f[:-3]) for f in files]
def valid_files(dailydir, filelist):
files = []
with open(os.path.join(dailydir, filelist), "r") as f:
for line in f:
if not line.strip():
continue
file_name, md5 = line.strip().split()
abs_file = os.path.join(dailydir, file_name)
if not exists(abs_file):
raise InvalidFilelist("%s not exist" % file_name)
if not md5sum(abs_file) == md5:
raise MD5CheckError("md5 check failed: %s" % (abs_file))
files.append(abs_file)
return files
def daily_download(ftpsync, day, localdir, uncompress=False):
'''
download sample url file list by one day
'''
indir = _remote_path('DailyInput', day)
dailydir = os.path.join(localdir, indir)
if not exists(dailydir):
os.makedirs(dailydir)
if not ftpsync.exists(os.path.join(indir, 'filelist.txt')):
raise FileNotExists("filelist.txt not exists in {%s}" % indir)
ftpsync.download_d(indir, dailydir)
files = valid_files(dailydir, "filelist.txt")
if not files:
raise FileNotExists("filelist.txt is empty, in {%s}" % indir)
if uncompress:
files = ungz(dailydir, [basename(f) for f in files])
return files
def daily_upload(ftpsync, day, localdir, compress=True):
'''
upload results by one day
'''
outdir = _remote_path('DailyReturn', day)
dailydir = os.path.join(localdir, outdir)
suffix = '.csv.gz' if compress else '.csv'
outfiles = _output_files(dailydir, suffix=suffix)
if not compress:
outfiles = gz(dailydir, outfiles)
ftpsync.upload(dailydir, outdir, outfiles)
filelist = _filelist(dailydir, outfiles)
ftpsync.upload(dailydir, outdir, filelist)
return outfiles
def reference_download(ftpsync, day, localdir):
'''
download reference video
'''
indir = _remote_path('ReferenceCreative', day)
download_dir = localdir
filelist = 'filelist.txt'
if not ftpsync.exists(os.path.join(indir, filelist)):
raise FileNotExists("filelist.txt not exists in {%s}" % indir)
if not exists(download_dir):
os.makedirs(download_dir)
ftpsync.download(indir, download_dir, filelist)
reference_info = []
md5_error_file = []
invalid_file = []
with open(os.path.join(download_dir, filelist), "r") as f:
for line in f:
video = line.strip()
if video:
reference_info.append(video)
meta = os.path.join(indir, video)
if not ftpsync.exists(meta):
invalid_file.append(video)
else:
local_meta = os.path.join(localdir, video)
if not exists(local_meta) or \
md5sum(local_meta) != video[:32]:
ftpsync.download(indir, download_dir, [local_meta])
if md5sum(local_meta) != video[:32]:
md5_error_file.append(video)
if not reference_info:
raise FileNotExists("filelist.txt is empty, on {}".format(day))
return (reference_info, md5_error_file, invalid_file)
def ingest_upload(ftpsync, day, localdir):
'''
ingested csv file upload,
filelist.txt as last uploaded file
'''
outdir = _remote_path('AdsIngestion', day)
upload_dir = os.path.join(localdir, outdir)
result_files = _output_files(upload_dir, suffix='.csv.gz')
filelist = _filelist(upload_dir, result_files)
ftpsync.upload(upload_dir, outdir, result_files)
ftpsync.upload(upload_dir, outdir, filelist)
return result_files
def test_download():
ftpsync = FTPSync('ftp.comscore.com',
username='Vobile',
password='<PASSWORD>')
day = "20160801"
localdir = "/tmp"
    print(daily_download(ftpsync, day, localdir, uncompress=True))
# test_download()
def test_upload():
ftpsync = FTPSync('ftp.comscore.com',
username='Vobile',
password='<PASSWORD>')
day = "20160823"
localdir = "/tmp"
    print(daily_upload(ftpsync, day, localdir, compress=False))
# test_upload()
def test_reference_download():
ftpsync = FTPSync('ftp.comscore.com',
username='Vobile',
password='<PASSWORD>')
day = "20160801"
localdir = "/tmp"
    print(reference_download(ftpsync, day, localdir))
# test_reference_download()
def test_ingest_upload():
ftpsync = FTPSync('ftp.comscore.com',
username='Vobile',
password='<PASSWORD>')
day = "20160801"
localdir = "/tmp"
    print(ingest_upload(ftpsync, day, localdir))
| 8,599 |
api/questions/sidewalk_search.py
|
scarletstudio/transithealth
| 2 |
2024138
|
from api.utils.database import rows_to_dicts
class SidewalkCafePermitSearch:
"""
Search for sidewalk cafe permits.
"""
def __init__(self, con):
self.con = con
def search_permits(self, search):
"""
Returns permits for restaurants that match the search query.
"""
# Create a view for matching results
statements = """
-- Remove this view if it exists
DROP VIEW IF EXISTS sidewalk_search;
-- Get matching sidewalk permits
CREATE VIEW sidewalk_search AS
SELECT *
FROM sidewalk_cafe
WHERE LOWER(doing_business_as_name) LIKE '%{search}%';
""".format(search=search)
# Then sort the results
query = """
SELECT *
FROM sidewalk_search
ORDER BY issued_date_dt DESC
"""
cur = self.con.cursor()
cur.executescript(statements)
cur.execute(query)
rows = rows_to_dicts(cur, cur.fetchall())
return rows
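# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Assumes an sqlite3 database file that already contains the `sidewalk_cafe`
# table; the file name below is made up. Because `search` is interpolated
# straight into the SQL above, it should only be given trusted input.
if __name__ == "__main__":
    import sqlite3
    con = sqlite3.connect("transithealth.db")  # hypothetical database file
    permits = SidewalkCafePermitSearch(con).search_permits("coffee")
    print(len(permits), "matching permits")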
| 1,042 |
section-22-unittesting/03_calculator/calculator.py
|
mugan86/bootcamp-basic-to-expert-from-scratch
| 31 |
2023145
|
class Calculator:
def add(self, num1, num2):
return num1 + num2
def substract(self, num1, num2):
return num1 - num2
def multiply(self, num1, num2):
return num1 * num2
def division(self, num1, num2):
return num1 / num2
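# --- Minimal unittest sketch (an illustration; the course's own test file may differ) ---
# Exercises the four operations above with the standard library's unittest module.
if __name__ == "__main__":
    import unittest
    class CalculatorTest(unittest.TestCase):
        def setUp(self):
            self.calc = Calculator()
        def test_add(self):
            self.assertEqual(self.calc.add(2, 3), 5)
        def test_substract(self):
            self.assertEqual(self.calc.substract(5, 3), 2)
        def test_multiply(self):
            self.assertEqual(self.calc.multiply(4, 3), 12)
        def test_division(self):
            self.assertAlmostEqual(self.calc.division(10, 4), 2.5)
    unittest.main()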
| 269 |
Depatchport.py
|
zj3t/FirmAE
| 0 |
2023453
|
#!/usr/bin/env python3
import os
import subprocess
def command(cmd):
try:
result = subprocess.check_output(cmd, shell=True)
result = result.decode().split('\n')[:-1]
except:
result = ''
return result
if __name__ == "__main__":
os.chdir('./scripts')
for i in range(1,10):
find ="listen=:"+str(i)+"00"
match = "listen=:200"
cmd = 'find ./makeNetwork.py -name "*" -exec perl -pi -e "s/'+str(find)+'/'+str(match)+'/g" {} \;'
command(cmd+' 2> /dev/null')
print("Success Patch.")
os.chdir('../')
| 579 |
src/python3_learn_video/pickle_module.py
|
HuangHuaBingZiGe/GitHub-Demo
| 0 |
2024087
|
"""
pickle:泡菜
数据类型和二进制数据之间的转换工具
将一些json或者数据写入到单独的一个文件里,使代码更加的有逻辑性,更优雅
"""
import pickle
print('--------------------------------------------------')
my_list = [123, 3.14, '小甲鱼', ['another list']]
pickle_file = open('wyz.pkl', 'wb')  # open in binary write mode
pickle.dump(my_list, pickle_file)
pickle_file.close()
pickle_file = open('wyz.pkl', 'rb')
my_list2 = pickle.load(pickle_file)
print(my_list2)
print('--------------------------------------------------')
| 451 |
server_LDAP/account/views.py
|
CARV-ICS-FORTH/django-oauth2-oidc-example
| 1 |
2022988
|
# Copyright [2021] [FORTH-ICS]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import connection
from django.shortcuts import render, redirect
from django.contrib.auth import login, authenticate, logout
from account.forms import RegistrationForm, AccountAuthenticationForm, AccountUpdateForm
def registration_view(request):
context = {}
if request.POST:
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
email = form.cleaned_data.get('email')
raw_password = form.cleaned_data.get('<PASSWORD>')
account = authenticate(email=email, password=<PASSWORD>)
login(request, account)
return redirect('home')
else:
context['registration_form'] = form
else:
form = RegistrationForm()
context['registration_form'] = form
return render(request, 'account/register.html', context)
def logout_view(request):
logout(request)
return redirect('/')
def login_view(request):
context = {}
user = request.user
if user.is_authenticated:
return redirect("home")
if request.POST:
form = AccountAuthenticationForm(request.POST)
if form.is_valid():
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user:
login(request, user)
return redirect("home")
else:
form = AccountAuthenticationForm()
context['login_form'] = form
# print(form)
return render(request, "account/login.html", context)
def account_view(request):
if not request.user.is_authenticated:
return redirect("login")
context = {}
if request.POST:
form = AccountUpdateForm(request.POST, instance=request.user)
if form.is_valid():
form.initial = {
"email": request.POST['email'],
"username": request.POST['username'],
}
form.save()
context['success_message'] = "Updated"
else:
form = AccountUpdateForm(
initial={
"email": request.user.email,
"username": request.user.username,
}
)
context['account_form'] = form
return render(request, "account/account.html", context)
| 2,565 |
websitescanner/main.py
|
chauhanjatin10/thepythonway
| 0 |
2023445
|
from general import *
from domain_name import *
from ip_address import *
from nmap import *
from robots_txt import *
from whois import *
import os
ROOT_DIR = 'websites'
create(ROOT_DIR)
def gather(name,url):
domain_name = get_domain_name(url)
ip_address = get_ip_address(url)
nmap = get_nmap('-F',ip_address)
robots_txt = get_robots_txt(url)
whois = get_whois(domain_name)
    reports(name,url,domain_name,ip_address,nmap,robots_txt,whois)
def reports(name,url,domain_name,ip_address,nmap,robots_txt,whois):
    project_dir = ROOT_DIR + '/' + name
    create(project_dir)
    data = url + domain_name + ip_address + nmap + robots_txt + whois
    write(project_dir + '/details.txt',data)
gather('thenewsboston','https://www.thenewsboston.com/')
| 716 |
Hackerank/Python/Standard Deviation.py
|
abdzitter/Daily-Coding-DS-ALGO-Practice
| 289 |
2023851
|
n = int(input())
arr = list(map(int, input().split()))
avg = sum(arr) / len(arr)
std = (sum([(arr[x] - avg)**2 for x in range(n)])/n)**(1/2)
print(std)
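# Optional cross-check (not part of the original solution): statistics.pstdev
# computes the same population standard deviation as the formula above.
from statistics import pstdev
assert abs(std - pstdev(arr)) < 1e-6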
| 152 |
src/base.py
|
rpSebastian/LeducPoker
| 1 |
2022788
|
class TreeParams():
def __init__(self):
self.root_node = None
class Node():
def __init__(self, parent_node=None):
self.board = None
self.board_string = None
self.street = None
self.current_player = None
self.bets = None
self.pot = None
self.terminal = False
self.num_bets = 0
self.children = None
self.action = None
self.node_type = None
self.cfv = None # [PC, CC]
self.range = None # [PC, CC]
self.strategy = None # [AC, CC]
self.regret = None # [PC, CC]
self.strategy_weight_sum = None # [AC, CC]
self.average_strategy = None # [AC, CC]
self.br_cfv = None # [PC, CC]
self.exploitability = None
self.reach_prop = None # [PC, CC]
self.estimate_value = None # [AC, CC]
        self.init = 1  # end of __init__: from now on __setattr__ rejects unknown attribute names
if parent_node is not None:
self.board = parent_node.board
self.board_string = parent_node.board_string
self.street = parent_node.street
self.current_player = 1 - parent_node.current_player
self.bets = parent_node.bets.clone()
self.num_bets = parent_node.num_bets
def vis(self):
outputs = "board = {}\nstreet = {}\nplayer = {}\nbets = [{}, {}]\npot = {}\nterm = {}\n".format(
self.board_string, self.street, self.current_player, self.bets[0].item(), self.bets[1].item(), self.pot, self.terminal
)
outputs += "cfv = {}\nrange = {}\nstrategy={}\nregret={}\nave_strategy={}\nbr_cfv={}\nexp={}\n".format(
self.cfv, self.range, self.strategy, self.regret, self.average_strategy, self.br_cfv, self.exploitability
)
return outputs
def __str__(self):
return "board = {}, street = {}, player = {}, bets = [{}, {}], term = {}".format(
self.board_string, self.street, self.current_player, self.bets[0].item(), self.bets[1].item(), self.terminal
)
    def __setattr__(self, name, value):
        # After `init` is set at the end of __init__, only attribute names that
        # already exist may be assigned; anything else is treated as a typo.
if 'init' in self.__dict__:
if name in self.__dict__:
self.__dict__[name] = value
else:
raise Exception("name error")
else:
self.__dict__[name] = value
class Players():
def __init__(self):
self.chance = None
self.P1 = None
self.P2 = None
class NodeTypes():
def __init__(self):
self.terminal_fold = None
self.terminal_call = None
self.check = None
self.chance_node = None
self.inner_node = None
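# --- Illustrative check of the attribute lock (not part of the original module) ---
# Node.__setattr__ only accepts names created before `init` was set in __init__,
# which turns accidental typos into immediate errors.
if __name__ == "__main__":
    node = Node()
    node.pot = 10              # existing field: accepted
    try:
        node.typo_field = 1    # unknown field: rejected
    except Exception as exc:
        print("rejected unknown attribute:", exc)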
| 2,601 |