| max_stars_repo_path (string, length 4-182) | max_stars_repo_name (string, length 6-116) | max_stars_count (int64, 0-191k) | id (string, length 7) | content (string, length 100-10k) | size (int64, 100-10k) |
---|---|---|---|---|---|
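A minimal sketch (assuming the rows below are also available as a JSON Lines export; the file name `code_samples.jsonl` is hypothetical) of how this schema could be loaded and filtered:

```python
# Hypothetical loader for the schema described above.
import pandas as pd

df = pd.read_json("code_samples.jsonl", lines=True)  # assumed export of these rows

# Keep reasonably popular, mid-sized files and list where they come from.
subset = df[(df["max_stars_count"] >= 1) & (df["size"] < 5_000)]
for _, row in subset.iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```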
fluid/image_classification/se_resnext.py
|
xymyeah/models
| 0 |
2171027
|
import os
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import reader
def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1,
act=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
        padding=(filter_size - 1) // 2,
groups=groups,
act=None,
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
def squeeze_excitation(input, num_channels, reduction_ratio):
pool = fluid.layers.pool2d(
input=input, pool_size=0, pool_type='avg', global_pooling=True)
squeeze = fluid.layers.fc(input=pool,
        size=num_channels // reduction_ratio,
act='relu')
excitation = fluid.layers.fc(input=squeeze,
size=num_channels,
act='sigmoid')
scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
return scale
def shortcut(input, ch_out, stride):
ch_in = input.shape[1]
if ch_in != ch_out:
if stride == 1:
filter_size = 1
else:
filter_size = 3
return conv_bn_layer(input, ch_out, filter_size, stride)
else:
return input
def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio):
conv0 = conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu')
conv1 = conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
stride=stride,
groups=cardinality,
act='relu')
conv2 = conv_bn_layer(
input=conv1, num_filters=num_filters * 2, filter_size=1, act=None)
scale = squeeze_excitation(
input=conv2,
num_channels=num_filters * 2,
reduction_ratio=reduction_ratio)
short = shortcut(input, num_filters * 2, stride)
return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
def SE_ResNeXt(input, class_dim, infer=False):
cardinality = 64
reduction_ratio = 16
depth = [3, 8, 36, 3]
num_filters = [128, 256, 512, 1024]
conv = conv_bn_layer(
input=input, num_filters=64, filter_size=3, stride=2, act='relu')
conv = conv_bn_layer(
input=conv, num_filters=64, filter_size=3, stride=1, act='relu')
conv = conv_bn_layer(
input=conv, num_filters=128, filter_size=3, stride=1, act='relu')
conv = fluid.layers.pool2d(
input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
for block in range(len(depth)):
for i in range(depth[block]):
conv = bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
cardinality=cardinality,
reduction_ratio=reduction_ratio)
pool = fluid.layers.pool2d(
input=conv, pool_size=0, pool_type='avg', global_pooling=True)
if not infer:
drop = fluid.layers.dropout(x=pool, dropout_prob=0.2)
else:
drop = pool
out = fluid.layers.fc(input=drop, size=class_dim, act='softmax')
return out
def train(learning_rate,
batch_size,
num_passes,
init_model=None,
model_save_dir='model',
parallel=True):
class_dim = 1000
image_shape = [3, 224, 224]
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
if parallel:
places = fluid.layers.get_places()
pd = fluid.layers.ParallelDo(places)
with pd.do():
image_ = pd.read_input(image)
label_ = pd.read_input(label)
out = SE_ResNeXt(input=image_, class_dim=class_dim)
cost = fluid.layers.cross_entropy(input=out, label=label_)
avg_cost = fluid.layers.mean(x=cost)
accuracy = fluid.layers.accuracy(input=out, label=label_)
pd.write_output(avg_cost)
pd.write_output(accuracy)
avg_cost, accuracy = pd()
avg_cost = fluid.layers.mean(x=avg_cost)
accuracy = fluid.layers.mean(x=accuracy)
else:
out = SE_ResNeXt(input=image, class_dim=class_dim)
cost = fluid.layers.cross_entropy(input=out, label=label)
avg_cost = fluid.layers.mean(x=cost)
accuracy = fluid.layers.accuracy(input=out, label=label)
optimizer = fluid.optimizer.Momentum(
learning_rate=learning_rate,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
opts = optimizer.minimize(avg_cost)
inference_program = fluid.default_main_program().clone()
with fluid.program_guard(inference_program):
inference_program = fluid.io.get_inference_program([avg_cost, accuracy])
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
if init_model is not None:
fluid.io.load_persistables(exe, init_model)
train_reader = paddle.batch(reader.train(), batch_size=batch_size)
test_reader = paddle.batch(reader.test(), batch_size=batch_size)
feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
for pass_id in range(num_passes):
for batch_id, data in enumerate(train_reader()):
loss = exe.run(fluid.default_main_program(),
feed=feeder.feed(data),
fetch_list=[avg_cost])
print("Pass {0}, batch {1}, loss {2}".format(pass_id, batch_id,
float(loss[0])))
total_loss = 0.0
total_acc = 0.0
total_batch = 0
for data in test_reader():
loss, acc = exe.run(inference_program,
feed=feeder.feed(data),
fetch_list=[avg_cost, accuracy])
total_loss += float(loss)
total_acc += float(acc)
total_batch += 1
print("End pass {0}, test_loss {1}, test_acc {2}".format(
pass_id, total_loss / total_batch, total_acc / total_batch))
model_path = os.path.join(model_save_dir, str(pass_id))
fluid.io.save_inference_model(model_path, ['image'], [out], exe)
if __name__ == '__main__':
train(
learning_rate=0.1,
batch_size=8,
num_passes=100,
init_model=None,
parallel=False)
| 6,551 |
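As an aside on what `squeeze_excitation` above computes, here is a NumPy-only sketch of the same squeeze-and-excitation rescaling (random placeholder weights; an illustration, not the Paddle implementation):

```python
import numpy as np

def se_scale(feature_map, w_reduce, w_expand):
    # feature_map: (C, H, W); w_reduce: (C, C//r); w_expand: (C//r, C)
    squeeze = feature_map.mean(axis=(1, 2))             # global average pool -> (C,)
    hidden = np.maximum(squeeze @ w_reduce, 0.0)        # FC + ReLU -> (C//r,)
    gate = 1.0 / (1.0 + np.exp(-(hidden @ w_expand)))   # FC + sigmoid -> (C,)
    return feature_map * gate[:, None, None]            # channel-wise rescaling

rng = np.random.default_rng(0)
fmap = rng.normal(size=(8, 4, 4))
out = se_scale(fmap, rng.normal(size=(8, 2)), rng.normal(size=(2, 8)))
print(out.shape)  # (8, 4, 4)
```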
lesson_7/lesson_7_Pro.py
|
Mike030668/Python--learning--UII
| 0 |
2169083
|
import csv
import pandas as pd
import json
import time
start = time.time()
car_data = []
# Read the file line by line
f = open('data_text')
for line in f:
line = line.lstrip(' ').split("\n")[0]
line = line.split(' ')
for _ in range(5):
try:
line.remove('')
except:
pass
car_data.append(line)
f.close()
print(car_data)
print()
DataFrame_from_csv = pd.read_csv('example_time.csv', sep=',')
print(type(DataFrame_from_csv))
print(DataFrame_from_csv)
print()
with open('example_time.csv', 'w') as f:
writer = csv.writer(f, delimiter=',')
car_data.append(['time_report_CSV', time.time() - start])
writer.writerows(car_data)
print('Writing complete!')
print()
with open('car_data_json_time.txt', 'w') as f:
car_data.append(['time_report_json', time.time() - start])
json.dump(car_data, f)
dict_to_json = json.dumps(car_data)
print(type(car_data), car_data)
# load, loads
with open('car_data_json_time.txt') as f:
data = json.load(f)
print(type(data), data)
print()
car_data_1 = json.loads(dict_to_json)
print(type(car_data_1), car_data_1)
| 1,118 |
Conet/posting/models.py
|
JJack27/CMPUT404-Project
| 1 |
2169524
|
import uuid
from django.db import models
from Accounts.models import Author
#from django.apps import apps
# Create your models here.
content_type_choice = (
('text/plain', 'text/plain'),
('text/markdown', 'text/markdown'),
('application/base64', 'application/base64'),
('image/png;base64', 'image/png;base64'),
('image/jpeg;base64', 'image/jpeg;base64'),
)
class Post(models.Model):
visible_type_choice = (
('PRIVATE', 'private to visibleTo list'),
('FRIENDS', 'private to my friends'),
('FOAF', 'private to friends of friends'),
        ('SERVERONLY', 'private to only friends on local server'),
('PUBLIC', 'public'),
)
postid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
title = models.CharField(max_length=128)
source = models.URLField(null=True)
origin = models.URLField(null=True)
description = models.CharField(max_length=200)
postauthor = models.ForeignKey(Author, on_delete=models.CASCADE, related_name="postauthor")
contentType = models.CharField(max_length=32, choices=content_type_choice, default='text/plain')
content = models.TextField(blank=True)
categories = models.CharField(max_length=250)
published = models.DateTimeField(auto_now=True)
visibility = models.CharField(max_length=10, choices=visible_type_choice, default='PUBLIC')
visibleTo = models.TextField(blank=True)
unlisted = models.BooleanField(default=False)
class Comment(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
commentauthor = models.ForeignKey(Author, on_delete=models.CASCADE, related_name='author')
post = models.ForeignKey(Post, on_delete=models.CASCADE,related_name='post')
comment = models.CharField(max_length=500)
contentType = models.CharField(max_length=32, choices=content_type_choice, default='text/plain')
published = models.DateTimeField(auto_now=True)
| 1,966 |
hiyori/resolvers/async_.py
|
futursolo/hiyori
| 1 |
2170390
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Set, Tuple, Union
import asyncio
import contextlib
import ipaddress
import pathlib
from .. import exceptions, resolvers
from . import base
__all__ = []
try:
import aiodns
class AsyncResolver(base.BaseResolver):
"""
This resolver implements a dns resolver using :code:`aiodns`.
This is the default implementation if aiodns is installed.
In addition to the arguments accepted by :class:`..base.BaseResolver`,
it also accepts the following arguments:
:arg respect_hosts_file: Whether to look up in hosts file.
:arg dns_servers: List of DNS servers to use. :code:`None` if system
DNS servers should be used.
"""
def __init__(
self,
*,
min_ttl: int = 60,
respect_remote_ttl: bool = True,
respect_hosts_file: bool = True,
dns_servers: Optional[List[str]] = None,
) -> None:
super().__init__(
min_ttl=min_ttl, respect_remote_ttl=respect_remote_ttl
)
if respect_hosts_file:
self._hosts_resolver: Optional[
resolvers.HostsResolver
] = resolvers.HostsResolver(
min_ttl=min_ttl, respect_remote_ttl=respect_remote_ttl
)
else:
self._hosts_resolver = None
self._dns_servers = dns_servers
self._resolver = aiodns.DNSResolver(self._dns_servers)
async def lookup_now(
self, host: str, port: int
) -> base.ResolvedResult:
if self._hosts_resolver is not None:
with contextlib.suppress(exceptions.UnresolvableHost):
return await self._hosts_resolver.lookup(host, port)
results: Set[
Union[
Tuple[
Union[ipaddress.IPv4Address, ipaddress.IPv6Address],
int,
],
pathlib.Path,
]
] = set()
ttl: Optional[int] = None
done, _ = await asyncio.wait(
[
self._resolver.query(host, "A"),
self._resolver.query(host, "AAAA"),
]
)
for tsk in done:
with contextlib.suppress(aiodns.error.DNSError):
for result in tsk.result():
with contextlib.suppress(ValueError):
ip = ipaddress.ip_address(result.host)
results.add((ip, port))
if ttl is None or ttl > result.ttl:
ttl = result.ttl
if ttl is None or ttl < self._min_ttl:
ttl = self._min_ttl
if not results:
try:
for tsk in done:
tsk.result()
raise RuntimeError(
"This shouldn't happen. Please file an issue."
)
except (RuntimeError, aiodns.error.DNSError) as e:
raise exceptions.UnresolvableHost(
f"Failed to resolve {host}:{port}."
) from e
return base.ResolvedResult(
host=host, port=port, results=results, ttl=ttl
)
__all__.append("AsyncResolver")
except ImportError:
pass
| 4,176 |
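A minimal usage sketch for the resolver above (assuming `aiodns` is installed; the import path mirrors this record's file path and the host name is only an example):

```python
import asyncio

from hiyori.resolvers.async_ import AsyncResolver  # path assumed from this record

async def main() -> None:
    resolver = AsyncResolver(min_ttl=60, respect_hosts_file=True)
    # lookup_now is the coroutine defined above; it raises UnresolvableHost on failure.
    result = await resolver.lookup_now("example.com", 443)
    print(result.results, result.ttl)

asyncio.run(main())
```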
PDFDemo.py
|
santhipriya13/PracticeCode
| 0 |
2171293
|
import PyPDF2
with open("twopage.pdf",'rb') as file:
print(file)
# print(dir(PyPDF2))
#print(dir(PyPDF2.PdfFileReader))
reader=PyPDF2.PdfFileReader(file)
page=reader.getPage(0)
print(page.rotateCounterClockwise(180))
print(reader.numPages)
writer=PyPDF2.PdfFileWriter()
writer.addPage(page)
with open("tilt.pdf", 'wb') as newfile:
writer.write(newfile)
| 364 |
Week01/q_paste.py
|
HowardNTUST/HackNTU_Data_2017
| 0 |
2169471
|
simg_array = np.array(simg)
img_array2 = img_array.copy()
print("簡單的")
img_array2[200:400, 300:500] = simg_array
show(img_array2)
print("這樣呢?")
img_array2 = img_array.copy()
img_array2[200:400, 300:500][simg_array!=3] = simg_array[simg_array!=3]
show(img_array2)
| 262 |
glenoidplanefitting/algorithms/plane_fitting.py
|
astaolaf/glenoidplanefitting
| 0 |
2170122
|
"""
This is an implementation of a two plane method, see
<NAME>, <NAME>, Chen, <NAME>.
`Predicting normal glenoid version from the pathologic scapula:
a comparison of 4 methods in 2- and 3-dimensional models
<https://doi.org/10.1016/j.jse.2010.05.024>`_
J Shoulder Elbow Surg (2011) 20, 234-244
"""
import math
import numpy as np
import vtk
from glenoidplanefitting.algorithms.models import make_plane_model
def fit_plane_to_points_scapula(points1, return_meta1=False):
"""
Fit a plane to a set of manually selected points on the scapula
:param points1: np.ndarray, size n by 3 array of the following points,
inferior tip of scapula, medial border of scapula, and center
of glenoid fossa.
    :param return_meta1: If true, also returns the center and normal
used to generate the plane
:return: the fitted plane through the scapula
"""
data = np.array(points1)
center = data.mean(axis=0)
result = np.linalg.svd(data - center)
normal = np.cross(result[2][0], result[2][1])
planesource = make_plane_model(center, normal)
if return_meta1:
return planesource.GetOutput(), center, normal
return planesource.GetOutput()
def fit_plane_to_points_glenoid(points2, return_meta2=False):
"""
Fit a plane to a set of manually selected points on the glenoid face
    :param points2: np.ndarray, size n by 3 array of the following points,
one superior on the glenoid face,
two inferior on the glenoid face left and right side
    :param return_meta2: If true, also returns the center and normal
used to generate the plane
:return: the fitted plane of the glenoid face
"""
data2 = np.array(points2)
center2 = data2.mean(axis=0)
result2 = np.linalg.svd(data2 - center2)
normal2 = np.cross(result2[2][0], result2[2][1])
planesource2 = make_plane_model(center2, normal2)
if return_meta2:
return planesource2.GetOutput(), center2, normal2
return planesource2.GetOutput()
def fit_plane_transverse(points1, points3, return_meta3=False):
"""
Fit a transverse plane perpendicular to the scapular plane and
passing through the scapular axis.
:param points1: np.ndarray, size n by 3 array of the following points,
    inferior tip of scapula, medial border of scapula,
and center of glenoid fossa.
:param points3: np.ndarray, size n by 3 of the following points,
center of glenoid fossa, and medial border
    :param return_meta3: If true, also returns the center and normal
used to generate the plane
:return: the fitted transverse plane
"""
data = np.array(points1)
center = data.mean(axis=0)
result = np.linalg.svd(data - center)
normal = np.cross(result[2][0], result[2][1])
normal1 = np.array(normal)
data3 = np.array(points3)
x_mid = (data3[0,0] + data3[1,0])/2
y_mid = (data3[0,1] + data3[1,1])/2
z_mid = (data3[0,2] + data3[1,2])/2
vector = np.array(data3[0] - data3[1])
center3 = (x_mid, y_mid, z_mid)
normal3 = np.cross(vector, normal1)
planesource3 = make_plane_model(center3, normal3)
if return_meta3:
return planesource3.GetOutput(), center3, normal3
return planesource3.GetOutput()
def planes_version(normal_plane1, normal_plane2):
"""
Determines the glenoid version using the two planes method.
:param normal_plane1: The normal vector of the scapula plane.
:param normal_plane2: The normal vector of the glenoid plane.
:returns: The glenoid version (positive value indicates retroversion)
"""
radians = vtk.vtkMath().AngleBetweenVectors(#pylint:disable=no-member
normal_plane1, normal_plane2)
version = math.degrees(radians) - 90
return version
| 3,790 |
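A small worked example of the quantity `planes_version` returns (NumPy only, no VTK; the normals are made up for illustration): the angle between the two plane normals minus 90 degrees.

```python
import numpy as np

def version_from_normals(n1, n2):
    # Same quantity as planes_version: angle between normals (degrees) minus 90.
    n1, n2 = np.asarray(n1, float), np.asarray(n2, float)
    cos_angle = np.dot(n1, n2) / (np.linalg.norm(n1) * np.linalg.norm(n2))
    return np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0))) - 90.0

# A glenoid normal tilted 10 degrees past perpendicular to the scapula normal
# gives roughly +10 degrees, i.e. retroversion.
theta = np.radians(100.0)
print(round(version_from_normals([1.0, 0.0, 0.0],
                                 [np.cos(theta), np.sin(theta), 0.0]), 1))  # ~10.0
```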
Projects/SVD/Access Red Pixel Data/tests/test_task.py
|
jetbrains-academy/Python-Libraries-NumPy
| 0 |
2170195
|
import unittest
import numpy as np
from task import img, red_pixel_data
class TestCase(unittest.TestCase):
def test_red(self):
expected, actual = img[:, :, 0], red_pixel_data
np.testing.assert_array_equal(expected, actual, err_msg="Got a wrong red array.")
| 281 |
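A tiny NumPy illustration of what this test checks: for an image stored as a (height, width, 3) array, channel index 0 is the red plane (the values below are arbitrary):

```python
import numpy as np

img = np.arange(2 * 2 * 3).reshape(2, 2, 3)  # toy (H, W, RGB) image
red = img[:, :, 0]                           # red channel only
print(red)        # [[0 3]
                  #  [6 9]]
print(red.shape)  # (2, 2)
```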
vtp/management/commands/import_vtp.py
|
CzechInvest/ciis
| 1 |
2171614
|
from django.core.management.base import BaseCommand, CommandError
from django.contrib.gis.geos import GEOSGeometry
from vtp.models import VtpType, Service, Vtp
from django.contrib.gis.gdal import DataSource
from addresses.tools import geocode
from addresses.models import Address
import json
class Command(BaseCommand):
help = 'Import initial data sets to database'
def add_arguments(self, parser):
parser.add_argument('input', type=str, help="GeoJson file")
def handle(self, *args, **options):
Vtp.objects.all().delete()
with open(options["input"]) as inpt:
data = json.load(inpt)
features = data["features"]
for f in features:
addressid = geocode(f["properties"]["address"])
address = Address.objects.get(adm=addressid)
vtp = Vtp.objects.create(name=f["properties"]["name"],
url=f["properties"]["url"],
address=address)
for s in f["properties"]["services"]:
services = Service.objects.filter(service=s)
if len(services):
vtp.services.add(services[0])
pass
else:
service = Service.objects.create(service=s)
vtp.services.add(service)
pass
for t in f["properties"]["type"]:
types = VtpType.objects.filter(type=t)
if len(types):
vtp.type.add(types[0])
pass
else:
typ = VtpType.objects.create(type=t)
vtp.type.add(typ)
pass
| 1,792 |
maskrcnn_benchmark/data/transforms/build.py
|
DIYer22/maskrcnn-benchmark
| 0 |
2171559
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from . import transforms as T
def build_transforms(cfg, is_train=True):
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
flip_prob = 0.5 # cfg.INPUT.FLIP_PROB_TRAIN
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
flip_prob = 0
to_bgr255 = cfg.INPUT.TO_BGR255
normalize_transform = T.Normalize(
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=to_bgr255
)
transform = T.Compose(
[
T.Resize(min_size, max_size),
T.RandomHorizontalFlip(flip_prob),
T.ToTensor(),
normalize_transform,
]
)
from boxx import cf
if is_train and cf.args.task == "rpc":
transform = T.Compose(
[
T.Resize(min_size, max_size),
T.RandomHorizontalFlip(flip_prob),
T.RandomVerticalFlip(flip_prob),
T.RandomRotate(is_train),
T.ToTensor(),
normalize_transform,
]
)
# adjustBrightness = .5
adjustBrightness = None
if adjustBrightness:
from boxx import pred
# import boxx.g
transforms = transform.transforms
transform = T.Compose(transforms[:-1] + [lambda img,t:(img*adjustBrightness, t)] + transforms[-1:])
pred-"\n\nNotice: adjustBrightness is %s\n\n%s"%(adjustBrightness,transform)
# adjustBrightness = 0.825
# adjustBrightness = 0.65
adjustBrightness = None
if adjustBrightness:
from boxx import np, uint8, pred
import skimage.color as col
from PIL import Image
def adjustBrightnessInHsv(img, vRate):
hsv = col.rgb2hsv(img)
hsv[...,2] *= vRate
newimg = col.hsv2rgb(hsv)
return uint8(newimg)
def adjustBrightnessPil(pil, target):
new = adjustBrightnessInHsv(np.array(pil), adjustBrightness)
newpil = Image.fromarray(new)
return newpil, target
pred-"\n\nNotice: adjustBrightness is %s\n\n%s"%(adjustBrightness,transform)
transforms = transform.transforms
transform = T.Compose(transforms[:1] + [adjustBrightnessPil] + transforms[1:])
from boxx import cf
cf.is_train = is_train
return transform
| 2,454 |
migrations/migrations/0008_remove_field_dont_set_identifier.py
|
MJJojo97/openslides-backend
| 0 |
2171877
|
from datastore.migrations import RemoveFieldsMigration
class Migration(RemoveFieldsMigration):
"""
This migration removes field `motion_state/dont_set_identifier`
from database-events
"""
target_migration_index = 9
collection_fields_map = {"motion_state": ["dont_set_identifier"]}
| 309 |
hamgr/hamgr/db/versions/008_alter_text_fileds_to_largebinary.py
|
platform9/pf9-ha
| 11 |
2171281
|
# Copyright (c) 2019 Platform9 Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import Text
from sqlalchemy import LargeBinary
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
# 'events' column in table 'change_events'
table_change_events = Table('change_events', meta, autoload=True)
table_change_events.c.events.alter(type=LargeBinary)
# 'peers', 'members', 'kvstore' in table 'consul_status'
table_consul_status = Table('consul_status', meta, autoload=True)
table_consul_status.c.peers.alter(type=LargeBinary)
table_consul_status.c.members.alter(type=LargeBinary)
table_consul_status.c.kvstore.alter(type=LargeBinary)
# 'before_rebalance', 'after_rebalance' in table 'consul_role_rebalance'
table_consul_role_rebalance = Table('consul_role_rebalance', meta, autoload=True)
table_consul_role_rebalance.c.before_rebalance.alter(type=LargeBinary)
table_consul_role_rebalance.c.after_rebalance.alter(type=LargeBinary)
def downgrade(migrate_engine):
meta.bind = migrate_engine
# 'events' column in table 'change_events'
table_change_events = Table('change_events', meta, autoload=True)
table_change_events.c.events.alter(type=Text)
# 'peers', 'members', 'kvstore' in table 'consul_status'
table_consul_status = Table('consul_status', meta, autoload=True)
table_consul_status.c.peers.alter(type=Text)
table_consul_status.c.members.alter(type=Text)
table_consul_status.c.kvstore.alter(type=Text)
# 'before_rebalance', 'after_rebalance' in table 'consul_role_rebalance'
table_consul_role_rebalance = Table('consul_role_rebalance', meta, autoload=True)
table_consul_role_rebalance.c.before_rebalance.alter(type=Text)
table_consul_role_rebalance.c.after_rebalance.alter(type=Text)
| 2,455 |
Chapter09/client.py
|
PacktPublishing/Modernizing-Oracle-Tuxedo-Applications-with-Python
| 2 |
2171966
|
import tuxedo as t
_, _, res = t.tpcall("COUNT", {})
print(res)
t.tpbegin(30)
t.tpcall("STORE", {"TA_SOURCE": "Hello"})
print(t.tpcall("COUNT", {}).data)
print(t.tpcall("COUNTXA", {}).data)
t.tpcommit()
print(t.tpcall("COUNT", {}).data)
| 240 |
lenstronomywrapper/LensSystem/arc_quad_lens.py
|
dangilman/LenstronomyWrapper
| 0 |
2170981
|
from lenstronomy.LensModel.lens_model import LensModel
import numpy as np
from lenstronomywrapper.LensSystem.lens_base import LensBase
from lenstronomywrapper.Optimization.quad_optimization.brute import BruteOptimization
from lenstronomywrapper.Optimization.extended_optimization.source_reconstruction import SourceReconstruction
from pyHalo.Cosmology.cosmology import Cosmology
class ArcQuadLensSystem(LensBase):
def __init__(self, macromodel, z_source, lens_light_model, source_light_model,
substructure_realization=None, pyhalo_cosmology=None):
self.lens_light_model = lens_light_model
self.source_light_model = source_light_model
if pyhalo_cosmology is None:
# the default cosmology in pyHalo, currently WMAP9
pyhalo_cosmology = Cosmology()
self.pc_per_arcsec_zsource = 1000 / pyhalo_cosmology.astropy.arcsec_per_kpc_proper(z_source).value
super(ArcQuadLensSystem, self).__init__(macromodel, z_source, substructure_realization, pyhalo_cosmology)
def get_smooth_lens_system(self):
arclens_smooth_component = ArcQuadLensSystem(self.macromodel, self.zsource,
self.lens_light_model, self.source_light_model,
None, self.pyhalo_cosmology)
source_x, source_y = self.source_centroid_x, self.source_centroid_y
light_x, light_y = self.light_centroid_x, self.light_centroid_y
arclens_smooth_component.update_source_centroid(source_x, source_y)
arclens_smooth_component.update_light_centroid(light_x, light_y)
arclens_smooth_component._set_concentric()
return arclens_smooth_component
def get_lens_model_components(self):
return self.macromodel, self.source_light_model, self.lens_light_model, self.background_quasar
def add_source_light_component(self, new_component):
self.source_light_model.add_component(new_component)
def add_lens_light_component(self, new_component):
self.lens_light_model.add_component(new_component)
def add_macromodel_component(self, new_component):
self.macromodel.add_component(new_component)
@classmethod
def fromQuad(cls, quad_lens_system, lens_light_model, source_light_model, inherit_substructure_realization=True):
if inherit_substructure_realization:
pass_realization = quad_lens_system.realization
else:
pass_realization = None
arcquadlens = ArcQuadLensSystem(quad_lens_system.macromodel, quad_lens_system.zsource, lens_light_model,
source_light_model, pass_realization, quad_lens_system.pyhalo_cosmology)
source_x, source_y = quad_lens_system.source_centroid_x, quad_lens_system.source_centroid_y
light_x, light_y = quad_lens_system.light_centroid_x, quad_lens_system.light_centroid_y
arcquadlens.update_source_centroid(source_x, source_y)
arcquadlens.update_light_centroid(light_x, light_y)
arcquadlens._set_concentric()
arcquadlens.position_convention_halo = quad_lens_system.position_convention_halo
return arcquadlens
def _set_concentric(self):
for component in self.lens_light_model.components:
if component.concentric_with_model is not None:
idx = component.concentric_with_model
model = self.macromodel.components[idx]
for i in range(0, len(component._kwargs)):
component._kwargs[i]['center_x'], component._kwargs[i]['center_y'] = model.x_center, model.y_center
for component_idx, component in enumerate(self.source_light_model.components):
if component.concentric_with_source is not None:
for i in range(0, len(component._kwargs)):
match_idx = component.concentric_with_source
if match_idx == 0:
component._kwargs[i]['center_x'] = self.source_centroid_x
component._kwargs[i]['center_y'] = self.source_centroid_y
else:
print(match_idx)
print(self.source_light_model.components)
comp = self.source_light_model.components[match_idx]
component._kwargs[i]['center_x'] = comp._source_x
component._kwargs[i]['center_y'] = comp._source_y
def fit(self, data_to_fit, fit_sequence, **kwargs):
optimizer = SourceReconstruction(self, data_to_fit, **kwargs)
chain_list, kwargs_result, kwargs_model, multi_band_list, kwargs_special, param_class = optimizer.\
optimize(fit_sequence)
return chain_list, kwargs_result, kwargs_model, multi_band_list, kwargs_special, param_class
def initialize(self, data_to_fit, opt_routine='fixed_powerlaw_shear', constrain_params=None, verbose=False):
optimizer = BruteOptimization(self)
_, _, _ = optimizer.optimize(data_to_fit, opt_routine, constrain_params, verbose, include_substructure=False)
return
def update_source_centroid(self, source_x, source_y):
self.source_centroid_x = source_x
self.source_centroid_y = source_y
def update_light_centroid(self, light_x, light_y):
self.light_centroid_x = light_x
self.light_centroid_y = light_y
def update_lens_light(self, new_lens_light_kwargs):
count = 0
for component in self.lens_light_model.components:
ind1 = count
ind2 = count + component.n_models
kwargs_component = new_lens_light_kwargs[ind1:ind2]
component._kwargs = kwargs_component
count += component.n_models
def update_source_light(self, new_source_light_kwargs):
count = 0
for component in self.source_light_model.components:
ind1 = count
ind2 = count + component.n_models
kwargs_component = new_source_light_kwargs[ind1:ind2]
component._kwargs = kwargs_component
count += component.n_models
def get_lens_light(self):
instance, kwargs = self.lens_light_model.lensLight, self.lens_light_model.kwargs_light
return instance, kwargs
def get_source_light(self):
instance, kwargs = self.source_light_model.sourceLight, self.source_light_model.kwargs_light
return instance, kwargs
def quasar_magnification(self, x, y, background_source,
lens_model,
kwargs_lensmodel, normed=True,
retry_if_blended=0,
enforce_unblended=False,
adaptive=False, verbose=False, point_source=False,
grid_axis_ratio=1):
"""
Computes the magnifications (or flux ratios if normed=True)
:param x: x image position
:param y: y image position
        :param background_source: an instance of the background source light profile
:param lens_model: an instance of LensModel (see lenstronomy.lens_model)
:param kwargs_lensmodel: key word arguments for the lens_model
:param normed: if True returns flux ratios
        :param retry_if_blended: an integer that specifies how many times to try
increasing the size of the ray tracing window if an image comes out blended together
:param point_source: if True, computes the magnification of a point source
"""
if point_source:
mags = lens_model.magnification(x, y, kwargs_lensmodel)
mags = abs(mags)
if normed:
mags *= max(mags) ** -1
return mags, False
background_source.setup(self.pc_per_arcsec_zsource)
if not hasattr(self, 'source_centroid_x') or self.source_centroid_x is None:
raise Exception('lens system must have a specified source coordinate in order to compute the magnification'
'from an extended source.')
background_source.update_position(self.source_centroid_x, self.source_centroid_y)
relative_angles = [np.arctan2(-xi, yi) for (xi, yi) in zip(x, y)]
return background_source.magnification(x, y, lens_model,
kwargs_lensmodel,
normed, retry_if_blended,
enforce_unblended, adaptive, verbose,
grid_axis_ratio,
relative_angles)
def plot_images(self, x, y, lens_model=None, kwargs_lensmodel=None):
if lens_model is None or kwargs_lensmodel is None:
lens_model, kwargs_lensmodel = self.get_lensmodel()
return self.background_quasar.plot_images(x, y, lens_model, kwargs_lensmodel)
def add_mcmc_model(self, kwargs_list, kwargs_model, kwargs_result,
multi_band_list, kwargs_special):
self.mcmc_output_kwargs = {'kwargs_list': kwargs_list, 'kwargs_result': kwargs_result,
'kwargs_model': kwargs_model, 'multi_band_list': multi_band_list,
'kwargs_special': kwargs_special}
| 9,407 |
db/save_and_get.py
|
DanielWong0623/Dog-Face-Recognition-PyTorch
| 0 |
2171189
|
from PIL import Image
from dog_facenet import DogFaceNet
import os
import numpy as np
from db.mysql_helper import MySqlHelper
# ------------------------------------------------#
# Store dog-face feature vectors in the database
# and fetch database records for dog-face comparison and recognition
# ------------------------------------------------
db = MySqlHelper()
# ------------------------------------------------#
# Convert a feature vector to a comma-separated string
# ------------------------------------------------#
def change_feature_vector_to_str_list(feature):
encoding_list = feature.tolist()
encoding_str_list = [str(i) for i in encoding_list]
encoding_str = ','.join(encoding_str_list)
return encoding_str
# ------------------------------------------------#
# Compute the feature vector of a given image, encode it, and store it in the database
# ------------------------------------------------#
def save_one_feature(img_path, name, model):
image = Image.open(img_path)
feature = model.get_face_feature(image)
encoding_str = change_feature_vector_to_str_list(feature)
sql = 'insert into face_2(name, encoding, path) values(%s, %s, %s)'
ret = db.insert_one(sql, (name, encoding_str, img_path))
if ret == 0:
print('Insertion failed, please try again!')
else:
print(img_path + ' Insertion succeeded!')
def save_one_feature_name_needed(img_path, model, name, original_image_path):
image = Image.open(img_path)
feature = model.get_face_feature(image)
encoding_str = change_feature_vector_to_str_list(feature)
sql = 'insert into face_2(name, encoding, path) values(%s, %s, %s)'
ret = db.insert_one(sql, (name, encoding_str, original_image_path))
return ret
# ------------------------------------------------#
# Compute and store feature vectors for every image under a given directory
# ------------------------------------------------#
def save_all_features(model, dir_path, mode='have_sub_dir'):
# model = DogFaceNet()
dir_list = os.listdir(dir_path) # 获取文件夹下所有文件名
if mode == 'have_sub_dir':
for subdir in dir_list:
subdir_path = os.path.join(dir_path, subdir)
img_list = os.listdir(subdir_path)
for img in img_list:
img_path = os.path.join(subdir_path, img)
save_one_feature(img_path, subdir, model)
else:
for img in dir_list:
path = os.path.join(dir_path, img)
            save_one_feature(path, os.path.splitext(img)[0], model)  # file name (sans extension) used as the record name
# ------------------------------------------------#
# Fetch dog-face records from the database
# ------------------------------------------------#
def get_all_features(return_type='np', table='face_2'):
ids = []
names = []
encodings = []
paths = []
if table == 'face_2':
sql = 'select * from face_2'
elif table == 'face_single':
sql = 'select * from face_single'
# sql = 'select * from face_single'
res = db.select_all(sql)
for row in res:
        # Extract each column
id = row[0]
name = bytes.decode(row[1])
encoding = bytes.decode(row[2])
path = bytes.decode(row[3])
        # Convert the encoding string to a float NumPy array
data_list = encoding.strip('[').strip(']').split(',')
float_list = list(map(float, data_list))
encoding_arr = np.array(float_list)
ids.append(id)
names.append(name)
encodings.append(encoding_arr)
paths.append(path)
    # Return NumPy arrays if requested
if return_type == 'np':
return np.array(ids), np.array(names), np.array(encodings), np.array(paths)
    # Otherwise return plain lists
else:
return ids, names, encodings, paths
# ------------------------------------------------#
# Fetch the owner's information from the dog_owner table
# ------------------------------------------------#
def get_owner_info(dog_id):
sql = 'select name, address, tel, email from dog_owner where id = %s'
res = db.select_one(sql, dog_id)
owner_name = bytes.decode(res[0])
address = bytes.decode(res[1])
tel = bytes.decode(res[2])
email = bytes.decode(res[3])
return owner_name, address, tel, email
def insert_owner_infos():
ids, _, _, _ = get_all_features(return_type='list')
for id in ids:
sql = 'insert into dog_owner(id, name, address, tel, email) values(%s, %s, %s, %s, %s)'
ret = db.insert_one(sql, (id, 'Alex', 'No. 1 Weigang', '18888888888', '<EMAIL>'))
if ret == 1:
print('insert success')
else:
print('insert failed')
def insert_one_owner_info(id, name, address, tel, email):
sql = 'insert into dog_owner(id, name, address, tel, email) values(%s, %s, %s, %s, %s)'
ret = db.insert_one(sql, (id, name, address, tel, email))
return ret
def get_id_by_path(path):
sql = 'select id from face_2 where path = %s'
ret = db.select_one(sql, path)
return ret[0]
if __name__ == '__main__':
# sql_1 = 'insert into face_2(name, encoding, path) values(%s, %s, %s)'
# ret = db.insert_one(sql_1, ("Test", "Test", "Test"))
id = get_id_by_path("F:/datasets/Previous_Version_Recognition_Set/test_292_dogs/Wang/WangCai_ww6.jpg")
print(id)
| 5,066 |
1177.py
|
gabrielsilvadev/URI-python-3
| 5 |
2171977
|
t=int(input())
n=[]
j=0
i=0
while i <(1000):
n.append(j)
print('N[{}] = {}'.format(i,j))
if j<(t-1):
j = j + 1
else:
j = 0
i = i + 1
| 191 |
output/models/sun_data/elem_decl/value_constraint/value_constraint01001m/value_constraint01001m7_xsd/__init__.py
|
tefra/xsdata-w3c-tests
| 1 |
2171477
|
from output.models.sun_data.elem_decl.value_constraint.value_constraint01001m.value_constraint01001m7_xsd.value_constraint01001m7 import (
Id,
Root,
)
__all__ = [
"Id",
"Root",
]
| 196 |
h2o-extensions/jython-cfunc/src/test/resources/py/test_cfunc2.py
|
ahmedengu/h2o-3
| 6,098 |
2171571
|
import water.udf.CFunc2 as Func
class TestCFunc2(Func):
"""
Compute sum of actual + predict
"""
def apply(self, rowActual, rowPredict):
return sum(rowActual.readDoubles()) + sum(rowPredict.readDoubles())
| 231 |
allocation/entities/discipline.py
|
gabrielpereiram10/allocation
| 0 |
2169447
|
class Discipline:
def __init__(self, semester, name, professor):
self.semester = semester
self.name = name
self.professor = professor
def __eq__(self, other: object) -> bool:
return isinstance(other, Discipline) and other.name == self.name and other.semester == self.semester and other.professor == self.professor
def __repr__(self) -> str:
return f'{self.name}'
def __str__(self) -> str:
return self.__repr__()
| 479 |
tests/functions_test.py
|
MaciejWas/lit-script
| 4 |
2170636
|
import sys
import pytest
sys.path.append(".")
from lit_script import core
from lit_script import Interpreter
TESTS_LOCATION = "language/tests"
# Helpers
interpreter = Interpreter()
def make_atom(value: int):
return core.Atom(value=value, type="Int")
class TestInbuiltFuncs:
def test_decides(self):
a: core.Expression = interpreter.read_expression("decides")
assert interpreter.resolve_expression(a).is_function
a: core.Expression = interpreter.read_expression("1 `decides` 4 8")
result = interpreter.resolve_expression(a)
assert result.value == 4
a: core.Expression = interpreter.read_expression("0 `decides` 4 8")
result = interpreter.resolve_expression(a)
assert result.value == 8
with pytest.raises(Exception):
a: core.Expression = interpreter.read_expression('0 `decides` "444" 8')
with pytest.raises(Exception):
a: core.Expression = interpreter.read_expression('0 `decides` "444" "8"')
def test_add(self):
a = make_atom(100)
e = core.FunctionCallExpression(
core.FunctionCall(
fun=core.VariableExpression(core.Variable(name="add")),
arg=core.AtomExpression(a),
)
)
e2 = core.FunctionCallExpression(
core.FunctionCall(
fun=e,
arg=core.AtomExpression(a),
)
)
result: core.Atom = interpreter.resolve_expression(e2)
assert result.value == 200
with pytest.raises(Exception):
a: core.Expression = interpreter.read_expression('add "444" "8"')
with pytest.raises(Exception):
a: core.Expression = interpreter.read_expression('add 1 "8"')
def test_mul(self):
a = make_atom(100)
e = core.FunctionCallExpression(
core.FunctionCall(
fun=core.VariableExpression(core.Variable(name="mul")),
arg=core.AtomExpression(a),
)
)
e2 = core.FunctionCallExpression(
core.FunctionCall(
fun=e,
arg=core.AtomExpression(a),
)
)
result: core.Atom = interpreter.resolve_expression(e2)
assert result.value == 100 * 100
with pytest.raises(Exception):
a: core.Expression = interpreter.read_expression('mul "444" "8"')
with pytest.raises(Exception):
a: core.Expression = interpreter.read_expression('mul 1 "8"')
def test_incr(self):
a = make_atom(100)
e = core.FunctionCallExpression(
core.FunctionCall(
fun=core.VariableExpression(core.Variable(name="increase")),
arg=core.AtomExpression(a),
)
)
result: core.Atom = interpreter.resolve_expression(e)
assert result.value == 100 + 100
def test_neg(self):
a = make_atom(100)
e = core.FunctionCallExpression(
core.FunctionCall(
fun=core.VariableExpression(core.Variable(name="neg")),
arg=core.AtomExpression(a),
)
)
result: core.Atom = interpreter.resolve_expression(e)
assert result.value == -1 * 100
| 3,295 |
CoA/2015/02b.py
|
RuigerS/Tuturials
| 0 |
2168830
|
f=open("./2015/data/02a.txt","r")
totaal=0
for line in f:
kanten=[]
kanten.append(int(line[:line.index("x")]))
line=line[line.index("x")+1:]
kanten.append(int(line[:line.index("x")]))
line=line[line.index("x")+1:]
kanten.append(int(line))
kanten.sort()
# print(kanten)
totaal+=kanten[0]*2+kanten[1]*2+kanten[0]*kanten[1]*kanten[2]
print(totaal)
| 380 |
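A more direct sketch of the same ribbon computation (splitting each line on "x" instead of repeated index slicing; the input path is taken from the snippet above):

```python
total = 0
with open("./2015/data/02a.txt") as f:
    for line in f:
        l, w, h = sorted(int(part) for part in line.strip().split("x"))
        total += 2 * l + 2 * w + l * w * h  # smallest face perimeter + volume
print(total)
```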
vize/130401022/sunucu/sunucu.py
|
hasan-se/blm304
| 2 |
2169364
|
import socket
import os #HASAN SESLİ
import sys
import time
import scapy
#HASAN SESLİ
# Run sunucu.py first, then run istemci.py
sock_ip="127.0.0.1"
sock_port=42
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((sock_ip,sock_port))
veri , istemciAdresi = sock.recvfrom(4096)
print("Mesaj:" ,str(veri))
m="istemci sunucuya baglandi.."
message=bytes(m, encoding="utf8")
sock.sendto(message,istemciAdresi)
def sunucuGet(gelen):
yol='/Users/TheSesli/Desktop/veriHaberlesmesi_sunucu/dosyalar/'+ str(gelen) #sunucu.py dosyasının olduğu dizin
if os.path.isfile(yol):
mesaj3="dosya mevcut"
msj3=mesaj3.encode('utf8')
sock.sendto(msj3, istemciAdresi)
        # if the file exists
dosya=os.stat(yol)
dosyaB=dosya.st_size #dosyanın içerdiği paket sayısı (bayt olarak)
boyut=int(dosyaB / 4096) #4096 size belirlediğimiz için bu şekilde int yapıyoruz
boyut= boyut +1 #gönderimdeki paketleri sayabilmek için
devam=str(boyut) #encoding yapmak için
devam2=devam.encode('utf8')
sock.sendto(devam2,istemciAdresi)
kontrolcu = int(boyut)
dosyaİslemi = open(yol,"rb") #okuma izni ile gelen dosyayı açtık
while kontrolcu != 0:
dosyaİslemi2=dosyaİslemi.read(4096) #4096 bayt kullanıyoruz
sock.sendto(dosyaİslemi2,istemciAdresi)
kontrolcu = kontrolcu - 1
dosyaİslemi.close()
else:
mesaj="Dosya adini yanlis girdiniz ya da dosya mevcut degil\n"
msj=mesaj.encode('utf8')
sock.sendto(msj, istemciAdresi)
def sunucuPut():
if(islemD[0]=="put"):
gelenMesaj, istemciAdresi = sock.recvfrom(4096)
metin=gelenMesaj.decode('utf8')
print(metin)
if(metin=="dosya mevcut"):
print("dosya mevcut")
dosyaAc= open(islemD[1],"wb")
paketSayisi , istemciAdresi = sock.recvfrom(4096)
devam=paketSayisi.decode('utf8')
devam2=int(devam)
while devam2 != 0:
sunucuVeri , istemciAdresi = sock.recvfrom(4096)
dosyaVeri= dosyaAc.write(sunucuVeri)
devam2= devam2 - 1
dosyaAc.close()
print("Yeni dosya yüklendi, listeyi kontrol edebilirsiniz\n")
else:
print(metin+"\n")
def baglantiyiKes():
metin, istemciAdresi=sock.recvfrom(4096)
print("Mesaj : ",str(metin))
msj="Server baglantiyi sonlandirdi.."
mesaj=msj.encode('utf8')
sock.sendto(mesaj,istemciAdresi)
sock.close()
def hata():
msj="Hata: sistem girdiniz '" + secilenİslem + "' komutunu algilamadi..Lütfen kuralli girdiginizden emin olun.."
mesaj=msj.encode('utf8')
sock.sendto(mesaj,istemciAdresi)
print("\nHatali komut girildi..\n")
while True:
sunucuDosyalari = os.listdir(path="C:/Users/TheSesli/Desktop/veriH_server/server_files")
Liste = []
print("Dosyalari listele..\n")
for dosya in sunucuDosyalari:
Liste.append(dosya) #Liste içine sunucu dosyalarını tek tek ekliyoruz
listeString=str(Liste)
listeCozum=listeString.encode('utf8')
mesaj2=bytes("Sunucudaki dosyalar listeleniyor..", encoding='utf8')
sock.sendto(mesaj2,istemciAdresi)
sock.sendto(listeCozum,istemciAdresi)
veri,istemciAdresi=sock.recvfrom(4096)
secilenİslem= veri.decode('utf8')
if(secilenİslem.startswith("get[")):
islemD=secilenİslem.split("[")
islemD[1]=islemD[1].strip("]")
if(islemD[0]== "get"):
sunucuGet(islemD[1])
elif(secilenİslem.startswith("put[")):
islemD=secilenİslem.split("[")
islemD[1]=islemD[1].strip("]")
if (islemD[0] =="put"):
sunucuPut()
elif (secilenİslem.startswith("cikis[")):
islemD=secilenİslem.split("[")
islemD[1]=islemD[1].strip("]")
if(islemD[1]=="quit"):
baglantiyiKes()
break
else:
hata()
| 3,997 |
CoreUtils.py
|
b0bac/MyInformationGather
| 2 |
2170954
|
import time
import tkinter
import datetime
import threading
from DomainUtils import GetSubDomain
from ScanUtils import Ping, PortTcpScan
from DNSUtils import GetARecord, GetCNameRecord
from HttpUtils import HttpScan
class Scanner:
def __init__(self, token, domain, flag, box, size, logger):
self.TopLevelDomain = domain
print(self.TopLevelDomain)
self.VirusTotalToken = token
self.PortFlag = flag
self.EnterprisePort = [21, 22, 23, 25, 53, 80, 81, 110, 111, 123, 123, 135, 137, 139, 161, 389, 443, 445, 465,
500, 515, 520, 523, 548, 623, 636, 873, 902, 1080, 1099, 1433, 1521, 1604, 1645, 1701,
1883, 1900, 2049, 2181, 2375, 2379, 2425, 3128, 3306, 3389, 4730, 5060, 5222, 5351, 5353,
5432, 5555, 5601, 5672, 5683, 5900, 5938, 5984, 6000, 6379, 7001, 7077, 8080, 8081, 8443,
8545, 8686, 9000, 9001, 9042, 9092, 9100, 9200, 9418, 9999, 11211, 27017, 37777, 50000,
50070, 61616]
self.ConsequenceBox = box
self.IDNumber = 0
self.size = size
self.logger = logger
self.filename = "%s"%str(self.TopLevelDomain)+ "_" +str(datetime.datetime.today()).replace(" ","_").split(".")[0]+".csv"
with open(self.filename, 'w') as fw:
fw.write("主域名,子域名,CNAME,地址,端口,页面标题\n")
def LogWriter(self, message):
self.logger.config(state='normal')
self.logger.insert(tkinter.END, message)
self.logger.config(state="disable")
def PortScan(self, ip, port, cname, subdomain):
try:
if PortTcpScan(ip, port):
self.LogWriter("[+] 探测到活跃端口: %s\n" % str(port))
title, message = HttpScan(ip, port)
self.LogWriter(message)
if title is not None:
self.LogWriter("[+] 探测到页面标题: %s\n" % str(title))
self.ConsequenceBox.insert('', 'end',
values=[str(self.IDNumber), self.TopLevelDomain, subdomain, cname, ip, str(port),
title])
with open(self.filename, 'a') as fw:
fw.write("%s,%s,%s,%s,%s,%s\n"%(str(self.TopLevelDomain), subdomain, cname, ip, str(port), title))
self.IDNumber += 1
else:
self.ConsequenceBox.insert('', 'end',
values=[str(self.IDNumber), self.TopLevelDomain, subdomain, cname, ip, str(port), ""])
with open(self.filename, 'a') as fw:
fw.write("%s,%s,%s,%s,%s,%s\n"%(str(self.TopLevelDomain), subdomain, cname, ip, str(port), ""))
self.IDNumber += 1
self.size -= 1
except Exception as exception:
self.LogWriter("[-] 捕获异常: %s, , 异常点 CoreUtils.Scan.Line.57\n"%str(exception))
def Scan(self):
print(self.TopLevelDomain)
try:
_list = GetSubDomain(self.TopLevelDomain, self.VirusTotalToken)
except Exception as exception:
self.LogWriter("[-] 捕获异常: %s, 异常点 CoreUtils.Scan.Line.63\n" % str(exception))
cnamelist = []
iplist = []
print(self.TopLevelDomain)
self.LogWriter("[+] 获取顶级域名: %s\n"%str(self.TopLevelDomain))
for domain in _list:
try:
cnamelist = GetCNameRecord(domain)
except Exception as exception:
self.LogWriter("[-] 捕获异常: %s, 异常点 CoreUtils.Scan.Line.71\n" % str(exception))
continue
if len(cnamelist) == 0:
continue
for cname in cnamelist:
self.LogWriter("[+] 获取新的CNAME: %s\n" % str(cname))
try:
iplist = GetARecord(cname)
except Exception as exception:
self.LogWriter("[-] 捕获异常: %s, 异常点 CoreUtils.Scan.Line.80\n" % str(exception))
continue
if len(iplist) == 0:
continue
for ipaddress in iplist:
self.LogWriter("[+] 解析到地址: %s\n" % str(ipaddress))
if Ping(ipaddress):
self.LogWriter("[+] 探测到活跃地址: %s\n" % str(ipaddress))
if self.PortFlag == 2:
for port in self.EnterprisePort:
self.LogWriter("[+] 探测端口: %s\n" % str(port))
while True:
if self.size <= 20:
thread = threading.Thread(target=self.PortScan, args=(ipaddress, port, cname, domain))
thread.start()
time.sleep(0.5)
break
else:
continue
elif self.PortFlag == 1:
for port in range(1, 65536):
while True:
if self.size <= 20:
thread = threading.Thread(target=self.PortScan, args=(ipaddress, port, cname, domain, self.TopLevelDomain))
thread.start()
time.sleep(0.5)
break
else:
continue
| 5,751 |
tests/inventory/pipelines/test_data/fake_group_members.py
|
pombredanne/forseti-security
| 1 |
2170894
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test group members data."""
FAKE_GROUP_MEMBERS = [
{
'kind': 'admin#directory#member',
'etag': '\"abcd1234ABCD1234\"',
'id': '11111',
'email': '<EMAIL>',
'role': 'MEMBER',
'type': 'USER',
'status': 'ACTIVE'
},
{
'kind': 'admin#directory#member',
'etag': '\"efgh1234EFGH1234\"',
'id': '22222',
'email': '<EMAIL>',
'role': 'MEMBER',
'type': 'USER',
'status': 'ACTIVE'
},
{
'kind': 'admin#directory#member',
'etag': '\"hijk1234HIJK1234\"',
'id': '33333',
'role': 'MEMBER',
'type': 'USER',
'status': 'ACTIVE'
},
]
FAKE_GROUP_MEMBERS_2 = [
{
'kind': 'admin#directory#member',
'etag': '\"abcd1234ABCD1234\"',
'id': '44444',
'email': '<EMAIL>',
'role': 'MEMBER',
'type': 'USER',
'status': 'ACTIVE'
},
{
'kind': 'admin#directory#member',
'etag': '\"efgh1234EFGH1234\"',
'id': '55555',
'email': '<EMAIL>',
'role': 'OWNER',
'type': 'USER',
'status': 'ACTIVE'
}
]
EXPECTED_LOADABLE_GROUP_MEMBERS = [
{
'group_id': 'mygroup',
'member_kind': 'admin#directory#member',
'member_role': 'MEMBER',
'member_type': 'USER',
'member_status': 'ACTIVE',
'member_id': '11111',
'member_email': '<EMAIL>',
'raw_member': '{"status": "ACTIVE", "kind": "admin#directory#member", "email": "<EMAIL>", "etag": "\\"abcd1234ABCD1234\\"", "role": "MEMBER", "type": "USER", "id": "11111"}'
},
{
'group_id': 'mygroup',
'member_kind': 'admin#directory#member',
'member_role': 'MEMBER',
'member_type': 'USER',
'member_status': 'ACTIVE',
'member_id': '22222',
'member_email': '<EMAIL>',
'raw_member': '{"status": "ACTIVE", "kind": "admin#directory#member", "email": "<EMAIL>", "etag": "\\"efgh1234EFGH1234\\"", "role": "MEMBER", "type": "USER", "id": "22222"}'
},
{
'group_id': 'mygroup',
'member_kind': 'admin#directory#member',
'member_role': 'MEMBER',
'member_type': 'USER',
'member_status': 'ACTIVE',
'member_id': '33333',
'member_email': None,
'raw_member': '{"status": "ACTIVE", "kind": "admin#directory#member", "etag": "\\"hijk1234HIJK1234\\"", "role": "MEMBER", "type": "USER", "id": "33333"}'
},
{
'group_id': 'mygroup2',
'member_kind': 'admin#directory#member',
'member_role': 'MEMBER',
'member_type': 'USER',
'member_status': 'ACTIVE',
'member_id': '44444',
'member_email': '<EMAIL>',
'raw_member': '{"status": "ACTIVE", "kind": "admin#directory#member", "email": "<EMAIL>", "etag": "\\"abcd1234ABCD1234\\"", "role": "MEMBER", "type": "USER", "id": "44444"}'
},
{
'group_id': 'mygroup2',
'member_kind': 'admin#directory#member',
'member_role': 'OWNER',
'member_type': 'USER',
'member_status': 'ACTIVE',
'member_id': '55555',
'member_email': '<EMAIL>',
'raw_member': '{"status": "ACTIVE", "kind": "admin#directory#member", "email": "<EMAIL>", "etag": "\\"efgh1234EFGH1234\\"", "role": "OWNER", "type": "USER", "id": "55555"}'
}
]
FAKE_GROUPS_MEMBERS_MAP = [
('mygroup', FAKE_GROUP_MEMBERS),
('mygroup2', FAKE_GROUP_MEMBERS_2)
]
FAKE_GROUP_IDS = [
'a111', 'a222', 'a333', 'a444', 'a555',
'a666', 'a777', 'a888', 'a999', 'a000',
]
EXPECTED_CALL_LIST = [
['a111', 'a222', 'a333'],
['a444', 'a555', 'a666'],
['a777', 'a888', 'a999'],
['a000'],
]
| 4,306 |
rpython/tool/algo/test/test_unionfind.py
|
nanjekyejoannah/pypy
| 381 |
2171724
|
from rpython.tool.algo.unionfind import UnionFind
def test_cleanup():
state = []
class ReferencedByExternalState(object):
def __init__(self, obj):
state.append(self)
self.obj = obj
def absorb(self, other):
state.remove(other)
uf = UnionFind(ReferencedByExternalState)
uf.find(1)
for i in xrange(1, 10, 2):
uf.union(i, 1)
uf.find(2)
for i in xrange(2, 20, 2):
uf.union(i, 2)
assert len(state) == 2 # we have exactly 2 partitions
def test_asymmetric_absorb():
class Info(object):
def __init__(self, obj):
self.values = [obj]
def absorb(self, other):
self.values += other.values
uf = UnionFind(Info)
uf.union(2, 3)
uf.union(1, 2)
assert uf[1].values == uf[2].values == uf[3].values == [1, 2, 3]
| 860 |
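The tests above exercise the `absorb` protocol of `rpython.tool.algo.unionfind.UnionFind`. A minimal sketch of the same idea (a plain union-find that merges per-partition info objects on union; an illustration, not the RPython class itself):

```python
class Info:
    def __init__(self, obj):
        self.values = [obj]

    def absorb(self, other):
        self.values += other.values

class TinyUnionFind:
    """Union-find keeping one info object per partition, merged on union."""
    def __init__(self, info_factory):
        self.info_factory = info_factory
        self.parent = {}
        self.info = {}

    def find(self, key):
        if key not in self.parent:
            self.parent[key] = key
            self.info[key] = self.info_factory(key)
        while self.parent[key] != key:
            self.parent[key] = self.parent[self.parent[key]]  # path halving
            key = self.parent[key]
        return key

    def union(self, a, b):
        ra, rb = self.find(a), self.find(b)
        if ra != rb:
            self.parent[rb] = ra
            self.info[ra].absorb(self.info[rb])
            del self.info[rb]

uf = TinyUnionFind(Info)
uf.union(2, 3)
uf.union(1, 2)
print(uf.info[uf.find(3)].values)  # [1, 2, 3]
```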
corehq/apps/custom_data_fields/views.py
|
dslowikowski/commcare-hq
| 0 |
2171775
|
import json
import re
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator, validate_slug
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Div, HTML
from dimagi.utils.decorators.memoized import memoized
from .models import (CustomDataFieldsDefinition, CustomDataField,
CUSTOM_DATA_FIELD_PREFIX)
class CustomDataFieldsForm(forms.Form):
"""
The main form for editing a custom data definition
"""
data_fields = forms.CharField(widget=forms.HiddenInput)
def verify_no_duplicates(self, data_fields):
errors = set()
slugs = [field['slug'].lower()
for field in data_fields if 'slug' in field]
for slug in slugs:
if slugs.count(slug) > 1:
errors.add(_("Key '{}' was duplicated, key names must be "
"unique.").format(slug))
return errors
def clean_data_fields(self):
raw_data_fields = json.loads(self.cleaned_data['data_fields'])
errors = set()
data_fields = []
for raw_data_field in raw_data_fields:
data_field_form = CustomDataFieldForm(raw_data_field)
data_field_form.is_valid()
data_fields.append(data_field_form.cleaned_data)
if data_field_form.errors:
errors.update([error[0]
for error in data_field_form.errors.values()])
errors.update(self.verify_no_duplicates(data_fields))
if errors:
raise ValidationError('<br/>'.join(sorted(errors)))
return data_fields
class XmlSlugField(forms.SlugField):
default_validators = [
validate_slug,
RegexValidator(
re.compile(r'^(?!xml)', flags=re.IGNORECASE),
_('Properties cannot begin with "xml"'), 'invalid_xml'
)
]
class CustomDataFieldForm(forms.Form):
"""
Sub-form for editing an individual field's definition.
"""
label = forms.CharField(
required=True,
error_messages={'required': _('All fields are required')}
)
slug = XmlSlugField(
required=True,
error_messages={
'required': _("All fields are required"),
'invalid': _("Key fields must consist only of letters, numbers, "
"underscores or hyphens.")
}
)
is_required = forms.BooleanField(required=False)
choices = forms.CharField(widget=forms.HiddenInput, required=False)
def __init__(self, raw, *args, **kwargs):
# Pull the raw_choices out here, because Django incorrectly
# serializes the list and you can't get it
self._raw_choices = filter(None, raw.get('choices', []))
super(CustomDataFieldForm, self).__init__(raw, *args, **kwargs)
def clean_choices(self):
return self._raw_choices
class CustomDataFieldsMixin(object):
"""
Provides the interface for editing the ``CustomDataFieldsDefinition``
for each entity type.
Each entity type must provide a subclass of this mixin.
"""
urlname = None
template_name = "custom_data_fields/custom_data_fields.html"
field_type = None
entity_string = None # User, Group, Location, Product...
@classmethod
def get_validator(cls, domain):
data_model = CustomDataFieldsDefinition.get_or_create(domain, cls.field_type)
return data_model.get_validator(cls)
@classmethod
def page_name(cls):
return _("Edit {} Fields").format(cls.entity_string)
def get_definition(self):
return CustomDataFieldsDefinition.get_or_create(self.domain,
self.field_type)
def get_custom_fields(self):
definition = self.get_definition()
if definition:
return definition.fields
else:
return []
def save_custom_fields(self):
definition = self.get_definition() or CustomDataFieldsDefinition()
definition.field_type = self.field_type
definition.domain = self.domain
definition.fields = [
self.get_field(field)
for field in self.form.cleaned_data['data_fields']
]
definition.save()
def get_field(self, field):
return CustomDataField(
slug=field.get('slug'),
is_required=field.get('is_required'),
label=field.get('label'),
choices=field.get('choices'),
)
@property
def page_context(self):
return {
"custom_fields": json.loads(self.form.data['data_fields']),
"custom_fields_form": self.form,
}
@property
@memoized
def form(self):
if self.request.method == "POST":
return CustomDataFieldsForm(self.request.POST)
else:
serialized = json.dumps([field.to_json()
for field in self.get_custom_fields()])
return CustomDataFieldsForm({'data_fields': serialized})
def post(self, request, *args, **kwargs):
if self.form.is_valid():
self.save_custom_fields()
return self.get(request, success=True, *args, **kwargs)
else:
return self.get(request, *args, **kwargs)
def add_prefix(field_dict):
"""
Prefix all keys in the dict with the defined
custom data prefix (such as data-field-whatevs).
"""
return {
"{}-{}".format(CUSTOM_DATA_FIELD_PREFIX, k): v
for k, v in field_dict.iteritems()
}
def _make_field(field):
if field.choices:
return forms.ChoiceField(
label=field.label,
required=field.is_required,
choices=[('', _('Select one'))] + [(c, c) for c in field.choices],
)
return forms.CharField(label=field.label, required=field.is_required)
class CustomDataEditor(object):
"""
Tool to edit the data for a particular entity, like for an individual user.
"""
def __init__(self, field_view, domain, existing_custom_data=None,
post_dict=None, required_only=False):
self.field_view = field_view
self.domain = domain
self.existing_custom_data = existing_custom_data
self.required_only = required_only
self.form = self.init_form(post_dict)
@property
@memoized
def model(self):
definition = CustomDataFieldsDefinition.get_or_create(
self.domain,
self.field_view.field_type,
)
return definition or CustomDataFieldsDefinition()
def is_valid(self):
return self.form.is_valid()
def get_data_to_save(self):
cleaned_data = self.form.cleaned_data
self.existing_custom_data = None
self.form = self.init_form(add_prefix(cleaned_data))
self.form.is_valid()
return cleaned_data
def init_form(self, post_dict=None):
fields = {
field.slug: _make_field(field) for field in self.model.fields
if not self.required_only or field.is_required
}
field_names = fields.keys()
CustomDataForm = type('CustomDataForm', (forms.Form,), fields)
CustomDataForm.helper = FormHelper()
CustomDataForm.helper.form_tag = False
CustomDataForm.helper.layout = Layout(
Fieldset(
_("Additional Information"),
*field_names
) if self.model.fields else '',
self.get_uncategorized_form(field_names),
)
CustomDataForm._has_uncategorized = bool(
self.get_uncategorized_form(field_names)
)
if post_dict:
fields = post_dict
elif self.existing_custom_data is not None:
fields = add_prefix(self.existing_custom_data)
else:
fields = None
self.form = CustomDataForm(fields, prefix=CUSTOM_DATA_FIELD_PREFIX)
return self.form
def get_uncategorized_form(self, field_names):
def FakeInput(val):
return HTML('<span class="input-xlarge uneditable-input">{}</span>'
.format(val))
def Label(val):
return HTML('<label class="control-label">{}</label>'.format(val))
def _make_field_div(slug, val):
return Div(
Label(slug),
Div(
FakeInput(val),
css_class="controls",
),
css_class="control-group",
)
help_div = [
_make_field_div(slug, val)
for slug, val in self.existing_custom_data.items()
if slug not in field_names
] if self.existing_custom_data is not None else []
msg = """
<strong>Warning!</strong>
This data is not part of the specified user fields and will be
deleted if you save.
You can add them <a href="{}">here</a> to prevent this.
""".format(reverse(
self.field_view.urlname, args=[self.domain]
))
return Fieldset(
_("Unrecognized Information"),
Div(
HTML(msg),
css_class="alert alert-error",
),
*help_div
) if len(help_div) else HTML('')
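# Illustrative sketch (not part of the original module): typical use of CustomDataEditor from
# an entity edit view. ``UserFieldsView`` and ``user.user_data`` are hypothetical names used
# only for illustration.
#
# editor = CustomDataEditor(
#     field_view=UserFieldsView,
#     domain=domain,
#     existing_custom_data=user.user_data,
#     post_dict=request.POST if request.method == 'POST' else None,
# )
# if editor.is_valid():
#     user.user_data = editor.get_data_to_save()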
| 9,487 |
mas_employees/home/home.py
|
Kuraokami/mas-pay-io
| 0 |
2171298
|
"""General page routes."""
from flask import Blueprint
from flask import current_app as app
from flask import render_template
from mas_employees.api import fetch_products
from mas_employees.models import db, Employee
# Blueprint Configuration
home_bp = Blueprint(
"home_bp", __name__, template_folder="templates", static_folder="static"
)
@home_bp.route("/", methods=["GET"])
def home():
"""Homepage."""
employees = Employee.query.all()
return render_template(
"index.jinja2",
title="Welcome to pay-mas.io",
subtitle="List Existing users.",
template="home-template",
employees=employees
)
@home_bp.route("/employees", methods=["GET"])
def employees():
"""List of Employees."""
employees = Employee.query.all()
return render_template(
"index.jinja2",
title="List Employees",
subtitle="List Existing users.",
template="home-template",
employees=employees
)
@home_bp.route("/about", methods=["GET"])
def about():
"""About page."""
return render_template(
"index.jinja2",
title="About",
subtitle="This is an example about page.",
template="home-template page",
)
@home_bp.route("/contact", methods=["GET"])
def contact():
"""Contact page."""
return render_template(
"index.jinja2",
title="Contact",
subtitle="This is an example contact page.",
template="home-template page",
)
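if __name__ == "__main__":
    # Minimal illustrative sketch (not part of the original package): mounting this blueprint
    # on a bare Flask app for local experimentation. The SQLite URI is a placeholder, and it
    # is assumed that ``db`` is the package's Flask-SQLAlchemy handle.
    from flask import Flask
    demo_app = Flask(__name__)
    demo_app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///mas_employees_demo.db"
    db.init_app(demo_app)
    demo_app.register_blueprint(home_bp)
    demo_app.run(debug=True)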
| 1,485 |
src/test/test_config_provider.py
|
timo619/raumklima
| 11 |
2171391
|
from os.path import join, dirname, exists
import pytest
from rs500common.configuration import ConfigProvider
def test_config_provider_smoke_error():
with pytest.raises(FileNotFoundError):
ConfigProvider('/does/not/exist.ini')
def test_config_provider_on_existing_file():
file = join(dirname(__file__), '..', 'check_rs500.ini')
assert exists(file)
cf = ConfigProvider(file)
keys = cf.get_config().keys()
assert len(keys) > 0
| 462 |
notification/notification.py
|
dmreiland/Ripsnort
| 5 |
2171047
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import logging
class Notification:
def __init__(self,params):
self.apis = []
dirname = os.path.dirname(__file__)
notifyType = params['type']
if 'email' in notifyType:
sys.path.append(dirname + "/emailsmtp/")
import emailsmtp
self.apis.append( emailsmtp.EmailSMTP(params) )
if 'localnotify' in notifyType:
sys.path.append(dirname + "/localnotify/")
import localnotify
self.apis.append( localnotify.LocalNotify(params) )
if 'audionotify' in notifyType:
sys.path.append(dirname + "/audionotify/")
import audionotify
self.apis.append( audionotify.AudioNotify(params) )
logging.debug("Initialized with apis: " + str(self.apis))
def startedBackingUpDisc(self,discName):
for api in self.apis:
api.startedBackingUpDisc(discName)
def endedBackingUpDisc(self,discName):
for api in self.apis:
api.endedBackingUpDisc(discName)
def startedRippingTracks(self,tracks,discName):
for api in self.apis:
api.startedRippingTracks(tracks,discName)
def finishedRippingTracks(self,tracks,discName,ripTracksDict={}):
for api in self.apis:
api.finishedRippingTracks(tracks,discName,ripTracksDict)
def failure(self,discName,errorMessage):
for api in self.apis:
api.failure(discName,errorMessage)
| 1,577 |
system/install.py
|
AntKirill/AI-virtual-assistant-python
| 91 |
2172011
|
import os , subprocess
try :
from system.screen_text import command_sep
except:
pass
def encode_to_bin(msg):
return msg.encode('ascii')
def decode_to_normal(msg):
return msg.decode('ascii')
def command(msg,no = 1):
if no == 1 :
os.system(msg)
else :
result = subprocess.run(list(msg.split()), stdout=subprocess.PIPE)
        out = decode_to_normal(result.stdout)  # CompletedProcess keeps the captured output on .stdout
print('printed:',out,sep='\n')
def install(msg,no = 1):
command_sep()
lt = list(msg.split())
ans = 'Successfully installed:'
for word in lt:
try :
cmd = 'pip install '
command(cmd+word,no)
ans += ' '+word
except:
pass
    if ans == 'Successfully installed:':
        ans = "Can't install anything, sorry sir"
    else:
        ans = 'Done, sir.'
command_sep()
return ans
| 872 |
Alpinklubben.py
|
borgarlie/alpinklubben
| 0 |
2170290
|
# coding=utf-8
from flask import Flask, redirect, url_for, g
from flask.ext.login import LoginManager, current_user
from flask_bootstrap import Bootstrap
from flask_appconfig import AppConfig
from resources.chart import chart_resource
from resources.main import main_resource
from resources.user import user_resource
from resources.shop import shop_resource
from entities.shared import db
from entities.user import User
# global variables
DATABASE = 'test.db'
DEBUG = True
SECRET_KEY = '<KEY>'
app = Flask(__name__)
app.register_blueprint(user_resource)
app.register_blueprint(shop_resource)
app.register_blueprint(main_resource)
app.register_blueprint(chart_resource)
app.secret_key = SECRET_KEY
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'user_resource.login'
login_manager.login_message = u"Du må logge inn for å nå denne siden."
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
@app.before_request
def before_request():
g.user = current_user
@app.route('/')
def index():
return redirect(url_for('main_resource.shop'))
if __name__ == '__main__':
AppConfig(app)
Bootstrap(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + DATABASE
db.init_app(app)
with app.app_context():
db.metadata.create_all(db.engine)
app.config['BOOTSTRAP_SERVE_LOCAL'] = True
app.run(debug=DEBUG)
| 1,425 |
dreamgadgets/urls.py
|
dreamplatform/dream-gadgets
| 0 |
2170782
|
from django.conf.urls.defaults import *
urlpatterns = patterns('dreamgadgets.views',
url(r'^kauniainen_services/?$', 'kauniainen_services', name='kauniainen_services'),
url(r'^dreamschool_services/?$', 'dreamschool_services', name='dreamschool_services'),
url(r'^video_service/?$', 'video_service', name='video_service'),
url(r'^fisholution/?$', 'fisholution', name='fisholution'),
url(r'^moodle/?$', 'moodle', name='moodle'),
url(r'^wiki/?$', 'wiki', name='wiki'),
url(r'^blogi/?$', 'blogi', name='blogi'),
url(r'^unelmasalkku/?$', 'unelmasalkku', name='unelmasalkku'),
url(r'^dropbox/?$', 'dropbox', name='dropbox'),
url(r'^sporttigalaksi/?$', 'sporttigalaksi', name='sporttigalaksi'),
url(r'^pelitehdas/?$', 'pelitehdas', name='pelitehdas'),
url(r'^steinerkoulu_espoo/?$', 'steinerkoulu_espoo', name='steinerkoulu_espoo'),
url(r'^steinerkoulu_espoo_services/?$', 'steinerkoulu_espoo_services', name='steinerkoulu_espoo_services'),
url(r'^steinerkoulu_tampere/?$', 'steinerkoulu_tampere', name='steinerkoulu_tampere'),
url(r'^steinerkoulu_tampere_services/?$', 'steinerkoulu_tampere_services', name='steinerkoulu_tampere_services'),
url(r'^google_calendar/?$', 'google_calendar', name='google_calendar'),
url(r'^google_drive/?$', 'google_drive', name='google_drive'),
url(r'^innoomnia/?$', 'innoomnia', name='innoomnia'),
)
| 1,399 |
pyutil/artnet.py
|
DrLuke/PyreeGPN18
| 0 |
2170274
|
import socket
import threading
__author__ = 'adrian'
class ArtNetReceiver(threading.Thread):
def __init__(self, universe):
threading.Thread.__init__(self)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("", 6454))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.sock = sock
self.clb = None
self.universe = universe
self.inp = [0] * 512
self.start()
def new_data(self, data):
for n in range(0, min(len(data), 512)):
if data[n] != self.inp[n]:
if self.clb is not None:
self.clb(n, data[n])
self.inp[n] = data[n]
def run(self):
self.artnetReceiver()
def artnetReceiver(self):
while 1:
data, addr = self.sock.recvfrom(1024)
if data.startswith("Art-Net\x00\x00\x50\x00\x0e".encode()) and len(data) > 20:
                universe = data[14] + (data[15] << 8)
if universe != self.universe:
continue
                length = (data[16] << 8) + data[17]
dmx = data[18:18 + length]
dmx = [n / 255.0 for n in dmx]
self.new_data(dmx)
#def my_clb(channel, value):
# print(channel, value)
#a = ArtNetReceiver(8)
#a.clb = my_clb
| 1,412 |
src/lib.py
|
cirocosta/mac0417-visao
| 0 |
2171950
|
"""
Basic functionality shared all over the code
"""
import math
import numpy as np
def add_grid(f, delta, color=255):
g = np.copy(f)
g[::delta, :] = color
g[:, ::delta] = color
return g
def add_inner_border(f, size, color=255):
if not size:
        return np.copy(f)  # 'g' is not defined yet at this point; return an untouched copy of the input
g = np.copy(f)
g[:size, :] = color
g[-size:, :] = color
g[:, :size] = color
g[:, -size:] = color
return g
def rotate_90(f):
return f.transpose()
def crop_binary(f):
r, c = f.nonzero()
return f[r[0]:r[-1] + 1, c.min():c.max() + 1]
def gamma_correction(f, gamma):
g = np.copy(f)
t_gamma = normalize(np.arange(256) ** gamma)
return t_gamma[g]
def naive_percentile(arr, p):
"""
Gives the value below which a given percentage
of sorted observations fall.
"""
return np.ceil(len(arr) * p / 100.0)
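# Illustrative note (not in the original): naive_percentile(np.arange(10), 50) returns 5.0,
# i.e. the ceiled rank len(arr) * p / 100 -- the number of sorted observations at or below
# the requested percentile -- rather than the observation value itself.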
def normalize(arr, range=(0, 255), percentile=1):
"""
    Based on minimum and maximum values, performs
a linear interpolation of the values.
"""
arr = np.asarray(arr)
faux = np.ravel(arr).astype(float)
min_val = faux.min()
max_val = faux.max()
lower, upper = range
    if upper == lower:
        g = np.ones(faux.shape) * lower   # degenerate target range: constant output
    elif max_val == min_val:
        g = np.ones(faux.shape) * (upper + lower) / 2.0
    else:
        g = (faux - min_val) * (upper - lower) / (max_val - min_val) + lower
g = g.reshape(arr.shape).astype(arr.dtype)
return g
def normalize_with_clip(f, p):
p1, p2 = np.percentile(f, [p, 100 - p])
f = np.clip(f, p1, p2)
return normalize(f)
def scale(val):
return np.array([
[val,0,0],
[0,val,0],
[0,0,1]]
)
def rotate(theta):
return np.array([
[math.cos(theta),-math.sin(theta),0],
[math.sin(theta),math.cos(theta),0],
[0,0,1]
])
def translate(x,y):
return np.array([
[1,0,x],
[0,1,y],
[0,0,1]
])
def affine(f, T):
"""Applies the affine transformation on a given
image.
' g(r, c) = f(T^{-1}(r,c)) '
Direct mapping:
From image (f) map the values to image (g)
- g(T(r,c)) = f((r,c)) ,
(r,c) in [0,H-1]x[0, W-1]
- con: T(r,c) might not fill every pixel of G
Indirect Mapping
From image (g) search the values in image (f)
- g(r', c') = f(T^{-1}(r', c')) ,
(r', c') in [0, H'-1]x[0, W'-1]
- pro: every pixel of (g) receives a value
"""
h, w = f.shape
y1, x1 = np.indices(f.shape)
g = np.lib.pad(f, 1, 'constant', constant_values=0)
yx1 = np.array([
y1.ravel(), x1.ravel(), np.ones(np.product(f.shape))
])
yx_float = np.dot(np.linalg.inv(T), yx1)
yy = np.rint(yx_float[0]).astype(int)+1
xx = np.rint(yx_float[1]).astype(int)+1
y = np.clip(yy, 0, h+1)
x = np.clip(xx, 0, w+1)
return g[y, x].reshape(h, w)
def equalize_histogram(img):
"""
T(r) = (L-1)/n * (\sum\limits_{i=0}^{r},h(i)),
where h(i) = hist(i).
"""
# np.bincount() returns an array that, contains
# the number of occurences of each integer I,
# indexed by the array returned, i.e, out
# 'absolute' histogram (divide it by N and then
# we have a relative)
bins = np.bincount(img.ravel())
n = img.size
    T = 255.0 / n * np.cumsum(bins)   # float division so the lookup table is not truncated to zero
    T = T.astype(np.uint8)
return T[img]
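# Worked example (illustrative, not in the original): for a 2x2 image [[0, 0], [1, 3]] the
# histogram is h = [2, 1, 0, 1], n = 4, the cumulative sums are [2, 3, 3, 4], and the lookup
# table is T = 255.0/4 * [2, 3, 3, 4] -> [127, 191, 191, 255] after uint8 truncation, so the
# equalized image becomes [[127, 127], [191, 255]].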
def f(t):
return np.exp(-t) * np.cos(2 * np.pi * t)
def main():
import matplotlib.pyplot as plt
from scipy import ndimage
# img = ndimage.imread('../assets/cameraman.tif', flatten=True)
# img = img.astype(np.uint8)
# new_img = affine(img, translate(2,0))
# plt.imshow(new_img, cmap="gray")
# plt.show()
img = np.arange(12).reshape((3,4))
print img
print affine(img, translate(1,0))
if __name__ == '__main__':
main()
| 3,606 |
app/utils.py
|
kennjr/Pitch
| 0 |
2171933
|
from app.models import Comment
def convert_category_to_num(default_category_str):
if default_category_str != "":
if default_category_str == "Promotion pitch":
return 0
elif default_category_str == "Pickup lines":
return 1
elif default_category_str == "Interview pitch":
return 2
elif default_category_str == "Product pitch":
return 3
def convert_lowercase_category_to_num(default_category_str):
if default_category_str != "":
if default_category_str == "promotion_pitch":
return 0
elif default_category_str == "pickup_lines":
return 1
elif default_category_str == "interview_pitch":
return 2
elif default_category_str == "product_pitch":
return 3
def convert_num_to_category(default_category_str):
if default_category_str >= 0:
if default_category_str == 0:
return "Promotion pitch"
elif default_category_str == 1:
return "Pickup lines"
elif default_category_str == 2:
return "Interview pitch"
elif default_category_str == 3:
return "Product pitch"
else:
return "All"
def convert_ids_string_to_array(ids_str: str):
if ids_str != "":
ids_array = []
split_str = ids_str.split(",")
for item in split_str:
if item != "" and item.isnumeric():
ids_array.append(int(item.strip()))
else:
ids_array = []
return ids_array
def convert_ids_array_to_string(ids_array: list):
    ids_str = ""  # local accumulator; avoids a NameError when ids_array is empty
    if ids_array:
        for my_id in ids_array:
            ids_str += f",{my_id}"
    return ids_str
def format_pitches_array(pitches_array):
if pitches_array:
from app.models import Pitch
formatted_pitches_array = []
for pitch in pitches_array:
pitch_txt = pitch.pitch_txt
pitch_category = pitch.pitch_category
pitch_upvt = len(convert_ids_string_to_array(pitch.upvt))
pitch_dwnvt = len(convert_ids_string_to_array(pitch.dwnvt))
pitch_timestamp = pitch.timestamp
pitch_creator_id = pitch.creator_id
pitch_comments = len(convert_ids_string_to_array(pitch.comments))
formatted_pitch = Pitch(id=pitch.id, pitch_txt=pitch_txt, comments=pitch_comments,
timestamp=pitch_timestamp, upvt=pitch_upvt, dwnvt=pitch_dwnvt,
creator_id=pitch_creator_id, pitch_category=pitch_category)
formatted_pitches_array.append(formatted_pitch)
return formatted_pitches_array
else:
return pitches_array
def format_comments_array(comments_array):
if comments_array:
formatted_comments_array = []
for comment in comments_array:
pitch_id = comment.pitch_id
comment_txt = comment.comment_txt
creator_id = comment.creator_id
timestamp = comment.timestamp
formatted_comment = Comment(comment_txt=comment_txt, creator_id=creator_id, pitch_id=pitch_id,
timestamp=timestamp)
formatted_comments_array.append(formatted_comment)
return formatted_comments_array
else:
return []
| 3,388 |
check_home_assistant.py
|
eau-claire-energy-cooperative/utility-scripts
| 1 |
2171191
|
#!/usr/bin/env python3
"""Can check the state of a Home Assistant entity. Use -h for arg descriptions"""
import requests
import json
import argparse
import sys
class HomeAssistant:
url = None
token = None
def __init__(self, url, token):
self.url = url
self.token = token
def _makeRequest(self, endpoint):
headers = {
'Authorization': 'Bearer %s' % self.token,
'content-type': 'application/json',
}
response = requests.get('%s%s' % (self.url,endpoint), headers=headers)
return json.loads(response.text)
def getStates(self, entity = ''):
return self._makeRequest('/api/states/%s' % entity)
def getState(self, entity = ''):
return self.getStates(entity)
def main():
parser = argparse.ArgumentParser(description="Checks the status of a Home Assistant entity")
parser.add_argument('-u', '--url', required=True,type=str, help='Home Assistant URL')
parser.add_argument('-T', '--token', required=True, type=str, help='The long-lived access token for Home Assistant')
parser.add_argument('-E', '--entity', required=True, type=str, help='The entity to check the status of')
parser.add_argument('-c', '--critical', required=False, type=str, default='off', help='The critical state of the sensor, assumes "off" for a binary_sensor')
parser.add_argument('-a', '--attribute', required=False, type=str, help='Attribute to display instead of the state')
args = parser.parse_args(sys.argv[1:])
h = HomeAssistant(args.url,args.token)
aState = h.getState(args.entity)
#display attribute, if set
if(args.attribute is not None):
print('%s' % aState['attributes'][args.attribute])
else:
print('%s: %s' % (aState['attributes']['friendly_name'], aState['state']))
#figure out the exit condition
if(aState['state'] == args.critical):
exit(2)
else:
exit(0)
if __name__ == '__main__':
main()
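# Example invocation (illustrative; the URL, token and entity id below are placeholders):
#   ./check_home_assistant.py -u http://homeassistant.local:8123 \
#       -T <long-lived-token> -E binary_sensor.front_door -c off
# The script prints the entity's friendly name and state, then exits with status 2 when the
# state equals the critical value and 0 otherwise.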
| 1,988 |
testproject/testapp/models.py
|
sheeshmohsin/test_site.com
| 0 |
2170339
|
from django.db import models
from testapp.utils import get_upload_file_path
# Create your models here.
class UploadFile(models.Model):
u_file = models.FileField(upload_to=get_upload_file_path)
def __unicode__(self):
return self.u_file
| 252 |
opencv/sources/modules/video/misc/python/test/test_tracking.py
|
vrushank-agrawal/opencv-x64-cmake
| 0 |
2170398
|
#!/usr/bin/env python
import os
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests, unittest
class tracking_test(NewOpenCVTests):
def test_createTracker(self):
t = cv.TrackerMIL_create()
try:
t = cv.TrackerGOTURN_create()
except cv.error as e:
pass # may fail due to missing DL model files
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
| 450 |
setup.py
|
mleszczy/smallfry
| 15 |
2171846
|
from setuptools import find_packages, setup
from os.path import basename, splitext
from glob import glob
setup(name='smallfry',
version='0.1',
description='Code for smallfry.',
packages=find_packages("src"),
package_dir={"": "src"},
py_modules=[splitext(basename(path))[0] for path in glob("src/*.py")],
url='https://github.com/HazyResearch/smallfry.git',
author='<NAME> / <NAME>',
author_email='<EMAIL>',
license='Apache Version 2',
install_requires = ['numpy',
'torch']
)
| 593 |
src/helpers/dataworks_kafka_producer_perf_test_helper.py
|
dwp/dataworks-behavioural-framework
| 0 |
2171149
|
import base64
import json
from datetime import datetime
from helpers import aws_helper, dataworks_kafka_producer_helper, console_printer
def create_data_file(message_count):
data = []
for journal_id in range(message_count):
element = json_element(journal_id)
data.append(element)
data_str = f"[{','.join(data)}]"
return data_str
def json_element(journal_id):
element = {
"journal_id": str(journal_id),
"flagged": "yes",
"timestamp": datetime.today().strftime("%d%m%Y"),
"modelIdentifier": "v1",
"modelRunDate": "01062021",
}
return json.dumps(element)
def upload_file_to_s3(context, data_obj, data_key, file_counter):
# Encrypt the data
(encrypted_data, iv,) = dataworks_kafka_producer_helper.encrypt_data_aes_ctr(
plaintext_string=str(data_obj), data_key=data_key
)
# Set metadata
metadata = {
"iv": iv,
"ciphertext": context.encryption_encrypted_key,
"datakeyencryptionkeyid": context.encryption_master_key_id,
}
# s3 object key
object_key = f"{context.dataworks_model_output_s3_prefix}/perf_test_data-{str(file_counter)}.json"
console_printer.print_info(f"Uploading file to S3: {object_key}")
# Upload the data to s3
aws_helper.put_object_in_s3_with_metadata(
body=encrypted_data,
s3_bucket=context.dataworks_model_output_s3_bucket,
metadata=metadata,
s3_key=object_key,
)
console_printer.print_info(f"Uploading file to S3: {object_key}...complete")
def get_message_count(context):
instance_id = aws_helper.get_instance_id("dataworks-kafka-producer")
linux_command = "sh /home/ec2-user/kafka/utils/run_get_topic_last_offset.sh"
response = aws_helper.execute_linux_command(instance_id, linux_command)
message_count = response["StandardOutputContent"].rstrip()
if message_count == "":
message_count = "0"
return int(message_count)
| 1,978 |
bin/microspec_emulator.py
|
microspectrometer/microspec
| 0 |
2171435
|
#!/usr/bin/env python
# Copyright 2020 by Chromation, Inc
# All Rights Reserved by Chromation, Inc
"""
Example Usage
=============
NOTE
----
You probably never need to run this, unless you really need to turn on debugging trace on the
emulator in order to see what you're sending to it. Usually, specifying emulator=True is enough
to use the emulator, in the simple and expert API.
Also, this will only work on Mac OSX and Linux systems, not on Windows.
Longwinded example
------------------
dir=`mktemp -d`
socat PTY,raw,echo=0,link=$dir/microspec.software PTY,raw,echo=0,link=$dir/microspec.hardware &
microspec_emulator.py -f $dir/microspec.hardware
# Then connect interface to $dir/microspec.software file
# And stop the socat background command, and clean up the $dir and its contents
Short example
-------------
microspec_emulator.py -s -p
# Then connect interface to filename that the script prints to stdout
"""
def main():
import sys
import argparse
parser = argparse.ArgumentParser(
description="Create software instance of Chromation hardware for use with testing access software")
parser.add_argument("-v", "--verbose", help="Print verbose trace", action="count", default=0)
parser.add_argument("-d", "--debug", help="Internal debugging", action="count", default=0)
parser.add_argument("-p", "--print", help="Print socket to connect to", action="count", default=0)
parser.add_argument("-t", "--timeout", help="Timeout in (partial float) seconds", nargs=1, default=[0],
action="store", type=float)
parser.add_argument("-s", "--spawn", help="Spawn socat instance?", action="count", default=False)
parser.add_argument("-f", "--file", help="File to use as a pipe - default create anew and print location",
default=None)
args = parser.parse_args()
from microspeclib.internal.emulator import MicroSpecEmulator
from microspeclib.internal.stream import MicroSpecEmulatedStream
from microspeclib.logger import debug
import logging
log = logging.getLogger("microspec_emulator")
if args.verbose or args.debug:
debug( args.debug>0 )
log.setLevel( logging.DEBUG )
serial = MicroSpecEmulatedStream(hardware=args.file, timeout=args.timeout[0], socat=args.spawn, fork=False)
emulator = MicroSpecEmulator()
if args.file is None or args.print:
print("%s"%(serial.software))
# Note: necessary so that the emulator object that spawns this program and waits for some input
# on stdout definitely gets data, otherwise it gets stuck in the buffer and never triggers select:
sys.stdout.flush()
while True:
log.info("Waiting for command...")
command = serial.receiveCommand()
if not command:
log.info("No command found")
continue
log.info("Received command %s"%(command))
reply = emulator.process(command)
if reply:
log.info("Sending reply %s"%(reply))
for packet in reply:
serial.sendReply(packet)
else:
log.info("No reply to send")
if __name__ == "__main__":
main()
| 3,297 |
vampire/liberec.py
|
honzasp/vampire
| 0 |
2171835
|
import re
from .helpers import get_html, inner_text, BLOOD_TEXT_TO_TYPE
UUID = "166ca624cc61984ab44e0397b9586c9b"
SHORT_ID = "liberec"
URL = "https://www.nemlib.cz/darovani-krve/"
NAME = "<NAME>"
BLOOD_LEVEL_TO_STATUS = ["urgent", "urgent", "normal", "normal", "full"]
async def scrape(client):
doc = await get_html(client, URL)
blood_statuses = {}
for div in doc.cssselect("#blood-supplies .type"):
type_text = inner_text(div.cssselect("div.lablel")[0]).strip()
blood_type = BLOOD_TEXT_TO_TYPE[type_text]
level = int(re.search(r'supply-(\d)', div.get("class"))[1])
blood_status = BLOOD_LEVEL_TO_STATUS[level]
blood_statuses[blood_type] = blood_status
return blood_statuses
| 734 |
admin_ui.py
|
Pdocw/Database
| 0 |
2170765
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'admin_ui.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual modifications made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from course_inquiry_ui import Ui_Window_course_inquiry
from score_inquiry_ui import Ui_Window_score_inquiry
from stuinfo_inquiry_ui import Ui_Window_stuinfo_inquiry
from score_add_ui import Ui_Window_score_add
from score_delete_ui import Ui_Window_score_delete
from score_modify_ui import Ui_Window_score_modify
from stuinfo_add_ui import Ui_Window_stuinfo_add
from stuinfo_delete_ui import Ui_Window_stuinfo_delete
from stuinfo_modify_ui import Ui_Window_stuinfo_modify
from help_ui import Ui_Window_help
class Ui_Window_admin(object):
def setupUi(self, Window_admin):
Window_admin.setObjectName("Window_admin")
Window_admin.resize(1280, 720)
Window_admin.setMinimumSize(QtCore.QSize(1280, 720))
Window_admin.setMaximumSize(QtCore.QSize(1280, 720))
self.icon = QtGui.QIcon()
self.icon.addPixmap(QtGui.QPixmap(":/icon/res/icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.centralwidget = QtWidgets.QWidget(Window_admin)
self.centralwidget.setObjectName("centralwidget")
Window_admin.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(Window_admin)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1280, 26))
self.menubar.setObjectName("menubar")
self.action_course = QtWidgets.QAction(Window_admin)
self.action_course.setObjectName("action_course")
self.action_score = QtWidgets.QAction(Window_admin)
self.action_score.setObjectName("action_score")
self.action_stuinfo = QtWidgets.QAction(Window_admin)
self.action_stuinfo.setObjectName("action_stuinfo")
self.menu_stuinfo = QtWidgets.QMenu(self.menubar)
self.menu_stuinfo.setObjectName("menu_stuinfo")
self.menu_score = QtWidgets.QMenu(self.menubar)
self.menu_score.setObjectName("menu_score")
self.action_help = QtWidgets.QAction(Window_admin)
self.action_help.setObjectName("action_help")
self.action_exit = QtWidgets.QAction(Window_admin)
self.action_exit.setObjectName("action_exit")
Window_admin.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(Window_admin)
self.statusbar.setObjectName("statusbar")
Window_admin.setStatusBar(self.statusbar)
self.action_score_add = QtWidgets.QAction(Window_admin)
self.action_score_add.setObjectName("action_score_add")
self.action_score_modify = QtWidgets.QAction(Window_admin)
self.action_score_modify.setObjectName("action_score_modify")
self.action_score_delete = QtWidgets.QAction(Window_admin)
self.action_score_delete.setObjectName("action_score_delete")
self.action_stuinfo_add = QtWidgets.QAction(Window_admin)
self.action_stuinfo_add.setObjectName("action_stuinfo_add")
self.action_stuinfo_modify = QtWidgets.QAction(Window_admin)
self.action_stuinfo_modify.setObjectName("action_stuinfo_modify")
self.action_stuinfo_delete = QtWidgets.QAction(Window_admin)
self.action_stuinfo_delete.setObjectName("action_stuinfo_delete")
self.menu_stuinfo.addAction(self.action_stuinfo_add)
self.menu_stuinfo.addAction(self.action_stuinfo_modify)
self.menu_stuinfo.addAction(self.action_stuinfo_delete)
self.menu_score.addAction(self.action_score_add)
self.menu_score.addAction(self.action_score_modify)
self.menu_score.addAction(self.action_score_delete)
self.menubar.addAction(self.action_course)
self.menubar.addAction(self.action_score)
self.menubar.addAction(self.action_stuinfo)
self.menubar.addAction(self.menu_score.menuAction())
self.menubar.addAction(self.menu_stuinfo.menuAction())
self.menubar.addAction(self.action_help)
self.menubar.addAction(self.action_exit)
self.retranslateUi(Window_admin)
QtCore.QMetaObject.connectSlotsByName(Window_admin)
Window_admin.setWindowIcon(self.icon)
        # Log out
self.action_exit.triggered.connect(Window_admin.close)
        # Navigate to the course offering inquiry view
self.action_course.triggered.connect(lambda: self.go_course_inquiry(Window_admin))
self.action_score.triggered.connect(lambda: self.go_score_inquiry(Window_admin))
self.action_stuinfo.triggered.connect(lambda: self.go_stuinfo_inquiry(Window_admin))
self.action_score_add.triggered.connect(lambda: self.go_score_add(Window_admin))
self.action_score_delete.triggered.connect(lambda: self.go_score_delete(Window_admin))
self.action_score_modify.triggered.connect(lambda: self.go_score_modify(Window_admin))
self.action_help.triggered.connect(lambda: self.go_help(Window_admin))
self.action_stuinfo_add.triggered.connect(lambda: self.go_stuinfo_add(Window_admin))
self.action_stuinfo_delete.triggered.connect(lambda: self.go_stuinfo_delete(Window_admin))
self.action_stuinfo_modify.triggered.connect(lambda: self.go_stuinfo_modify(Window_admin))
def retranslateUi(self, Window_admin):
_translate = QtCore.QCoreApplication.translate
Window_admin.setWindowTitle(_translate("Window_admin", "管理员系统"))
self.action_course.setText(_translate("Window_admin", "开课情况查询"))
self.action_score.setText(_translate("Window_admin", "学生成绩查询"))
self.action_stuinfo.setText(_translate("Window_admin", "学生信息查询"))
self.menu_stuinfo.setTitle(_translate("Window_admin", "学生记录维护"))
self.menu_score.setTitle(_translate("Window_admin", "学生成绩维护"))
self.action_help.setText(_translate("Window_admin", "帮助"))
self.action_exit.setText(_translate("Window_admin", "退出登录"))
self.action_score_add.setText(_translate("Window_admin", "添加成绩记录"))
self.action_score_modify.setText(_translate("Window_admin", "修改成绩记录"))
self.action_score_delete.setText(_translate("Window_admin", "删除成绩记录"))
self.action_stuinfo_add.setText(_translate("Window_admin", "添加学生记录"))
self.action_stuinfo_modify.setText(_translate("Window_admin", "修改学生记录"))
self.action_stuinfo_delete.setText(_translate("Window_admin", "删除学生记录"))
def go_course_inquiry(self, Window_admin):
course_inquiry = Ui_Window_course_inquiry()
Maintain = QtWidgets.QMainWindow()
course_inquiry.setupUi(Maintain)
Window_admin.setCentralWidget(Maintain)
def go_score_inquiry(self, Window_admin):
score_inquiry = Ui_Window_score_inquiry()
Maintain = QtWidgets.QMainWindow()
score_inquiry.setupUi(Maintain)
Window_admin.setCentralWidget(Maintain)
def go_stuinfo_inquiry(self, Window_admin):
stuinfo_inquiry = Ui_Window_stuinfo_inquiry()
Maintain = QtWidgets.QMainWindow()
stuinfo_inquiry.setupUi(Maintain)
Window_admin.setCentralWidget(Maintain)
def go_score_add(self, Window_admin):
score_add = Ui_Window_score_add()
Maintain = QtWidgets.QMainWindow()
score_add.setupUi(Maintain)
Window_admin.setCentralWidget(Maintain)
def go_score_delete(self, Window_admin):
score_delete = Ui_Window_score_delete()
Maintain = QtWidgets.QMainWindow()
score_delete.setupUi(Maintain)
Window_admin.setCentralWidget(Maintain)
def go_score_modify(self, Window_admin):
score_modify = Ui_Window_score_modify()
Maintain = QtWidgets.QMainWindow()
score_modify.setupUi(Maintain)
Window_admin.setCentralWidget(Maintain)
def go_help(self, Window_admin):
help = Ui_Window_help()
Maintain = QtWidgets.QMainWindow()
help.setupUi(Maintain)
Window_admin.setCentralWidget(Maintain)
def go_stuinfo_add(self, Window_admin):
stuinfo_add = Ui_Window_stuinfo_add()
Maintain = QtWidgets.QMainWindow()
stuinfo_add.setupUi(Maintain)
Window_admin.setCentralWidget(Maintain)
def go_stuinfo_delete(self, Window_admin):
stuinfo_delete = Ui_Window_stuinfo_delete()
Maintain = QtWidgets.QMainWindow()
stuinfo_delete.setupUi(Maintain)
Window_admin.setCentralWidget(Maintain)
def go_stuinfo_modify(self, Window_admin):
stuinfo_modify = Ui_Window_stuinfo_modify()
Maintain = QtWidgets.QMainWindow()
stuinfo_modify.setupUi(Maintain)
Window_admin.setCentralWidget(Maintain)
| 8,719 |
things/views.py
|
spAm25/korobasy
| 0 |
2170647
|
from things.models import Thing,ThingInstance,ThingCategory
from things.serializers import ThingSerializer,ThingInstanceSerializer,ThingCategorySerializer
from rest_framework.viewsets import ModelViewSet
from rest_framework.permissions import IsAuthenticated
from drf_spectacular import utils
@utils.extend_schema(
tags=['Thing']
)
class ThingViewset(ModelViewSet):
queryset = Thing.objects.all()
serializer_class = ThingSerializer
permission_classes = (IsAuthenticated,)
@utils.extend_schema(
tags=['Thing Instance']
)
class ThingInstanceViewset(ModelViewSet):
queryset = ThingInstance.objects.all()
serializer_class = ThingInstanceSerializer
permission_classes = (IsAuthenticated,)
@utils.extend_schema(
tags=['Thing Category']
)
class ThingCategoryViewset(ModelViewSet):
queryset = ThingCategory.objects.all()
serializer_class = ThingCategorySerializer
permission_classes = (IsAuthenticated,)
| 950 |
dcloud/restful/views.py
|
kairos03/2018-1-cloud-dropbox
| 1 |
2171505
|
from django.http import Http404
from django.contrib.auth.decorators import login_required
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
import os
from restful import s3_interface
from restful.models import File
from restful.serializers import FileSerializer
class FileList(APIView):
"""
    List files, upload a file, or create a directory.
"""
authentication_classes = (SessionAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
"""
list files or view detail
"""
def get(self, request, path="/", format=None):
user = request.user
data = s3_interface.list_path(s3_interface.BUCKET, user.username, path)
return Response(data)
"""
upload file
"""
def post(self, request, path="/", format=None):
# file upload
# upload to server
file_serializer = FileSerializer(data=request.data)
if file_serializer.is_valid():
file_serializer.save()
# upload to s3
file_path = '.' + file_serializer.data.get('file')
user = request.user
data = s3_interface.upload_file(s3_interface.BUCKET, user.username, file_path, path+file_path.split('/')[-1])
if os.path.exists(file_path):
os.remove(file_path)
# TODO upload check
# TODO remove local file
return Response(file_serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(file_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(file_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
"""
make directory
"""
def put(self, request, path="/", format=None):
user = request.user
data = s3_interface.make_directory(s3_interface.BUCKET, user.username, path)
return Response(data, status=status.HTTP_201_CREATED)
class FileDetail(APIView):
"""
Download or delete a file instance.
"""
authentication_classes = (SessionAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
def get(self, request, path="/", format=None):
# download file from s3
file = 'media/'+path.split('/')[-1]
user = request.user
s3_interface.download_file(s3_interface.BUCKET, user.username, file, path)
# TODO error
return Response({'file': file})
def delete(self, request, path="/", format=None):
user = request.user
result = s3_interface.delete_path(s3_interface.BUCKET, user.username, path)
return Response(result)
class FileCopyMove(APIView):
"""
    Copy or move a file instance.
"""
authentication_classes = (SessionAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
#TODO is folder move, copy well?
# move
def post(self, request, old_path, new_path, format=None):
user = request.user
if request.data.get('method') == 'mv':
s3_interface.move_file(s3_interface.BUCKET, user.username, old_path, new_path)
elif request.data.get('method') == 'cp':
s3_interface.copy_file(s3_interface.BUCKET, user.username, old_path, new_path)
else:
return Response({'stats': 'bad_request'}, status=status.HTTP_400_BAD_REQUEST)
return Response({'old_path': old_path, 'new_path': new_path})
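# Illustrative client sketch (not part of this module): how the endpoints above might be
# called with ``requests``. The host, URL prefix and credentials are assumptions, since the
# project's urlconf is not shown in this file.
#
# import requests
# session = requests.Session()
# session.auth = ('user', 'password')                       # BasicAuthentication
# session.get('http://localhost:8000/files/docs/')          # FileList.get  -> list a path
# session.put('http://localhost:8000/files/docs/new_dir/')  # FileList.put  -> make a directory
# session.post('http://localhost:8000/files/move/docs/a.txt/docs/b.txt',
#              data={'method': 'mv'})                        # FileCopyMove.post -> move a file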
| 3,614 |
mincut.py
|
quake0day/oj
| 0 |
2171139
|
import sys
class Solution:
# @param s, a string
# @return an integer
def minCut(self, s):
# write your code here
n = len(s)
f = [0] * (n+1)
isPalindrome = self.getIsPalndrome(s)
if len(s) < 1:
return 0
if len(s) == 1:
return 0
if len(s) == 2:
if s[0] != s[1]:
return 1
else:
return 0
for i in xrange(1, n+1):
f[i] = sys.maxint
for j in xrange(1, i+1):
if isPalindrome[i - j][i - 1] and f[i-j] != sys.maxint:
f[i] = min(f[i], f[i-j]+1)
return f[-1] -1
def getIsPalndrome(self,s):
n = len(s)
isPalindrome = [[False for col in range(n)] for row in range(n)]
for i in xrange(n):
isPalindrome[i][i] = True
for i in xrange(n-1):
isPalindrome[i][i + 1] = (s[i] == s[i+1])
for length in xrange(2, n):
for start in xrange(n):
if start + length < n:
isPalindrome[start][start + length] = isPalindrome[start + 1][start + length - 1] and (s[start] == s[start + length])
return isPalindrome
def isPalindrome(self, s):
return str(s) == str(s)[::-1]
a = Solution()
print a.minCut("cdd")
#a.getIsPalndrome("aabbs")
| 1,367 |
solved/problem_19.py
|
GustasG/project-euler
| 0 |
2168981
|
from datetime import date
def is_sunday(year: int, month: int, day: int) -> bool:
    # date.weekday() is 6 for Sunday (Monday is 0), which is what the count below needs
    return date(year, month, day).weekday() == 6
def sunday_count(year_begin: int, year_end: int) -> int:
    return sum(is_sunday(year, month, 1)
for month in range(1, 13)
for year in range(year_begin, year_end + 1))
def main() -> None:
c = sunday_count(1901, 2000)
print(c)
if __name__ == '__main__':
main()
| 447 |
python/context.py
|
bluefoot/addtoplaylist
| 0 |
2171598
|
import sys
import addtoplaylist
if __name__ == '__main__':
path = sys.listitem.getPath()
label = sys.listitem.getLabel()
if path and label:
addtoplaylist.add_to_playlist(path, label)
| 205 |
matrix_registration/app.py
|
williamkray/matrix-registration
| 0 |
2170813
|
import logging
import logging.config
import click
import json
from flask import Flask
from flask.cli import FlaskGroup, pass_script_info
from flask_limiter import Limiter
from flask_limiter.util import get_ipaddr
from flask_cors import CORS
from waitress import serve
from . import config
from . import tokens
from .tokens import db
import os
def create_app(testing=False):
app = Flask(__name__)
app.testing = testing
with app.app_context():
from .api import api
app.register_blueprint(api)
return app
@click.group(cls=FlaskGroup, add_default_commands=False, create_app=create_app, context_settings=dict(help_option_names=['-h', '--help']))
@click.option("--config-path", default="config.yaml", help='specifies the config file to be used')
@pass_script_info
def cli(info, config_path):
"""a token based matrix registration app"""
config.config = config.Config(config_path)
logging.config.dictConfig(config.config.logging)
app = info.load_app()
with app.app_context():
app.config.from_mapping(
SQLALCHEMY_DATABASE_URI=config.config.db.format(cwd=f"{os.getcwd()}/"),
SQLALCHEMY_TRACK_MODIFICATIONS=False
)
db.init_app(app)
db.create_all()
tokens.tokens = tokens.Tokens()
@cli.command("serve", help="start api server")
@pass_script_info
def run_server(info):
app = info.load_app()
Limiter(
app,
key_func=get_ipaddr,
default_limits=config.config.rate_limit
)
if config.config.allow_cors:
CORS(app)
serve(app, host=config.config.host, port=config.config.port, url_prefix=config.config.base_url)
@cli.command("generate", help="generate new token")
@click.option("-o", "--one-time", is_flag=True, help="make token one-time-useable")
@click.option("-e", "--expires", type=click.DateTime(formats=["%Y-%m-%d"]), default=None, help='expire date: in ISO-8601 format (YYYY-MM-DD)')
def generate_token(one_time, expires):
token = tokens.tokens.new(ex_date=expires, one_time=one_time)
print(token.name)
@cli.command("status", help="view status or disable")
@click.option("-s", "--status", default=None, help="token status")
@click.option("-l", "--list", is_flag=True, help="list tokens")
@click.option("-d", "--disable", default=None, help="disable token")
def status_token(status, list, disable):
if disable:
if tokens.tokens.disable(disable):
print("Token disabled")
else:
print("Token couldn't be disabled")
if status:
token = tokens.tokens.get_token(status)
if token:
print(f"This token is{' ' if token.valid else ' not '}valid")
print(json.dumps(token.toDict(), indent=2))
else:
print("No token with that name")
if list:
print(tokens.tokens)
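# Example session (illustrative; assumes the click group above is exposed as a console
# script named ``matrix_registration`` and that the config path below exists):
#   matrix_registration --config-path /etc/matrix-registration/config.yaml generate -o
#   matrix_registration --config-path /etc/matrix-registration/config.yaml status -l
#   matrix_registration --config-path /etc/matrix-registration/config.yaml serve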
| 2,841 |
django_project/bucket_list/migrations/0001_initial.py
|
ugauniyal/Bucket-List-App
| 0 |
2171832
|
# Generated by Django 3.1.4 on 2020-12-17 12:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('creator', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64)),
('complete', models.BooleanField(default=False)),
('date_created', models.DateField(auto_now_add=True)),
('category', models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='bucket_list.category')),
('user', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 1,604 |
project_visu/SupermarketClass.py
|
JinglinLi/oop_supermarket_simulation
| 0 |
2170921
|
"""
Supermarket Class including visualization.
"""
import cv2
import time
import pandas as pd
import numpy as np
from SupermarketMapClass import SupermarketMap
from CustomerClass import Customer
CUSTOMER_PER_MINUTE = 1 # Poisson distribution
MAX_N_CUSTOMERS = 6 # restrict maximal number of customers in supermarket
T_RESOLUTION = '1 min'
class Supermarket:
"""manages multiple Customer instances that are currently in the market including."""
def __init__(self):
self.customers = [] # a list of Customer objects
self.minutes = 0
self.last_id = 0
def __repr__(self):
return f'It is {self.minutes} now, and there are {len(self.customers)} customers in the supermarket.'
def update_time(self, current_time):
"""update current time : pandas datetime."""
self.minutes = current_time
return self.minutes
def add_new_customers(self):
"""add new customers."""
        if len(self.customers) < MAX_N_CUSTOMERS: # restrict maximal n customers in supermarket
n_add = np.random.poisson(lam=CUSTOMER_PER_MINUTE)
for i in range(n_add):
customer_id = self.last_id # last customer id
one_customer = Customer(str(customer_id), 'entrance', self) # instanciate one customer
self.customers.append(one_customer) # add one customer
self.last_id += 1 # update last customer id
def remove_exiting_customers(self):
"""removes every customer that is not active any more."""
for one_customer in self.customers:
if one_customer.is_active() is False:
self.customers.remove(one_customer)
def next_minute(self):
"""propagates all customers to the next state."""
for one_customer in self.customers:
one_customer.next_state_rowcol() # propagates state and location
one_customer.path_between_states() # calculate path
# resample path into 1s resolution
t_all = pd.date_range(self.minutes, self.minutes + pd.Timedelta('1 min'),
periods=len(one_customer.path))
path_orig = pd.DataFrame(one_customer.path, index=t_all, columns=['row','col'])
path_resample = path_orig.resample('s').ffill()
one_customer.path_row_col = path_resample.values # [[row, col], [row, col], ...]
def draw_one_min(self):
"""draw the movements of all customers within one minute in 1 second resolution."""
background = np.zeros((500, 700, 3), np.uint8) # black background
frame = background.copy()
marketmap = SupermarketMap() # instanciate supermarket map
for i_sec in range(60):
marketmap.draw(frame) # draw supermarket
for customer in self.customers:
customer.draw_sec(frame, i_sec)
cv2.imshow("frame", frame)
key = cv2.waitKey(50) # ms
def print_customers(self):
"""print all customers : current time, customer_id, state."""
for one_customer in self.customers:
print(self.minutes, ',', one_customer.name, ',', one_customer.state_after)
def simulate(self, t_start, t_end):
"""run supermarket simulation."""
for t in pd.date_range(t_start, t_end, freq=T_RESOLUTION):
self.update_time(t)
self.add_new_customers()
self.next_minute()
self.draw_one_min()
self.print_customers() # print : time, id, state
print(self) # print : time and number of customers in supermarket
self.remove_exiting_customers()
cv2.destroyAllWindows()
if __name__ == "__main__":
rewe = Supermarket()
rewe.simulate('2021-09-17 07:00:00', '2021-09-17 07:15:00')
| 3,797 |
trajopt/algos/trajopt_base.py
|
AllanYangZhou/trajopt
| 0 |
2171253
|
"""
Base trajectory class
"""
import numpy as np
import imageio
class Trajectory:
def __init__(self, env, H=32, seed=123):
self.env, self.seed = env, seed
self.n, self.m, self.H = env.observation_dim, env.action_dim, H
# following need to be populated by the trajectory optimization algorithm
self.sol_state = []
self.sol_act = []
self.sol_reward = []
self.sol_obs = []
self.env.reset_model(seed=self.seed)
self.sol_state.append(self.env.get_env_state())
self.sol_obs.append(self.env._get_obs())
self.act_sequence = np.zeros((self.H, self.m))
def update(self, paths):
"""
This function should accept a set of trajectories
and must update the solution trajectory
"""
raise NotImplementedError
def animate_rollout(self, t, act):
"""
This function starts from time t in the solution trajectory
and animates a given action sequence
"""
self.env.set_env_state(self.sol_state[t])
for k in range(act.shape[0]):
try:
self.env.env.env.mujoco_render_frames = True
except AttributeError:
self.env.render()
self.env.set_env_state(self.sol_state[t+k])
self.env.step(act[k])
print(self.env.env_timestep)
print(self.env.real_step)
try:
self.env.env.env.mujoco_render_frames = False
except:
pass
def animate_result(self):
self.env.reset(self.seed)
self.env.set_env_state(self.sol_state[0])
for k in range(len(self.sol_act)):
self.env.env.env.mujoco_render_frames = True
self.env.render()
self.env.step(self.sol_act[k])
self.env.env.env.mujoco_render_frames = False
def render_result(self, fname):
self.env.reset(self.seed)
self.env.set_env_state(self.sol_state[0])
vid_writer = imageio.get_writer(fname, mode="I", fps=20)
solved = False
for k in range(len(self.sol_act)):
curr_frame = self.env.env.sim.render(width=640, height=480, mode='offscreen', device_id=0)
vid_writer.append_data(curr_frame[::-1,:,:])
_, _, _, info = self.env.step(np.clip(self.sol_act[k], -0.999, .999))
solved = solved or info["solved"]
vid_writer.close()
return solved
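# Illustrative sketch (not from this module): a minimal subclass showing the expected shape
# of an ``update`` implementation. The structure of ``paths`` (a list of dicts holding
# "actions" and "rewards" arrays) is an assumption used only for illustration.
#
# class GreedyTrajectory(Trajectory):
#     def update(self, paths):
#         best = max(paths, key=lambda p: np.sum(p["rewards"]))
#         self.act_sequence = best["actions"][:self.H]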
| 2,448 |
src/similarity.py
|
vinitra/entity-matcher
| 2 |
2168785
|
import numpy as np
import tensorflow_hub as hub
import pandas as pd
from dataclasses import dataclass
model = None
blocking_file = "..\data\X2_blocking_keys.csv"
clusters_file = "..\data\clustered_x2.csv"
@dataclass(frozen=True)
class Record:
"""
Data structure representing record titles.
"""
id: int
title: str
blocking_key: str
def read_file(blocked_table=None):
"""
Read records from file preprocessed with blocking keys.
"""
if blocked_table:
records = []
for id, title, blockingKey in blocked_table.items:
rec = Record(id, title, blockingKey)
records.append(rec)
else:
with open(blocking_file) as f:
content = f.readlines()
records = []
firstLine = True
for line in content:
if (firstLine):
firstLine = False
continue
attrs = line.split(",")
id = attrs[1]
title = attrs[14]
# some times we have "\n" at the end of the blocking key
blockingKey = attrs[17]
blockingKey = blockingKey.split("\n")[0]
rec = Record(id, title, blockingKey)
# print(rec.id + rec.blockingKey)
records.append(rec)
return records
# encoder-based similarity functions
def cosine_similarity(r1_encoded, r2_encoded):
"""
Compute cosine similarity between two encoded vectors.
"""
return np.dot(r1_encoded, r2_encoded) / (np.linalg.norm(r1_encoded) * np.linalg.norm(r2_encoded))
def calculate_encoding(r1_id, r2_id, r1_encoded, r2_encoded):
"""
Encode row titles and calculate cosine distance between encodings.
"""
# distance between the same row is 0
if r1_id == r2_id:
return 0
    sim = cosine_similarity(r1_encoded, r2_encoded)
return sim
def prepare_encoder():
"""
Prepare Universal Sentence Encoder model. Takes a few minutes to run.
"""
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
model = hub.load(module_url)
print("module %s loaded" % module_url)
return model
def prepare_records(records):
    global model  # reuse the module-level encoder rather than shadowing it with a local
    if not model:
        model = prepare_encoder()
    for r in records:
        # Record is a frozen dataclass, so attach the embedding via object.__setattr__
        object.__setattr__(r, 'tokenized', model([r.title])[0])
    return records
def generate_similarity_table(blocking_table, distance_metric="jaccard"):
"""
Generate table of similarities based on distance_metric.
| r1 | r2 | r3 | r4 ...
----------------------------
r1 | 0 | 0.7 | 0.3 | 0.4 ...
r2 | 0.4 | 0 | 0.6 | 0.3 ...
r3 | 0.5 | 0.2 | 0 | 0.1 ...
r4 | 0.1 | 0.3 | 0.1 | 0 ...
...
"""
records = read_file(blocking_table)
distance_function = None
distance_table = []
titles = []
if distance_metric == "jaccard":
distance_function = calculate_jaccard
elif distance_metric == "encoding":
distance_function = calculate_encoding
else:
raise Exception('distance_metric should be either "jaccard" \
or "encoding" to use the generate_similarity_table function.')
    if distance_metric == "encoding":
        records = prepare_records(records)  # the sentence encoder is only needed for this metric
    for r1 in records:
        distance_row = []
        for r2 in records:
            if distance_metric == "jaccard":
                distance = distance_function(r1, r2)
            else:
                # the encoding metric compares the pre-computed sentence embeddings
                distance = distance_function(r1.id, r2.id, r1.tokenized, r2.tokenized)
            distance_row.append(distance)
        distance_table.append(distance_row)
        titles.append(r1.title)
distance_df = pd.DataFrame(distance_table, columns=titles)
distance_df.set_index(titles)
return distance_df
# jaccard similarity functions
def find_clusters():
"""
Identify clusters from the ground truth (Y) dataset.
Used in run_jaccard_analysis function.
"""
with open(clusters_file) as f:
content = f.readlines()
clusters = []
firstLine = True
cluster = set()
for line in content:
if (firstLine):
firstLine = False
continue
attrs = line.split(",")
if (attrs[0] == "-----"):
clusters.append(cluster)
cluster = set()
continue
cluster.add(attrs[0])
return clusters
def calculate_jaccard(r1, r2):
"""
Calculate jaccard similarity for 2 records. Return distance.
"""
# if the records are the same, return 0
if r1.id == r2.id:
return 0
    tokenizedBlKey1 = tokenize(r1.blocking_key)
    tokenizedBlKey2 = tokenize(r2.blocking_key)
# if the records have different blocking key return 0
if len(tokenizedBlKey1) != len(tokenizedBlKey2):
return 0
for token in tokenizedBlKey1:
if not token in tokenizedBlKey2:
return 0
# otherwise tokenize titles and find their jaccard
tokenizedTitle1 = tokenize(r1.title)
tokenizedTitle2 = tokenize(r2.title)
if (len(tokenizedTitle1) > len(tokenizedTitle2)):
return calc_jac_titles(tokenizedTitle1, tokenizedTitle2)
return calc_jac_titles(tokenizedTitle2, tokenizedTitle1)
# title1 has more tokens than 2
def calc_jac_titles(title1, title2):
"""
Identify titles for jaccard similarity (used in calculate_jaccard)
"""
countIntersection = 0
for token in title2:
if token in title1:
countIntersection += 1
countUnion = len(title1) + len(title2) - countIntersection
return countIntersection / countUnion
def tokenize(s):
"""
Tokenize and preprocess strings
"""
s = s.replace("/", " ")
tokens = s.split()
return tokens
def run_jaccard_analysis():
"""
Create a jaccard.csv file by computing jaccard distance among rows inside a cluster
and rows outside the cluster for analysis of jaccard metric.
"""
records = read_file()
clusters = find_clusters()
resultString = "id,avgInnerSim,maxInnerSim,minInnerSim,avgOuterSim,maxOuterSim,minOuterSim\n"
for r1 in records:
cluster = set()
maxInnerSim = 0
sumInnerSim = 0
minInnerSim = 1
maxOuterSim = 0
sumOuterSim = 0
minOuterSim = 1
# find the cluster of r1
for c in clusters:
if r1.id in c:
cluster = c
break
for r2 in records:
if r1.id != r2.id:
jac = calculate_jaccard(r1, r2)
if r2.id in cluster: # if records in the same cluster
# update inner similarities
sumInnerSim += jac
if (jac > maxInnerSim):
maxInnerSim = jac
if (jac < minInnerSim):
minInnerSim = jac
else: # if records in different clusters
# update outer similarities
sumOuterSim += jac
if (jac > maxOuterSim):
maxOuterSim = jac
if (jac < minOuterSim):
minOuterSim = jac
avgInnerSim = sumInnerSim / (len(cluster) - 1)
avgOuterSim = sumOuterSim / (len(records) - len(cluster) - 1)
resultString += (r1.id + "," + str(avgInnerSim) + "," + str(maxInnerSim) + "," + str(minInnerSim) + "," + str(
avgOuterSim) + "," + str(maxOuterSim) + "," + str(minOuterSim) + "\n")
f = open("jaccard.csv", "w")
f.write(resultString)
f.close()
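if __name__ == "__main__":
    # Minimal illustrative check (not part of the original pipeline): two records sharing a
    # blocking key whose titles overlap in 2 of 4 distinct tokens give 2/4 = 0.5.
    _r1 = Record("1", "acer aspire laptop", "acer")
    _r2 = Record("2", "acer aspire notebook", "acer")
    print(calculate_jaccard(_r1, _r2))  # expected: 0.5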
| 7,454 |
eval_info.py
|
Correct-Syntax/PyNodeEval
| 0 |
2171518
|
class EvalInfo(object):
"""
Evaluate node properties and parameters
"""
def __init__(self, node):
        if node is None:
raise TypeError
self.node = node
def EvaluateParameter(self, name):
"""
Evaluates the value of a parameter.
"""
param = self.node._parameters[name]
if param.binding:
# Evaluate the next node
info = EvalInfo(param.binding)
return param.binding.EvaluateNode(info)
return param.value
def EvaluateProperty(self, name):
"""
Evaluates the value of a property.
"""
prop = self.node._properties[name]
return prop.value
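if __name__ == "__main__":
    # Minimal illustrative sketch (these fake classes are not part of the real node API):
    # shows how EvaluateParameter follows a binding to another node and otherwise falls
    # back to the parameter's stored value.
    class _FakeParam(object):
        def __init__(self, value, binding=None):
            self.value = value
            self.binding = binding

    class _FakeNode(object):
        def __init__(self, params):
            self._parameters = params
            self._properties = {}

        def EvaluateNode(self, info):
            # pretend this upstream node evaluates to a constant
            return 42

    upstream = _FakeNode({})
    node = _FakeNode({'a': _FakeParam(1), 'b': _FakeParam(0, binding=upstream)})
    info = EvalInfo(node)
    print(info.EvaluateParameter('a'))  # 1  (plain value)
    print(info.EvaluateParameter('b'))  # 42 (delegated to the bound node)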
| 707 |
build.py
|
dragazo/PyBlox
| 0 |
2171289
|
import aiohttp
import asyncio
import certifi
import ssl
import re
import meta
from string import Template
ssl_context = ssl.create_default_context(cafile=certifi.where())
with open('template/init.py', 'r') as f:
INIT_TEMPLATE = Template(f.read())
with open('template/service-class.py', 'r') as f:
SERVICE_CLASS_TEMPLATE = Template(f.read())
with open('template/client-class.py', 'r') as f:
CLIENT_CLASS_TEMPLATE = Template(f.read())
FN_NAME_SPECIAL_RULES = { # truly special cases go here
'PhoneIoT': 'phone_iot',
}
FN_NAME_KEYWORD_FIXES = { # in case we run into a reserved word
'from': '_from',
}
def clean_fn_name(name: str) -> str:
if name in FN_NAME_SPECIAL_RULES:
return FN_NAME_SPECIAL_RULES[name]
    name = re.sub(r'[^\w]+', '', name) # remove characters that make symbols invalid
pieces = ['']
chars = [None, *name, None]
for i in range(len(name)):
prev_ch, curr_ch, next_ch = chars[i:i+3]
if not curr_ch.isdigit():
ch_upper = curr_ch.isupper()
boundary = any([
ch_upper and prev_ch is not None and prev_ch.islower(),
ch_upper and next_ch is not None and next_ch.islower(),
])
if boundary: pieces.append('')
pieces[-1] += curr_ch
name = '_'.join(x.lower() for x in pieces)
name = re.sub(r'^_+|_+$', '', name) # remove lead/tail underscores
name = FN_NAME_KEYWORD_FIXES.get(name) or name
return name
def clean_class_name(name: str) -> str:
    name = re.sub(r'[^\w]+', '', name) # remove characters that make symbols invalid
name = re.sub(r'^_+|_+$', '', name) # remove lead/tail underscores
return name
tests = [
('PhoneIoT', 'phone_iot'),
('getSensors', 'get_sensors'), ('ThisXDoesNotExist', 'this_x_does_not_exist'),
('getCO2Data', 'get_co2_data'), ('getCO*2*Data', 'get_co2_data'),
('city*', 'city'), ('_city*_', 'city'), ('__city*__', 'city'), ('___city*___', 'city'),
('HelloKitty2021', 'hello_kitty2021'), ('C6H5O6', 'c6h5o6'), ('P2PNetwork', 'p2p_network'),
('getXFromLongitude', 'get_x_from_longitude'), ('getYFromLatitude', 'get_y_from_latitude'),
]
for a, b in tests:
res = clean_fn_name(a)
if res != b: raise RuntimeError(f'clean_fn_name error: {a} -> {res} (expected {b})')
tests = [
('Merp', 'Merp'), ('_Me*rp*_', 'Merp'), ('__*Me*rp__', 'Merp'),
('MerpDerp', 'MerpDerp'), ('MerpDerp203', 'MerpDerp203'),
]
for a, b in tests:
res = clean_class_name(a)
if res != b: raise RuntimeError(f'clean_class_name error: {a} -> {res} (expected {b})')
def indent(input: str, spaces: int) -> str:
pad = ' ' * spaces
return '\n'.join([ f'{pad}{line}' for line in input.split('\n') ])
FIXED_TYPES = {
'float': { 'number', 'boundednumber', 'latitude', 'longitude' },
'int': { 'integer', 'boundedinteger' },
'str': { 'string', 'boundedstring', 'date', 'enum' },
'bool': { 'boolean' },
'dict': { 'object' },
}
# returns type name, type parser
def parse_type(t, types_meta):
if t is None: return 'Any', ''
name = t['name'] if type(t) == dict else t
name_lower = name.lower()
if name_lower == 'array':
if type(t) != dict:
return 'list', ''
params = t.get('params') or []
if len(params) == 0 or len(params) > 1: # non-homogeneous is ill-formed - just default to generic list
return 'list', ''
inner_t, inner_parse = parse_type(params[0].get('type') if type(params[0]) == dict else params[0], types_meta)
inner_t = f'List[{inner_t}]' if inner_t != 'Any' else 'list'
inner_parse = f'_common.vectorize({inner_parse})' if inner_parse else ''
return inner_t, inner_parse
elif name_lower == 'image':
return 'Image.Image', ''
for k, v in FIXED_TYPES.items():
if name_lower in v:
return k, k
return parse_type(types_meta.get(name, {}).get('baseType'), types_meta)
# returns arg meta, type name, description, type parser
def parse_arg(arg_meta, types_meta, override_name: str = None):
if arg_meta is None:
return arg_meta, 'Any', '', ''
t, t_parser = parse_type(arg_meta.get('type'), types_meta)
if arg_meta.get('optional'):
t = f'Optional[{t}]'
desc = [f':{override_name or clean_fn_name(arg_meta["name"])}: {arg_meta.get("description") or ""}']
if (arg_meta.get('type') or {}).get('name') == 'Object':
for param_meta in arg_meta['type'].get('params') or []:
desc.append(f' - :{param_meta["name"]}: ({parse_type(param_meta.get("type"), types_meta)[0]}) {param_meta.get("description") or ""}')
return arg_meta, t, '\n\n'.join(desc), t_parser
# returns either a string containing a class definition for the given service, or None if it should be omitted
async def generate_service(session, base_url: str, service_name: str, types_meta):
async with session.get(f'{base_url}/services/{service_name}', ssl = ssl_context) as res:
meta = await res.json(content_type=None) # ignore content type in case response mime type is wrong
if 'servicePath' not in meta or not meta['servicePath']:
return None # only generate code for fs services
rpcs = []
for rpc_name, rpc_meta in meta['rpcs'].items():
is_deprecated = rpc_meta.get('deprecated', False)
required, non_required = [], []
for arg_meta in rpc_meta['args']:
(non_required if 'optional' in arg_meta and arg_meta['optional'] else required).append(parse_arg(arg_meta, types_meta))
ret_info = parse_arg(rpc_meta.get('returns'), types_meta, 'returns')
args = ['self'] + [f'{clean_fn_name(x[0]["name"])}: {x[1]}' for x in required] + [f'{clean_fn_name(x[0]["name"])}: {x[1]} = None' for x in non_required]
payloads = [f"'{x[0]['name']}': {clean_fn_name(x[0]['name'])}" for x in required + non_required]
desc = ([rpc_meta['description']] if rpc_meta.get('description') else []) + [x[2] for x in required + non_required] + ([ret_info[2]] if 'returns' in rpc_meta else [])
desc = '\n\n'.join(desc)
desc = indent(f"'''\n{desc}\n'''", 8)
code = f"self._client.call('{service_name}', '{rpc_name}', **{{ {', '.join(payloads)} }})"
code = f'res = {code}\nreturn {ret_info[3]}(res)' if ret_info[3] else f'return {code}'
fn_name = clean_fn_name(rpc_name)
meta_name = f'_{fn_name}' if is_deprecated else fn_name
prefix = ' @deprecated()\n' if is_deprecated else ''
ret_str = f' -> {ret_info[1]}' if 'returns' in rpc_meta else ''
rpcs.append((fn_name, f"{prefix} def {meta_name}({', '.join(args)}){ret_str}:\n{desc}\n{indent(code, 8)}"))
rpcs = [x[1] for x in sorted(rpcs)] # sort rpcs so they'll be in alphabetical order by name
service_desc = f"'''\n{meta['description']}\n'''" if 'description' in meta and meta['description'] else ''
formatted = SERVICE_CLASS_TEMPLATE.substitute({ 'service_name': clean_class_name(service_name), 'service_desc': indent(service_desc, 4), 'rpcs': '\n'.join(rpcs) })
return (service_name, formatted, service_desc)
async def generate_client(base_url, client_name):
async with aiohttp.ClientSession() as session:
async with session.get(f'{base_url}/services', ssl = ssl_context) as res:
services_meta = await res.json(content_type=None) # ignore content type in case response mime type is wrong
async with session.get(f'{base_url}/services/input-types', ssl = ssl_context) as tres:
types_meta = await tres.json(content_type=None) # ignore content type in case response mime type is wrong
services = await asyncio.gather(*[asyncio.ensure_future(generate_service(session, base_url, x['name'], types_meta)) for x in services_meta])
services = sorted([x for x in services if x]) # remove None values (omitted services) and sort to make sure they're in a consistent order
service_classes = '\n'.join([x[1] for x in services])
service_instances = '\n'.join([f' self.{clean_fn_name(x[0])} = {clean_class_name(x[0])}(self)\n{indent(x[2], 8)}\n' for x in services])
return CLIENT_CLASS_TEMPLATE.substitute({ 'client_name': client_name, 'base_url': base_url,
'service_classes': service_classes, 'service_instances': service_instances })
async def generate_client_save(base_url, client_name, save_path):
content = await generate_client(base_url, client_name)
with open(save_path, 'w', encoding = 'utf-8') as f: # explicit encoding needed on windows
f.write(content)
async def main():
init_content = INIT_TEMPLATE.substitute({ 'description': meta.description, 'version': meta.version, 'author': meta.author, 'credits': meta.credits })
with open('netsblox/__init__.py', 'w', encoding = 'utf-8') as f: # explicit encoding needed on windows
f.write(init_content)
args = [
('https://editor.netsblox.org', 'Client', 'netsblox/editor.py'),
('https://dev.netsblox.org', 'Client', 'netsblox/dev.py'),
# ('http://localhost:8080', 'LocalHost', 'netsblox/localhost.py'), # for dev purposes only
]
await asyncio.gather(*[asyncio.ensure_future(generate_client_save(*x)) for x in args])
def main_sync():
loop = asyncio.new_event_loop()
loop.run_until_complete(main())
loop.run_until_complete(asyncio.sleep(1)) # workaround needed on windows - for some reason they close the proactor event loop early otherwise
loop.close()
if __name__ == '__main__':
main_sync()
| 9,713 |
connect.py
|
george-hawkins/micropython-lighthouse-controls
| 1 |
2171053
|
import micropython
import gc
def connect():
# Display memory available at startup.
gc.collect()
micropython.mem_info()
from wifi_setup.wifi_setup import WiFiSetup
# You should give every device a unique name (to use as its access point name).
ws = WiFiSetup("ding-5cd80b3")
sta = ws.connect_or_setup()
del ws
print("WiFi is setup")
# Display memory available once the WiFi setup process is complete.
gc.collect()
micropython.mem_info()
return sta
| 505 |
server/routes/fireload.py
|
mrodrise/cimc
| 0 |
2171880
|
import urllib3, requests, json
from credentials_cimc import fireload_credentials
class fireLoad(object):
def __init__(self, data_list):
activity_ra = {"Footwear": 1,
"Supply, packaging" :1.5,
"Cardboard": 1.5,
"Wood goods, carpentry": 1.5,
"Synthetic goods": 2}
headers = urllib3.util.make_headers(basic_auth='{username}:{password}'.format(username=fireload_credentials['username'], password=fireload_credentials['password']))
url = '{}/v3/identity/token'.format(fireload_credentials['url'])
response = requests.get(url, headers=headers)
mltoken = json.loads(response.text).get('token')
header = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + mltoken}
# NOTE: manually define and pass the array(s) of values to be scored in the next line
print ("fireLoad - lista que se entrega en la llamada a Watson")
print(["House1", data_list[1], activity_ra[data_list[0]], data_list[2], data_list[3], data_list[4], data_list[5], data_list[6], data_list[7]])
payload_scoring = {"fields":["House id", "Area", "Activity Ra", "Product1", "Product2", "Product3", "Product4", "Product5", "Other products"],
"values":[["House1", float(data_list[1]), activity_ra[data_list[0]], float(data_list[2]),
float(data_list[3]), float(data_list[4]), float(data_list[5]), float(data_list[6]), float(data_list[7])]]}
response_scoring = requests.post(fireload_credentials["call"], json=payload_scoring, headers=header)
self.results = json.loads(response_scoring.text)
def get_fire_load(self):
print ("get_fire_load - imprimo los resultados de la llamada a Watson")
print (str(self.results))
print (str(self.results['values'][0]))
print (str(self.results['values'][0][10]))
return str(self.results['values'][0][10])
| 1,913 |
examples/motor.py
|
dchabot/ophyd_hkl
| 0 |
2170652
|
#!/usr/bin/env python2.7
'''A simple test for :class:`EpicsMotor`'''
import time
import config
from ophyd import EpicsMotor
from ophyd.utils.errors import LimitError
def test():
def callback(sub_type=None, timestamp=None, value=None, **kwargs):
logger.info('[callback] [%s] (type=%s) value=%s', timestamp, sub_type, value)
def done_moving(**kwargs):
logger.info('Done moving %s', kwargs)
loggers = ('ophyd.signal',
'ophyd.positioner',
)
config.setup_loggers(loggers)
logger = config.logger
motor_record = config.motor_recs[0]
m1 = EpicsMotor(motor_record)
# m2 = EpicsMotor('MLL:bad_record')
# give it some time to connect...
time.sleep(1.0)
m1.subscribe(callback, event_type=m1.SUB_DONE)
m1.subscribe(callback, event_type=m1.SUB_READBACK)
# print(m1.user_readback.read())
# print(m1.read())
logger.info('---- test #1 ----')
logger.info('--> move to 1')
m1.move(1)
logger.info('--> move to 0')
m1.move(0)
logger.info('---- test #2 ----')
logger.info('--> move to 1')
m1.move(1, wait=False)
time.sleep(0.2)
logger.info('--> stop')
m1.stop()
logger.info('--> sleep')
time.sleep(1)
logger.info('--> move to 0')
m1.move(0, wait=False, moved_cb=done_moving)
time.sleep(2)
logger.debug('limits are: {}'.format(m1.limits))
low_lim, high_lim = m1.low_limit, m1.high_limit
try:
m1.move(high_lim + 1)
except LimitError as ex:
logger.debug('Failed move, as expected (%s)', ex)
else:
raise ValueError('Move should have failed')
try:
m1.move(low_lim - 1)
# LimitError is a ValueError, so either can be caught
except ValueError as ex:
logger.debug('Failed move, as expected (%s)', ex)
else:
raise ValueError('Move should have failed')
try:
m1.check_value(low_lim - 1)
except ValueError as ex:
logger.debug('Failed check_value, as expected (%s)', ex)
else:
raise ValueError('check_value should have failed')
logger.info('--> move to 0')
stat = m1.move(2, wait=False)
while not stat.done:
logger.info('--> moving... %s error=%s', stat, stat.error)
time.sleep(0.1)
logger.debug(m1.get(as_string=True))
fmt = '%%.%df' % m1.precision
logger.debug('precision formatting: %s ', fmt)
string_pos = fmt % m1.position
logger.debug('final position: %s', string_pos)
if __name__ == '__main__':
test()
| 2,540 |
wirecaml/tools/ascii.py
|
jorkro/wirecaml
| 5 |
2171901
|
import datetime
# Got this from https://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class Color:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Level:
WARNING = Color.WARNING + '[WARNING] ' + Color.ENDC
NOTICE = '[NOTICE] '
ERROR = Color.FAIL + '[ERROR] ' + Color.ENDC
def print_banner(text):
l = len(text) + 4
print()
print(Color.BOLD + '=' * l + Color.ENDC)
print(Color.BOLD + "| %s |" % text + Color.ENDC)
print(Color.BOLD + '=' * l + Color.ENDC)
def print_warning(text):
print_text(Level.WARNING, text)
def print_notice(text):
print_text(Level.NOTICE, text)
def print_error(text):
print_text(Level.ERROR, text)
def print_text(lvl, text):
lines = text.splitlines()
time = datetime.datetime.now().strftime('%H:%M:%S')
for line in lines:
print('%s %s%s' % (time, lvl, line))
| 1,039 |
handler/HandlerRequests.py
|
WebisD/http-api-without-lib
| 2 |
2171742
|
from message.StatusCode import StatusCode
from threading import Thread
from message.ParserMessage import ParserMessage
from message.Request import Request
from methods.GET import GET
from methods.POST import POST
from methods.PUT import PUT
from methods.DELETE import DELETE
from handler.HandlerErrors import HandlerErrors
def recv(sock, chunkSize=8192):
fragments = []
data = None
while True:
data = sock.recv(chunkSize)
if not data: return b"".join(fragments) # connection closed; caller treats an empty result as a disconnect
try:
decoded_data = data.decode()
if decoded_data.find("GET /") != -1 or decoded_data.find("DELETE /") != -1:
# Not a post, return
return data
fragments.append(data)
if decoded_data[-1] == '}':
a = b"".join(fragments)
return a
except Exception as e:
print(e)
class Handler(Thread):
def __init__(self, server):
Thread.__init__(self)
self.server = server
def run(self):
while True:
connectionSocket, addr = self.server.serverSocket.accept()
while True:
request = recv(connectionSocket, 8192).decode()
if not request:
break
else:
request = Request(ParserMessage.parseRequest(request))
request.setIp(addr)
self.checkTypeRequest(request, connectionSocket)
break
def checkTypeRequest(self, request, connectionSocket):
response = {}
try:
response = eval(request.type).response(request)
except Exception as e:
print(e)
response = HandlerErrors.sendErrorCode(request, StatusCode.BAD_REQUEST)
connectionSocket.send(response)
connectionSocket.close()
| 1,978 |
tracks.py
|
jakeisnt/soundclot
| 0 |
2169813
|
import csv
from time import time
reader = csv.reader(open("tracks.csv"))
tracks = [row for row in reader]
for track in tracks:
t = track[0].split(":")
track[0] = int(t[0])*60 + int(t[1])
print(tracks)
def getCurrentTrack():
t = time()
# Wrap around
total = sum([track[0] for track in tracks])
t = t % total
index = 0
while True:
print(t)
if tracks[index][0] < t:
t -= tracks[index][0]
index += 1
else:
return index, int(t*1000)
| 528 |
courses/data_streaming_nanodegree/spark_streaming/starter/sparkpyoptionalriskquality.py
|
pyerbiz/re-exploring-python
| 0 |
2169730
|
from pyspark.sql import SparkSession
from pyspark.sql.functions import base64, col, from_json, split, unbase64
from pyspark.sql.types import (
ArrayType,
BooleanType,
StringType,
StructField,
StructType,
)
| 226 |
aoc_2019/day11.py
|
akohen/AdventOfCode
| 0 |
2169780
|
from intcode_computer import intcode_computer
def paint(computer):
position = (0,0)
direction = 0
# 0 = UP, 1 = LEFT, 2 = DOWN, 3 = RIGHT
action = { 0: lambda x : (x[0],x[1]+1), 1: lambda x : (x[0]-1,x[1]), 2: lambda x : (x[0],x[1]-1), 3: lambda x : (x[0]+1,x[1]) }
seen = {}
while computer.execute().halted == False:
seen[position] = computer.output_values.pop()
rotation = computer.execute().output_values.pop()
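# The second output encodes the turn: 0 rotates left (+1 in the UP/LEFT/DOWN/RIGHT ordering), 1 rotates right (-1), taken modulo 4.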
direction = (direction - 2 * rotation + 1) % 4
position = action[direction](position)
computer.input_values.append(seen.get(position, 0))
return seen
def display_code(paint):
for y in range(7):
print(''.join(['.' if paint.get((x,-y), 0) == 1 else ' ' for x in range(40)]))
if __name__ == "__main__":
with open('./input/day11') as f:
program = [int(i) for i in next(f).split(',')]
print('Phase 1: {}'.format(len(paint(intcode_computer(program, [0], stop_on_output=True)))))
display_code(paint(intcode_computer(program, [1], stop_on_output=True)))
| 1,018 |
submissions/Everett/myCSPs.py
|
dysomni/aima-python
| 0 |
2169930
|
import csp
rgbp = ['R', 'G', 'B', 'P']
# The 23 wards of Tokyo
d2 = {'ADACHI' : rgbp, 'KATSU' : rgbp, 'EDOG' : rgbp, 'KOTO' : rgbp, 'CHUO': rgbp, 'SUMI': rgbp, 'TAITO': rgbp, 'ARAK': rgbp, 'KITA': rgbp, 'BUNK': rgbp, 'CHIY': rgbp, 'MINA': rgbp, 'SHIN': rgbp,
'SHINA': rgbp,
'OTA': rgbp,
'MEGU': rgbp,
'SETA': rgbp,
'SUGI': rgbp,
'NAKA': rgbp,
'NERI': rgbp,
'ITA': rgbp,
'TOSHI': rgbp,
'SHIB' : rgbp,
}
v2 = d2.keys()
n2 = {'ADACHI' : ['KATSU','SUMI','ARAK','KITA'],
'KATSU' : ['ADACHI', 'EDOG','SUMI'],
'EDOG' : ['KATSU', 'KOTO','SUMI'],
'KOTO' : ['EDOG','CHUO','SUMI'],
'CHUO':['KOTO','SUMI','MINA','CHIY','TAITO'],
'TAITO':['CHUO','SUMI','CHIY','BUNK','KITA','ARAK'],
'SUMI':['KATSU','EDOG','KOTO','CHUO','TAITO','ADACHI','ARAK'],
'ARAK': ['TAITO', 'BUNK', 'SUMI', 'ADACHI', 'KITA'],
'KITA': ['ADACHI', 'ARAK', 'BUNK', 'TOSHI', 'ITA','TAITO'],
'BUNK': ['ARAK', 'TAITO', 'CHIY', 'SHIN', 'TOSHI', 'KITA'],
'CHIY': ['CHUO', 'MINA', 'SHIN', 'BUNK', 'TAITO'],
'MINA': ['CHUO', 'CHIY', 'SHIN', 'SHIB', 'SHINA'],
'SHIN': ['MINA', 'CHIY', 'BUNK', 'TOSHI', 'NERI', 'NAKA', 'SHIB'],
'SHINA': ['MINA', 'SHIB', 'MEGU', 'OTA'],
'OTA': ['SHINA', 'MEGU', 'SETA'],
'MEGU': ['SHIB', 'MINA', 'SHINA', 'OTA', 'SETA', ],
'SETA': ['OTA', 'MEGU', 'SHIB', 'SUGI'],
'SUGI': ['SETA', 'NERI', 'NAKA', 'SHIB'],
'NAKA': ['SUGI', 'SHIB', 'SHIN', 'TOSHI', 'NERI'],
'NERI': ['TOSHI', 'NAKA', 'SUGI', 'SHIN', 'ITA'],
'ITA': ['KITA', 'NERI', 'TOSHI'],
'TOSHI': ['KITA', 'BUNK', 'ITA', 'NERI', 'NAKA', 'SHIN'],
'SHIB': ['NAKA', 'SHIN', 'SUGI', 'SETA', 'MEGU', 'SHINA', 'MINA'],}
def constraints(A, a, B, b):
if A == B: # the same ward compared with itself
return True
if a == b: # two adjacent wards would share the same color
return False
return True
c2 = csp.CSP(v2, d2, n2, constraints)
c2.label = 'Map of Japan'
myCSPs = [
{
'csp' : c2,
# 'select_unassigned_variable': csp.mrv,
# 'order_domain_values': csp.lcv,
# 'inference': csp.mac,
# 'inference': csp.forward_checking,
},
{
'csp' : c2,
'select_unassigned_variable': csp.mrv,
# 'order_domain_values': csp.lcv,
# 'inference': csp.mac,
# 'inference': csp.forward_checking,
},
{
'csp' : c2,
'select_unassigned_variable': csp.mrv,
# 'order_domain_values': csp.lcv,
# 'inference': csp.mac,
# 'inference': csp.forward_checking,
},
{
'csp' : c2,
# 'select_unassigned_variable': csp.mrv,
'order_domain_values': csp.lcv,
# 'inference': csp.mac,
# 'inference': csp.forward_checking,
},
{
'csp' : c2,
# 'select_unassigned_variable': csp.mrv,
#'order_domain_values': csp.lcv,
'inference': csp.mac,
#'inference': csp.forward_checking,
},
]
| 2,994 |
jsonit/decorators.py
|
crrobinson14/django-jsonit
| 3 |
2169351
|
from functools import wraps
from jsonit.http import JSONResponse
def catch_ajax_exceptions(func):
"""
Catches exceptions which occur when using an AJAX request.
These exceptions will be returned using a :class:`JSONResponse` rather than
letting the exception propogate.
"""
@wraps(func)
def dec(request, *args, **kwargs):
try:
return func(request, *args, **kwargs)
except Exception as e:
if request.is_ajax():
return JSONResponse(request, exception=e)
raise
return dec
| 582 |
padertorch/contrib/examples/wavenet/train.py
|
thequilo/padertorch
| 0 |
2171678
|
"""
Example call:
export STORAGE_ROOT=<your desired storage root>
python -m padertorch.contrib.examples.wavenet.train
"""
import os
from pathlib import Path
import numpy as np
from padercontrib.database.librispeech import LibriSpeech
from paderbox.utils.timer import timeStamped
from padertorch import modules
from padertorch.contrib.examples.wavenet.wavenet import WaveNet
from padertorch.contrib.je.data.transforms import AudioReader, STFT, \
fragment_signal, Collate
from padertorch.contrib.je.data.utils import split_dataset
from padertorch.train.optimizer import Adam
from padertorch.train.trainer import Trainer
def get_datasets(max_length=1., batch_size=3):
db = LibriSpeech()
train_clean_100 = db.get_dataset('train_clean_100')
train_set, validate_set = split_dataset(train_clean_100, fold=0)
test_set = db.get_dataset('test_clean')
training_data = prepare_dataset(train_set, max_length=max_length, batch_size=batch_size, training=True)
validation_data = prepare_dataset(validate_set, max_length=max_length, batch_size=batch_size, training=False)
test_data = prepare_dataset(test_set, max_length=max_length, batch_size=batch_size, training=False)
return training_data, validation_data, test_data
def prepare_dataset(dataset, max_length=1., batch_size=3, training=False):
dataset = dataset.filter(lambda ex: ex['num_samples'] > 16000, lazy=False)
stft_shift = 160
window_length = 480
target_sample_rate = 16000
def prepare_example(example):
example['audio_path'] = example['audio_path']['observation']
example['speaker_id'] = example['speaker_id'].split('-')[0]
return example
dataset = dataset.map(prepare_example)
audio_reader = AudioReader(
source_sample_rate=16000, target_sample_rate=target_sample_rate
)
dataset = dataset.map(audio_reader)
stft = STFT(
shift=stft_shift, window_length=window_length, size=512, fading='full',
pad=True
)
dataset = dataset.map(stft)
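# Cut the padded audio and its STFT into aligned, fixed-length fragments so that batched examples share a common length.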
def fragment(example):
audio, features = example['audio_data'], example['stft']
pad_width = window_length - stft_shift
assert pad_width > 0, pad_width
audio = np.pad(
audio, (audio.ndim-1)*[(0, 0)] + [(pad_width, window_length - 1)],
mode='constant')
fragment_step = int(max_length*target_sample_rate)
fragment_length = fragment_step + 2*pad_width
stft_fragment_step = fragment_step / stft_shift
stft_fragment_length = stft.samples_to_frames(fragment_step)
fragments = []
for audio, features in zip(*fragment_signal(
audio, features, axis=1,
step=[fragment_step, stft_fragment_step],
max_length=[fragment_length, stft_fragment_length],
min_length=[fragment_length, stft_fragment_length],
random_start=training
)):
fragments.append({
'example_id': example['example_id'],
'audio_data': audio[..., pad_width:-pad_width].astype(np.float32),
'stft': features.astype(np.float32),
'seq_len': features.shape[1],
})
return fragments
dataset = dataset.map(fragment)
if training:
dataset = dataset.shuffle(reshuffle=True)
return dataset.prefetch(
num_workers=8, buffer_size=10*batch_size
).unbatch().shuffle(reshuffle=True, buffer_size=10*batch_size).batch(
batch_size=batch_size
).map(Collate())
def get_model():
wavenet = modules.wavenet.WaveNet(
n_cond_channels=64, upsamp_window=400, upsamp_stride=160, fading='full'
)
model = WaveNet(
wavenet=wavenet, sample_rate=16000, fft_length=512, n_mels=64, fmin=50
)
return model
def train(model, storage_dir):
train_set, validate_set, _ = get_datasets()
trainer = Trainer(
model=model,
optimizer=Adam(lr=5e-4),
storage_dir=str(storage_dir),
summary_trigger=(1000, 'iteration'),
checkpoint_trigger=(10000, 'iteration'),
stop_trigger=(100000, 'iteration')
)
trainer.test_run(train_set, validate_set)
trainer.register_validation_hook(validate_set)
trainer.train(train_set)
if __name__ == '__main__':
storage_dir = str(
Path(os.environ['STORAGE_ROOT']) / 'wavenet' / timeStamped('')[1:]
)
os.makedirs(storage_dir, exist_ok=True)
model = get_model()
train(model, storage_dir)
| 4,490 |
stanovanja/home/migrations/0004_auto_20210803_1556.py
|
danesjenovdan/najemniski-sos
| 0 |
2171922
|
# Generated by Django 3.2.6 on 2021-08-03 13:56
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0023_add_choose_permissions'),
('wagtailcore', '0062_comment_models_and_pagesubscription'),
('home', '0003_auto_20210803_1401'),
]
operations = [
migrations.AlterField(
model_name='contentpage',
name='body',
field=wagtail.core.fields.StreamField([('section', wagtail.core.blocks.StreamBlock([('color_section', wagtail.core.blocks.StructBlock([('color', wagtail.core.blocks.ChoiceBlock(choices=[('white', 'Bela'), ('yellow', 'Rumena'), ('purple', 'Vijolična'), ('gradient_green_yellow', 'Zeleno-rumena'), ('gradient_purple_green', 'Vijolično-zelena')], label='Barva')), ('body', wagtail.core.blocks.StreamBlock([('headline', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Naslov')), ('description', wagtail.core.blocks.RichTextBlock(label='Opis', required=False)), ('image_left', wagtail.images.blocks.ImageChooserBlock(label='Slika na levi', required=False)), ('image_right', wagtail.images.blocks.ImageChooserBlock(label='Slika na desni', required=False))], icon='title', label='Naslov', template='home/blocks/headline.html'))]))]))]))], default='', verbose_name='Vsebina'),
),
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.core.fields.StreamField([('section', wagtail.core.blocks.StreamBlock([('color_section', wagtail.core.blocks.StructBlock([('color', wagtail.core.blocks.ChoiceBlock(choices=[('white', 'Bela'), ('yellow', 'Rumena'), ('purple', 'Vijolična'), ('gradient_green_yellow', 'Zeleno-rumena'), ('gradient_purple_green', 'Vijolično-zelena')], label='Barva')), ('body', wagtail.core.blocks.StreamBlock([('headline', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(label='Naslov')), ('description', wagtail.core.blocks.RichTextBlock(label='Opis', required=False)), ('image_left', wagtail.images.blocks.ImageChooserBlock(label='Slika na levi', required=False)), ('image_right', wagtail.images.blocks.ImageChooserBlock(label='Slika na desni', required=False))], icon='title', label='Naslov', template='home/blocks/headline.html'))]))]))]))], default='', verbose_name='Vsebina'),
),
migrations.CreateModel(
name='MetaSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('header_links', wagtail.core.fields.StreamField([('page_link', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(help_text='Če je prazno se uporabi naslov strani.', label='Ime', required=False)), ('page', wagtail.core.blocks.PageChooserBlock(label='Stran'))])), ('external_link', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(label='Ime')), ('url', wagtail.core.blocks.URLBlock(label='Povezava'))]))], verbose_name='Povezave v glavi')),
('footer_links', wagtail.core.fields.StreamField([('page_link', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(help_text='Če je prazno se uporabi naslov strani.', label='Ime', required=False)), ('page', wagtail.core.blocks.PageChooserBlock(label='Stran'))])), ('external_link', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock(label='Ime')), ('url', wagtail.core.blocks.URLBlock(label='Povezava'))]))], verbose_name='Povezave v nogi')),
('facebook', models.URLField(blank=True, null=True)),
('twitter', models.URLField(blank=True, null=True)),
('instagram', models.URLField(blank=True, null=True)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('meta_title', models.CharField(blank=True, max_length=255, null=True)),
('meta_description', models.CharField(blank=True, max_length=255, null=True)),
('share_email_text', models.TextField(blank=True, null=True)),
('share_twitter_text', models.TextField(blank=True, null=True)),
('meta_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.image')),
('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.site')),
],
options={
'verbose_name': 'Meta nastavitve',
},
),
]
| 4,759 |
pqg_melspectrogram.py
|
Kurene/pyqtgraph-app
| 7 |
2171813
|
# -*- coding: utf-8 -*-
import sys
import time
import threading
import numpy as np
from numba import jit
import librosa
"""
import os
import PySide6
from PySide6 import QtGui, QtCore
dirname = os.path.dirname(PySide6.__file__)
plugin_path = os.path.join(dirname, 'plugins', 'platforms')
os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = plugin_path
"""
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
from rasp_audio_stream import AudioInputStream
class PQGMelSpectrogram():
def __init__(self,
sr,
shape,
n_mels=128,
n_frames=150,
fps=60,
size=(500,500),
title="",
):
# Parameters for computing the mel spectrogram
self.n_frames = n_frames
self.n_ch = shape[0]
self.n_chunk = shape[1]
self.n_freqs = self.n_chunk // 2 + 1
self.n_mels = n_mels
self.sig = np.zeros(shape)
self.x = np.zeros(self.n_chunk)
self.specs = np.zeros((self.n_freqs))
self.melspecs = np.zeros((self.n_frames, self.n_mels))
self.window = np.hamming(self.n_chunk)
self.fft = np.fft.rfft
self.melfreqs = librosa.mel_frequencies(n_mels=self.n_mels)
self.melfb = librosa.filters.mel(sr, self.n_chunk, n_mels=self.n_mels)
self.fps = fps
self.iter = 0
#====================================================
## Initial PyQtGraph setup
app = QtGui.QApplication([])
win = pg.GraphicsLayoutWidget()
win.resize(size[0], size[1])
win.show()
## ImageItem setup
imageitem = pg.ImageItem(border="k")
cmap = pg.colormap.getFromMatplotlib("jet")
bar = pg.ColorBarItem( cmap=cmap )
bar.setImageItem(imageitem)
## ViewBox setup
viewbox = win.addViewBox()
viewbox.setAspectLocked(lock=True)
viewbox.addItem(imageitem)
## Axis (AxisItem) setup
axis_left = pg.AxisItem(orientation="left")
n_ygrid = 6
yticks = {}
for k in range(n_ygrid):
index = k*(self.n_mels//n_ygrid)
yticks[index] = int(self.melfreqs[index])
axis_left.setTicks([yticks.items()])
## PlotItem setup
plotitem = pg.PlotItem(viewBox=viewbox, axisItems={"left":axis_left})
# Plot range
plotitem.setLimits(
minXRange=0, maxXRange=self.n_frames,
minYRange=0, maxYRange=self.n_mels)
# Lock the aspect ratio
plotitem.setAspectLocked(lock=True)
# Disable mouse interaction
plotitem.setMouseEnabled(x=False, y=False)
# Set the axis labels
plotitem.setLabels(bottom="Time-frame",
left="Frequency")
win.addItem(plotitem)
self.app = app
self.win = win
self.viewbox = viewbox
self.plotitem = plotitem
self.imageitem = imageitem
pg.setConfigOptions(antialias=True)
#pg.setConfigOption('useNumba', True)
def run_app(self):
timer = QtCore.QTimer()
timer.timeout.connect(self.update)
timer.start(1/self.fps * 1000)
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
def update(self):
if self.iter > 0:
self.viewbox.disableAutoRange()
# Index in the spectrogram buffer where the newest frame is stored
idx = self.iter % self.n_frames
# Compute the mono signal
self.x[:] = 0.5 * (self.sig[0] + self.sig[1])
# FFT => compute the power spectrum
self.x[:] = self.x[:] * self.window
self.specs[:] = np.abs(self.fft(self.x))**2
# Compute the mel spectrogram
self.melspecs[idx, :] = np.dot(self.melfb, self.specs)
# Draw the image
pos = idx + 1 if idx < self.n_frames else 0
self.imageitem.setImage(
librosa.power_to_db(
np.r_[self.melspecs[pos:self.n_frames],
self.melspecs[0:pos]]
, ref=np.max)
)
self.iter += 1
def callback_sigproc(self, sig):
self.sig[:] = sig
| 4,255 |
client/GUI/encrypting/polybiusSquareCipher.py
|
Dzemoro/Project_OD
| 0 |
2171701
|
import re, random, string
class PolybiusSquareCipher:
def generate_array(self, key=''):
"""Generates Polybius square"""
abc = 'AĄBCĆDEĘFGHIJKLŁMNŃOÓPQRSŚTUVWXYZŹŻ'
abc += 'aąbcćdeęfghijklłmnńoópqrsśtuvwxyzźż'
abc += '0123456789 '
arr_el = []
arr = []
row = []
if key:
for char in key:
if char not in arr_el:
arr_el.append(char)
for char in abc:
if char not in arr_el:
arr_el.append(char)
else:
arr_el = abc
for i in range(9):
for j in range(9):
row.append(arr_el[i*9 + j])
arr.append(row)
row = []
return arr
def encrypt(self, word, key=''):
"""Encrypts message"""
# word = re.sub('[^A-Z]','', word)
# key = re.sub('[^A-Z]','', key)
arr = self.generate_array(key)
output = ''
for char in word:
for i in range(9):
for j in range(9):
if char == arr[i][j]: # use equality, not identity, so non-interned characters still match
output+=str(j+1)
output+=str(i+1)
return output
def decrypt(self, word, key=''):
"""Decrypts message"""
# key = re.sub('[^A-Z]','', key)
arr = self.generate_array(key)
output = ''
for i in range(int(len(word)/2)):
col = int(word[i*2])
row = int(word[i*2+1])
letter = arr[row-1][col-1]
output+=str(letter)
return output
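# Round-trip sketch (key and plaintext chosen arbitrarily; all characters must come from the square's alphabet):
#   cipher = PolybiusSquareCipher()
#   cipher.decrypt(cipher.encrypt('Ala ma kota', 'key'), 'key')  # -> 'Ala ma kota'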
def gen_key(self):
key_length = random.randint(2,30)
key = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(key_length)).strip()
return key
def test(self):
text_length = random.randint(100,200)
key = self.gen_key()
text = ''.join(random.SystemRandom().choice(string.ascii_letters + ' ') for _ in range(text_length)).strip()
print(text)
encrypted_text = self.encrypt(text, key)
decrypted_text = self.decrypt(encrypted_text, key)
print(decrypted_text)
#polybius = PolybiusSquareCipher()
#polybius.test()
| 2,240 |
util.py
|
kreativeskonto/fairy-chess
| 0 |
2171777
|
BOARD_SIZE = 16
TIME = 3600
def to_coords(square):
return square % BOARD_SIZE, square // BOARD_SIZE
def to_square(xy):
x, y = xy
return y * BOARD_SIZE + x
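# With BOARD_SIZE = 16 the two helpers are inverses, e.g. to_coords(17) == (1, 1) and to_square((1, 1)) == 17;
# squares are numbered row by row.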
def format_time(secs):
prefix = '-' if secs < 0 else ''
mins = abs(secs) // 60
secs = abs(secs) % 60
return f"{prefix}{mins:02d}:{secs:02d}"
DIR_NORTH = (BOARD_SIZE, None)
DIR_NORTHWEST = (BOARD_SIZE - 1, 0)
DIR_NORTHEAST = (BOARD_SIZE + 1, BOARD_SIZE - 1)
DIR_WEST = (-1, 0)
DIR_EAST = (1, BOARD_SIZE - 1)
DIR_SOUTH = (-BOARD_SIZE, None)
DIR_SOUTHWEST = (-BOARD_SIZE - 1, 0)
DIR_SOUTHEAST = (-BOARD_SIZE + 1, BOARD_SIZE - 1)
DIRS_ROOK = [DIR_NORTH, DIR_SOUTH, DIR_EAST, DIR_WEST]
DIRS_BISHOP = [DIR_NORTHWEST, DIR_SOUTHWEST, DIR_NORTHEAST, DIR_SOUTHEAST]
DIRS_QUEEN = DIRS_ROOK + DIRS_BISHOP
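# Each direction is a (square offset, blocked file) pair: the second element appears to be the file (x coordinate)
# at which a slide in that direction would wrap around the board edge, with None for purely vertical moves.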
| 784 |
utils/__init__.py
|
mseitzer/csmri-refinement
| 27 |
2170029
|
def set_cuda_env(gpu_idx):
"""Sets CUDA_VISIBLE_DEVICES environment variable
Parameters
----------
gpu_idx : string
Index of GPU to use, `auto`, or an empty string. If `auto`, attempts to
automatically select a free GPU.
Returns
-------
The value the environment variable has been set to
Raises
------
Exception if auto selecting GPU has been attempted, but failed
"""
import os
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_idx
return gpu_idx
def set_random_seeds(seed):
import random
import numpy as np
import torch
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def set_worker_seeds(worker_id):
import torch
# Pytorch seed is unique per worker, so we can use it to
# initialize the other seeds
set_random_seeds(torch.initial_seed() % (2**32 - 1))
def cpuify(modules_or_tensors):
if isinstance(modules_or_tensors, dict):
return {key: cpuify(values) for key, values in modules_or_tensors.items()}
elif isinstance(modules_or_tensors, (tuple, list)):
return [cpuify(obj) for obj in modules_or_tensors]
if modules_or_tensors is not None:
return modules_or_tensors.cpu()
else:
return None
def cudaify(modules_or_tensors, device_ids=None):
if isinstance(modules_or_tensors, dict):
return {key: cudaify(values, device_ids)
for key, values in modules_or_tensors.items()}
elif isinstance(modules_or_tensors, (tuple, list)):
return [cudaify(obj, device_ids) for obj in modules_or_tensors]
if device_ids is not None and not device_ids.isnumeric():
# Multi-GPU requested: device_ids has the form of '2,3'
from torch.nn import Module
from utils.custom_data_parallel import CustomDataParallel
if isinstance(modules_or_tensors, Module):
# As we set CUDA_VISIBLE_DEVICES beforehand, device_ids needs to
# start from zero (i.e. in the form of '0,1')
device_ids = range(len(device_ids.split(',')))
return CustomDataParallel(modules_or_tensors,
device_ids=device_ids).cuda()
else:
return modules_or_tensors.cuda() # Tensors are sent to default device
else:
return modules_or_tensors.cuda() # Single GPU: send to default device
def make_variables(tensors, volatile):
from torch.autograd import Variable
if isinstance(tensors, dict):
return {key: Variable(tensor, volatile=volatile)
for key, tensor in tensors.items()}
elif isinstance(tensors, (tuple, list)):
return [Variable(tensor, volatile=volatile) for tensor in tensors]
else:
return Variable(tensors, volatile=volatile)
def make_fresh_variables(variables, volatile):
from torch.autograd import Variable
if isinstance(variables, dict):
return {key: make_fresh_variables(variable, volatile=volatile)
for key, variable in variables.items()}
elif isinstance(variables, (tuple, list)):
return [Variable(variable.data, volatile=volatile) for variable in variables]
else:
return Variable(variables.data, volatile=volatile)
def make_variable_like(tensor, variable):
from torch import Tensor
from torch.autograd import Variable
if isinstance(variable, Tensor):
return tensor
requires_grad = variable.requires_grad
volatile = variable.volatile
tensor = tensor.type_as(variable.data)
return Variable(tensor, requires_grad=requires_grad, volatile=volatile)
def import_function_from_path(import_path):
import importlib
path_elems = import_path.split('.')
fn_name = path_elems[-1]
if len(path_elems) > 1:
module_path = '.'.join(path_elems[:-1])
module = importlib.import_module(module_path)
if hasattr(module, fn_name):
fn = getattr(module, fn_name)
else:
raise ValueError('Could not find {} in module {}'.format(fn_name,
module_path))
else:
if fn_name in globals():
fn = globals()[fn_name]
elif fn_name in locals():
fn = locals()[fn_name]
else:
raise ValueError('Could not find {}'.format(fn_name))
return fn
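# e.g. import_function_from_path('torch.nn.functional.relu') imports torch.nn.functional and returns its relu attribute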
| 4,075 |
timelineApp/views/login.py
|
Chuckle-Doodle/Chuckle-Doodle.github.io
| 0 |
2171463
|
import flask
import timelineApp
from flask import request
import os
import shutil
import errno
from distutils.dir_util import copy_tree
@timelineApp.app.route('/', methods=['GET', 'POST'])
def show_login():
"""Display / route."""
dbase = timelineApp.model.get_db()
cursor = dbase.cursor()
# Check if a user is currently logged in
if "username" in flask.session:
#also check if username is in database
userExists = cursor.execute("SELECT username from users where username = ?", (flask.session['username'],)).fetchone()
if userExists:
return flask.redirect(flask.url_for('show_index'))
else:
flask.session.clear()
context = {}
if flask.request.method == 'POST':
#check if logging in or signing up
if request.form['action'] == "Login":
given_user = flask.request.form['username']
given_password = flask.request.form['password']
password_check = "SELECT password FROM users WHERE username = '{0}';"
rows = cursor.execute(password_check.format(given_user)).fetchone()
if rows:
if rows['password'] == given_password:
flask.session['username'] = given_user
return flask.redirect(flask.url_for('show_index'))
# Unsuccessful Login
context["unsuccessful_str"] = "Username or password is incorrect. Try again."
return flask.render_template("login.html", **context)
else:
#request.form['action'] == Sign Up should be the case if we get here
new_user = flask.request.form['username']
new_password = flask.request.form['password']
#ensure length between 1 and 20 characters
if len(new_user) < 1 or len(new_user) > 20 or len(new_password) < 1 or len(new_password) > 20:
# Unsuccessful sign up
context["unsuccessful_str"] = "Username and password must be between 1 and 20 characters each. Try again."
return flask.render_template("login.html", **context)
#ensure username is unique
rows = cursor.execute("SELECT username from users where username = ?", (new_user,)).fetchall()
if rows:
#not a unique username. print out error message to webpage
context["not_unique_username"] = "Username must be unique and no more than 20 characters. Try again."
return flask.render_template("login.html", **context)
else: #username IS unique
addUser = cursor.execute(
"INSERT INTO users(username, password) "
"VALUES (?, ?) ", (new_user, new_password)
)
flask.session['username'] = new_user
#create folder in file system for user
initialPath = os.getcwd()
#tempPath = "sql/users/test/stories/"
#sourcePath = os.path.join(initialPath, tempPath)
#print("source path is!")
#print(sourcePath)
#print("wd is :::::")
#print(os.getcwd())
newPath = "timelineApp/static/var/users/"
os.chdir(newPath)
os.mkdir(flask.session['username'])
os.chdir(flask.session['username'])
os.mkdir("stories")
os.chdir(initialPath)
#destinationPath = os.getcwd()
#destinationPath = os.path.join(destinationPath, "Black_Death")
#print(destinationPath)
#copy_tree(sourcePath, destinationPath)
return flask.redirect(flask.url_for('show_index'))
# Initial request to login page
return flask.render_template("login.html", **context)
| 3,880 |
pyGPs/Demo/Clustering/pyGP_extension.py
|
Corentin-LF/pyGPs
| 196 |
2170002
|
"""
__author__ = ['christiaanleysen']
This example divides a set of time-series into two clusters of the most similar time-series using the general
model learn over a set of time-series.
Find more information in the following paper:
"Energy consumption profiling using Gaussian Processes",
<NAME>*, <NAME>†, <NAME>†, <NAME>*
*Dept. Computer Science, KU Leuven, Belgium
†Data Innovation Team, Sirris, Belgium
https://lirias.kuleuven.be/bitstream/123456789/550688/1/conf2.pdf
"""
import pyGPs.Core.gp as pyGPs
import scipy
import numpy as np
import timeit
import logging
logger = logging.getLogger("pyGPs.clustering")
ValuesY = []
def gp_likelihood_independent(hyperparams, model, xs, ys, der=False):
"""
find the aggregated likelihoods of the Gaussian process regression
Parameters:
-----------
hyperparams: hyperparameters for the Gaussian process regression that are used used.
model: GPR model
xs: the list of featureset
ys: the list of valueset
der: boolean to also minimize the derivatives of the hyperparameters
Returns:
--------
the accumulated likelihood of the Gaussian process regression
"""
global ValuesY
# set the hyperparameters
model.covfunc.hyp = hyperparams.tolist()
likelihoodList = []
# accumulate all negative log marginal likelihood (model.nlZ) and the derivative (model.dnlZ)
all_nlZ = 0
all_dnlZ = pyGPs.inf.dnlZStruct(model.meanfunc, model.covfunc, model.likfunc)
for x, y in zip(xs, ys):
model.setData(x, y)
if der:
this_nlZ, this_dnlZ, post = model.getPosterior(der=der)
all_nlZ += this_nlZ
all_dnlZ = all_dnlZ.accumulateDnlZ(this_dnlZ)
likelihoodList.append(this_nlZ)
else:
this_nlZ, post = model.getPosterior(der=der)
all_nlZ += this_nlZ
likelihoodList.append(this_nlZ)
# calculate weighted means by making use of the relative likelihoods.
likelihoodList = [abs(i/sum(abs(j) for j in likelihoodList)) for i in likelihoodList]
ValuesY = [i*j.tolist() for i,j in zip(ys,likelihoodList)]
ValuesY = np.array([sum(i) for i in zip(*ValuesY)])
returnValue = all_nlZ
if der:
returnValue = all_nlZ+np.sum(all_dnlZ.cov)+np.sum(all_dnlZ.mean)
return returnValue
def optimizeHyperparameters(initialHyperParameters, model, xs, ys, bounds=None, method='BFGS'):
"""
Optimize the hyperparameters of the general Gaussian process regression
Parameters:
-----------
initialHyperparameters: initial hyper parameters used.
model: GPR model
xs: the list of featureset
ys: the list of valueset
bounds: the bounds needed for the minimize method (if needed).
method: the minimize method that is employed e.g. BFGS
Returns:
--------
the optimal hyperparameters and the model
"""
global ValuesY
ValuesY = []
if bounds is None:
bounds = []
logger.info('optimizing Hyperparameters...')
start = timeit.default_timer()
result = scipy.optimize.minimize(gp_likelihood_independent, initialHyperParameters, args=(model,xs,ys),bounds=bounds,method=method) # the Powell method takes a long time here
stop = timeit.default_timer()
logger.info("minimization time:", stop - start)
hyperparams = result.x
model.covfunc.hyp = hyperparams.tolist()
model.getPosterior(xs[0], ValuesY)
return hyperparams, model
| 3,436 |
asgi_cors.py
|
cividi/asgi-cors
| 15 |
2170532
|
import fnmatch
from functools import wraps
def asgi_cors_decorator(
allow_all=False, hosts=None, host_wildcards=None, callback=None
):
hosts = hosts or []
host_wildcards = host_wildcards or []
# We need hosts and host_wildcards to be b""
hosts = set(h.encode("utf8") if isinstance(h, str) else h for h in hosts)
host_wildcards = [
h.encode("utf8") if isinstance(h, str) else h for h in host_wildcards
]
if any(h.endswith(b"/") for h in (hosts or [])) or any(
h.endswith(b"/") for h in (host_wildcards or [])
):
assert False, "Error: CORS origin rules should never end in a /"
def _asgi_cors_decorator(app):
@wraps(app)
async def app_wrapped_with_cors(scope, receive, send):
async def wrapped_send(event):
if event["type"] == "http.response.start":
original_headers = event.get("headers") or []
access_control_allow_origin = None
if allow_all:
access_control_allow_origin = b"*"
elif hosts or host_wildcards or callback:
incoming_origin = dict(scope.get("headers") or []).get(
b"origin"
)
if incoming_origin:
matches_hosts = incoming_origin in hosts
matches_wildcards = any(
fnmatch.fnmatch(incoming_origin, host_wildcard)
for host_wildcard in host_wildcards
)
matches_callback = False
if callback is not None:
matches_callback = callback(incoming_origin)
if matches_hosts or matches_wildcards or matches_callback:
access_control_allow_origin = incoming_origin
if access_control_allow_origin is not None:
# Construct a new event with new headers
event = {
"type": "http.response.start",
"status": event["status"],
"headers": [
p
for p in original_headers
if p[0] != b"access-control-allow-origin"
]
+ [
[
b"access-control-allow-origin",
access_control_allow_origin,
]
],
}
await send(event)
await app(scope, receive, wrapped_send)
return app_wrapped_with_cors
return _asgi_cors_decorator
def asgi_cors(app, allow_all=False, hosts=None, host_wildcards=None, callback=None):
return asgi_cors_decorator(allow_all, hosts, host_wildcards, callback)(app)
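# Minimal usage sketch (the app and origins below are placeholders, not part of this module):
#
#   async def app(scope, receive, send):
#       await send({"type": "http.response.start", "status": 200, "headers": []})
#       await send({"type": "http.response.body", "body": b"hello"})
#
#   app = asgi_cors(app, hosts={"https://example.com"}, host_wildcards=["https://*.example.com"])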
| 3,102 |
ugly_numbers.py
|
Chinmaykd21/python_practice
| 0 |
2167704
|
"""Ugly numbers are numbers whose only prime factors are 2, 3 or 5. The sequence 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, … shows the first 11 ugly numbers. By convention, 1 is included. Write a program to find Nth Ugly Number.
Example 1:
Input:
N = 10
Output: 12
Explanation: 10th ugly number is 12.
Example 2:
Input:
N = 4
Output: 4
Explanation: 4th ugly number is 4."""
class Solution:
def __init__(self):
self.p2 = 0
self.p3 = 0
self.p5 = 0
self.minimum = 0
def getUglyNumber(self, n):
p2 = self.p2
p3 = self.p3
p5 = self.p5
minimum = self.minimum
ugly_numbers_list = []
ugly_numbers_list.append(1)
for _ in range(0, n):
num2 = 2 * ugly_numbers_list[p2]
num3 = 3 * ugly_numbers_list[p3]
num5 = 5 * ugly_numbers_list[p5]
minimum = min(num2, num3, num5)
ugly_numbers_list.append(minimum)
if num2 == minimum:
p2 += 1
if num3 == minimum:
p3 += 1
if num5 == minimum:
p5 += 1
return ugly_numbers_list[n-1]
def main():
ugn = Solution()
print(ugn.getUglyNumber(1))
if __name__ == "__main__":
main()
| 1,264 |
app.py
|
omsable/mongodb
| 0 |
2171878
|
# -*- coding: utf-8 -*-
import json
import os
from bson import ObjectId
from pymongo import MongoClient
from flask import Flask, make_response, request
class ObjectIdEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
return super().default(o)
class Handler:
app = Flask(__name__)
def __init__(self) -> None:
super().__init__()
self.mongo = MongoClient(os.getenv('MONGODB_URI',
'mongodb://localhost:27017/'))
def find_many(self):
return self.find()
def find_one(self):
return self.find(one=True)
def find(self, one: bool = False):
req = request.get_json()
db = req['db']
coll = req['coll']
query = req['query']
sort = req.get('sort')
fields = req.get('fields', {})
kwargs = {}
if sort:
kwargs['sort'] = sort
if fields:
kwargs['projection'] = fields
if one:
res = self.mongo[db][coll].find_one(query, **kwargs)
return self.end(res)
cur = self.mongo[db][coll].find(query, **kwargs)
records = []
for doc in cur:
records.append(doc)
return self.end(records)
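# Illustrative request body for /find or /findOne (field names follow the code above; the values are hypothetical):
#   {"db": "mydb", "coll": "users", "query": {"age": {"$gt": 21}}, "sort": [["age", 1]], "fields": {"name": 1}}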
def insert(self):
req = request.get_json()
db = req['db']
coll = req['coll']
doc = req['doc']
return self.end({
'_id': str(self.mongo[db][coll].insert_one(doc).inserted_id)
})
@staticmethod
def end(res):
resp = make_response(json.dumps(res, cls=ObjectIdEncoder))
resp.headers['Content-Type'] = 'application/json; charset=utf-8'
return resp
if __name__ == '__main__':
handler = Handler()
handler.app.add_url_rule('/find', 'find', handler.find,
methods=['post'])
handler.app.add_url_rule('/findOne', 'findOne', handler.find_one,
methods=['post'])
handler.app.add_url_rule('/insert', 'insert',
handler.insert, methods=['post'])
handler.app.run(host='0.0.0.0', port=8000)
| 2,185 |
pelican_bibtex.py
|
pholthaus/pelican-bibtex
| 0 |
2169897
|
"""
Pelican BibTeX
==============
A Pelican plugin that populates the context with a list of formatted
citations, loaded from a BibTeX file at a configurable path.
The use case for now is to generate a ``Publications'' page for academic
websites.
"""
# Author: <NAME> <<EMAIL>>
# Unlicense (see UNLICENSE for details)
import logging
import collections
logger = logging.getLogger(__name__)
from pelican import signals
import os.path
import re
__version__ = '0.2.1'
def add_publications(generator):
"""
Populates context with a list of BibTeX publications.
Configuration
-------------
generator.settings['PUBLICATIONS']:
Dictionary that contains bibliographies:
The key denotes the bibliographies name to use in headers
The values describe the BibTeX files to read
Mandatory for this plugin.
generator.settings['PUBLICATIONS_NAVBAR']:
Bool denoting whether a navigation bar containing links to each bibliography should be produced.
Defaults to 'True'.
generator.settings['PUBLICATIONS_HEADER']:
Bool denoting whether a header (h2) should be produced for each bibliography.
Defaults to 'True'.
generator.settings['PUBLICATIONS_SPLIT']:
Bool denoting whether bibliographies should be split by year (h3).
Defaults to 'True'.
generator.settings['PUBLICATIONS_HIGHLIGHT']:
String, e.g. a name, that will be wrapped in a <strong> tag to highlight it.
Default: empty
Output
------
generator.context['publications']:
Dictionary containing the name of each publication list as a key and its bibliography entries as the value.
A bibliography entry is a list of tuples (key, type, year, text, tags, display_tags, bibtex, pdf, slides, poster, doi, url).
See Readme.md for more details.
"""
if 'PUBLICATIONS' not in generator.settings:
return
if 'PUBLICATIONS_NAVBAR' not in generator.settings:
generator.context['PUBLICATIONS_NAVBAR'] = True
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from pybtex.database.input.bibtex import Parser
from pybtex.database.output.bibtex import Writer
from pybtex.database import BibliographyData, PybtexError
from pybtex.backends import html
from pybtex.style.formatting import plain
except ImportError:
logger.warning('`pelican_bibtex` failed to load dependency `pybtex`')
return
refs = generator.settings['PUBLICATIONS']
generator.context['publications'] = collections.OrderedDict()
for rid in refs:
ref = refs[rid]
bibfile = os.path.join(generator.settings['PATH'], ref['file'])
try:
bibdata_all = Parser().parse_file(bibfile)
except PybtexError as e:
logger.warning('`pelican_bibtex` failed to parse file %s: %s' % (
bibfile,
str(e)))
return
if 'title' in ref:
title = ref['title']
else:
title = rid
if 'header' in ref:
header = ref['header']
else:
header = True
if 'split' in ref:
split = ref['split']
else:
split = True
if 'split_link' in ref:
split_link = ref['split_link']
else:
split_link = True
if 'bottom_link' in ref:
bottom_link = ref['bottom_link']
else:
bottom_link = True
if 'all_bibtex' in ref:
all_bibtex = ref['all_bibtex']
else:
all_bibtex = False
if 'highlight' in ref:
highlights = ref['highlight']
else:
highlights = []
if 'group_type' in ref:
group_type = ref['group_type']
else:
group_type = False
publications = []
# format entries
plain_style = plain.Style()
html_backend = html.Backend()
formatted_entries = plain_style.format_entries(bibdata_all.entries.values())
for formatted_entry in formatted_entries:
key = formatted_entry.key
entry = bibdata_all.entries[key]
year = entry.fields.get('year')
typee = entry.type
if entry.fields.get('tags'):
tags = [tag.strip() for tag in entry.fields.get('tags').split(';')]
else:
tags = []
display_tags = [x for x in tags if x != "doi-open" and x != "url-open"]
# This shouldn't really stay in the field dict
# but new versions of pybtex don't support pop
pdf = entry.fields.get('pdf', None)
slides = entry.fields.get('slides', None)
poster = entry.fields.get('poster', None)
doi = entry.fields.get('doi', None)
url = entry.fields.get('url', None)
#clean fields from appearing in bibtex and on website
entry_tmp = entry
for to_del in ['pdf', 'slides', 'poster', 'tags']:
entry_tmp.fields.pop(to_del, None)
#render the bibtex string for the entry
bib_buf = StringIO()
bibdata_this = BibliographyData(entries={key: entry_tmp})
Writer().write_stream(bibdata_this, bib_buf)
#clean more fields from appearing on website
for to_del in ['doi', 'url']:
entry_tmp.fields.pop(to_del, None)
entry_clean = next(plain_style.format_entries(bibdata_this.entries.values()), None)
# apply highlight (strong)
text = entry_clean.text.render(html_backend)
for replace in highlights:
text = text.replace(replace, '<strong>' + replace + '</strong>')
publications.append((key,
typee,
year,
text,
tags,
display_tags,
bib_buf.getvalue(),
pdf,
slides,
poster,
doi,
url))
generator.context['publications'][rid] = {}
generator.context['publications'][rid]['title'] = title
generator.context['publications'][rid]['path'] = os.path.basename(bibfile)
generator.context['publications'][rid]['header'] = header
generator.context['publications'][rid]['split'] = split
generator.context['publications'][rid]['bottom_link'] = bottom_link
generator.context['publications'][rid]['split_link'] = split_link
generator.context['publications'][rid]['all_bibtex'] = all_bibtex
generator.context['publications'][rid]['data'] = collections.OrderedDict()
if group_type:
generator.context['publications'][rid]['data'] = sorted(publications, key=lambda pub: (-int(pub[2].replace("in press", "9999")), pub[1]))
else:
generator.context['publications'][rid]['data'] = sorted(publications, key=lambda pub: -int(pub[2].replace("in press", "9999")))
def register():
signals.generator_init.connect(add_publications)
| 7,319 |
HoliCity/utils.py
|
Delay-Xili/HoliCity-MaskRCNN
| 10 |
2167804
|
import os
import os.path as osp
import numpy as np
import torch
from tqdm import tqdm
import cv2
_H, _W = 512, 512
_plane_area = 32 * 32
def get_image_dir_list(image_dirs=None):
middle_p = sorted(os.listdir(image_dirs))
filelist = []
for pth in middle_p:
imgs = sorted(os.listdir(osp.join(image_dirs, pth)))
for img in imgs:
filelist.append(osp.join(pth, img))
return filelist
def V1_filelist(split='train', rootdir=None, split_version='v1',
cleanf="clean-filelist.txt",
filef="filelist.txt",
trainf="train-middlesplit.txt",
validf="valid-middlesplit.txt",
validhd="valid-hd.txt",
testf="test-middlesplit.txt",
):
split_dir = osp.join(rootdir, "split", split_version)
image_dir = osp.join(rootdir, "image")
plane_dir = osp.join(rootdir, "plane")
if osp.exists(osp.join(split_dir, cleanf)):
print(f"{osp.join(split_dir, cleanf)} already exited")
with open(osp.join(split_dir, cleanf), "r") as f:
samples = [line[:-1] for line in f.readlines()]
else:
with open(osp.join(split_dir, filef), "r") as f:
samples = [line[:-1] for line in f.readlines()]
samples = data_clean(plane_dir, samples, cleanf, split_dir)
if split in ["train", "valid", "test"]:
with open(osp.join(split_dir, eval(split+'f')), "r") as ft:
check_sample = [line[:-1] for line in ft.readlines()]
elif split == "test+valid":
with open(osp.join(split_dir, validf), "r") as ft:
check_sample_v = [line[:-1] for line in ft.readlines()]
with open(osp.join(split_dir, testf), "r") as ft:
check_sample_t = [line[:-1] for line in ft.readlines()]
check_sample = check_sample_v + check_sample_t
elif split == "validhd":
with open(osp.join(split_dir, validhd), "r") as ft:
check_sample = [line[:-1] for line in ft.readlines()]
elif split == "validld":
with open(osp.join(split_dir, validf), "r") as ft:
check_sample = [line[:-1] for line in ft.readlines()]
check_sample = [s for s in check_sample if s[-2:] == "LD"]
else:
raise ValueError("")
filelist = []
for file in samples:
if file[:-7] in check_sample:
filelist.append(file)
return filelist
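# Illustrative call (the root directory is a placeholder):
#   files = V1_filelist(split='valid', rootdir='/data/HoliCity')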
def data_clean(plane_dir, filelist, cleanf, output):
new_file = []
print("data cleaning......")
for name_ in tqdm(filelist):
name = osp.join(plane_dir, name_)
# if os.path.isfile(name + "_plan.npz"):
# pass
# else:
# print(name)
# continue
with np.load(name + "_plan.npz") as npz:
# plane = data['plane']
planes_ = npz['ws']
gt_segmentation = cv2.imread(name + "_plan.png", cv2.IMREAD_ANYDEPTH)
uni_idx = np.unique(gt_segmentation)
planes_in_seg = len(uni_idx)
planes_in_nor = planes_.shape[0]
plane_area = []
for idx in uni_idx[1:]:
plane_area.append(np.sum(gt_segmentation == idx)) # (num_planes, )
plane_area = np.array(plane_area)
sat_plane_num = np.sum(plane_area > _plane_area)
if planes_in_seg >= 2 and planes_in_nor + 1 >= planes_in_seg and sat_plane_num >=2:
new_file.append(name_+"\n")
with open(f"{output}/{cleanf}", "w") as f:
f.writelines(new_file)
print(f" The original files number: {len(filelist)}, and the cleaned files number: {len(new_file)}. {len(filelist)-len(new_file)} removed!")
print(f"Saving {output}/{cleanf}")
exit()
return [line[:-1] for line in new_file]
| 3,737 |
docker/brewer.py
|
HuijingHei/jenkinsHenryWang
| 0 |
2170485
|
#!/usr/bin/env python
"""A brew wrapper."""
import re
from xmlrpc.client import ServerProxy
import datetime
import hwlogging
class Brewer():
"""Work with brew to get build information.
This class provides build info, tags, changelog, and bugs information of
this build.
Parameters of this class:
name: A string representing the build's name ("kernel-3.10.0-825.el7").
api: Brew API URL.
"""
def __init__(self, name, api):
"""Init variables and logger."""
self.name = name
self._api = api
self._logger = hwlogging.Logger("brewer.log", "/kernels/").logger
def _get_proxy(self):
return ServerProxy(self._api)
@property
def build(self):
"""Return a dict of build info.
{'package_name': 'kernel',
'extra': xxx,
'creation_time': 'xxx',
'completion_time': 'xxx',
'package_id': xxx,
'id': xxx,
'build_id': xxx,
'epoch': xxx,
'source': xxx,
'state': xxx,
'version': 'xxx',
'completion_ts': xxx,
'owner_id': xxx,
'owner_name': 'xxx',
'nvr': 'xxx',
'start_time': 'xxx',
'creation_event_id': xxx,
'start_ts': xxx,
'volume_id': xxx,
'creation_ts': xxx,
'name': 'xxx',
'task_id': xxx,
'volume_name': 'xxx',
'release': 'xxx'}.
"""
return self._get_proxy().getBuild(self.name)
@property
def tags(self):
"""Return a tuple of tags of this build.
('rhel-7.5-candidate',)
"""
raw_tags = self._get_proxy().listTags(self.name)
self._logger.debug("Raw tags fetched from brew {0}"
.format(raw_tags))
return tuple(x['name'] for x in raw_tags)
@property
def changelog(self):
r"""Return a raw changelog dict."""
# Work around log time is not at the same day as build "start_time"
# Like kernel-3.10.0-512.el7, its log time is 2016-09-30 12:00:00
# but its build start time is 2016-10-01 03:27:46.300467
dt_obj = datetime.datetime.strptime(
self.build["start_time"], "%Y-%m-%d %H:%M:%S.%f")
dt_delta = dt_obj - datetime.timedelta(days=3)
dt_delta_str = dt_delta.strftime("%Y-%m-%d %H:%M:%S")
self._logger.debug(
"Build was built at {0}, changelogs will be fetched after {1}."
.format(dt_obj, dt_delta_str)
)
changelogs = self._get_proxy().getChangelogEntries(
self.name, '', '', '', '', dt_delta_str
)
self._logger.debug("Raw changelogs: {0}.".format(changelogs))
for changelog in changelogs:
if self.build["version"] in changelog["author"]:
return changelog
def _bug_id_fetcher(self, raw_bug_list):
bug_id_pattern = re.compile(r"\d{7}")
bug_id_list = re.findall(bug_id_pattern, raw_bug_list)
self._logger.debug("Bug IDs in current build: {}.".format(bug_id_list))
return set(bug_id_list)
@property
def bugs(self):
"""Return a set of bugs which are fixed in this build.
{'1521092', '1506382', '1516644', '1514371', '1432288', '1501882',
'1441965', '1508380', '1516680', '1525027'}
"""
raw_bug_list = self.changelog["text"]
self._logger.debug("Raw bug list {0}".format(raw_bug_list))
return self._bug_id_fetcher(raw_bug_list)
def download_url(self, task_id):
"""Retrun a URL for kernel download.
Parameters of this method:
task_id: ID of brew task which compiled this build
"""
URL_prefix = "http://download.eng.bos.redhat.com/brewroot/work/"
download_urls = []
sub_tasks = self._get_proxy().getTaskChildren(task_id)
self._logger.debug("Sub tasks {0}".format(sub_tasks))
for sub_task in sub_tasks:
if sub_task["arch"] == "x86_64":
sub_task_id = str(sub_task["id"])
self._logger.info("Found task ID {0}".format(sub_task_id))
rpms = self._get_proxy().getTaskResult(sub_task_id)["rpms"]
self._logger.debug("All build RPMS {0}".format(rpms))
for rpm in rpms:
if self.name in rpm or \
self.name.replace("kernel", "kernel-core") in rpm or \
self.name.replace("kernel", "kernel-modules") in rpm:
download_urls.append(URL_prefix + rpm)
self._logger.info("Found RPM {0}".format(download_urls))
return download_urls
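# A minimal usage sketch (not part of the original module); the API URL below is a
# placeholder assumption, and the build name follows the form shown in the class
# docstring:
#
#     brewer = Brewer("kernel-3.10.0-825.el7", "https://brewhub.example.com/brewhub")
#     print(brewer.tags)   # e.g. ('rhel-7.5-candidate',)
#     print(brewer.bugs)   # set of 7-digit bug IDs parsed from the changelog
#     urls = brewer.download_url(brewer.build["task_id"])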
| 4,618 |
test/fixtures/general.py
|
olasirtep/ahjo
| 0 |
2172055
|
"""Fixtures for preparing sample projects for test run.
When you add new sample projects, define a fixture for them like the example below:
@pytest.fixture(scope='session')
def new_sample(prepared_sample):
return prepared_sample(
sample_name='new_sample_project',
host_param='new_host_param',
port_param='new_port_param',
usrn_param='new_username_param',
pass_param='new_password_param'
)
"""
import csv
from argparse import Namespace
from base64 import b64encode
from distutils.dir_util import copy_tree
from os import environ, getcwd, path
import commentjson as json
import pytest
from alembic import command
from alembic.config import Config
from sqlalchemy import MetaData, Table
PROJECT_ROOT = getcwd()
SAMPLE_DATA_DIR = './database/data'
@pytest.fixture(scope='session')
def project_root():
return PROJECT_ROOT
@pytest.fixture(scope='session')
def git_setup(project_root):
environ["GIT_DIR"] = path.join(project_root, '.git')
@pytest.fixture(scope='session')
def ahjo_config():
"""Return read sample Ahjo config.
This fixture is used when creating engine fixture.
"""
def read_samples_ahjo_config(sample_directory):
sample_config = path.join(sample_directory, 'config_development.jsonc')
with open(sample_config) as f:
config = json.load(f)
config = config['BACKEND']
return config
return read_samples_ahjo_config
@pytest.fixture(scope='session')
def prepared_sample(tmpdir_factory, pytestconfig):
def prepare_sample_for_tests(sample_name, host_param, port_param, usrn_param, pass_param):
sample_directory = tmpdir_factory.mktemp(sample_name).strpath
copy_sample_project(pytestconfig.rootdir,
sample_name, sample_directory)
rewrite_sample_configuration(
sample_directory,
pytestconfig.getoption(host_param),
pytestconfig.getoption(port_param)
)
write_sample_password_files(
sample_directory,
pytestconfig.getoption(usrn_param),
pytestconfig.getoption(pass_param)
)
return sample_directory
return prepare_sample_for_tests
def copy_sample_project(test_root, sample_name, sample_target):
"""Copy sample project to temporary directory."""
tests_dir = path.join(test_root, 'test')
sample_source = path.join(tests_dir, 'samples', sample_name)
copy_tree(sample_source, sample_target)
def rewrite_sample_configuration(sample_root, hostname, port_number):
"""Open sample project configuration file and rewrite
server hostname, port number, username file path and password file path.
"""
test_configuration = path.join(sample_root, 'config_development.jsonc')
with open(test_configuration, 'r') as f:
config = json.load(f)
config['BACKEND']['target_server_hostname'] = hostname
config['BACKEND']['sql_port'] = port_number
config['BACKEND']['username_file'] = path.join(sample_root, 'username')
config['BACKEND']['password_file'] = path.join(sample_root, 'password')
with open(test_configuration, 'w') as f:
json.dump(config, f)
def write_sample_password_files(sample_root, username, password):
"""Create files containing server credentials."""
username_file = path.join(sample_root, 'username')
username = 'cred=' + username
with open(username_file, 'w', encoding='utf-8') as f:
f.write(username)
password_file = path.join(sample_root, 'password')
password = '<PASSWORD>=' + b64encode(password.encode()).decode()
with open(password_file, 'w', encoding='utf-8') as f:
f.write(password)
@pytest.fixture(scope='function')
def run_alembic_action():
"""When executing, CWD must be set correctly to sample root!"""
def execute_alembic(action, target):
alembic_config = Config('alembic.ini')
# main section options are set when main section is read
main_section = alembic_config.config_ini_section
alembic_config.get_section(main_section)
alembic_config.cmd_opts = Namespace(
x=["main_config=config_development.jsonc"])
if action == 'upgrade':
command.upgrade(alembic_config, target)
elif action == 'downgrade':
command.downgrade(alembic_config, target)
return execute_alembic
@pytest.fixture(scope='function')
def populate_table():
"""When executing, CWD must be set correctly to sample root!"""
def insert_to_table(engine, table_name):
source_file = path.join(SAMPLE_DATA_DIR, table_name)
splitted = table_name.split('.')
if len(splitted) > 1:
table_name = splitted[1]
table_schema = splitted[0]
target_table = Table(table_name, MetaData(
bind=engine), schema=table_schema, autoload=True)
else:
target_table = Table(table_name, MetaData(
bind=engine), autoload=True)
for rows in chunkreader(source_file):
engine.execute(target_table.insert(), rows)
return insert_to_table
def chunkreader(file_path, chunksize=200):
with open(file_path, 'r', encoding='utf-8', newline='') as f:
reader = csv.DictReader(f, delimiter=';')
chunk = []
for i, line in enumerate(reader):
if (i % chunksize == 0 and i > 0):
yield chunk
chunk = []
chunk.append(line)
yield chunk
| 5,464 |
catkin_ws/src/lstm_visual_servoing/src/state_machine.py
|
ravihammond/lstm-visual-servoing
| 0 |
2170728
|
#!/usr/bin/env python3
import rospy
import std_msgs.msg
import sensor_msgs.msg
import geometry_msgs.msg
import lstm_visual_servoing.msg
import tf
import pyquaternion
import math
import numpy as np
import time
class StateMachine:
def __init__(self):
rospy.init_node('state_maching', anonymous=False)
rospy.Subscriber("visual_control", lstm_visual_servoing.msg.Control, self.visual_control_callback)
rospy.Subscriber("gripper_pos", std_msgs.msg.Float32, self.gripper_pos_callback)
self.pub = rospy.Publisher("move_at_speed",geometry_msgs.msg.Twist,queue_size=10)
self.pub_grip = rospy.Publisher("move_gripper_to_pos",std_msgs.msg.Float32,queue_size=10)
self.tf_listener = tf.TransformListener()
self.tf_broadcaster = tf.TransformBroadcaster()
self.last_control_msg_time = time.time()
print("State Machine Spinning")
        self.gripper_pos = 0
        self.spin()
def spin(self):
#create blank message
self.visual_control_msg = lstm_visual_servoing.msg.Control()
claw_control = 0
prev_claw_state = True
last_claw_change_time = time.time()
        #process the latest control message at 20Hz
r = rospy.Rate(20)
while not rospy.is_shutdown():
try:
camera_t,camera_r = self.tf_listener.lookupTransform(
'base','camera_color_optical_frame', rospy.Time())
q_camera = pyquaternion.Quaternion(camera_r[3],camera_r[0],camera_r[1],camera_r[2])
ctrl = self.visual_control_msg
tran_v = q_camera.rotate((ctrl.vx,ctrl.vy,ctrl.vz))
rot_v = q_camera.rotate((ctrl.rx,ctrl.ry,ctrl.rz))
claw_msg = ctrl.claw
safe_trans_v, safe_rot_v = self.get_safety_return_speeds(camera_t,camera_r)
msg = geometry_msgs.msg.Twist()
msg.linear.x, msg.linear.y, msg.linear.z = tran_v
msg.angular.x, msg.angular.y, msg.angular.z = rot_v
dist = math.sqrt(camera_t[0]**2 + camera_t[1]**2)
msg.linear.x += safe_trans_v[0]
msg.linear.y += safe_trans_v[1]
msg.linear.z += safe_trans_v[2]
if camera_t[2] < 0.25 and msg.linear.z < 0:
msg.linear.z = 0.00
percent = 1 if dist < 0.92 else max(0,1 - ((dist - 0.92) / (0.95 - 0.92)))
if camera_t[2] < 0.23 and msg.linear.z <= 0:
msg.linear.z = 0.2 * percent
msg.angular.x += safe_rot_v[0]
msg.angular.y += safe_rot_v[1]
msg.angular.z += safe_rot_v[2]
v_speed = 0.15 #m/s
r_speed = math.radians(20) #deg/s
msg.linear.x *= v_speed
msg.linear.y *= v_speed
msg.linear.z *= v_speed
msg.angular.x *= r_speed
msg.angular.y *= r_speed
msg.angular.z *= r_speed
new_claw_state = claw_msg > 0.5
if new_claw_state == prev_claw_state:
last_claw_change_time = time.time()
if time.time() - self.last_control_msg_time < 1.0:
if time.time() - last_claw_change_time > 0.1:
self.pub_grip.publish(0 if new_claw_state else 100)
prev_claw_state = new_claw_state
self.pub.publish(msg)
except (tf.LookupException,tf.ExtrapolationException) as e:
print("Error: %s" % e)
r.sleep()
def get_safety_return_speeds(self,camera_t, camera_r):
safe_trans_v = [0.0,0.0,0.0]
safe_rot_v = np.array([0.0,0.0,0.0])
q_camera = pyquaternion.Quaternion(camera_r[3],camera_r[0],camera_r[1],camera_r[2])
cam_z = q_camera.rotate(np.array([0.0,0.0,1.0]))
cam_y = q_camera.rotate(np.array([0.0,1.0,0.0]))
#PAN TILT LIMITS
        full_rot_speed = 2.2 #maximum rotational return speed
        full_rot_speed_dist = math.radians(5) #5 degrees, converted to radians
#get the normal to the plane that runs throught the robot z axis and the camera position
v_plane_norm = np.array([-camera_t[1],camera_t[0],0.0])
v_plane_norm /= np.linalg.norm(v_plane_norm)
#project camera z axis onto plane
z_proj = cam_z - np.dot(cam_z,v_plane_norm)*v_plane_norm
#normalise z projection
z_proj /= np.linalg.norm(z_proj)
#PAN
pan_angle_limit = math.radians(1) # degrees
#get the pan rotation axis between camera z and plane
pan_axis = -np.cross(z_proj,cam_z)
#get the pan angle
pan_angle = np.linalg.norm(pan_axis) #TODO This not correct way to get angle from cross product. It needs a sin
#normalise pan rotation axis
pan_axis /= np.linalg.norm(pan_axis)
pan_return_speed = min(
full_rot_speed,
max(
0,
(pan_angle - pan_angle_limit) / full_rot_speed_dist * full_rot_speed
)
)
safe_rot_v += pan_axis * pan_return_speed * 0.5
#TILT LIMIT
out_norm = np.array([camera_t[0],camera_t[1],0.0])
out_norm /= np.linalg.norm(out_norm)
tilt_axis = np.cross(z_proj,out_norm)
tilt_sign_y = np.sign(np.dot(tilt_axis,v_plane_norm))
tilt_sign_x = np.sign(np.dot(z_proj,out_norm))
tilt_angle = math.asin(np.linalg.norm(tilt_axis))
if tilt_sign_x < 0:
tilt_angle = math.pi - tilt_angle
tilt_angle *= tilt_sign_y
tilt_angle_min = math.radians(-40)
tilt_raw = -80
tilt_angle_max = math.radians(tilt_raw)
dist = math.sqrt(camera_t[0]**2 + camera_t[1]**2)
tilt_lock = 0.75
tilt_lock_max = 0.92
# if dist < tilt_lock and camera_t[2] < 0.4:
# tilt_angle_min = -1.3
if dist > tilt_lock:
percent = (dist - tilt_lock) / (tilt_lock_max - tilt_lock)
tilt_angle_max = math.radians(tilt_raw + ((-62-tilt_raw) * percent))
# print(f"tilt angle min: {tilt_angle_min}, tilt angle max: {tilt_angle_max}")
# print("camera: ", camera_t)
tilt_return_speed = -min(full_rot_speed,max(0,
(tilt_angle-tilt_angle_max)/full_rot_speed_dist*full_rot_speed))
tilt_return_speed += min(full_rot_speed,max(0,
(tilt_angle_min-tilt_angle)/full_rot_speed_dist*full_rot_speed))
# tilt_return_spee
safe_rot_v -= v_plane_norm * tilt_return_speed * 0.5
#LOOK UP
if tilt_angle > -math.pi/4 and tilt_angle < math.pi/4:
up_dir = np.array([0.0,0.0,1.0])
else:
up_dir = out_norm
#project up direction onto camera xy plane
up_proj = up_dir - np.dot(up_dir,cam_z)*cam_z
#normalise project up direction
up_proj /= np.linalg.norm(up_proj)
#If up direction is withing +- 90deg cam_y
if np.dot(up_proj,-cam_y) > 0:
#Get rotation speed from cross product
safe_rot_v += -np.cross(up_proj,-cam_y)*5
full_speed_dist = 0.04
full_speed = 2.0
#Inner Cylinder
cylinder_radius = 0.28
dist = math.sqrt( camera_t[0]**2 + camera_t[1]**2)
cylinder_return_speed = max(0,(cylinder_radius-dist)/full_speed_dist*full_speed)
safe_trans_v[0] += camera_t[0]/dist * cylinder_return_speed
safe_trans_v[1] += camera_t[1]/dist * cylinder_return_speed
#Outer Sphere
sphere_radius = 0.72
dist = math.sqrt( (camera_t[0] * 0.73)**2 + (camera_t[1] * 0.73)**2 + (camera_t[2] * 1)**2)
cylinder_return_speed = min(0,(sphere_radius-dist)/full_speed_dist*full_speed)
safe_trans_v[0] += camera_t[0]/dist * cylinder_return_speed
safe_trans_v[1] += camera_t[1]/dist * cylinder_return_speed
safe_trans_v[2] += camera_t[2]/dist * cylinder_return_speed
#back Wall
wall_unit_norm = [0.7071,-0.7071]
dist = camera_t[0] * wall_unit_norm[0] + camera_t[1] * wall_unit_norm[1] + 0.13
wall_return_speed = -min(0,dist/full_speed_dist*full_speed)
safe_trans_v[0] += wall_unit_norm[0] * wall_return_speed
safe_trans_v[1] += wall_unit_norm[1] * wall_return_speed
return (safe_trans_v, safe_rot_v.tolist())
def gripper_pos_callback(self, data):
self.gripper_pos = data
def visual_control_callback(self, data):
self.visual_control_msg = data
self.last_control_msg_time = time.time()
if __name__ == "__main__":
StateMachine()
| 8,662 |
lifoid/commands/test.py
|
LifoidLabs/lifoid
| 1 |
2171770
|
"""
Run the lifoid conversation test suite
Author: <NAME> <<EMAIL>>
Copyright (C) 2017-2018 <NAME>
"""
import os
import sys
import traceback
import yaml
import unittest
import json
from importlib import import_module
from commis import Command, color
from lifoid.exceptions import LifoidTestError
from lifoid.constants import HEADER
from lifoid.config import settings
from lifoid.message.repository import MessageRepository
from lifoid.webhook.handler import Handler
sys.path.insert(0, os.getcwd())
PATH = FILE = None
class ConversationsTestCase(unittest.TestCase):
"""
Conversations test class
"""
FILE = None
PATH = None
def setUp(self):
"""
Tests initialization
"""
from lifoid.www.app import app
app.testing = True
self.app = app.test_client()
app_settings_module = import_module(
settings.lifoid_settings_module
)
app_settings_module.HANDLERS = [Handler]
self.test_file = ConversationsTestCase.FILE
if ConversationsTestCase.PATH is not None:
self.tests_path = ConversationsTestCase.PATH
else:
self.tests_path = app_settings_module.TESTS_PATH
self.messages = MessageRepository(
settings.repository,
settings.message_prefix
)
settings.dev_auth = 'yes'
def tearDown(self):
pass
def test_conversation(self):
if self.test_file is not None:
self.run_test(self.test_file)
else:
for filename in os.listdir(self.tests_path):
self.run_test(os.path.join(self.tests_path, filename))
def run_test(self, filepath):
print(color.format(filepath, color.CYAN))
with open(filepath) as file_handle:
            loaded = yaml.safe_load(file_handle.read())
print('language {}'.format(loaded['lang']))
print('{} messages loaded...'.format(len(loaded['messages'])))
lang = loaded['lang']
tests = [(loaded['messages'][i], loaded['messages'][i + 1])
for i in range(0, len(loaded['messages']) - 1, 2)]
for mess, resp in tests:
print(color.format('> {}', color.CYAN, mess))
try:
try:
mess = json.loads(mess)
mess.update({
'lifoid_id': ConversationsTestCase.LIFOID_ID,
'lang': lang
})
rv = self.app.post(
'/webhook',
data=json.dumps(mess),
content_type='application/json',
follow_redirects=True)
except Exception:
rv = self.app.post(
'/webhook',
data=json.dumps({
'lifoid_id': ConversationsTestCase.LIFOID_ID,
'access_token': 'access_token',
'q': {
'text': mess,
'attachments': None
},
'user': {
'username': 'me'
},
'lang': lang
}),
content_type='application/json',
follow_redirects=True)
from_date = rv.data.decode('utf8')
assert('200' in rv.status)
rv = self.app.post(
'/messages',
data=json.dumps({
'lifoid_id': ConversationsTestCase.LIFOID_ID,
'access_token': 'access_token',
'from_date': from_date,
'user': {
'username': 'me'
}
}),
content_type='application/json',
follow_redirects=True)
json_rv = json.loads(rv.data.decode('utf8'))
assert('200' in rv.status)
valid = True
if resp != 'DONT_CARE':
valid = False
for el in json_rv:
if resp in el['payload']['text']:
valid = True
self.assertTrue(valid)
for msg in json_rv:
print(color.format('< {}',
color.GREEN,
msg['payload']['text']))
except AssertionError:
print(color.format(filepath, color.RED))
print(color.format('Expected: {}', color.RED, resp))
print(color.format('Received: {}', color.RED,
json_rv))
raise
class TestCommand(Command):
name = 'test'
help = 'Run tests suite'
args = {
'--path': {
'metavar': 'PATH',
'required': False,
'help': 'location of a group of test files'
},
'--file': {
'metavar': 'FILEPATH',
'required': False,
'help': 'location of a test file'
},
'--debug': {
'action': 'store_true',
'required': False,
'help': 'force debug mode'
},
'--lifoid_id': {
'metavar': 'LIFOID_ID',
'required': True,
'help': 'unique id of lifoid chatbot'
}
}
def handle(self, args):
"""
CLI to test to lifoid
"""
from lifoid.config import settings
print(HEADER)
settings.pasync = 'no'
ConversationsTestCase.LIFOID_ID = args.lifoid_id
if args.path is not None:
ConversationsTestCase.PATH = args.path
print(color.format('* Run tests suite from {}'.format(args.path),
color.GREEN))
else:
ConversationsTestCase.FILE = args.file
print(color.format('* Run tests suite from {}'.format(args.file),
color.GREEN))
try:
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(
ConversationsTestCase)
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
if len(test_result.errors) > 0 or len(test_result.failures) > 0:
raise LifoidTestError()
return color.format("* all tests passed", color.GREEN)
except LifoidTestError as exc:
print(color.format('* tests suite failed', color.RED))
except Exception:
print(traceback.format_exc())
return color.format("* bye bye", color.GREEN)
| 7,070 |
mi/core/kudu/__init__.py
|
rhan1498/marine-integrations
| 0 |
2171695
|
#!/usr/bin/env python
# Copyright 2012, 2013, 2014 The Regents of the University of California
"""BRTT Antelope API wrapper"""
import os, sys
# Antelope libs need to see this env var, otherwise blocking calls hold the GIL
# preventing other threads from running.
os.environ['ANTELOPE_PYTHON_GILRELEASE'] = '1'
# This is where Antelope puts its Python modules. Update for other Antelope
# versions.
sys.path.append('/opt/antelope/5.3/data/python/antelope')
try:
import _Pkt as _pkt
except ImportError:
import warnings
warnings.warn("Failed to import Antelope libs", RuntimeWarning)
__all__ = ['_pkt']
| 619 |
facet/formatter/pickle.py
|
edponce/FACET
| 2 |
2171446
|
import pickle
import cloudpickle
from .base import BaseFormatter
__all__ = [
'PickleFormatter',
'CloudpickleFormatter',
]
class PickleFormatter(BaseFormatter):
NAME = 'pickle'
def _format(self, data):
return pickle.dumps(data)
class CloudpickleFormatter(BaseFormatter):
NAME = 'cloudpickle'
def _format(self, data):
return cloudpickle.dumps(data)
| 397 |
UserAdd/__init__.py
|
f5devcentral/volterra-management
| 0 |
2171475
|
import datetime
import logging
import os
from AAD_helpers import retrieveAccessToken, voltUsers2Add, voltUsers2Remove
from ms_graph import getUser
from volterra_helpers import createVoltSession, updateSO, addUser, removeUserRoles
from quota_helpers import postNewUser
import azure.functions as func
def main(addTimer: func.TimerRequest) -> None:
utc_timestamp = datetime.datetime.utcnow().replace(
tzinfo=datetime.timezone.utc).isoformat()
if addTimer.past_due:
        logging.info('User Add timer is past due.')
logging.info('User Add ran at %s', utc_timestamp)
required_vars = {
'AADclientID': False,
'AADtenantID': False,
'AADsecret': False,
'AADGroupName': False,
'VoltTenantApiToken': False,
'VoltTenantName': False,
'TeamsWebhookUrl': False
}
for v in required_vars:
required_vars[v] = os.environ.get(v, False)
        if required_vars[v] is False:
raise ValueError("A value must be provided for {0}".format(v))
# Get VoltConsole Session
s = createVoltSession(required_vars['VoltTenantApiToken'], required_vars['VoltTenantName'])
logging.info(s['lastOp'])
# Clean VoltConsole Users
removeUserRoles(s)
logging.info(s['lastOp'])
# Add Users from AAD
AADtoken = retrieveAccessToken(required_vars['AADclientID'], required_vars['AADtenantID'], required_vars['AADsecret'])
    addUsers: list[dict] = voltUsers2Add(s, AADtoken, required_vars['AADGroupName'])
newUsers = []
for user in addUsers:
email = user['userPrincipalName']
thisUser = getUser(AADtoken, email)
addUser(s, email, thisUser['givenName'], thisUser['surname'])
newUsers.append(email)
logging.info(s['lastOp'])
# Post New Users in Teams Channel
if len(newUsers) > 0:
postNewUser(required_vars['TeamsWebhookUrl'], newUsers, required_vars['VoltTenantName'])
# Log Users to Remove (Information only)
    cleanUsers: list[dict] = voltUsers2Remove(s, AADtoken, required_vars['AADGroupName'])
if len(cleanUsers) > 0:
remUsers = []
for user in cleanUsers:
remUsers.append(user['userPrincipalName'])
updateSO(s, 'cleanUsers', 'success', "Users to be cleaned: {0}".format(remUsers))
else:
updateSO(s, 'cleanUsers', 'success', "No Users to be cleaned.")
logging.info(s['lastOp'])
| 2,434 |
df_websockets/utils.py
|
MasterGowen/df_websockets
| 5 |
2171348
|
# ##############################################################################
# This file is part of df_websockets #
# #
# Copyright (C) 2020 <NAME> <<EMAIL>> #
# All Rights Reserved #
# #
# You may use, distribute and modify this code under the #
# terms of the (BSD-like) CeCILL-B license. #
# #
# You should have received a copy of the CeCILL-B license with #
# this file. If not, please visit: #
# https://cecill.info/licences/Licence_CeCILL-B_V1-en.txt (English) #
# or https://cecill.info/licences/Licence_CeCILL-B_V1-fr.txt (French) #
# #
# ##############################################################################
import hashlib
import io
import mimetypes
import os
import re
from typing import Union
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.http import QueryDict
class RE:
"""used to check if a string value matches a given regexp.
Example (requires Python 3.2+), for a function that can only handle a string of the form 123a456:
.. code-block:: python
@signal(path='myproject.signals.test')
def test(window_info, value: RE('\\d{3}a\\d{3}')):
pass
Your code won't be called for values like "abc".
:param value: regexp pattern
:type value: `str`
:param caster: if not `None`, any callable applied to the value (if valid)
:type caster: `callable` or `None`
:param flags: regexp flags passed to `re.compile`
:type flags: `int`
"""
def __init__(self, value, caster=None, flags=0):
self.caster = caster
self.regexp = re.compile(value, flags=flags)
def __call__(self, value):
matcher = self.regexp.match(str(value))
if not matcher:
raise ValueError
value = matcher.group(1) if matcher.groups() else value
return self.caster(value) if self.caster else value
class Choice:
"""used to check if a value is among some valid choices.
    Example (requires Python 3.2+), for a function that can only handle two values:
.. code-block:: python
@signal(path='myproject.signals.test')
def test(window_info, value: Choice([True, False])):
pass
    Your code won't be called if value is not True or False.
:param caster: callable to convert the provided deserialized JSON data before checking its validity.
"""
def __init__(self, values, caster=None):
self.values = set(values)
self.caster = caster
def __call__(self, value):
value = self.caster(value) if self.caster else value
if value not in self.values:
raise ValueError
return value
class SerializedForm:
"""Transform values sent by JS to a Django form.
Given a form and a :class:`list` of :class:`dict`, transforms the :class:`list` into a
:class:`django.http.QueryDict` and initialize the form with it.
>>> from django import forms
>>> class SimpleForm(forms.Form):
... field = forms.CharField()
...
>>> x = SerializedForm(SimpleForm)
>>> form = x([{'name': 'field', 'value': 'object'}])
>>> form.is_valid()
True
How to use it with Python3:
.. code-block:: python
@signal(path='myproject.signals.test')
def test(window_info, value: SerializedForm(SimpleForm), other: int):
print(value.is_valid())
How to use it with Python2:
.. code-block:: python
@signal(path='myproject.signals.test')
def test(window_info, value, other):
value = SerializedForm(SimpleForm)(value)
print(value.is_valid())
On the JS side, you can serialize the form with JQuery:
.. code-block:: html
<form onsubmit="return $.df.call('myproject.signals.test', {value: $(this).serializeArray(), other: 42})">
<input name='field' value='test' type='text'>
</form>
"""
def __init__(self, form_cls):
self.form_cls = form_cls
def __call__(self, value, *args, **kwargs):
"""
:param value:
:type value: :class:`list` of :class:`dict`
:return:
:rtype: :class:`django.forms.Form`
"""
from django.forms import FileField
if value is None:
return self.form_cls(*args, **kwargs)
post_data = QueryDict("", mutable=True)
file_data = QueryDict("", mutable=True)
for obj in value:
name = obj["name"]
value = obj["value"]
if name in self.form_cls.base_fields and isinstance(
self.form_cls.base_fields[name], FileField
):
mimetypes.init()
basename = os.path.basename(value)
(type_, __) = mimetypes.guess_type(basename)
# it's a file => we need to simulate an uploaded one
content = InMemoryUploadedFile(
io.BytesIO(b"\0"),
name,
basename,
type_ or "application/binary",
1,
"utf-8",
)
file_data.update({name: content})
else:
post_data.update({name: value})
return self.form_cls(post_data, file_data, *args, **kwargs)
def valid_topic_name(x: Union[str, bytes]) -> str:
if isinstance(x, str):
x = x.encode("utf-8")
return hashlib.sha256(x).hexdigest()
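# For example (assumed call, not in the original source): valid_topic_name("myproject.signals.test")
# returns the 64-character SHA-256 hex digest of the UTF-8 encoded string, giving a
# fixed-length, channel-safe topic name.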
| 5,964 |
MiddleKit/Core/ModelUser.py
|
PeaceWorksTechnologySolutions/w4py
| 18 |
2171445
|
import sys
from types import ModuleType
from MiscUtils.MixIn import MixIn
from MiscUtils import NoDefault
class ModelUser(object):
## Init ##
def __init__(self):
self._model = None
## Settings ##
def setting(self, name, default=NoDefault):
"""Return the given setting which is actually just taken from the model."""
return self._model.setting(name, default)
## Models ##
def model(self):
return self._model
def setModel(self, model):
assert model
assert self._model is None, 'Can only set model once.'
self._model = model
self.modelWasSet()
def readModelFileNamed(self, filename, modelClass=None, **keywords):
assert self._model is None, 'Cannot re-read a model.'
if modelClass is None:
from MiddleKit.Core.Model import Model as modelClass
self._model = modelClass(**keywords)
self._model.read(filename)
self.modelWasSet()
def modelWasSet(self):
"""Perform additional set up of the store after the model is set.
Invoked by setModel() or readModelFileNamed() as a hook for taking
action on this event. Invokes installMixIns().
"""
self.installMixIns()
## Mix-ins ##
def installMixIns(self, verbose=False):
if verbose:
print '>> installMixIns()'
print 'class =', self.__class__
modules = self.modulesForClass(self.__class__)
if verbose:
print 'modules =', ', '.join(modules)
# reverse order so that mix-ins in subclasses override super
for module in reversed(modules):
module = sys.modules[module]
assert type(module) is ModuleType
self.installMixInsForModule(module, verbose)
if verbose:
print
def installMixInsForModule(self, module, verbose=False):
# @@ 2000-10-18 ce: perhaps MixIns should be applied to the actual
# MiddleKit.Core class and not the custom one that possibly was
# passed into model. This would help with "invoking super" which
# may be a non-trivial operation in a mix-in of a generator module.
coreClassNames = self._model.coreClassNames()
if verbose:
print '>>', module
for name in dir(module):
generatorThing = getattr(module, name)
if isinstance(generatorThing, type):
# See if a class with the same name exists in MiddleKit.Core
import MiddleKit.Core as Core
if name in coreClassNames:
baseClass = self._model.coreClass(name)
if baseClass is not generatorThing:
if verbose:
print '>> mixing %s into %s' % (generatorThing, baseClass)
assert isinstance(baseClass, type)
assert isinstance(generatorThing, type)
MixIn(baseClass, generatorThing, mixInSuperMethods=True)
## Warning ##
def warning(self, msg):
"""Output a warning.
Invoked by self for any kind of appropriate warning that doesn't
warrant an exception being thrown. Preferably, this should be invoked
from a method that is invoked when the "bad event" occurs. This allows
subclasses to override that method and potentially customize the
behavior, including providing more debugging information.
This implementation writes the msg to stdout.
"""
print 'WARNING:', msg
## Self utility ##
def modulesForClass(self, pyClass, modules=None):
"""Return the modules for the class.
Returns a list of modules for pyClass, going up the chain of ancestor
classes, stopping short before ModelUser. Utility method for installMixIns.
"""
if modules is None:
modules = []
className = pyClass.__name__
if className != 'ModelUser':
modules.append(pyClass.__module__)
for baseClass in pyClass.__bases__:
self.modulesForClass(baseClass, modules)
return modules
| 4,199 |
evaluation/model/disambiguate/train_model.py
|
rungjoo/dstc10
| 2 |
2171728
|
#! /usr/bin/env python
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the LICENSE file in the
root directory of this source tree.
Trains a simple GPT-2 based disambiguation model.
Author(s): <NAME>
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import collections
import json
import os
import torch
import torch.nn as nn
from transformers import (
GPT2Tokenizer,
get_linear_schedule_with_warmup,
AdamW,
)
from tqdm import tqdm as progressbar
from dataloader import Dataloader
from disambiguator import Disambiguator
def evaluate_model(model, loader, batch_size, save_path=None):
num_matches = 0
results = collections.defaultdict(list)
with torch.no_grad():
for batch in progressbar(loader.get_entire_batch(batch_size)):
output = model(batch)
predictions = torch.argmax(output, dim=1)
num_matches += (predictions == batch["gt_label"]).sum().item()
# Save results if need be.
if save_path:
for ii in range(predictions.shape[0]):
new_instance = {
"turn_id": batch["turn_id"][ii],
"disambiguation_label": predictions[ii].cpu().item(),
}
results[batch["dialog_id"][ii]].append(new_instance)
# Restructure results JSON and save.
if save_path:
results = [
{"dialog_id": dialog_id, "predictions": predictions,}
for dialog_id, predictions in results.items()
]
print(f"Saving: {save_path}")
with open(save_path, "w") as file_id:
json.dump(results, file_id)
accuracy = num_matches / loader.num_instances * 100
return accuracy
def main(args):
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.padding_side = "left"
# Define PAD Token = EOS Token = 50256
tokenizer.pad_token = tokenizer.eos_token
    num_added_tokens = tokenizer.add_special_tokens(
{"additional_special_tokens": ["<USER>", "SYS>"]}
)
# Dataloader.
train_loader = Dataloader(tokenizer, args["train_file"], args)
val_loader = Dataloader(tokenizer, args["dev_file"], args)
test_loader = Dataloader(tokenizer, args["devtest_file"], args)
# Model.
model = Disambiguator(tokenizer, args)
model.train()
# loss function.
criterion = nn.CrossEntropyLoss()
# Prepare optimizer and schedule (linear warmup and decay)
optimizer = AdamW(
model.parameters(), lr=args["learning_rate"], eps=args["adam_epsilon"]
)
total_steps = (
int(train_loader.num_instances / args["batch_size"] * args["num_epochs"]) + 1
)
num_iters_epoch = train_loader.num_instances // args["batch_size"]
num_iters = 0
total_loss = None
# batch = train_loader.get_random_batch(args["batch_size"])
while True:
epoch = num_iters / (float(train_loader.num_instances) / args["batch_size"])
batch = train_loader.get_random_batch(args["batch_size"])
output = model(batch)
loss = criterion(output, batch["gt_label"])
if total_loss:
total_loss = 0.95 * total_loss + 0.05 * loss.item()
else:
total_loss = loss.item()
if num_iters % 100 == 0:
print("[Ep: {:.2f}][Loss: {:.2f}]".format(epoch, total_loss))
loss.backward()
optimizer.step()
model.zero_grad()
# Evaluate_model every epoch.
if num_iters % 1000 == 0:
model.eval()
accuracy = evaluate_model(model, val_loader, args["batch_size"] * 5)
print("Accuracy [dev]: {}".format(accuracy))
# Save devtest results.
if args["result_save_path"]:
save_path = os.path.join(
args["result_save_path"], f"results_devtest_{num_iters}.json"
)
else:
save_path = None
accuracy = evaluate_model(
model, test_loader, args["batch_size"] * 5, save_path
)
print("Accuracy [devtest]: {}".format(accuracy))
model.train()
num_iters += 1
if epoch > args["num_epochs"]:
break
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--train_file", required=True, help="Path to the training file")
parser.add_argument("--dev_file", required=True, help="Path to the dev file")
parser.add_argument(
"--devtest_file", required=True, help="Path to the devtest file"
)
parser.add_argument(
"--result_save_path", default=None, help="Path to save devtest results"
)
parser.add_argument(
"--max_turns", type=int, default=3, help="Number of turns in history"
)
parser.add_argument("--batch_size", type=int, default=128, help="Batch Size")
parser.add_argument(
"--max_length", type=int, default=512, help="Maximum length in utterance"
)
parser.add_argument(
"--num_epochs", type=int, default=10, help="Maximum number of epochs"
)
parser.add_argument(
"--learning_rate", type=float, default=5e-5, help="Learning rate"
)
parser.add_argument(
"--warmup_steps", type=int, default=0, help="Linear warmup over warmup_steps"
)
parser.add_argument(
"--adam_epsilon", type=float, default=1e-8, help="Eps for Adam optimizer"
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay")
parser.add_argument("--use_gpu", dest="use_gpu", action="store_true", default=False)
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
main(parsed_args)
| 6,066 |
overhave/test_execution/step_collector.py
|
TinkoffCreditSystems/overhave
| 33 |
2169626
|
import logging
from operator import attrgetter
from typing import Any, Dict, List, Optional, Tuple, cast
from _pytest.fixtures import FixtureDef
from _pytest.main import Session
from overhave.entities import FeatureTypeName, StepPrefixesModel
from overhave.test_execution.objects import BddStepModel
_PYTESTBDD_FIXTURE_MARK = "pytestbdd_"
_PYTESTBDD_FIXTURE_TRACE_MARK = "_trace"
logger = logging.getLogger(__name__)
class BaseStepCollectorException(Exception):
""" Base exception for :class:`StepCollector`. """
class BddStepWithoutDocsError(BaseStepCollectorException):
""" Error for situation when pytest_bdd steps declared without docstring. """
class StepCollector:
""" Class for `pytest-bdd` steps dynamic collection. """
def __init__(self, step_prefixes: Optional[StepPrefixesModel]) -> None:
self._step_prefixes = step_prefixes
self._steps: Dict[FeatureTypeName, List[BddStepModel]] = {}
@staticmethod
def _is_bdd_step(fixture: FixtureDef[Any]) -> bool:
is_bdd_step = (
isinstance(fixture.argname, str)
and fixture.argname.startswith(_PYTESTBDD_FIXTURE_MARK)
and not fixture.argname.endswith(_PYTESTBDD_FIXTURE_TRACE_MARK)
)
logger.debug("Fixture: %s - is_bdd_step=%s", fixture.argname, is_bdd_step)
if is_bdd_step and not isinstance(fixture.func.__doc__, str):
raise BddStepWithoutDocsError(
f"Fixture {fixture} does not have description! Please, set it via docstrings."
)
return is_bdd_step
@classmethod
def _get_pytestbdd_step_fixtures(cls, session: Session) -> Tuple[FixtureDef[Any]]:
return cast(
Tuple[FixtureDef[Any]],
sorted(
(
fx
for fx_list in session._fixturemanager._arg2fixturedefs.values()
for fx in fx_list
if cls._is_bdd_step(fx)
),
key=attrgetter("func.step_type"),
reverse=True,
),
)
def _compile_full_step_name(self, fixture_name: str, step_type: str) -> str:
prefix = step_type.title()
if self._step_prefixes is not None:
prefix = self._step_prefixes.dict()[step_type.upper()].strip()
return f"{prefix} {fixture_name}"
def _compile_step_models(self, steps: Tuple[FixtureDef[Any]]) -> List[BddStepModel]:
return [
BddStepModel(
type=f.func.step_type, # type: ignore
name=self._compile_full_step_name(
fixture_name=f.func.parser.name, # type: ignore
step_type=f.func.step_type, # type: ignore
),
doc=f.func.__doc__,
)
for f in steps
]
def collect_steps(self, session: Session, feature_type: FeatureTypeName) -> None:
logger.debug("Collecting steps for feature_type=%s...", feature_type)
step_fixtures = self._get_pytestbdd_step_fixtures(session)
bdd_steps = self._compile_step_models(step_fixtures)
if bdd_steps:
logger.debug("Loaded steps dict:\n%s", bdd_steps)
else:
logger.warning("Feature type '%s' does not have any pytest_bdd steps!", feature_type)
self._steps[feature_type] = bdd_steps
def get_steps(self, feature_type: FeatureTypeName) -> Optional[List[BddStepModel]]:
return self._steps.get(feature_type)
| 3,508 |
test-framework/test-suites/unit/tests/command/stack/commands/load/test_command_stack_commands_load___init__.py
|
kmcm0/stacki
| 123 |
2171514
|
from stack.commands.load import command
from stack.exception import CommandError
import pytest
from unittest.mock import patch
class CommandUnderTest(command):
"""A subclass of the stack load command that replaces __init__ to remove the database dependency."""
def __init__(self, *args, **kwargs):
self.document = None
self.exec = False
self.force = False
@pytest.fixture
def partitions():
"""Fixture to set up test partition information."""
return [
{
"device": "sda",
"partid": 5,
"mountpoint": "/",
"size": 1024,
"fstype": "ext4",
"options": "host_options",
},
{
"device": "sda",
"partid": 6,
"mountpoint": "/var",
"size": 0,
"fstype": "ext4",
},
]
class TestLoadCommand:
"""A test case for testing the stack load command."""
@pytest.mark.parametrize("force", (False, True))
@pytest.mark.parametrize(
"scope, target, expected_cmd",
(
("global", None, "add.storage.partition"),
("appliance", ["backend", "backend2"], "add.appliance.storage.partition"),
("os", ["sles", "rhel"], "add.os.storage.partition"),
("environment", ["env1", "env2"], "add.environment.storage.partition"),
("host", ["backend-0-0", "backend-0-1"], "add.host.storage.partition"),
)
)
@patch.object(target = CommandUnderTest, attribute = "validate_partition", autospec = True)
@patch.object(target = CommandUnderTest, attribute = "stack", autospec = True)
@patch.object(target = CommandUnderTest, attribute = "get_scope", autospec = True)
def test_load_partition(
self,
mock_get_scope,
mock_stack,
mock_validate_partition,
force,
scope,
target,
expected_cmd,
partitions,
):
# Some parameter names differ from the keys in the json, so we set up a mapping of
# add storage partition parameter names to json key names.
partition_keys = {
"device": "device",
"partid": "partid",
"mountpoint": "mountpoint",
"size": "size",
"type": "fstype",
"options": "options",
}
mock_get_scope.return_value = scope
# Run the command
test_command = CommandUnderTest()
test_command.force = force
test_command.load_partition(partitions = partitions, target = target)
mock_get_scope.assert_called_once_with(test_command)
# Validate that partition commands were generated correctly.
for partition in partitions:
mock_stack.assert_any_call(
test_command,
expected_cmd,
target,
**{
parameter_name: partition.get(json_key)
for parameter_name, json_key in partition_keys.items()
},
)
if not force:
mock_validate_partition.assert_called_once_with(test_command, partitions = partitions)
else:
mock_validate_partition.assert_not_called()
@pytest.mark.parametrize("test_input", (None, []))
@patch.object(target = CommandUnderTest, attribute = "validate_partition", autospec = True)
@patch.object(target = CommandUnderTest, attribute = "stack", autospec = True)
@patch.object(target = CommandUnderTest, attribute = "get_scope", autospec = True)
def test_load_partition_no_partitions(self, mock_get_scope, mock_stack, mock_validate_partition, test_input):
"""Test that load_partition early exits when there are no partitions to load."""
CommandUnderTest().load_partition(partitions = test_input)
mock_get_scope.assert_not_called()
mock_stack.assert_not_called()
mock_validate_partition.assert_not_called()
@patch.object(target = CommandUnderTest, attribute = "validate_partition", autospec = True)
@patch.object(target = CommandUnderTest, attribute = "stack", autospec = True)
@patch.object(target = CommandUnderTest, attribute = "get_scope", autospec = True)
def test_load_partition_non_global_scope_with_no_target(
self,
mock_get_scope,
mock_stack,
mock_validate_partition,
partitions,
):
"""Test that load_partition raises an exception when a non-global scope is used and no target is passed."""
mock_get_scope.return_value = "appliance"
with pytest.raises(AssertionError):
CommandUnderTest().load_partition(partitions = partitions)
mock_stack.assert_not_called()
mock_validate_partition.assert_not_called()
@patch.object(target = CommandUnderTest, attribute = "call", autospec = True)
def test__exec_commands(self, mock_call):
"""Test that exec commands calls the requested command correctly."""
# Set the exec_commands attribute to True
test_command = CommandUnderTest()
test_command.exec_commands = True
cmd = "stack list foo"
args = ["foo", "bar"]
params = {"baz": "bag"}
test_command._exec_commands(cmd = cmd, args = args, params = params)
# Make sure the call was made correctly
mock_call.assert_called_once_with(
test_command,
command = cmd,
args = [*args, *(f"{key}={value}" for key,value in params.items())],
)
@patch.object(target = CommandUnderTest, attribute = "call", autospec = True)
def test__exec_commands_suppresses_command_errors(self, mock_call):
"""Test that command errors are suppressed when calling the stack commands."""
# Set the exec_commands attribute to True
test_command = CommandUnderTest()
test_command.exec_commands = True
cmd = "stack list foo"
args = ["foo", "bar"]
params = {"baz": "bag"}
mock_call.side_effect = CommandError(msg = "foo", cmd = test_command)
test_command._exec_commands(cmd = cmd, args = args, params = params)
@pytest.mark.parametrize("args", (tuple(), ("foo",), ("foo", "bar")))
@pytest.mark.parametrize(
"params, expected_params",
(
({}, {}),
({"baz": "bag"}, {"baz": "bag"}),
({"baz": "bag", "booz": None}, {"baz": "bag"})
),
)
@patch.object(target = CommandUnderTest, attribute = "_exec_commands", autospec = True)
def test_stack(self, mock__exec_commands, args, params, expected_params):
"""Test that stack tries to run the command if exec_commands is True."""
# Set the exec_commands attribute to True
test_command = CommandUnderTest()
test_command.exec_commands = True
cmd = "stack list foo"
test_command.stack(cmd, *args, **params)
# Make sure the call was made correctly
mock__exec_commands.assert_called_once_with(
test_command,
cmd = cmd,
args = args,
params = expected_params,
)
| 6,144 |
5.3.2-using-a-pretrained-convnet-with-fine-tuning.py
|
83286415/DeepLearningWithPythonKeras
| 0 |
2171325
|
import keras
print(keras.__version__)  # 2.2.4. This script takes about 24 hours to run without GPU support.
from keras.applications import VGG16
# pre-trained network
conv_base = VGG16(weights='imagenet',
include_top=False,
input_shape=(150, 150, 3))
print(conv_base.summary())
'''
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 150, 150, 3) 0
_________________________________________________________________
block1_conv1 (Conv2D) (None, 150, 150, 64) 1792 # the bottom layer
_________________________________________________________________
block1_conv2 (Conv2D) (None, 150, 150, 64) 36928
_________________________________________________________________
block1_pool (MaxPooling2D) (None, 75, 75, 64) 0
_________________________________________________________________
block2_conv1 (Conv2D) (None, 75, 75, 128) 73856
_________________________________________________________________
block2_conv2 (Conv2D) (None, 75, 75, 128) 147584
_________________________________________________________________
block2_pool (MaxPooling2D) (None, 37, 37, 128) 0
_________________________________________________________________
block3_conv1 (Conv2D) (None, 37, 37, 256) 295168
_________________________________________________________________
block3_conv2 (Conv2D) (None, 37, 37, 256) 590080
_________________________________________________________________
block3_conv3 (Conv2D) (None, 37, 37, 256) 590080
_________________________________________________________________
block3_pool (MaxPooling2D) (None, 18, 18, 256) 0
_________________________________________________________________
block4_conv1 (Conv2D) (None, 18, 18, 512) 1180160
_________________________________________________________________
block4_conv2 (Conv2D) (None, 18, 18, 512) 2359808
_________________________________________________________________
block4_conv3 (Conv2D) (None, 18, 18, 512) 2359808
_________________________________________________________________
block4_pool (MaxPooling2D) (None, 9, 9, 512) 0
_________________________________________________________________
block5_conv1 (Conv2D) (None, 9, 9, 512) 2359808 # the top layer
_________________________________________________________________
block5_conv2 (Conv2D) (None, 9, 9, 512) 2359808
_________________________________________________________________
block5_conv3 (Conv2D) (None, 9, 9, 512) 2359808
_________________________________________________________________
block5_pool (MaxPooling2D) (None, 4, 4, 512) 0
=================================================================
Total params: 14,714,688
Trainable params: 14,714,688
Non-trainable params: 0
_________________________________________________________________
'''
# unfreeze the top layer
conv_base.trainable = True # unfreeze all layers here
set_trainable = False
for layer in conv_base.layers: # loop in all layers
if layer.name == 'block5_conv1': # the top layer
set_trainable = True # unfreeze the top layer for fine tuning
if set_trainable:
layer.trainable = True
else:
layer.trainable = False # freeze the other layers
from keras import models
from keras import layers
from keras import optimizers
model = models.Sequential()
model.add(conv_base) # add pre-trained network model into this model
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
import os
from keras.preprocessing.image import ImageDataGenerator
base_dir = 'D:/AI/deep-learning-with-python-notebooks-master/cats_and_dogs_small'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
# define train data generator (only) with data augmentation
train_datagen = ImageDataGenerator(
    rescale=1./255, # All images will be rescaled by 1./255 (pixel values of 0-255 become 0-1)
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# Note that the validation data should not be augmented!
test_datagen = ImageDataGenerator(rescale=1./255)
# config the train data generator after defining it above
train_generator = train_datagen.flow_from_directory(
# This is the target directory
train_dir,
# All images will be resized to 150x150
target_size=(150, 150),
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
# compile the model to make conv_base modification work
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-5),
metrics=['acc'])
# train the model. This process takes about 8 hours per run without GPU support.
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=100,
validation_data=validation_generator,
validation_steps=50)
model.save('cats_and_dogs_small_4_fine_tuning.h5')
# plot
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
# Exponential moving average: new point = previous smoothed point * factor + current point * (1 - factor)
# This smooths the plotted curves.
def smooth_curve(points, factor=0.8):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
plt.plot(epochs,
smooth_curve(acc), 'bo', label='Smoothed training acc') # smooth_curve the acc dot line
plt.plot(epochs,
smooth_curve(val_acc), 'b', label='Smoothed validation acc') # smooth_curve the val_acc line
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs,
smooth_curve(loss), 'bo', label='Smoothed training loss')
plt.plot(epochs,
smooth_curve(val_loss), 'b', label='Smoothed validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# evaluate the model
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
test_loss, test_acc = model.evaluate_generator(test_generator, steps=50)
print('test acc:', test_acc)
print('test loss:', test_loss)
print('5.3.2 done')
print('---------------------------------------------------------------------------------------------------------------')
| 7,379 |
dictKDB.py
|
marrowgari/SallySellsSeashells
| 0 |
2171957
|
from pyq import q, K
q.set(':alpha/HDB/', q('.Q.en', ':alpha', q('([]Name:`$(); Ask:`float$(); Bid:`float$(); Last:`float$(); Volume:`float$(); Time:`timestamp$())')))
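# Rough reading of the line above (an interpretation, not a comment by the original author):
# the inner q('([]Name:`$(); ...)') expression builds an empty table with typed columns,
# .Q.en enumerates its symbol column against the `:alpha directory, and q.set writes the
# result under `:alpha/HDB/ so it is stored on disk as the (empty) HDB table schema.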
| 169 |
code/exploding_sequence/sol_492.py
|
bhavinjawade/project-euler-solutions
| 2 |
2169685
|
# -*- coding: utf-8 -*-
'''
File name: code\exploding_sequence\sol_492.py
Author: <NAME>
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #492 :: Exploding sequence
#
# For more information see:
# https://projecteuler.net/problem=492
# Problem Statement
'''
Define the sequence a_1, a_2, a_3, ... as:
a_1 = 1
a_{n+1} = 6*a_n^2 + 10*a_n + 3 for n ≥ 1.
Examples:
a_3 = 2359
a_6 = 269221280981320216750489044576319
a_6 mod 1 000 000 007 = 203064689
a_100 mod 1 000 000 007 = 456482974
Define B(x, y, n) as ∑ (a_n mod p) for every prime p such that x ≤ p ≤ x+y.
Examples:
B(10^9, 10^3, 10^3) = 23674718882
B(10^9, 10^3, 10^15) = 20731563854
Find B(10^9, 10^7, 10^15).
'''
# Solution
# Solution Approach
'''
'''
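# A naive sketch, not the intended approach for the full problem size: it simply
# iterates the recurrence modulo m, which is enough to reproduce the small examples
# quoted in the problem statement above.
def a_mod(n, m):
    """Return a_n mod m by iterating a_{n+1} = 6*a_n^2 + 10*a_n + 3 from a_1 = 1."""
    a = 1
    for _ in range(n - 1):
        a = (6 * a * a + 10 * a + 3) % m
    return a


if __name__ == '__main__':
    # Checks against the values given in the problem statement.
    assert a_mod(3, 10**9 + 7) == 2359
    assert a_mod(6, 10**9 + 7) == 203064689
    assert a_mod(100, 10**9 + 7) == 456482974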
| 751 |
spanningtrees/scheduling.py
|
abaldwin/algorithms
| 0 |
2170325
|
DIFF_SORT = lambda job: (job[0] - job[1], job[0])
RATIO_SORT = lambda job: (job[0] / job[1], job[0])
def schedule(input_jobs, sort_key):
num_jobs = input_jobs['num_jobs']
sorted_jobs = sorted(input_jobs['jobs'], key=sort_key, reverse=True)
return sorted_jobs
def get_weighted_completion_times(scheduled_jobs):
clock = 0
weighted_sum = 0
for weight, length in scheduled_jobs:
clock += length
weighted_sum += weight * clock
return weighted_sum
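# Worked example (illustrative numbers, not part of the assignment data): for jobs
# (weight, length) = (3, 5) and (1, 2), running the first job first gives
# 3*5 + 1*(5+2) = 22, while the reverse order gives 1*2 + 3*(2+5) = 23; the ratio
# rule (3/5 > 1/2) picks the cheaper order, as the theory predicts.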
if __name__ == '__main__':
input_jobs = {}
with open('jobs.txt') as f:
input_jobs = {
'num_jobs': f.readline().strip(),
'jobs': [j.strip().split(' ') for j in f.readlines()],
}
input_jobs['jobs'] = [(int(w), int(l)) for w, l in input_jobs['jobs']]
print(input_jobs['num_jobs'])
print(input_jobs['jobs'][:10])
print(schedule(input_jobs, DIFF_SORT)[:10])
print(schedule(input_jobs, RATIO_SORT)[:10])
print(get_weighted_completion_times(schedule(input_jobs, DIFF_SORT)))
print(get_weighted_completion_times(schedule(input_jobs, RATIO_SORT)))
| 1,117 |
Assignment3/code/net.py
|
Young2647/Computer-Vision
| 0 |
2171939
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.utils import model_zoo
import copy
import numpy as np
from modules import E_ResNet18, D, MFF, R
from torchvision import utils
class Net(nn.Module) :
def __init__(self) :
super(Net,self).__init__()
self.E = E_ResNet18()
self.D = D()
self.MFF = MFF()
self.R = R()
def forward(self, x) :
x_block1, x_block2, x_block3, x_block4 = self.E(x) #encode module
x_D = self.D(x_block4) #decode module
x_mff = self.MFF(x_block1, x_block2, x_block3, x_block4) #mff module
        x_Rin = torch.cat((x_D, x_mff), 1) #concatenate mff and decode result
x_Rout = self.R(x_Rin) # refinement module
return x_Rout
| 765 |
src/armory/rosalind_swat.py
|
cowboysmall/rosalind
| 0 |
2171550
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
from subprocess import call
from Bio import ExPASy, SwissProt, SeqIO
import files
def pairwise_local_alignment(id1, id2):
score = 0
call('water -asequence %s.txt -bsequence %s.txt -datafile EBLOSUM62 -gapopen 10.0 -gapextend 1.0 -aformat pair -outfile water_out.txt' % (id1, id2), shell = True)
with open('water_out.txt') as file:
while not score:
line = file.readline()
if 'Score' in line:
score = int(line.split(':')[1].strip().split('.')[0])
os.remove('water_out.txt')
os.remove('%s.txt' % id1)
os.remove('%s.txt' % id2)
return score
def write_to_file(identifier):
handle = ExPASy.get_sprot_raw(identifier)
record = SeqIO.read(handle, 'swiss')
with open('%s.txt' % identifier, 'w') as file:
SeqIO.write(record, file, 'fasta')
handle.close()
def main(argv):
identifiers = files.read_line_of_words(argv[0])
for identifier in identifiers:
write_to_file(identifier)
print pairwise_local_alignment(identifiers[0], identifiers[1])
if __name__ == "__main__":
main(sys.argv[1:])
| 1,207 |
tests/test_plaintextification.py
|
tinkoffjournal/distiller
| 6 |
2169665
|
from distiller.base import DistilledObject
from distiller.helpers import normalize_whitespace
from distiller.nodes import InvalidNode, node as el, text as _
simple_plaintext = '''Hello, world.
OK:
1st
2nd
3rd
Bare text.'''
def test_simple_plaintext():
bare_text = 'Bare text.'
distilled = DistilledObject(
nodes=[
el('lead', _('Hello, '), el('b', _('world.'))),
el(
'div',
*[
el('p', _('OK:')),
el('ul', *[el('li', _('1st')), el('li', _('2nd')), el('li', _('3rd')),]),
],
),
InvalidNode(tagname='any'),
_(bare_text),
]
)
assert distilled.to_plaintext() == simple_plaintext
def test_whitespace_normalization():
test_string = 'Some text content with spe­cialties\n'
assert normalize_whitespace(test_string) == 'Some text content with specialties'
| 953 |
ddht/v5_1/alexandria/boot_info.py
|
kdeme/ddht
| 17 |
2169934
|
import argparse
from dataclasses import dataclass
import pathlib
from typing import Literal, Optional, Sequence, Tuple, TypedDict, Union
from eth_enr import ENR
from eth_enr.abc import ENRAPI
from ddht.v5_1.alexandria.constants import (
DEFAULT_BOOTNODES,
DEFAULT_COMMONS_STORAGE_SIZE,
DEFAULT_MAX_ADVERTISEMENTS,
)
class AlexandriaBootInfoKwargs(TypedDict, total=False):
bootnodes: Tuple[ENRAPI, ...]
max_advertisement_count: int
commons_storage_size: int
commons_storage: Optional[Union[Literal[":memory:"], pathlib.Path]]
pinned_storage: Optional[Union[Literal[":memory:"], pathlib.Path]]
def _cli_args_to_boot_info_kwargs(args: argparse.Namespace) -> AlexandriaBootInfoKwargs:
if args.alexandria_bootnodes is None:
bootnodes = tuple(ENR.from_repr(enr_repr) for enr_repr in DEFAULT_BOOTNODES)
else:
bootnodes = args.alexandria_bootnodes
max_advertisement_count: int
if args.alexandria_max_advertisement_count is None:
max_advertisement_count = DEFAULT_MAX_ADVERTISEMENTS
else:
max_advertisement_count = args.alexandria_max_advertisement_count
commons_storage_size: int
if args.alexandria_commons_storage_size is None:
commons_storage_size = DEFAULT_COMMONS_STORAGE_SIZE
else:
commons_storage_size = args.alexandria_commons_storage_size
commons_storage: Optional[Union[Literal[":memory:"], pathlib.Path]]
if args.alexandria_commons_storage == ":memory:":
commons_storage = ":memory:"
elif args.alexandria_commons_storage is not None:
commons_storage = (
pathlib.Path(args.alexandria_commons_storage).expanduser().resolve()
)
else:
commons_storage = None
pinned_storage: Optional[Union[Literal[":memory:"], pathlib.Path]]
if args.alexandria_pinned_storage == ":memory:":
pinned_storage = ":memory:"
elif args.alexandria_pinned_storage is not None:
pinned_storage = (
pathlib.Path(args.alexandria_pinned_storage).expanduser().resolve()
)
else:
pinned_storage = None
return AlexandriaBootInfoKwargs(
bootnodes=bootnodes,
max_advertisement_count=max_advertisement_count,
commons_storage_size=commons_storage_size,
commons_storage=commons_storage,
pinned_storage=pinned_storage,
)
@dataclass(frozen=True)
class AlexandriaBootInfo:
bootnodes: Tuple[ENRAPI, ...]
max_advertisement_count: int
commons_storage_size: int
commons_storage: Optional[Union[Literal[":memory:"], pathlib.Path]]
pinned_storage: Optional[Union[Literal[":memory:"], pathlib.Path]]
@classmethod
def from_cli_args(cls, args: Sequence[str]) -> "AlexandriaBootInfo":
# Import here to prevent circular imports
from ddht.cli_parser import parser
namespace = parser.parse_args(args)
return cls.from_namespace(namespace)
@classmethod
def from_namespace(cls, args: argparse.Namespace) -> "AlexandriaBootInfo":
kwargs = _cli_args_to_boot_info_kwargs(args)
return cls(**kwargs)
| 3,124 |
main.py
|
elldi/pi-security-camera
| 0 |
2171443
|
#!/usr/bin/env python
from flask import Flask, render_template, Response, request, redirect
## import cv2
from picamera import PiCamera
from io import BytesIO
app = Flask(__name__)
## camera = cv2.VideoCapture(0)
camera = PiCamera()
camera.color_effects = (128,128)
camera.resolution = (1280, 720)
## camera.led = False
@app.route('/')
def index():
return render_template('live.html')
def gen():
##with camera as camera:
image_stream = BytesIO()
for image in camera.capture_continuous(image_stream, 'jpeg', use_video_port=True):
image_stream.seek(0)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + image_stream.read() + b'\r\n')
image_stream.seek(0)
image_stream.truncate()
camera.stop_recording()
@app.route('/video_feed')
def video_feed():
return Response(gen(), mimetype = 'multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host = '0.0.0.0', debug = False, threaded = True)
| 957 |
solver.py
|
SantiagoCanete/maze-game
| 0 |
2171879
|
from maze import MazeGui
class MazeSolver:
def __init__(self, maze_grid, avatar):
self.maze = maze_grid
self.avatar = avatar
def my_solver(self):
"""
Write your maze solver here.
-----------------------------------------
Here is a list of what the avatar can do:
self.avatar.move() -> Avatar will move forward by one cell
self.avatar.turn_left() -> Avatar will turn left by 90 degrees
self.avatar.check_obstacle() -> Will return True if obstacle in front
self.avatar.check_visited() -> Will return True if cell in front has been visited
self.avatar.orientation -> Will return the orientation of the avatar (e.g. North, South...)
self.avatar.location_x -> Will return the x location of the avatar
self.avatar.location_y -> Will return the y location of the avatar
self.avatar.pause(period) -> Will pause the scene so you have time to see the avatar move
-----------------------------------------
Sample code below:
"""
self.avatar.move()
self.avatar.pause(0.25)
self.avatar.move()
self.avatar.pause(0.25)
self.avatar.turn_left()
print([self.avatar.location_x, self.avatar.location_y])
if __name__ == '__main__':
# Initialize Maze GUI
my_gui = MazeGui()
| 1,389 |