max_stars_repo_path (string, 4-182 chars) | max_stars_repo_name (string, 6-116 chars) | max_stars_count (int64, 0-191k) | id (string, 7 chars) | content (string, 100-10k chars) | size (int64, 100-10k) |
---|---|---|---|---|---|
note12/code/TestVisualDL.py
|
fluffyrita/LearnPaddle
| 367 |
2168226
|
# coding=utf-8
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.v2 as paddle
from paddle.fluid.initializer import NormalInitializer
from paddle.fluid.param_attr import ParamAttr
from visualdl import LogWriter
from vgg import vgg16_bn_drop
# Create the VisualDL log writer and set this project's VisualDL log directory
logdir = "../data/tmp"
logwriter = LogWriter(logdir, sync_cycle=10)
# Create the scalar chart for the training loss
with logwriter.mode("train") as writer:
loss_scalar = writer.scalar("loss")
# Create the scalar chart for the training accuracy
with logwriter.mode("train") as writer:
acc_scalar = writer.scalar("acc")
# Number of samples to collect before finishing a sampling round and starting over
num_samples = 4
# Create image visualizations for the convolution output and the input images
with logwriter.mode("train") as writer:
conv_image = writer.image("conv_image", num_samples, 1)
input_image = writer.image("input_image", num_samples, 1)
# Create a histogram to visualize a model parameter during training
with logwriter.mode("train") as writer:
param1_histgram = writer.histogram("param1", 100)
def train(use_cuda, learning_rate, num_passes, BATCH_SIZE=128):
# Number of image classes
class_dim = 10
# Image channels and spatial size
image_shape = [3, 32, 32]
# Define the input data layer with the image shape and float32 dtype
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
# Define the label layer with int64 dtype
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# Build the network
net, conv1 = vgg16_bn_drop(image)
# Fully connected output layer used as the classifier
predict = fluid.layers.fc(
input=net,
size=class_dim,
act='softmax',
param_attr=ParamAttr(name="param1", initializer=NormalInitializer()))
# Cross-entropy loss
cost = fluid.layers.cross_entropy(input=predict, label=label)
# Mean loss over the batch
avg_cost = fluid.layers.mean(x=cost)
# Tensor that receives the number of samples in the current batch, used to average the accuracy
batch_size = fluid.layers.create_tensor(dtype='int64')
print(batch_size)
batch_acc = fluid.layers.accuracy(input=predict, label=label, total=batch_size)
# Optimizer
optimizer = fluid.optimizer.Momentum(
learning_rate=learning_rate,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(5 * 1e-5))
opts = optimizer.minimize(avg_cost)
# Select GPU or CPU execution
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
# Create the executor
exe = fluid.Executor(place)
# Initialize parameters by running the startup program
exe.run(fluid.default_startup_program())
# Training data reader
train_reader = paddle.batch(
paddle.dataset.cifar.train10(), batch_size=BATCH_SIZE)
# Map the fed data to the image and label layers
feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
step = 0
sample_num = 0
start_up_program = framework.default_startup_program()
param1_var = start_up_program.global_block().var("param1")
accuracy = fluid.average.WeightedAverage()
# Start training; loop over the requested number of passes
for pass_id in range(num_passes):
# Read the training data batch by batch
accuracy.reset()
for batch_id, data in enumerate(train_reader()):
loss, conv1_out, param1, acc, weight = exe.run(fluid.default_main_program(),
feed=feeder.feed(data),
fetch_list=[avg_cost, conv1, param1_var, batch_acc,
batch_size])
accuracy.add(value=acc, weight=weight)
pass_acc = accuracy.eval()
# Restart the image sampling components
if sample_num == 0:
input_image.start_sampling()
conv_image.start_sampling()
# Get the sampling token (slot index)
idx1 = input_image.is_sample_taken()
idx2 = conv_image.is_sample_taken()
# Make sure both tokens are the same
assert idx1 == idx2
idx = idx1
if idx != -1:
# Record the input image data
image_data = data[0][0]
input_image_data = np.transpose(
image_data.reshape(image_shape), axes=[1, 2, 0])
input_image.set_sample(idx, input_image_data.shape,
input_image_data.flatten())
# Record the convolution output data
conv_image_data = conv1_out[0][0]
conv_image.set_sample(idx, conv_image_data.shape,
conv_image_data.flatten())
# One sample recorded; finish the round once num_samples have been collected
sample_num += 1
if sample_num % num_samples == 0:
input_image.finish_sampling()
conv_image.finish_sampling()
sample_num = 0
# Record the scalar chart data
loss_scalar.add_record(step, loss)
acc_scalar.add_record(step, acc)
# Record the parameter histogram data
param1_histgram.add_record(step, param1.flatten())
# Print the training log
print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str(pass_acc))
step += 1
if __name__ == '__main__':
# Start training
train(use_cuda=False, learning_rate=0.005, num_passes=300)
| 4,878 |
predict_64x64.py
|
ishandutta2007/faceswap_pytorch
| 230 |
2168400
|
"""
convert a face to another person
"""
from models.swapnet import SwapNet
import torch
from alfred.dl.torch.common import device
import cv2
import numpy as np
from dataset.training_data import random_warp
from utils.umeyama import umeyama
mean_value = np.array([0.03321508, 0.05035182, 0.02038819])
def process_img(ori_img):
img = cv2.resize(ori_img, (256, 256))
range_ = np.linspace( 128-80, 128+80, 5 )
mapx = np.broadcast_to( range_, (5,5) )
mapy = mapx.T
# warp image like in the training
mapx = mapx + np.random.normal( size=(5,5), scale=5 )
mapy = mapy + np.random.normal( size=(5,5), scale=5 )
interp_mapx = cv2.resize(mapx, (80, 80))[8:72, 8:72].astype('float32')
interp_mapy = cv2.resize(mapy, (80, 80))[8:72, 8:72].astype('float32')
warped_image = cv2.remap(img, interp_mapx, interp_mapy, cv2.INTER_LINEAR)
return warped_image
def load_img():
a = 'images/34600_test_A_target.png'
img = cv2.imread(a) / 255.
return img
def predict():
# convert trump to cage
# img_f = 'data/trump/51834796.jpg'
# img_f = 'data/trump/494045244.jpg'
# NOTE: using face extracted (not original image)
img_f = 'data/trump/464669134_face_0.png'
ori_img = cv2.imread(img_f)
img = cv2.resize(ori_img, (64, 64)) / 255.
img = np.rot90(img)
# img = load_img()
in_img = np.array(img, dtype=float).transpose(2, 1, 0)  # np.float was removed from NumPy; the builtin float keeps the original float64 behaviour
# normalize img
in_img = torch.Tensor(in_img).to(device).unsqueeze(0)
model = SwapNet().to(device)
if torch.cuda.is_available():
checkpoint = torch.load('checkpoint/faceswap_trump_cage_64x64.pth')
else:
checkpoint = torch.load('checkpoint/faceswap_trump_cage_64x64.pth', map_location={'cuda:0': 'cpu'})
model.load_state_dict(checkpoint['state'])
model.eval()
print('model loaded.')
out = model.forward(in_img, select='B')
out = np.clip(out.detach().cpu().numpy()[0]*255, 0, 255).astype('uint8').transpose(2, 1, 0)
cv2.imshow('original image', ori_img)
cv2.imshow('network input image', img)
cv2.imshow('result image', np.rot90(out, axes=(1, 0)))
cv2.waitKey(0)
if __name__ == '__main__':
predict()
| 2,188 |
colin/spoc/dp.py
|
ColinRioux/DeepPseudo
| 0 |
2170311
|
"""
Author: <NAME>
Convert grouped spoc into code\tnl tsvs
*Should be run after group
"""
import glob
import pandas as pd
import string
import random
import argparse
import os
from pathlib import Path
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--data_path', default='./data/in/')
arg_parser.add_argument('--out_path', default='./data/uniq/')
args = arg_parser.parse_args()
files = glob.glob(os.path.join(args.data_path, "*.csv"))
for file in files:
# fname = file.split('/')[-1].split(',')[0]
fname = Path(file).stem
""" Skip non-grouped files """
if "grouped-" not in fname:
continue
if "eval" in fname:
fname = str("valid.csv")
elif "test" in fname:
fname = str("test.csv")
else:
fname = str("train.csv")
df = pd.read_csv(file)
data = []
for index, row in df.iterrows():
d = {}
d["sc"] = row["sc"]
d["ps"] = row["ps"]
data.append(d)
final_df = pd.DataFrame(data)
final_df.to_csv(os.path.join(args.out_path, fname), index=False)
| 1,073 |
src/sphinx_theme_builder/_internal/errors.py
|
yeraydiazdiaz/sphinx-theme-builder
| 16 |
2164700
|
"""Exceptions raised from within this package."""
import re
from typing import TYPE_CHECKING, Optional, Union
from rich.console import Console, ConsoleOptions, RenderResult
from rich.text import Text
if TYPE_CHECKING:
from typing import Literal
_DOCS_URL = "https://sphinx-theme-builder.rtfd.io/errors/#{}"
def _is_kebab_case(s: str) -> bool:
return re.match(r"^[a-z]+(-[a-z]+)*$", s) is not None
def _prefix_with_indent(
s: Union[Text, str],
console: Console,
*,
width_offset: int = 0,
prefix: str,
indent: str,
) -> Text:
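# Render and wrap the message to the available width, prefix the first line, and indent every continuation line.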
if isinstance(s, Text):
text = s
else:
text = console.render_str(s)
lines = text.wrap(console, console.width - width_offset)
return console.render_str(prefix) + console.render_str(f"\n{indent}").join(lines)
class DiagnosticError(Exception):
reference: str
def __init__(
self,
*,
kind: 'Literal["error", "warning"]' = "error",
reference: Optional[str] = None,
message: Union[str, Text],
context: Optional[Union[str, Text]],
hint_stmt: Optional[Union[str, Text]],
note_stmt: Optional[Union[str, Text]] = None,
) -> None:
# Ensure a proper reference is provided.
if reference is None:
assert hasattr(self, "reference"), "error reference not provided!"
reference = self.reference
assert _is_kebab_case(reference), "error reference must be kebab-case!"
self.kind = kind
self.reference = reference
self.message = message
self.context = context
self.note_stmt = note_stmt
self.hint_stmt = hint_stmt
super().__init__(
f"<{self.__class__.__name__}: {_DOCS_URL.format(self.reference)}>"
)
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__}("
f"reference={self.reference!r}, "
f"message={self.message!r}, "
f"context={self.context!r}, "
f"note_stmt={self.note_stmt!r}, "
f"hint_stmt={self.hint_stmt!r}"
")>"
)
def __rich_console__(
self,
console: Console,
options: ConsoleOptions,
) -> RenderResult:
colour = "red" if self.kind == "error" else "yellow"
yield f"[{colour} bold]{self.kind}[/]: [bold]{self.reference}[/]"
# Present the main message, with relevant context indented.
if not options.ascii_only:
yield ""
if self.context is not None:
yield _prefix_with_indent(
self.message,
console,
width_offset=2,
prefix=f"[{colour}]×[/] ",
indent=f"[{colour}]│[/] ",
)
yield _prefix_with_indent(
self.context,
console,
width_offset=4,
prefix=f"[{colour}]╰─>[/] ",
indent=f"[{colour}] [/] ",
)
else:
yield _prefix_with_indent(
self.message,
console,
width_offset=4,
prefix="[red]×[/] ",
indent=" ",
)
else: # coverage: skip
yield console.render_str(f"[{colour}]x[/] ") + self.message
if self.context is not None:
yield ""
yield self.context
if self.note_stmt is not None or self.hint_stmt is not None:
yield ""
if self.note_stmt is not None:
yield _prefix_with_indent(
self.note_stmt,
console,
width_offset=6,
prefix="[magenta bold]note[/]: ",
indent=" ",
)
if self.hint_stmt is not None:
yield _prefix_with_indent(
self.hint_stmt,
console,
width_offset=6,
prefix="[cyan bold]hint[/]: ",
indent=" ",
)
yield ""
yield f"Link: {_DOCS_URL.format(self.reference)}"
if __name__ == "__main__":
import rich
errors = [
DiagnosticError(
reference="ooops-an-error-occured",
message=(
"This is an error message describing the issues."
"\nIt can have multiple lines."
),
context=None,
hint_stmt=None,
),
DiagnosticError(
reference="ooops-an-error-occured",
message=(
"This is an error message describing the issues."
"\nIt can have multiple lines."
),
context=(
"This is some context associated with that error."
"\nAny relevant additional details are mentioned here."
),
hint_stmt=(
"This is a hint, that will help you figure this out."
"\nAnd the hint can have multiple lines."
),
note_stmt=(
"This is to draw your attention toward about something important."
"\nAnd this can also have multiple lines."
),
),
DiagnosticError(
reference="you-have-been-warned",
kind="warning",
message=(
"This is an warning message describing the issues."
"\nIt can have multiple lines."
),
context=(
"This is some context associated with that warning."
"\nAny relevant additional details are mentioned here."
),
hint_stmt=(
"This is a hint, that will help you figure this out."
"\nAnd the hint can have multiple lines."
),
note_stmt=(
"This is to draw your attention toward about something important."
"\nAnd this can also have multiple lines."
),
),
]
for error in errors:
rich.get_console().rule()
rich.print(error)
rich.get_console().rule()
| 6,230 |
pyscf/cc/test/test_ccsd_lambda.py
|
nmardirossian/pyscf
| 1 |
2167117
|
#!/usr/bin/env python
import unittest
import numpy
from pyscf import gto, scf, ao2mo
from pyscf import cc
from pyscf.cc import ccsd_lambda
class KnowValues(unittest.TestCase):
def test_ccsd(self):
mol = gto.M()
mf = scf.RHF(mol)
mcc = cc.CCSD(mf)
numpy.random.seed(12)
mcc.nocc = nocc = 5
mcc.nmo = nmo = 12
nvir = nmo - nocc
eri0 = numpy.random.random((nmo,nmo,nmo,nmo))
eri0 = ao2mo.restore(1, ao2mo.restore(8, eri0, nmo), nmo)
fock0 = numpy.random.random((nmo,nmo))
fock0 = fock0 + fock0.T + numpy.diag(range(nmo))*2
t1 = numpy.random.random((nocc,nvir))
t2 = numpy.random.random((nocc,nocc,nvir,nvir))
t2 = t2 + t2.transpose(1,0,3,2)
l1 = numpy.random.random((nocc,nvir))
l2 = numpy.random.random((nocc,nocc,nvir,nvir))
l2 = l2 + l2.transpose(1,0,3,2)
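# Use a bare lambda as a lightweight namespace object to hang the mocked integral blocks off of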
eris = lambda:None
eris.oooo = eri0[:nocc,:nocc,:nocc,:nocc].copy()
eris.ooov = eri0[:nocc,:nocc,:nocc,nocc:].copy()
eris.ovoo = eri0[:nocc,nocc:,:nocc,:nocc].copy()
eris.oovv = eri0[:nocc,:nocc,nocc:,nocc:].copy()
eris.ovov = eri0[:nocc,nocc:,:nocc,nocc:].copy()
idx = numpy.tril_indices(nvir)
eris.ovvv = eri0[:nocc,nocc:,nocc:,nocc:][:,:,idx[0],idx[1]].copy()
eris.vvvv = ao2mo.restore(4,eri0[nocc:,nocc:,nocc:,nocc:],nvir)
eris.fock = fock0
saved = ccsd_lambda.make_intermediates(mcc, t1, t2, eris)
l1new, l2new = ccsd_lambda.update_amps(mcc, t1, t2, l1, l2, eris, saved)
self.assertAlmostEqual(abs(l1new).sum(), 38172.7896467303, 8)
self.assertAlmostEqual(numpy.dot(l1new.flatten(), numpy.arange(35)), 739312.005491083, 8)
self.assertAlmostEqual(numpy.dot(l1new.flatten(), numpy.sin(numpy.arange(35))), 7019.50937051188, 8)
self.assertAlmostEqual(numpy.dot(numpy.sin(l1new.flatten()), numpy.arange(35)), 69.6652346635955, 8)
self.assertAlmostEqual(abs(l2new).sum(), 72035.4931071527, 8)
self.assertAlmostEqual(abs(l2new-l2new.transpose(1,0,3,2)).sum(), 0, 9)
self.assertAlmostEqual(numpy.dot(l2new.flatten(), numpy.arange(35**2)), 48427109.5409886, 7)
self.assertAlmostEqual(numpy.dot(l2new.flatten(), numpy.sin(numpy.arange(35**2))), 137.758016736487, 8)
self.assertAlmostEqual(numpy.dot(numpy.sin(l2new.flatten()), numpy.arange(35**2)), 507.656936701192, 8)
if __name__ == "__main__":
print("Full Tests for CCSD lambda")
unittest.main()
| 2,522 |
gwosc/timeline.py
|
martinberoiz/gwosc
| 16 |
2169640
|
# -*- coding: utf-8 -*-
# Copyright (C) <NAME> (2018-2021)
# SPDX-License-Identifier: MIT
"""
`gwosc.timeline` provides functions to find segments for a given dataset.
You can search for Timeline segments, based on a flag name, and a
GPS time interval as follows:
>>> from gwosc.timeline import get_segments
>>> get_segments('H1_DATA', 1126051217, 1126151217)
[(1126073529, 1126114861), (1126121462, 1126123267), (1126123553, 1126126832), (1126139205, 1126139266), (1126149058, 1126151217)]
The output is a `list` of ``[start, end)`` 2-tuples which each
represent a semi-open time interval.
For documentation on what flags are available, for example for the O1
science run, see `the O1 data release
page <https://gw-openscience.org/O1/>`__ (*Data Quality*).
""" # noqa: E501
from operator import itemgetter
from . import (api, datasets)
def get_segments(flag, start, end, host=api.DEFAULT_URL):
"""Return the [start, end) GPS segments for this flag
Parameters
----------
flag : `str`
name of flag, e.g. ``'H1_DATA'``
start : `int`
the GPS start time of your query
end : `int`
the GPS end time of your query
host : `str`, optional
the URL of the remote GWOSC server
Returns
-------
segments : `list` of `(int, int)` tuples
a list of `[a, b)` GPS segments
"""
return list(map(tuple, api.fetch_json(
timeline_url(flag, start, end, host=host))['segments']))
def timeline_url(flag, start, end, host=api.DEFAULT_URL):
"""Returns the Timeline JSON URL for a flag name and GPS interval
"""
detector = flag.split('_', 1)[0]
dataset = _find_dataset(start, end, detector, host=host)
return '{}/timeline/segments/json/{}/{}/{}/{}/'.format(
host, dataset, flag, start, end - start)
def _find_dataset(start, end, detector, host=api.DEFAULT_URL):
duration = end - start
epochs = []
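# Rank each overlapping run by how much of the query interval it fails to cover; the best-covering run (ties broken by name) is returned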
for run in datasets._iter_datasets(
type="run",
detector=detector,
segment=(start, end),
host=host,
):
segment = datasets.run_segment(run, host=host)
overlap = min(end, segment[1]) - max(start, segment[0])
epochs.append((run, duration - overlap))
if not epochs:
raise ValueError(
"No datasets found matching [{}, {})".format(start, end))
return sorted(epochs, key=itemgetter(1, 0))[0][0]
| 2,417 |
tests/test_handler_errors.py
|
MaxShvets/pytest-httpserver
| 109 |
2169745
|
import pytest
import requests
import werkzeug
from pytest_httpserver import HTTPServer
def test_check_assertions_raises_handler_assertions(httpserver: HTTPServer):
def handler(_):
assert 1 == 2
httpserver.expect_request("/foobar").respond_with_handler(handler)
requests.get(httpserver.url_for("/foobar"))
with pytest.raises(AssertionError):
httpserver.check_assertions()
httpserver.check_handler_errors()
def test_check_handler_errors_raises_handler_error(httpserver: HTTPServer):
def handler(_) -> werkzeug.Response:
raise ValueError("should be propagated")
httpserver.expect_request("/foobar").respond_with_handler(handler)
requests.get(httpserver.url_for("/foobar"))
httpserver.check_assertions()
with pytest.raises(ValueError):
httpserver.check_handler_errors()
def test_check_handler_errors_correct_order(httpserver: HTTPServer):
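# Each call to check_handler_errors() re-raises the next stored handler error, in the order the requests were served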
def handler1(_) -> werkzeug.Response:
raise ValueError("should be propagated")
def handler2(_) -> werkzeug.Response:
raise OSError("should be propagated")
httpserver.expect_request("/foobar1").respond_with_handler(handler1)
httpserver.expect_request("/foobar2").respond_with_handler(handler2)
requests.get(httpserver.url_for("/foobar1"))
requests.get(httpserver.url_for("/foobar2"))
httpserver.check_assertions()
with pytest.raises(ValueError):
httpserver.check_handler_errors()
with pytest.raises(OSError):
httpserver.check_handler_errors()
httpserver.check_handler_errors()
def test_missing_matcher_raises_exception(httpserver):
requests.get(httpserver.url_for("/foobar"))
# missing handlers should not raise handler exception here
httpserver.check_handler_errors()
with pytest.raises(AssertionError):
httpserver.check_assertions()
def test_check_raises_errors_in_order(httpserver):
def handler1(_):
assert 1 == 2
def handler2(_):
pass # does nothing
def handler3(_):
raise ValueError
httpserver.expect_request("/foobar1").respond_with_handler(handler1)
httpserver.expect_request("/foobar2").respond_with_handler(handler2)
httpserver.expect_request("/foobar3").respond_with_handler(handler3)
requests.get(httpserver.url_for("/foobar1"))
requests.get(httpserver.url_for("/foobar2"))
requests.get(httpserver.url_for("/foobar3"))
with pytest.raises(AssertionError):
httpserver.check()
with pytest.raises(ValueError):
httpserver.check()
| 2,557 |
4.feature/ig.py
|
fullmooncj/textmining_edu
| 0 |
2169609
|
import operator
from df import DF
# Information Gain
class IG:
def __init__(self):
self.df = DF()
def get_class(self, doc_list):
class_list = list()
for doc in doc_list:
if list(doc.keys())[0] not in class_list:
class_list.append(list(doc.keys())[0])
return class_list
def get_doc_list_each_class(self, doc_list, class_name):
new_doc_list = list()
for doc in doc_list:
key = list(doc.keys())[0]
if key == class_name:
new_doc_list.append(doc)
return new_doc_list
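# Despite the class name, terms are ranked per class by document frequency (via DF); the top cut_off_count/len(class_list) terms of each class are merged into one de-duplicated list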
def generate_merged_dict_by_ig(self, doc_list, cut_off_count):
ig_key_list = list()
class_list = self.get_class(doc_list)
for class_name in class_list:
new_doc_list = self.get_doc_list_each_class(doc_list, class_name)
df_dict = self.df.generate_merged_dict_by_df(new_doc_list)
sorted_df_list = sorted(df_dict.items(), key=operator.itemgetter(1), reverse=True)
cut_off_df_list = sorted_df_list[:int(cut_off_count / len(class_list)) + 1]
for (key, value) in cut_off_df_list:
if key not in ig_key_list:
ig_key_list.append(key)
return ig_key_list
| 1,284 |
gerritstats/querier/querier.py
|
craftslab/gerritstats
| 0 |
2169694
|
# -*- coding: utf-8 -*-
from ..gerrit.gerrit import Gerrit
from ..proto.proto import Commit
class QuerierException(Exception):
def __init__(self, info):
super().__init__(self)
self._info = info
def __str__(self):
return self._info
class Querier(object):
def __init__(self, config=None):
if config is None:
raise QuerierException("config invalid")
self.gerrit = Gerrit(config)
def _build(self, data):
def _labels(data):
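# Flatten the per-label review scores into "label:username:value" entries, skipping zero votes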
buf = []
for key, val in data.items():
if val.get("all", None) is not None:
for item in val["all"]:
if item.get("value", 0) != 0:
buf.append(
"%s:%s:%d" % (key, item["username"], item["value"])
)
return ",".join(buf)
return {
Commit.BRANCH: data["branch"],
Commit.COMMIT: data["_number"],
Commit.DELETIONS: data["deletions"],
Commit.INSERTIONS: data["insertions"],
Commit.LABELS: _labels(data["labels"]),
Commit.MESSAGE: data["subject"].split("\n")[0],
Commit.OWNER: "%s <%s>"
% (data["owner"].get("name", ""), data["owner"].get("email", "")),
Commit.REPO: data["project"],
Commit.SUBMITTED: data.get("submitted", "").split(".")[0],
Commit.UPDATED: data.get("updated", "").split(".")[0],
}
def _fetch(self, search, start):
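# Gerrit paginates query results; keep fetching with an increased offset until the last item no longer carries "_more_changes"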
buf = self.gerrit.query(search, start)
if len(buf) == 0:
return []
if buf[-1].get(u"_more_changes", False) is False:
return buf
buf.extend(self._fetch(search, start + len(buf)))
return buf
def run(self, search):
commits = []
buf = self._fetch(search, 0)
for item in buf:
commit = self.gerrit.get(item["id"])
if commit is not None:
commits.append(self._build(commit))
return commits
| 2,087 |
setup.py
|
KOLANICH/Arpeggio
| 127 |
2169393
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Name: arpeggio.py
# Purpose: PEG parser interpreter
# Author: <NAME> <igor DOT dejanovic AT gmail DOT com>
# Copyright: (c) <NAME> <igor DOT dejanovic AT gmail DOT com>
# License: MIT License
#
# Arpeggio is an implementation of packrat parser interpreter based on PEG
# grammars.
# Parsers are defined using python language construction or PEG language.
###############################################################################
from io import open
import os
import sys
from setuptools import setup
VERSIONFILE = "arpeggio/__init__.py"
VERSION = None
for line in open(VERSIONFILE, "r", encoding='utf8').readlines():
if line.startswith('__version__'):
VERSION = line.split('"')[1]
if not VERSION:
raise RuntimeError('No version defined in arpeggio/__init__.py')
if sys.argv[-1].startswith('publish'):
if os.system("pip list | grep wheel"):
print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
sys.exit()
if os.system("pip list | grep twine"):
print("twine not installed.\nUse `pip install twine`.\nExiting.")
sys.exit()
os.system("python setup.py sdist bdist_wheel")
if sys.argv[-1] == 'publishtest':
os.system("twine upload -r test dist/*")
else:
os.system("twine upload dist/*")
print("You probably want to also tag the version now:")
print(" git tag -a {0} -m 'version {0}'".format(VERSION))
print(" git push --tags")
sys.exit()
setup(version=VERSION)
| 1,620 |
python/spdm/mesh/Mesh.py
|
simpla-fusion/spdb
| 0 |
2170389
|
import collections
from functools import cached_property
from typing import Callable, Iterator, Sequence, Tuple, Type, Union
import numpy as np
from ..common.SpObject import SpObject
from ..geometry.GeoObject import GeoObject
from ..common.logger import logger
class Mesh(SpObject):
@staticmethod
def __new__(cls, mesh=None, *args, **kwargs):
if cls is not Mesh:
return object.__new__(cls)
n_cls = None
if mesh is None or mesh == "rectilinear":
from .RectilinearMesh import RectilinearMesh
n_cls = RectilinearMesh
else:
raise NotImplementedError(mesh)
return object.__new__(n_cls)
def __init__(self, mesh=None, *args, ndims=None, uv=None, rank=None, shape=None, name=None, unit=None, cycle=None, **kwargs) -> None:
self._rank = rank or len(shape or [])
self._shape = shape or []
self._ndims = ndims or self._rank
self._uv = uv
name = name or [""] * self._ndims
if isinstance(name, str):
self._name = name.split(",")
elif not isinstance(name, collections.abc.Sequence):
self._name = [name]
else:  # name is already a sequence of per-dimension labels
self._name = list(name)
unit = unit or [None] * self._ndims
if isinstance(unit, str):
unit = unit.split(",")
elif not isinstance(unit, collections.abc.Sequence):
unit = [unit]
if len(unit) == 1:
unit = unit * self._ndims
# self._unit = [*map(Unit(u for u in unit))]
self._unit = unit  # store the unit strings so the unit property has a value; the Unit conversion above stays disabled
cycle = cycle or [False] * self._ndims
if not isinstance(cycle, collections.abc.Sequence):
cycle = [cycle]
if len(cycle) == 1:
cycle = cycle * self._ndims
self._cycle = cycle
# logger.debug(f"Create {self.__class__.__name__} rank={self.rank} shape={self.shape} ndims={self.ndims}")
@property
def name(self) -> str:
return self._name
@property
def unit(self):
return self._unit
@property
def cycle(self):
return self._cycle
@property
def ndims(self) -> int:
return self._ndims
@property
def rank(self) -> int:
return self._rank
@property
def shape(self):
return tuple(self._shape)
@property
def topology_rank(self):
return self.ndims
@cached_property
def bbox(self) -> Sequence[float]:
return NotImplemented
@cached_property
def dx(self) -> Sequence[float]:
return NotImplemented
@cached_property
def boundary(self):
return NotImplemented
@property
def xy(self) -> Sequence[np.ndarray]:
return NotImplemented
def new_dataset(self, *args, **kwargs):
return np.ndarray(self._shape, *args, **kwargs)
def interpolator(self, Z):
return NotImplemented
def axis(self, *args, **kwargs) -> GeoObject:
return NotImplemented
def axis_iter(self, axis=0) -> Iterator[GeoObject]:
for idx, u in enumerate(self._uv[axis]):
yield u, self.axis(idx, axis=axis)
| 3,058 |
codeforces/ProblemSet/B393_Xenia_and_Ringroad.py
|
ivyxjc/OnlineJudgePython
| 0 |
2169681
|
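# Xenia and Ringroad: the first line gives the ring size a[0] (and the number of tasks), the second line lists the task houses;
# starting from house 1, walk clockwise to each task house in order and sum the total distance travelled.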
a=input()
b=input()
a=a.split(' ')
b=b.split(' ')
a=[int(i) for i in a]
bb=[]
bb.append(1)
for i in b:
bb.append(int(i))
res=0
for i in range(len(bb)-1):
if(bb[i+1]>bb[i]):
res+=bb[i+1]-bb[i]
elif(bb[i+1]<bb[i]):
res+=a[0]+bb[i+1]-bb[i]
else:
res+=0
print(res)
| 303 |
Wkiki1.py
|
tayayan/suisho
| 1 |
2167739
|
import re
import random
import Bboard
import Wboard
import board
import oute
import Wkikimoves1
import Wmoves
import Wkikiboard
def culc():
global kiki1
Wboard.p,Wboard.l,Wboard.n,Wboard.s,Wboard.g,Wboard.b,Wboard.r=0,0,0,0,0,0,0
Wkikimoves1.move1()
Wkikiboard.shoki()
for i in range(len(Wkikimoves1.depth1)):
exec('Wkikiboard.k{} += 1'.format(Wkikimoves1.depth1[i][2:4]))
kiki1=0
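# Sum the attack ("kiki") counts over all 81 squares, weighting squares on ranks g, h and i five times more heavily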
kiki1= Wkikiboard.k1a+Wkikiboard.k2a+Wkikiboard.k3a+Wkikiboard.k4a+Wkikiboard.k5a+Wkikiboard.k6a+Wkikiboard.k7a+Wkikiboard.k8a+Wkikiboard.k9a\
+Wkikiboard.k1b+Wkikiboard.k2b+Wkikiboard.k3b+Wkikiboard.k4b+Wkikiboard.k5b+Wkikiboard.k6b+Wkikiboard.k7b+Wkikiboard.k8b+Wkikiboard.k9b\
+Wkikiboard.k1c+Wkikiboard.k2c+Wkikiboard.k3c+Wkikiboard.k4c+Wkikiboard.k5c+Wkikiboard.k6c+Wkikiboard.k7c+Wkikiboard.k8c+Wkikiboard.k9c\
+Wkikiboard.k1d+Wkikiboard.k2d+Wkikiboard.k3d+Wkikiboard.k4d+Wkikiboard.k5d+Wkikiboard.k6d+Wkikiboard.k7d+Wkikiboard.k8d+Wkikiboard.k9d\
+Wkikiboard.k1e+Wkikiboard.k2e+Wkikiboard.k3e+Wkikiboard.k4e+Wkikiboard.k5e+Wkikiboard.k6e+Wkikiboard.k7e+Wkikiboard.k8e+Wkikiboard.k9e\
+Wkikiboard.k1f+Wkikiboard.k2f+Wkikiboard.k3f+Wkikiboard.k4f+Wkikiboard.k5f+Wkikiboard.k6f+Wkikiboard.k7f+Wkikiboard.k8f+Wkikiboard.k9f\
+Wkikiboard.k1g*5+Wkikiboard.k2g*5+Wkikiboard.k3g*5+Wkikiboard.k4g*5+Wkikiboard.k5g*5+Wkikiboard.k6g*5+Wkikiboard.k7g*5+Wkikiboard.k8g*5+Wkikiboard.k9g*5\
+Wkikiboard.k1h*5+Wkikiboard.k2h*5+Wkikiboard.k3h*5+Wkikiboard.k4h*5+Wkikiboard.k5h*5+Wkikiboard.k6h*5+Wkikiboard.k7h*5+Wkikiboard.k8h*5+Wkikiboard.k9h*5\
+Wkikiboard.k1i*5+Wkikiboard.k2i*5+Wkikiboard.k3i*5+Wkikiboard.k4i*5+Wkikiboard.k5i*5+Wkikiboard.k6i*5+Wkikiboard.k7i*5+Wkikiboard.k8i*5+Wkikiboard.k9i*5
| 1,799 |
tests/automatic_testing.py
|
macanepa/mcutils
| 0 |
2170126
|
import mcutils as mc
import logging
mc.activate_mc_logger(console_log_level='info')
mc.ColorSettings.is_dev = True
"""
Automatic testing for mcutils
"""
def menu_testing():
def exp(num, exp_):
num = num ** exp_
print(num)
return num
credits_ = mc.About(authors=['<NAME>'],
company_name='ALM',
github_account='macanepa',
github_repo='https://github.com/macanepa/logistics-solver')
mf_credits = mc.MenuFunction(title='Credits', function=credits_.print_credits)
mf_sqrt = mc.MenuFunction(title='SQRT', function=exp, exp_=4, num=3)
mc_main = mc.Menu(title='Main Menu', options=[mf_sqrt, mf_credits])
mc_main.show()
def validation():
number = mc.get_input(text='input a value greater than 5, different than 10',
valid_options=['!=10', '>5'],
return_type=int)
mc.mcprint(text='input value = {}'.format(number),
color=mc.Color.LIGHTGREEN)
mc.ColorSettings.is_dev = False
mc.ColorSettings.print_color = True
def menu_select_options():
mc.ColorSettings.is_dev = True
mc.ColorSettings.print_color = True
# options = ["Animal", "Wiweño", "Shalashaska Ocelot"]
options_classy = {"Animal": [str, '<10', '>3'],
"Edad": [int, '>0', '<=100']}
mc_main = mc.Menu(title="Testing Selection",
subtitle='Please input all fields',
options=options_classy,
input_each=True)
mc_main.show()
print(mc_main.returned_value)
print(mc_main.function_returned_value)
def function():
def return_name(n):
name = mc.get_input(text=f'This is just the param: {n}')
return name
mf_return_name = mc.MenuFunction(title='Return Name', function=return_name, n=2)
mc_menu = mc.Menu(title='Function Testing',
subtitle='Select an option from below',
text='This is the text',
options=[mf_return_name])
mc_menu.show()
print(f'function returner value: {mc_menu.function_returned_value}')
print(f'menu returned value {mc_menu.returned_value}')
mc_input_each = mc.Menu(title='Input Each',
options={'Name': [str, '>5']},
input_each=True)
mc_input_each.show()
# logging.info(f'Input was {mc_input_each.returned_value}')
mc.mcprint(text='text', color=mc.Color.RED)
menu_testing()
menu_select_options()
function()
# mc.date_generator(True)
import mcutils as mc
def foo(n):
return n**2
mf_foo = mc.MenuFunction(title='do foo', function=foo, n=4)
mc_submenu = mc.Menu(title='Submenu',
text='This is the submenu',
options=[mf_foo])
mc_menu = mc.Menu(title='Main Menu',
subtitle='Subtitle',
text='Please select one of the following options',
options=[mc_submenu, 'Option 2', 'Option 3'])
mc_menu.show()
print(mc_submenu.function_returned_value)
| 3,096 |
main.py
|
descara/spelunk
| 0 |
2169634
|
from __future__ import unicode_literals
#import splunklib.client as client
from prompt_toolkit import prompt
from prompt_toolkit import PromptSession
from prompt_toolkit.completion import WordCompleter
sql_completer = WordCompleter([
'abort', 'action', 'add', 'after', 'all', 'alter', 'analyze', 'and',
'as', 'asc', 'attach', 'autoincrement', 'before', 'begin', 'between',
'by', 'cascade', 'case', 'cast', 'check', 'collate', 'column',
'commit', 'conflict', 'constraint', 'create', 'cross', 'current_date',
'current_time', 'current_timestamp', 'database', 'default',
'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct',
'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive',
'exists', 'explain', 'fail', 'for', 'foreign', 'from', 'full', 'glob',
'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index',
'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect',
'into', 'is', 'isnull', 'join', 'key', 'left', 'like', 'limit',
'match', 'natural', 'no', 'not', 'notnull', 'null', 'of', 'offset',
'on', 'or', 'order', 'outer', 'plan', 'pragma', 'primary', 'query',
'raise', 'recursive', 'references', 'regexp', 'reindex', 'release',
'rename', 'replace', 'restrict', 'right', 'rollback', 'row',
'savepoint', 'select', 'set', 'table', 'temp', 'temporary', 'then',
'to', 'transaction', 'trigger', 'union', 'unique', 'update', 'using',
'vacuum', 'values', 'view', 'virtual', 'when', 'where', 'with',
'without'], ignore_case=True)
""" Main function that starts the REPL loop """
def main():
# LAPTOP_HOST = "192.168.10.131"
# DESKTOP_HOST = "192.168.100.131"
# PORT = 8089
# USERNAME = "admin"
# PASSWORD = "<PASSWORD>"
# # Create a Service instance and log in
# service = client.connect(
# host=DESKTOP_HOST,
# port=PORT,
# username=USERNAME,
# password=PASSWORD)
# # Switch to nobody context
# service.namespace['owner'] = 'nobody'
# # Create a test KV store
# collections = service.kvstore
# fields = {
# "ip": "string"
# }
# #newkv = collections.create("test", fields=fields )
# # Print out the list of KV store collections in Splunk
# print("KVStore Collections")
# print("-" * 10)
# for a in collections.iter():
# print(a.name)
# Main REPL Loop
session = PromptSession(completer=sql_completer)
while True:
try:
text = session.prompt('> ')
except KeyboardInterrupt:
continue
except EOFError:
break
else:
print('You entered:', text)
print('Goodbye!')
if __name__ == "__main__":
main()
| 2,719 |
arcpyext/_str/__init__.py
|
PeterReyne/arcpyext
| 11 |
2169458
|
from .ExtendedFormatter import ExtendedFormatter as _ExtendedFormatter
from .sql import *
eformat = _ExtendedFormatter()
| 121 |
Cosine_Similarity.py
|
CODEJIN/Tensorflow_Code_for_Tests
| 0 |
2170284
|
import tensorflow as tf;
def Cosine_Similarity(x,y):
"""
Compute the cosine similarity between same row of two tensors.
Args:
x: nd tensor (...xMxN).
y: nd tensor (...xMxN). A tensor of the same shape as x
Returns:
cosine_Similarity: A (n-1)D tensor representing the cosine similarity between the rows. Size is (...xM)
"""
return tf.reduce_sum(x * y, axis=-1) / (tf.sqrt(tf.reduce_sum(tf.pow(x, 2), axis=-1)) * tf.sqrt(tf.reduce_sum(tf.pow(y, 2), axis=-1)));
def Cosine_Similarity2D(x, y):
"""
Compute the cosine similarity between each row of two tensors.
Args:
x: 2d tensor (MxN). The number of second dimension should be same to y's second dimension.
y: 2d tensor (LxN). The number of second dimension should be same to x's second dimension.
Returns:
cosine_Similarity: A `Tensor` representing the cosine similarity between the rows. Size is (M x L)
"""
tiled_X = tf.tile(tf.expand_dims(x, [1]), multiples = [1, tf.shape(y)[0], 1]); #[M, L, N]
tiled_Y = tf.tile(tf.expand_dims(y, [0]), multiples = [tf.shape(x)[0], 1, 1]); #[M, L, N]
cosine_Similarity = tf.reduce_sum(tiled_Y * tiled_X, axis = 2) / (tf.sqrt(tf.reduce_sum(tf.pow(tiled_Y, 2), axis = 2)) * tf.sqrt(tf.reduce_sum(tf.pow(tiled_X, 2), axis = 2)) + 1e-8) #[M, L]
cosine_Similarity = tf.identity(cosine_Similarity, name="cosine_Similarity");
return cosine_Similarity;
def Batch_Cosine_Similarity2D(x, y):
"""
Compute the cosine similarity between each row of two tensors.
Args:
x: 3d tensor (BATCHxMxN). The number of first and third dimension should be same to y's first and third dimension.
y: 3d tensor (BATCHxLxN). The number of first and third dimension should be same to x's first and third dimension.
Returns:
cosine_Similarity: A `Tensor` representing the cosine similarity between the rows. Size is (BATCH x M x L)
"""
tiled_X = tf.tile(tf.expand_dims(x, [2]), multiples = [1, 1, tf.shape(y)[1], 1]); #[Batch, M, L, N]
tiled_Y = tf.tile(tf.expand_dims(y, [1]), multiples = [1, tf.shape(x)[1], 1, 1]); #[Batch, M, L, N]
cosine_Similarity = tf.reduce_sum(tiled_Y * tiled_X, axis = 3) / (tf.sqrt(tf.reduce_sum(tf.pow(tiled_Y, 2), axis = 3)) * tf.sqrt(tf.reduce_sum(tf.pow(tiled_X, 2), axis = 3)) + 1e-8) #[Batch, M, L]
cosine_Similarity = tf.identity(cosine_Similarity, name="cosine_Similarity");
return cosine_Similarity;
| 2,511 |
covid.py
|
Ayomide100/X-ray-scan-app-to-detect-covid-19
| 2 |
2169590
|
from flask import Flask, request, render_template, flash
app = Flask(__name__)
from common import get_tensor
from inference import diagnosis_type
@app.route('/', methods=['GET', 'POST'])
def hello_world():
if request.method == 'GET':
return render_template('index.html', value='hi')
if request.method == 'POST':
print(request.files)
file = request.files['file']
image = file.read()
name = diagnosis_type(image_bytes=image)
return render_template('result.html', name=name)
if __name__ == '__main__':
app.run(debug=True)
| 554 |
main.py
|
mylover106/ORB-detector-and-Match-algorithm-python-implementation
| 0 |
2169018
|
import cv2
import numpy as np
from ORBFeature import ORBFeature, show_match
def read_image(image_name: str, folder: str) -> np.ndarray:
image = cv2.imread(folder + image_name)
# cv2.imshow("bird.jpg", image)
# cv2.waitKey(0)
return image
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
image_data1 = read_image('house1.jpg', '')
# print(image_data.shape)
# print(type(image_data))
gray_image_data1 = cv2.cvtColor(image_data1, cv2.COLOR_BGR2GRAY)
# fast = OrientedFast(gray_image_data, 9, 0.2)
# fast.detector()
# fast.show_interest_points(image_data)
orb1 = ORBFeature(gray_image_data1, 12, 4, 0.2)
orb1.detector()
# orb1.show_corner_points(image_data1)
image_data2 = read_image('house2.jpg', '')
gray_image_data2 = cv2.cvtColor(image_data2, cv2.COLOR_BGR2GRAY)
orb2 = ORBFeature(gray_image_data2, 12, 4, 0.2)
orb2.detector()
# orb2.show_corner_points(image_data2)
show_match(orb1, orb2, image_data1, image_data2)
| 1,039 |
pirates/effects/WaterSplash.py
|
ksmit799/POTCO-PS
| 8 |
2169384
|
# File: W (Python 2.4)
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from pirates.piratesgui.GameOptions import Options
from EffectController import EffectController
from PooledEffect import PooledEffect
import random
class WaterSplash(PooledEffect, EffectController):
cardScale = 64.0
def __init__(self, parent = None):
PooledEffect.__init__(self)
EffectController.__init__(self)
if parent is not None:
self.reparentTo(parent)
self.effectScale = 1.0
self.setDepthWrite(0)
self.setLightOff()
self.f = ParticleEffect.ParticleEffect('WaterSplash')
self.f.reparentTo(self)
model = loader.loadModel('models/effects/particleMaps')
self.card = model.find('**/pir_t_efx_env_waterSplash')
self.p0 = Particles.Particles('particles-1')
self.p0.setFactory('PointParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('RectangleEmitter')
self.f.addParticles(self.p0)
self.p0.setPoolSize(24)
self.p0.setBirthRate(0.014999999999999999)
self.p0.setLitterSize(1)
self.p0.setLitterSpread(0)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(1)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.factory.setLifespanBase(0.75)
self.p0.factory.setLifespanSpread(0.10000000000000001)
self.p0.factory.setMassBase(1.0)
self.p0.factory.setMassSpread(0.0)
self.p0.factory.setTerminalVelocityBase(400.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
self.p0.renderer.setUserAlpha(0.5)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(1)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p0.renderer.setAlphaDisable(0)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setAmplitude(1.0)
self.p0.emitter.setAmplitudeSpread(0.0)
self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
self.setEffectScale(1.0)
def createTrack(self, lod = Options.SpecialEffectsHigh):
self.startEffect = Sequence(Func(self.p0.setBirthRate, 0.014999999999999999), Func(self.p0.clearToInitial), Func(self.f.start, self, self), Func(self.f.reparentTo, self))
self.endEffect = Sequence(Func(self.p0.setBirthRate, 100.0), Wait(1.0), Func(self.cleanUpEffect))
self.track = Sequence(self.startEffect, Wait(10.0), self.endEffect)
def setEffectScale(self, effectScale):
self.effectScale = effectScale
self.p0.renderer.setInitialXScale(0.029999999999999999 * self.cardScale * effectScale)
self.p0.renderer.setFinalXScale(0.02 * self.cardScale * effectScale)
self.p0.renderer.setInitialYScale(0.02 * self.cardScale * effectScale)
self.p0.renderer.setFinalYScale(0.050000000000000003 * self.cardScale * effectScale)
self.p0.emitter.setAmplitude(1.0 * effectScale)
self.p0.emitter.setAmplitudeSpread(0.5 * effectScale)
self.p0.emitter.setOffsetForce(Vec3(0.0, 2.0, 10.0) * effectScale)
self.p0.emitter.setMinBound(Point2(-6.0, -0.5) * effectScale)
self.p0.emitter.setMaxBound(Point2(6.0, 0.5) * effectScale)
def playSoundFX(self):
base.playSfx(self.waterfallSfx, volume = 1, looping = 1, node = self)
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
| 4,144 |
core/ydk/providers/_value_encoder.py
|
tkamata-test/ydk-py
| 0 |
2167238
|
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
""" _value_encoder.py
Value encoder.
"""
from __future__ import unicode_literals
import logging
import importlib
from ydk._core._dm_meta_info import REFERENCE_BITS, \
REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_LEAFLIST
from ydk.types import Empty, Decimal64, YListItem
from ._importer import _yang_ns
from functools import reduce
import sys
if sys.version_info > (3,):
long = int
class ValueEncoder(object):
def encode(self, member, NSMAP, value):
text = ''
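# Map the Python value to its YANG text representation according to the member's modeled type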
if member.mtype == REFERENCE_IDENTITY_CLASS or member.ptype.endswith('Identity'):
module = importlib.import_module(member.pmodule_name)
clazz = reduce(getattr, member.clazz_name.split('.'), module)
if issubclass(type(value), clazz):
identity_inst = value
if _yang_ns._namespaces[member.module_name] == _yang_ns._namespaces[identity_inst._meta_info().module_name]:
# no need for prefix in this case
text = identity_inst._meta_info().yang_name
else:
NSMAP['idx'] = _yang_ns._namespaces[identity_inst._meta_info().module_name]
text = 'idx:%s' % identity_inst._meta_info().yang_name
elif member.mtype == REFERENCE_BITS or member.ptype.endswith('Bits'):
module = importlib.import_module(member.pmodule_name)
clazz = reduce(getattr, member.clazz_name.split('.'), module)
if isinstance(value, clazz):
bits_value = value
value = " ".join([k for k in bits_value._dictionary if bits_value._dictionary[k] == True])
if (len(value) > 1):
text = value
elif member.mtype == REFERENCE_ENUM_CLASS or member.ptype.endswith('Enum'):
enum_value = value
module = importlib.import_module(member.pmodule_name)
enum_clazz = reduce(getattr, member.clazz_name.split('.'), module)
literal_map = enum_clazz._meta_info().literal_map
for yang_enum_name in literal_map:
literal = literal_map[yang_enum_name]
if enum_value == getattr(enum_clazz, literal) \
or enum_value == literal:
text = yang_enum_name
break
elif member.ptype == 'bool' and isinstance(value, bool):
if value is True:
text = 'true'
else:
text = 'false'
elif member.ptype == 'Empty' and isinstance(value, Empty):
pass
elif member.ptype == 'Decimal64' and isinstance(value, Decimal64):
text = value.s
elif member.ptype == 'str' and isinstance(value, str):
text = str(value)
elif member.ptype == 'int' and isinstance(value, (int, long)):
text = str(value)
else:
ydk_logger = logging.getLogger(__name__)
ydk_logger.info('Could not encode leaf {0}, type: {1}, {2} value: {3}'.format(member.name, member.mtype, member.ptype, value))
return text
| 3,820 |
attic/toil/tasks/MakeSnpLocationFile.py
|
vtrubets/ricopili_bioinfomatics
| 1 |
2170190
|
#!/usr/bin/env python
from toil.job import Job
import gzip as gz
def open_potentially_zipped(f):
if f.endswith('gz'):
res = gz.open(f, 'rb')
else:
res = open(f, 'r')
return res
class MakeSnpLocationFile(Job):
"""
Toil job to create the SNP location file.
Wrapping the following command line:
zcat $daner | awk '{print $2, $1, $3}' > ${snp_loc_file}
"""
def __init__(self, daner_file):
Job.__init__(self, memory="100M", cores=1, disk="100M")
self.daner_file = daner_file
def run(self, fileStore):
fileStore.logToMaster("Creating temporary SNP Location file.")
# write a local file and copy to the global filestore upon completion
snp_loc_file = fileStore.getLocalTempFile()
with open(snp_loc_file, 'w') as snp_loc_conn:
with open_potentially_zipped(self.daner_file) as daner_conn:
for line in daner_conn:
line = line.strip().split()
snp_loc_conn.write('\t'.join((line[1], line[0], line[2])) + '\n')
global_snp_loc_file = fileStore.writeGlobalFile(snp_loc_file)
return global_snp_loc_file
| 1,189 |
caffe_transforms.py
|
ruthcfong/pointing_game
| 3 |
2170233
|
import numpy as np
from skimage.transform import resize
from scipy.ndimage import zoom
import torch
from torchvision import transforms
def get_caffe_transform(size,
bgr_mean=[103.939, 116.779, 123.68],
scale=255):
"""Return a composition of transforms that replicates the caffe data
transformation pipeline.
"""
# Compose transform that replicates the order of transforms in caffe.io.
transform = transforms.Compose([
PILToNumpy(preserve_range=False, dtype=np.float32),
CaffeResize(size),
CaffeTranspose((2, 0, 1)),
CaffeChannelSwap((2, 1, 0)),
CaffeScale(scale),
CaffeNormalize(bgr_mean),
NumpyToTensor(),
])
return transform
class PILToNumpy(object):
"""Converts PIL image to numpy array.
Default behavior: change to numpy float32 array between [0,1].
"""
def __init__(self, preserve_range=False, dtype=np.float32):
self.preserve_range = preserve_range
self.dtype = dtype
def __call__(self, x):
# assert isinstance(x, Image)
x = np.array(x, dtype=self.dtype)
if not self.preserve_range:
x /= 255.
return x
class NumpyToTensor(object):
"""Converts numpy array to PyTorch tensor."""
def __call__(self, img):
x = torch.from_numpy(img)
return x
class CaffeResize(object):
"""Equivalent to caffe.io.resize_image if size = (height, width);
expects a numpy array in (H, W, C) order.
"""
def __init__(self, size, interp_order=1):
assert(isinstance(size, tuple)
or isinstance(size, list)
or isinstance(size, int))
self.size = size
self.interp_order = interp_order
def __call__(self, im):
assert isinstance(im, np.ndarray)
assert im.ndim == 3
# Resize smaller side to size if size is an integer.
if isinstance(self.size, int):
h, w, _ = im.shape
if w < h:
ow = self.size
oh = int(self.size * h / w)
else:
oh = self.size
ow = int(self.size * w / h)
size = (oh, ow)
# Otherwise, resize image to height.
else:
assert len(self.size) == 2
size = self.size
if im.shape[-1] == 1 or im.shape[-1] == 3:
im_min, im_max = im.min(), im.max()
if im_max > im_min:
# skimage is fast but only understands {1,3} channel images
# in [0, 1].
im_std = (im - im_min) / (im_max - im_min)
resized_std = resize(im_std, size, order=self.interp_order,
mode='constant')
resized_im = resized_std * (im_max - im_min) + im_min
else:
# the image is a constant -- avoid divide by 0
ret = np.empty((size[0], size[1], im.shape[-1]),
dtype=np.float32)
ret.fill(im_min)
return ret
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(size, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=self.interp_order)
return resized_im.astype(np.float32)
class CaffeTranspose(object):
"""Equivalent to caffe.io.set_transpose (default: (H,W,C) => (C,H,W))."""
def __init__(self, order=(2, 0, 1)):
self.order = order
def __call__(self, x):
if len(self.order) != x.ndim:
raise Exception('Transpose order needs to have the same number of '
'dimensions as the input.')
return x.transpose(self.order)
class CaffeChannelSwap(object):
"""Equivalent to caffe.io.set_channel_swap.
Default behavior: RGB <=> BGR. Assumes (C,H,W) format.
"""
def __init__(self, order=(2, 1, 0)):
self.order = order
def __call__(self, orig_img):
assert(isinstance(orig_img, np.ndarray)
or isinstance(orig_img, torch.Tensor))
assert(len(orig_img.shape) == 3)
if len(self.order) != orig_img.shape[0]:
raise Exception('Channel swap needs to have the same number of '
'dimensions as the input channels.')
new_img = orig_img[self.order, :, :]
return new_img
class CaffeScale(object):
"""Equivalent to caffe.io.set_raw_scale."""
def __init__(self, scale=255.):
assert isinstance(scale, int) or isinstance(scale, float)
self.scale = scale
def __call__(self, x):
assert isinstance(x, np.ndarray)
return x * self.scale
class CaffeNormalize(object):
"""Equivalent to caffe.io.set_mean for """
def __init__(self, mean):
if isinstance(mean, list):
mean = np.array(mean)
assert isinstance(mean, np.ndarray)
if mean.ndim == 1:
mean = mean[:, np.newaxis, np.newaxis]
else:
assert False
self.mean = mean
def __call__(self, x):
if self.mean.shape[0] != x.shape[0]:
raise ValueError('Mean channels incompatible with input.')
x -= self.mean
return x
| 5,346 |
mcp7940.py
|
AnthonyPapageo/micropython-mcp7940
| 0 |
2170242
|
class MCP7940:
"""
Example usage:
# Read time
mcp = MCP7940(i2c)
time = mcp.time # Read time from MCP7940
is_leap_year = mcp.is_leap_year() # Is the year in the MCP7940 a leap year?
# Set time
ntptime.settime() # Set system time from NTP
mcp.time = utime.localtime() # Set the MCP7940 with the system time
"""
ADDRESS = 0x6F
RTCSEC = 0x00 # RTC seconds register
ST = 7 # Status bit
RTCWKDAY = 0x03 # RTC Weekday register
VBATEN = 3 # External battery backup supply enable bit
ALM0EN = 4
ALM1EN = 5
ALM0WKDAY = 0xD
ALM1WKDAY = 0x14
CONTROL = 0x07
OUTPUT = 8
ALMPOL = 7
def __init__(self, bus, status=True, battery_enabled=True):
self._bus = bus
self.clear_output()
self.battery_backup_enable(1)
def start(self):
self._set_bit(MCP7940.RTCSEC, MCP7940.ST, 1)
def stop(self):
self._set_bit(MCP7940.RTCSEC, MCP7940.ST, 0)
def clear_output(self):
self._bus.write_byte_data(MCP7940.ADDRESS, MCP7940.CONTROL, 0x00)
def is_started(self):
return self._read_bit(MCP7940.RTCSEC, MCP7940.ST)
def battery_backup_enable(self, enable):
self._set_bit(MCP7940.RTCWKDAY, MCP7940.VBATEN, enable)
def is_battery_backup_enabled(self):
return self._read_bit(MCP7940.RTCWKDAY, MCP7940.VBATEN)
def _set_bit(self, register, bit, value):
""" Set only a single bit in a register. To do so, need to read
the current state of the register and modify just the one bit.
"""
mask = 1 << bit
current = self._bus.read_byte_data(MCP7940.ADDRESS, register)
updated = (current & ~mask) | ((value << bit) & mask)
self._bus.write_byte_data(MCP7940.ADDRESS, register, updated)
def _read_bit(self, register, bit):
register_val = self._bus.read_byte_data(MCP7940.ADDRESS, register)
return (register_val & (1 << bit)) >> bit  # read_byte_data returns an int, matching _set_bit above
@property
def time(self):
return self._get_time()
@time.setter
def time(self, t):
year, month, date, hours, minutes, seconds, weekday, yearday, _ = t
# Reorder
time_reg = [seconds, minutes, hours, weekday + 1, date, month, year % 100]
# Add ST (status) bit
# Add VBATEN (battery enable) bit
reg_filter = (0x7F, 0x7F, 0x3F, 0x07, 0x3F, 0x3F, 0xFF)
# t = bytes([MCP7940.bcd_to_int(reg & filt) for reg, filt in zip(time_reg, reg_filter)])
t = [(MCP7940.int_to_bcd(reg) & filt) for reg, filt in zip(time_reg, reg_filter)]
# Note that some fields will be overwritten that are important!
# fixme!
self._bus.write_i2c_block_data(MCP7940.ADDRESS, 0x00, t)
def alarm1_time(self):
return self._get_time(start_reg=0x0A)
def alarm1(self, t):
year, month, date, hours, minutes, seconds, weekday, yearday, _ = t # Don't need year or yearday
# Reorder
time_reg = [seconds, minutes, hours, weekday + 1, date, month]
reg_filter = (0x7F, 0x7F, 0x3F, 0x07, 0x3F, 0x3F) # No year field for alarms
t = [(MCP7940.int_to_bcd(reg) & filt) for reg, filt in zip(time_reg, reg_filter)]
self._bus.write_i2c_block_data(MCP7940.ADDRESS, 0x0A, t)
register_val = self._bus.read_byte_data(MCP7940.ADDRESS, MCP7940.ALM0WKDAY)
register_val = register_val | 0x70 #set MSK
register_val = register_val & 0xF7 #clear previous alarm flag
self._bus.write_byte_data(MCP7940.ADDRESS,MCP7940.ALM0WKDAY,register_val)
self._set_bit(MCP7940.CONTROL, MCP7940.ALM0EN,1)
def alarm2_time(self):
return self._get_time(start_reg=0x11)
def alarm2(self, t):
year, month, date, hours, minutes, seconds, weekday, yearday, _ = t # Don't need year or yearday
# Reorder
time_reg = [seconds, minutes, hours, weekday + 1, date, month]
reg_filter = (0x7F, 0x7F, 0x3F, 0x07, 0x3F, 0x3F) # No year field for alarms
t = [(MCP7940.int_to_bcd(reg) & filt) for reg, filt in zip(time_reg, reg_filter)]
self._bus.write_i2c_block_data(MCP7940.ADDRESS, 0x11, t)
register_val = self._bus.read_byte_data(MCP7940.ADDRESS, MCP7940.ALM1WKDAY)
register_val = register_val | 0x70 #set MSK
register_val = register_val & 0xF7 #clear previous alarm flag
self._bus.write_byte_data(MCP7940.ADDRESS,MCP7940.ALM1WKDAY,register_val)
self._set_bit(MCP7940.CONTROL,MCP7940.ALM1EN,1)
def bcd_to_int(bcd):
""" Expects a byte encoded wtih 2x 4bit BCD values. """
# Alternative using conversions: int(str(hex(bcd))[2:])
return (bcd & 0xF) + (bcd >> 4) * 10
def int_to_bcd(i):
return (i // 10 << 4) + (i % 10)
def is_leap_year(year):
""" https://stackoverflow.com/questions/725098/leap-year-calculation """
if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:
return True
return False
def is_alarm_enabled(self):
register = self._bus.read_byte_data(MCP7940.ADDRESS, MCP7940.CONTROL)
is_alarm1_enabled = register & 0x10
is_alarm2_enabled = register & 0x20
return is_alarm1_enabled,is_alarm2_enabled
def set_alarm_polarity(self, pol):
self._set_bit(MCP7940.ALM0WKDAY,MCP7940.ALMPOL,pol)
def _get_time(self, start_reg = 0x00):
num_registers = 7 if start_reg == 0x00 else 6
time_reg = self._bus.read_i2c_block_data(MCP7940.ADDRESS, start_reg, num_registers) # Reading too much here for alarms
reg_filter = (0x7F, 0x7F, 0x3F, 0x07, 0x3F, 0x3F, 0xFF)[:num_registers]
t = [MCP7940.bcd_to_int(reg & filt) for reg, filt in zip(time_reg, reg_filter)]
# Reorder
t2 = (t[5], t[4], t[2], t[1], t[0], t[3] - 1)
t = (t[6] + 2000,) + t2 + (0,) if num_registers == 7 else t2
print(t)
return t
| 5,965 |
studies/ntuple-RDX_l0_hadron_tos_training_sample/gen_l0hadron_samples.py
|
umd-lhcb/lhcb-ntuples-gen
| 0 |
2169594
|
#!/usr/bin/env python
#
# Script to generate various samples for the trigger emulation
import os
import pathlib
import sys
# ntpIn = '../../ntuples/0.9.4-trigger_emulation/Dst_D0-mc/Dst_D0--21_04_21--mc--MC_2016_Beam6500GeV-2016-MagDown-Nu1.6-25ns-Pythia8_Sim09j_Trig0x6139160F_Reco16_Turbo03a_Filtered_11574021_D0TAUNU.SAFESTRIPTRIG.DST.root'
ntpIn = '../../ntuples/0.9.5-bugfix/Dst_D0-mc/Dst_D0--21_10_08--mc--MC_2016_Beam6500GeV-2016-MagDown-Nu1.6-25ns-Pythia8_Sim09j_Trig0x6139160F_Reco16_Turbo03a_Filtered_11574021_D0TAUNU.SAFESTRIPTRIG.DST.root'
if not pathlib.Path(ntpIn).is_file():
sys.exit(ntpIn+' does not exist, you need to download it')
def runCmd(cmd):
print(' \033[92m'+cmd+'\033[0m')
os.system(cmd)
def slim(tag, ntpIn):
yml = 'l0hadron_sample_'+tag+'.yml'
ntpOut = 'l0hadron_emu_'+tag+'.root'
runCmd('../../scripts/haddcut.py '+ntpOut+' '+ntpIn+' -s -c '+yml)
return ntpOut
## Slimming the trigger-matched ntuple
ntpNtm = slim('tm', ntpIn)
runCmd('root -l \'../../scripts/split_train_vali_test.C("'+ntpNtm+'", "50:50")\'')
## Slimming the non-trigger-matched ntuple
ntpNtm = slim('ntm', ntpIn)
runCmd('root -l \'../../scripts/split_train_vali_test.C("'+ntpNtm+'", "50:50")\'')
## Merge the training samples for XGB
runCmd('hadd -fk run2-rdx-train_xgb.root l0hadron_emu_tm_train.root l0hadron_emu_ntm_train.root')
## Only use trigger-matched training sample for BDG
runCmd('mv ./l0hadron_emu_tm_train.root ./run2-rdx-train_bdt.root')
## Remove unused ntuples
runCmd('rm l0hadron_emu*.root')
| 1,548 |
plot_mnist.py
|
palaviv/ml_mnist
| 0 |
2169172
|
from keras.datasets import mnist
import matplotlib.pyplot as plt
# load (downloaded if needed) the MNIST dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# plot 4 images as gray scale
plt.subplot(221)
plt.title(y_train[0])
plt.imshow(X_train[0], cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.title(y_train[1])
plt.imshow(X_train[1], cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.title(y_train[2])
plt.imshow(X_train[2], cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.title(y_train[3])
plt.imshow(X_train[3], cmap=plt.get_cmap('gray'))
# show the plot
plt.show()
| 582 |
miniserver_gateway/connectors/modbus/events/listeners.py
|
FastyBird/miniserver-gateway
| 0 |
2169197
|
#!/usr/bin/python3
# Copyright 2021. FastyBird s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Modbus connector plugin events module listeners
"""
# Library dependencies
from whistle import Event, EventDispatcher
# Library libs
from miniserver_gateway.connectors.modbus.consumers.consumer import Consumer
from miniserver_gateway.connectors.modbus.events.events import (
DeviceRecordUpdatedEvent,
RegisterActualValueEvent,
)
class EventsListener:
"""
Plugin events listener
@package FastyBird:ModbusConnectorPlugin!
@module listeners
@author <NAME> <<EMAIL>>
"""
__consumer: Consumer
__event_dispatcher: EventDispatcher
# -----------------------------------------------------------------------------
def __init__(
self,
consumer: Consumer,
event_dispatcher: EventDispatcher,
) -> None:
self.__consumer = consumer
self.__event_dispatcher = event_dispatcher
# -----------------------------------------------------------------------------
def open(self) -> None:
"""Open all listeners callbacks"""
self.__event_dispatcher.add_listener(
event_id=DeviceRecordUpdatedEvent.EVENT_NAME,
listener=self.__handle_device_updated_event,
)
self.__event_dispatcher.add_listener(
event_id=RegisterActualValueEvent.EVENT_NAME,
listener=self.__handle_register_actual_value_updated_event,
)
# -----------------------------------------------------------------------------
def close(self) -> None:
"""Close all listeners registrations"""
self.__event_dispatcher.remove_listener(
event_id=DeviceRecordUpdatedEvent.EVENT_NAME,
listener=self.__handle_device_updated_event,
)
self.__event_dispatcher.remove_listener(
event_id=RegisterActualValueEvent.EVENT_NAME,
listener=self.__handle_register_actual_value_updated_event,
)
# -----------------------------------------------------------------------------
def __handle_device_updated_event(self, event: Event) -> None:
if not isinstance(event, DeviceRecordUpdatedEvent):
return
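        # Forward the updated device record only when it actually differs
        # from the original record.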
if not event.updated_record.__eq__(event.original_record):
self.__consumer.propagate_device_record(device_record=event.updated_record)
# -----------------------------------------------------------------------------
def __handle_register_actual_value_updated_event(self, event: Event) -> None:
if not isinstance(event, RegisterActualValueEvent):
return
self.__consumer.propagate_register_record_value(register_record=event.updated_record)
| 3,304 |
foodcartapp/migrations/0049_auto_20201219_2057.py
|
Sam1808/Add-orders-to-the-online-store
| 0 |
2170305
|
# Generated by Django 3.0.7 on 2020-12-19 20:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('foodcartapp', '0048_auto_20201217_1212'),
]
operations = [
migrations.AlterField(
model_name='customerorder',
name='address',
field=models.CharField(max_length=100, verbose_name='адрес'),
),
]
| 421 |
PyObjCTest/test_nstypesetter.py
|
Khan/pyobjc-framework-Cocoa
| 132 |
2168442
|
from PyObjCTools.TestSupport import *
from AppKit import *
class TestNSTypesetterHelper (NSTypesetter):
def willSetLineFragmentRect_forGlyphRange_usedRect_baselineOffset_(self, a, b, c, d): return 1
def getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_(self, a, b, c, d, e, f): return 1
def getLineFragmentRect_usedRect_remainingRect_forStartingGlyphAtIndex_proposedRect_lineSpacing_paragraphSpacingBefore_paragraphSpacingAfter_(self, a, b, c, d, e, f, g, h): return 1
class TestNSTypesetter (TestCase):
def testConstants(self):
self.assertEqual(NSTypesetterZeroAdvancementAction, (1 << 0))
self.assertEqual(NSTypesetterWhitespaceAction, (1 << 1))
self.assertEqual(NSTypesetterHorizontalTabAction, (1 << 2))
self.assertEqual(NSTypesetterLineBreakAction, (1 << 3))
self.assertEqual(NSTypesetterParagraphBreakAction, (1 << 4))
self.assertEqual(NSTypesetterContainerBreakAction, (1 << 5))
def testMethods(self):
self.assertArgIsOut(NSTypesetter.characterRangeForGlyphRange_actualGlyphRange_, 1)
self.assertArgIsOut(NSTypesetter.glyphRangeForCharacterRange_actualCharacterRange_, 1)
self.assertArgHasType(NSTypesetter.setBidiLevels_forGlyphRange_, 0, b'n^' + objc._C_CHAR_AS_INT)
self.assertArgSizeInArg(NSTypesetter.setBidiLevels_forGlyphRange_, 0, 1)
self.assertArgIsIn(NSTypesetter.setLocation_withAdvancements_forStartOfGlyphRange_, 1)
self.assertArgSizeInArg(NSTypesetter.setLocation_withAdvancements_forStartOfGlyphRange_, 1, 2)
self.assertArgIsOut(NSTypesetter.layoutGlyphsInLayoutManager_startingAtGlyphIndex_maxNumberOfLineFragments_nextGlyphIndex_, 3)
self.assertArgIsOut(NSTypesetter.getLineFragmentRect_usedRect_forParagraphSeparatorGlyphRange_atProposedOrigin_, 0)
self.assertArgIsOut(NSTypesetter.getLineFragmentRect_usedRect_forParagraphSeparatorGlyphRange_atProposedOrigin_, 1)
self.assertResultHasType(NSTypesetter.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, objc._C_NSUInteger)
self.assertArgHasType(NSTypesetter.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, 0, NSRange.__typestr__)
self.assertArgHasType(TestNSTypesetterHelper.willSetLineFragmentRect_forGlyphRange_usedRect_baselineOffset_, 0, b'N^' + NSRect.__typestr__)
self.assertArgHasType(TestNSTypesetterHelper.willSetLineFragmentRect_forGlyphRange_usedRect_baselineOffset_, 1, NSRange.__typestr__)
self.assertArgHasType(TestNSTypesetterHelper.willSetLineFragmentRect_forGlyphRange_usedRect_baselineOffset_, 2, b'N^' + NSRect.__typestr__)
self.assertArgHasType(TestNSTypesetterHelper.willSetLineFragmentRect_forGlyphRange_usedRect_baselineOffset_, 3, b'N^' + objc._C_CGFloat)
self.assertResultHasType(TestNSTypesetterHelper.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, objc._C_NSUInteger)
self.assertArgHasType(TestNSTypesetterHelper.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, 0, NSRange.__typestr__)
self.assertArgHasType(TestNSTypesetterHelper.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, 1, b'o^I')
self.assertArgSizeInArg(TestNSTypesetterHelper.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, 1, 0)
self.assertArgHasType(TestNSTypesetterHelper.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, 2, b'o^' + objc._C_NSUInteger)
self.assertArgSizeInArg(TestNSTypesetterHelper.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, 2, 0)
self.assertArgHasType(TestNSTypesetterHelper.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, 3, b'o^' + objc._C_NSUInteger)
self.assertArgSizeInArg(TestNSTypesetterHelper.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, 3, 0)
self.assertArgHasType(TestNSTypesetterHelper.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, 4, b'o^' + objc._C_NSBOOL)
self.assertArgSizeInArg(TestNSTypesetterHelper.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, 4, 0)
self.assertArgHasType(TestNSTypesetterHelper.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, 5, b'o^' + objc._C_UCHR)
self.assertArgSizeInArg(TestNSTypesetterHelper.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_bidiLevels_, 5, 0)
self.assertArgHasType(NSTypesetter.getLineFragmentRect_usedRect_remainingRect_forStartingGlyphAtIndex_proposedRect_lineSpacing_paragraphSpacingBefore_paragraphSpacingAfter_, 0, b'N^' + NSRect.__typestr__)
self.assertArgHasType(NSTypesetter.getLineFragmentRect_usedRect_remainingRect_forStartingGlyphAtIndex_proposedRect_lineSpacing_paragraphSpacingBefore_paragraphSpacingAfter_, 1, b'N^' + NSRect.__typestr__)
self.assertArgHasType(NSTypesetter.getLineFragmentRect_usedRect_remainingRect_forStartingGlyphAtIndex_proposedRect_lineSpacing_paragraphSpacingBefore_paragraphSpacingAfter_, 2, b'N^' + NSRect.__typestr__)
self.assertArgHasType(NSTypesetter.getLineFragmentRect_usedRect_remainingRect_forStartingGlyphAtIndex_proposedRect_lineSpacing_paragraphSpacingBefore_paragraphSpacingAfter_, 3, objc._C_NSUInteger)
self.assertArgHasType(NSTypesetter.getLineFragmentRect_usedRect_remainingRect_forStartingGlyphAtIndex_proposedRect_lineSpacing_paragraphSpacingBefore_paragraphSpacingAfter_, 4, NSRect.__typestr__)
self.assertArgHasType(NSTypesetter.getLineFragmentRect_usedRect_remainingRect_forStartingGlyphAtIndex_proposedRect_lineSpacing_paragraphSpacingBefore_paragraphSpacingAfter_, 5, objc._C_CGFloat)
self.assertArgHasType(NSTypesetter.getLineFragmentRect_usedRect_remainingRect_forStartingGlyphAtIndex_proposedRect_lineSpacing_paragraphSpacingBefore_paragraphSpacingAfter_, 6, objc._C_CGFloat)
self.assertArgHasType(NSTypesetter.getLineFragmentRect_usedRect_remainingRect_forStartingGlyphAtIndex_proposedRect_lineSpacing_paragraphSpacingBefore_paragraphSpacingAfter_, 7, objc._C_CGFloat)
@min_os_level("10.5")
def testMethods10_5(self):
self.assertArgHasType(NSTypesetter.printingAdjustmentInLayoutManager_forNominallySpacedGlyphRange_packedGlyphs_count_, 2, b'n^v')
self.assertArgSizeInArg(NSTypesetter.printingAdjustmentInLayoutManager_forNominallySpacedGlyphRange_packedGlyphs_count_, 2, 3)
if __name__ == "__main__":
main()
| 6,740 |
home/urls.py
|
sandipsahajoy/Distributed-Social-Networking
| 5 |
2169519
|
from django.urls import path
from .views import *
urlpatterns = [
path('', home, name='home'),
path('authors/<str:author_pk>', profile, name='profile'),
path('authors/<str:author_pk>/posts', post_list, name='home_post_list'),
path('sendfollowrequest', follow_request, name='follow_request'),
path('post/create', post_create, name='home_post_create'),
path('followers', followers, name='followers'),
path('post/<str:post_pk>', post_view, name='home_post'),
path('post/<str:post_pk>/edit', post_edit, name='home_post_edit'),
path('post/<str:post_pk>/share', share_post, name='share_post'),
path('inbox', inbox, name='inbox'),
path('followers/<str:foreign_author_pk>', follower_detail, name='follower_details')
]
| 755 |
export.py
|
18thCentury/CodeSys
| 8 |
2167559
|
# encoding:utf-8
# We enable the new python 3 print syntax
from __future__ import print_function
import os
import shutil
import time
import subprocess
'''
prop_method = Guid('792f2eb6-721e-4e64-ba20-bc98351056db')
tp = Guid('2db5746d-d284-4425-9f7f-2663a34b0ebc') #dut
libm = Guid('adb5cb65-8e1d-4a00-b70a-375ea27582f3')
method_no_ret = Guid('f89f7675-27f1-46b3-8abb-b7da8e774ffd')
act = Guid('8ac092e5-3128-4e26-9e7e-11016c6684f2')
fb = Guid('6f9dac99-8de1-4efc-8465-68ac443b7d08')
itf = Guid('6654496c-404d-479a-aad2-8551054e5f1e')
folder = Guid('738bea1e-99bb-4f04-90bb-a7a567e74e3a')
gvl = Guid('ffbfa93a-b94d-45fc-a329-229860183b1d')
prop = Guid('5a3b8626-d3e9-4f37-98b5-66420063d91e')
textlist = Guid('2bef0454-1bd3-412a-ac2c-af0f31dbc40f')
global_textlist = Guid('63784cbb-9ba0-45e6-9d69-babf3f040511')
Device = Guid('225bfe47-7336-4dbc-9419-4105a7c831fa')
task_config = Guid('ae1de277-a207-4a28-9efb-456c06bd52f3')
method = Guid('f8a58466-d7f6-439f-bbb8-d4600e41d099')
gvl_Persistent = Guid('261bd6e6-249c-4232-bb6f-84c2fbeef430')
Project_Settings =Guid('8753fe6f-4a22-4320-8103-e553c4fc8e04')
Plc_Logic =Guid('40b404f9-e5dc-42c6-907f-c89f4a517386')
Application =Guid('639b491f-5557-464c-af91-1471bac9f549')
Task =Guid('98a2708a-9b18-4f31-82ed-a1465b24fa2d')
Task_pou =Guid('413e2a7d-adb1-4d2c-be29-6ae6e4fab820')
Visualization =Guid('f18bec89-9fef-401d-9953-2f11739a6808')
Visualization_Manager=Guid('4d3fdb8f-ab50-4c35-9d3a-d4bb9bb9a628')
TargetVisualization =Guid('bc63f5fa-d286-4786-994e-7b27e4f97bd5')
WebVisualization =Guid('0fdbf158-1ae0-47d9-9269-cd84be308e9d')
__VisualizationStyle=Guid('8e687a04-7ca7-42d3-be06-fcbda676c5ef')
ImagePool =Guid('bb0b9044-714e-4614-ad3e-33cbdf34d16b')
Project_Information =Guid('085afe48-c5d8-4ea5-ab0d-b35701fa6009')
SoftMotion_General_Axis_Pool=Guid('e9159722-55bc-49e5-8034-fbd278ef718f')
'''
print("--- Saving files in the project: ---")
# git
has_repo=False
save_folder=r'D:\Gitlab\codesys\Yao'
if not os.path.exists(save_folder):
os.makedirs(save_folder)
else:
    # non-empty folder: remove leftover entries (except dot-directories)
a=os.listdir(save_folder)
for f in a:
        if not f.startswith("."): # keep .svn / .git directories
sub_path= os.path.join(save_folder,f)
if os.path.isdir(sub_path):
shutil.rmtree(sub_path)
else:
os.remove(sub_path)
elif f==".git":
has_repo=True
info={}
type_dist={
'792f2eb6-721e-4e64-ba20-bc98351056db':'pm', #property method
'2db5746d-d284-4425-9f7f-2663a34b0ebc':'dut', #dut
'adb5cb65-8e1d-4a00-b70a-375ea27582f3':'lib', #lib manager
'f89f7675-27f1-46b3-8abb-b7da8e774ffd':'m', #method no ret
'8ac092e5-3128-4e26-9e7e-11016c6684f2':'act', #action
'6f9dac99-8de1-4efc-8465-68ac443b7d08':'pou', #pou
'6654496c-404d-479a-aad2-8551054e5f1e':'itf', #interface
'738bea1e-99bb-4f04-90bb-a7a567e74e3a':'', # folder
'ffbfa93a-b94d-45fc-a329-229860183b1d':'gvl', #global var
'5a3b8626-d3e9-4f37-98b5-66420063d91e':'prop', #property
'2bef0454-1bd3-412a-ac2c-af0f31dbc40f':'tl', #textlist
'63784cbb-9ba0-45e6-9d69-babf3f040511':'gtl', #global textlist
'225bfe47-7336-4dbc-9419-4105a7c831fa':'dev', #device
'ae1de277-a207-4a28-9efb-456c06bd52f3':'tc', #task configuration
'f8a58466-d7f6-439f-bbb8-d4600e41d099':'m', #method with ret
'261bd6e6-249c-4232-bb6f-84c2fbeef430':'gvl', #gvl_Persistent
'98a2708a-9b18-4f31-82ed-a1465b24fa2d':'task'
};
def save(text,path,name,tp):
if not tp:
tp=''
else:
tp='.'+tp
with open(os.path.join(path,name+tp),'w') as f:
f.write(text.encode('utf-8'))
'''
def get_mtype(a):
b=a.text
b=b.split("\n")
for a in b:
if a.find('FUNCTION_BLOCK ') >=0 :
return "fb"
elif a.find("FUNCTION ") >=0:
return "fct"
elif a.find('METHOD ')>=0 :
return "m"
elif a.find("INTERFACE ")>=0:
return "itf"
elif a.find("TYPE ")>=0:
#
return "tp"
elif a.find("PROPERTY ")>=0 or a.find("PROPERTY\r\n")>=0:
return "prop"
elif a.find("PROGRAM ")>=0 or a.find("PROGRAM\r\n")>=0:
return "prg"
elif a.find("VAR_GLOBAL")>=0 or a.find("VAR_CONFIG") >=0:
return 'gvl'
return ""
'''
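# print_tree walks the CODESYS project tree recursively: it maps each object's
# type GUID to a file suffix via type_dist, exports tasks / library managers /
# text lists with the native exporters, concatenates the textual declaration and
# implementation into one file, and mirrors the tree as folders under save_folder.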
def print_tree(treeobj, depth,path):
global info
#record current Path
curpath=path
isfolder=False
t='' #text
tp='' #type
# get object name
name = treeobj.get_name(False)
id = treeobj.type.ToString()
if id in type_dist:
tp = type_dist[treeobj.type.ToString()]
else:
info[id]=name
if treeobj.is_device:
deviceid = treeobj.get_device_identification()
t = 'type='+str(deviceid.type) +'\nid=' +str(deviceid.id) + '\nver='+ str(deviceid.version)
try:
if treeobj.is_folder :
#system.ui.prompt('folder:'+u, PromptChoice.YesNo, PromptResult.Yes)
            isfolder = True
pass
except:
pass
if treeobj.has_textual_declaration :
t=t+'(*#-#-#-#-#-#-#-#-#-#---Declaration---#-#-#-#-#-#-#-#-#-#-#-#-#*)\r\n'
a=treeobj.textual_declaration
t=t+a.text
if treeobj.has_textual_implementation:
t=t+'(*#-#-#-#-#-#-#-#-#-#---Implementation---#-#-#-#-#-#-#-#-#-#-#-#-#*)\r\n'
a=treeobj.textual_implementation
t=t+a.text
'''
if treeobj.is_task_configuration:
exports=[treeobj]
projects.primary.export_native(exports,os.path.join(curpath,name+'.tc'))
'''
if treeobj.is_task :
exports=[treeobj]
projects.primary.export_native(exports,os.path.join(curpath,name+'.task'))
if treeobj.is_libman:
exports=[treeobj]
projects.primary.export_native(exports,os.path.join(curpath,name+'.lib'))
if treeobj.is_textlist:
treeobj.export(os.path.join(curpath,name+'.tl'))
children = treeobj.get_children(False)
if children or isfolder:
if tp:
curpath=os.path.join(curpath,name+'.'+tp)
else:
curpath=os.path.join(curpath,name)
if not os.path.exists(curpath):
os.makedirs(curpath)
if t:
save(t,curpath,name,tp)
for child in treeobj.get_children(False):
print_tree(child, depth+1,curpath)
for obj in projects.primary.get_children():
print_tree(obj,0,save_folder)
with open(os.path.join(save_folder,'s.txt'),'w') as f:
f.write(str(info))
if has_repo:
os.chdir(save_folder)
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
subprocess.call('"D:\\Program Files\\Git\\bin\\git.exe" add .', startupinfo=si)
subprocess.call('"D:\\Program Files\\Git\\bin\\git.exe" commit -m "'+time.strftime('%Y-%m-%d %H:%M',time.localtime(time.time()))+'"', startupinfo=si)
else:
os.chdir(save_folder)
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
subprocess.call('"D:\\Program Files\\Git\\bin\\git.exe" init', startupinfo=si)#'cd '+ save_folder + " && " + 'git init')
subprocess.call('"D:\\Program Files\\Git\\bin\\git.exe" add .', startupinfo=si)
subprocess.call('"D:\\Program Files\\Git\\bin\\git.exe" commit -m "'+time.strftime('%Y-%m-%d %H:%M',time.localtime(time.time()))+'"', startupinfo=si)
print("--- Script finished. ---")
system.ui.info('save ok')
| 7,056 |
source/code/main.py
|
digitalbitch/thron
| 0 |
2170022
|
#lifted from hacking with python , alphy books
#requires : https://sourceforge.net/projects/pyhook/files/latest/download?source=files
import pyHook
import pythoncom
def keypress(event):
if event.Ascii:
char=chr(event.Ascii)
print char
if char=="~":
exit();
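# Python 2 / Windows-only: pyHook installs a low-level keyboard hook and
# pythoncom.PumpMessages() blocks while dispatching Windows messages, so
# keypress() keeps firing on every KeyDown until a '~' is typed.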
hm=pyHook.HookManager()
hm.KeyDown=keypress
hm.HookKeyboard()
pythoncom.PumpMessages()
exit()
| 393 |
balsa/__version__.py
|
gh-mrice/balsa
| 0 |
2170327
|
__title__ = "balsa"
__description__ = "logging utility"
__url__ = "https://github.com/jamesabel/balsa"
__download_url__ = "https://github.com/jamesabel/balsa/archive/master.zip"
__version__ = "0.7.1"
__author__ = "<NAME>"
__author_email__ = "<EMAIL>"
__license__ = "MIT"
__copyright__ = "Copyright 2018-2019 <NAME>"
| 316 |
python_modules/dagster/dagster/core/storage/sqlite.py
|
asamoal/dagster
| 0 |
2169900
|
import os
import sqlite3
import dagster._check as check
def create_db_conn_string(base_dir, db_name):
check.str_param(base_dir, "base_dir")
check.str_param(db_name, "db_name")
path_components = os.path.abspath(base_dir).split(os.sep)
db_file = "{}.db".format(db_name)
return "sqlite:///{}".format("/".join(path_components + [db_file]))
def get_sqlite_version():
return str(sqlite3.sqlite_version)
| 427 |
perf/test_sam_hists.py
|
jlashner/ares
| 10 |
2169042
|
"""
test_sam_histories.py
Author: <NAME>
Affiliation: McGill
Created on: Thu 8 Aug 2019 18:38:06 EDT
Description:
"""
import sys
import ares
N = int(sys.argv[1])
pars = ares.util.ParameterBundle('in_prep:base').pars_by_pop(0, 1)
pars['verbose'] = False
pars['progress_bar'] = False
for i in range(N):
print("Running iteration {}/{}".format(i+1, N))
pop = ares.populations.GalaxyPopulation(**pars)
hist = pop.histories
| 456 |
adspygoogle/adwords/util/XsdToWsdl.py
|
cherry-wb/googleads-python-lib
| 2 |
2169266
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to create a fake WSDL to house an XSD."""
__author__ = '<EMAIL> (<NAME>)'
import re
import urllib
from adspygoogle import SOAPpy
from adspygoogle.SOAPpy import wstools
WSDL_TEMPLATE = ('<?xml version="1.0" encoding="UTF-8"?>'
'<wsdl:definitions xmlns:tns="https://example.com/fake"'
' xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"'
' xmlns:wsdlsoap="http://schemas.xmlsoap.org/wsdl/soap/"'
' xmlns:xsd="http://www.w3.org/2001/XMLSchema"'
' targetNamespace="https://example.com/fake">'
'<wsdl:types>%s</wsdl:types>'
'</wsdl:definitions>')
class FakeWsdlProxy(SOAPpy.WSDL.Proxy):
"""Class that pretends to be a SOAPpy.WSDL.Proxy for XSD defined objects."""
def __init__(self, wsdl):
self.wsdl = wsdl
def ElementToComplexType(xsd):
"""Replaces the first <element> tag with the complexType inside of it.
The complexType tag will use the name attribute from the removed element tag.
Args:
xsd: str XSD string contents.
Returns:
str: Modified XSD.
"""
xsd = re.sub(r'<(\w+:)?element name="(\w+)">\s*<(\w+:)?complexType>',
'<\\3complexType name="\\2">',
xsd)
xsd = re.sub(r'(\s+)?</(\w+:)?element>', '', xsd)
xsd = re.sub(r'<\?xml.*?>', '', xsd)
return xsd
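# For example, '<xsd:element name="Foo"><xsd:complexType>...' becomes
# '<xsd:complexType name="Foo">...', with the matching </element> tags and any
# XML declaration stripped, so the XSD types can sit directly inside <wsdl:types>.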
def CreateWsdlFromXsdUrl(xsd_url):
"""Creates a fake WSDL object we can use with our SOAPpy xml logic.
Args:
xsd_url: str URL the XSD can be located at.
Returns:
FakeWsdlProxy: A Fake WSDL proxy.
"""
wsdl = DownloadAndWrapXsdInWsdl(xsd_url)
reader = wstools.WSDLTools.WSDLReader()
parsed_wsdl = reader.loadFromString(wsdl)
return FakeWsdlProxy(parsed_wsdl)
def DownloadAndWrapXsdInWsdl(url):
"""Creates a fake WSDL text from XSD text.
Args:
url: str URL the XSD can be located at.
Returns:
str: WSDL wrapping the provided XSD.
"""
xsd = urllib.urlopen(url).read()
return WSDL_TEMPLATE % ElementToComplexType(xsd)
| 2,651 |
tests/monads_test.py
|
romelperez/substance
| 21 |
2168283
|
import unittest
from substance.monads import *
from collections import namedtuple
class TestMonads(unittest.TestCase):
def testJust(self):
self.assertTrue(Just(5) == Just(5))
self.assertFalse(Just(5) == Just(3))
self.assertEqual(Just(10), Just(5).map(lambda i: i * 2))
self.assertEqual(Just(10), Just(5).bind(lambda x: Just(x * 2)))
def testOperators(self):
base = Just(10)
testRshift = Just(5) >> (lambda x: Just(x * 2))
self.assertEqual(base, testRshift)
testLshift = Just(5)
testLshift <<= (lambda x: Just(x * 2))
self.assertEqual(base, testLshift)
self.assertTrue(isinstance(Maybe.of(True), Just))
self.assertTrue(isinstance(Maybe.of(None), Nothing))
self.assertTrue(isinstance(Maybe.of(), Nothing))
self.assertTrue(isinstance(Maybe.of(False), Just))
self.assertTrue(isinstance(Maybe.of(""), Just))
self.assertTrue(isinstance(Maybe.of(1), Just))
def testNothing(self):
self.assertTrue(Nothing() == Nothing())
self.assertFalse(Nothing() == Just(5))
self.assertEqual(Nothing(), Nothing().map(lambda x: x * 100))
self.assertEqual(Nothing(), Nothing().bind(lambda x: Just(x * 2)))
def testMaybe(self):
maybe = Just(5).map(lambda x: x * 2).bind(lambda x: Nothing()).map(lambda x: x * 2)
self.assertEqual(Nothing(), maybe)
maybe2 = Just(5).map(lambda x: x * 2).bind(lambda x: Just(x + 10)).map(lambda x: x * 2)
self.assertEqual(Just(40), maybe2)
def testRight(self):
self.assertTrue(Right(5) == Right(5))
self.assertFalse(Right(5) == Right(10))
self.assertEqual(Right(10), Right(5).map(lambda x: x * 2))
self.assertEqual(Right(10), Right(5).bind(lambda x: Right(x * 2)))
self.assertEqual("%s" % Right(10), "Right(10)")
def testLeft(self):
self.assertTrue(Left(5) == Left(5))
self.assertFalse(Left(5) == Left(10))
self.assertEqual(Left(5), Left(5).map(lambda x: x * 2))
self.assertEqual(Left(5), Left(5).bind(lambda x: Left(x * 2)))
self.assertEqual("%s" % Left(5), "Left(5)")
def testEither(self):
either = Right(5).map(lambda x: x * 2).bind(lambda x: Left("Did not work")).map(lambda x: x * 2)
self.assertEqual(Left("Did not work"), either)
either2 = Right(5).map(lambda x: x * 2).bind(lambda x: Right(x + 10)).map(lambda x: x * 2)
self.assertEqual(Right(40), either2)
def testOK(self):
self.assertTrue(OK(5) == OK(5))
self.assertFalse(OK(5) == OK(10))
self.assertEqual(OK(10), OK(5).map(lambda x: x * 2))
self.assertEqual(OK(10), OK(5).bind(lambda x: OK(x * 2)))
self.assertEqual("%s" % OK(5), "OK(5)")
def testFail(self):
valError = ValueError()
synError = SyntaxError()
self.assertTrue(Fail(valError) == Fail(valError))
self.assertFalse(Fail(valError) == Fail(synError))
self.assertEqual(Fail(valError), Fail(valError).map(lambda x: x * 2))
self.assertEqual(Fail(valError), Fail(valError).bind(lambda x: OK(x * 2)))
self.assertEqual("%s" % Fail(valError), "Fail(ValueError())")
self.assertEqual(Fail(valError), Fail(valError).catch(lambda x: 1337))
def testTry(self):
valError = ValueError()
synError = SyntaxError()
t3y = OK(10).map(lambda x: x * 2).bind(lambda x: Fail(valError)).map(lambda x: x * 2)
self.assertEqual(Fail(valError), t3y)
t3y2 = OK(10).map(lambda x: x * 2).bind(lambda x: OK(50)).map(lambda x: x * 2)
self.assertEqual(OK(100), t3y2)
def testTryAttempt(self):
t3r3 = Try.attempt(lambda: "Foo")
self.assertEqual(OK("Foo"), t3r3)
t3r3 = Try.attempt(Try.raiseError)
self.assertEqual(t3r3.__class__.__name__, "Fail")
def testTryThen(self):
valError = ValueError()
synError = SyntaxError()
attempt = (OK(10)
.then(lambda: OK(20))
.then(lambda: OK(30))
.then(lambda: OK(40)))
self.assertEqual(OK(40), attempt)
attempt2 = (OK(10)
.then(lambda: OK(20))
.then(lambda: Fail(synError))
.then(lambda: OK(40)))
self.assertEqual(Fail(synError), attempt2)
attemptRecover = (OK(10)
.then(lambda: OK(20))
.then(lambda: Fail(valError))
.then(lambda: OK(40))
.catch(lambda err: OK("Recovered")))
self.assertEqual(OK("Recovered"), attemptRecover)
attemptFail = (OK(10)
.then(lambda: OK(20))
.then(lambda: Fail(valError))
.then(lambda: OK(40))
.catch(lambda err: "foo"))
self.assertEqual(Fail(valError), attemptFail)
attemptReraise = (OK(10)
.then(lambda: OK(20))
.then(lambda: Fail(valError))
.then(lambda: OK(40))
.catch(lambda err: Fail(synError)))
self.assertEqual(Fail(synError), attemptReraise)
attemptCatchOK = (OK(10)
.then(lambda: OK(20))
.then(lambda: OK(40))
.catch(lambda err: OK("123")))
self.assertEqual(OK(40), attemptCatchOK)
def testTryThenIfBool(self):
valError = ValueError()
self.assertEqual(OK(10).thenIfBool(lambda *x: OK(20), lambda *x: Fail(valError)), Fail(valError))
self.assertEqual(OK(True).thenIfBool(lambda *x: OK(20), lambda *x: Fail(valError)), OK(20))
self.assertEqual(OK(False).thenIfBool(lambda *x: OK(20), lambda *x: Fail(valError)), Fail(valError))
self.assertEqual(Fail(valError).thenIfBool(lambda *x: OK(20), lambda *x: OK(20)), Fail(valError))
self.assertEqual(OK(True).thenIfTrue(lambda *x: OK(10)), OK(10))
self.assertEqual(OK(True).thenIfFalse(lambda *x: OK(10)), OK(True))
self.assertEqual(OK(False).thenIfTrue(lambda *x: OK(10)), OK(False))
self.assertEqual(OK(False).thenIfFalse(lambda *x: OK(10)), OK(10))
def testTryThenIfNone(self):
self.assertEqual(OK(10).thenIfNone(lambda *x: OK(20)), OK(10))
self.assertEqual(OK(None).thenIfNone(lambda *x: OK(20)), OK(20))
def testTryBindIf(self):
valError = ValueError()
self.assertEqual(OK(True).bindIf(lambda *x: OK(10), lambda *x: Fail(valError)), OK(10))
self.assertEqual(OK(False).bindIf(lambda *x: OK(10), lambda *x: Fail(valError)), Fail(valError))
self.assertEqual(OK(True).bindIfTrue(lambda *x: OK(10)), OK(10))
self.assertEqual(OK(True).bindIfFalse(lambda *x: OK(10)), OK(True))
self.assertEqual(OK(False).bindIfTrue(lambda *x: OK(10)), OK(False))
self.assertEqual(OK(False).bindIfFalse(lambda *x: OK(10)), OK(10))
def testTryCatchError(self):
valError = ValueError()
synError = SyntaxError()
self.assertEqual(OK(True).catchError(ValueError, lambda *x: OK(10)), OK(True))
self.assertEqual(OK(False).catchError(ValueError, lambda *x: OK(10)), OK(False))
self.assertEqual(Fail(synError).catchError(ValueError, lambda e: OK(10)), Fail(synError))
self.assertEqual(Fail(synError).catchError(SyntaxError, lambda e: OK(10)), OK(10))
def testFunctions(self):
self.assertEqual(defer(lambda x, y: x + y, 1, 2)(), 3)
self.assertEqual(defer(lambda x, y: x + y)(1, 2), 3)
valError = ValueError()
self.assertEqual(failWith(valError)(), Fail(valError))
self.assertEqual(failWith(valError)(1, 2, 3, 4), Fail(valError))
def fa(x):
return x + 2
def fb(x):
return x + 1
def fc(acc, x):
return acc + x
self.assertEqual(compose(fa, fb)(1), fa(fb(1)))
self.assertEqual(fold(fc, [1, 2, 3, 4], 0), 10)
def testMapM(self):
self.assertEqual(mapM(Try, lambda x: OK(x + x), [1, 2, 3, 4]), OK([2, 4, 6, 8]))
testList = []
self.assertEqual(mapM_(Try, lambda x: OK(testList.append(x + x)), [1, 2, 3, 4]), OK([1, 2, 3, 4]))
self.assertEqual([2, 4, 6, 8], testList)
self.assertEqual(OK([1, 2, 3, 4]).mapM(lambda x: OK(x + x)), OK([2, 4, 6, 8]))
testList = []
self.assertEqual(OK([1, 2, 3, 4]).mapM_(lambda x: OK(testList.append(x + x))), OK([1, 2, 3, 4]))
self.assertEqual([2, 4, 6, 8], testList)
def testSequence(self):
self.assertEqual(sequence(Try, [OK(1), OK(2), OK(3), OK(4)]), OK([1, 2, 3, 4]))
self.assertIsInstance(sequence(Try, [OK(1), OK(2), OK(3), Fail(ValueError())]), Fail)
self.assertEqual(Try.sequence([OK(1), OK(2), OK(3)]), OK([1, 2, 3]))
self.assertIsInstance(Try.sequence([OK(1), Fail(ValueError()), OK(3)]), Fail)
def testUnshiftM(self):
self.assertEqual(unshiftM(Try, OK([2, 3]), OK(1)), OK([1, 2, 3]))
def testFlatten(self):
self.assertEqual(['a', 'b', 'c', 'd', 'e', 'f'], flatten([['a', 'b'], ['c', 'd'], ['e', 'f']]))
| 9,255 |
meas/hero_run_nm_v2.py
|
kajoel/Simulating_quantum_systems_on_an_emulated_quantum_computer
| 1 |
2170413
|
"""
Recipe for long runs. This makes sure to save often and keeps the data in a
single file.
@author = <NAME>
"""
import core.interface
from os.path import basename
from functools import lru_cache
from core.interface import hamiltonians_of_size, vqe_nelder_mead, vqe_bayes
from core import vqe_eig, parallel
from core import callback as cb
import numpy as np
import sys
#4x4 max_meas=3e6, samples=68e3
#3x3 max_meas=3e6, samples=132e3
#2x2 max_meas=
# Input number of workers
run_kwargs = parallel.script_input(sys.argv)
version = 2
directory = 'hero_run_nm' # directory to save to
def identifier_generator():
max_meas = 3e6
for size, samples in zip([2, 3, 4], [97e3, 132e3, 68e3]):
for V in np.linspace(0, 1, 10):
for hamiltonian_idx in [1, 2]:
yield (size, hamiltonian_idx, V, int(max_meas), int(samples))
@lru_cache(maxsize=1)
def input_3(size, hamiltonian_idx, V):
if V == np.inf:
e = 0.
V = 1.
else:
e = 1
h, eig = hamiltonians_of_size(size, V, e)
return h[hamiltonian_idx], eig[hamiltonian_idx]
@lru_cache(maxsize=1)
def input_5(size, hamiltonian_idx, V, max_meas, samples):
    print(f'size={size}, Hamiltonian_idx={hamiltonian_idx}, V={V}, '
f'max_meas={max_meas}, samples={samples}')
return ()
input_functions = {3: input_3,
5: input_5}
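# parallel.run (below) presumably calls each input_function with the first N
# elements of an identifier tuple (N being the dict key) and appends the returned
# values -- here the Hamiltonian and its exact eigenvalue -- to the arguments of
# simulate(); lru_cache(maxsize=1) avoids rebuilding the Hamiltonian when
# consecutive identifiers share the same (size, hamiltonian_idx, V) prefix.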
def simulate(size, hamiltonian_idx, V, max_meas, samples, h, eig):
H, qc, ansatz_, initial_params = \
core.interface.create_and_convert('multi_particle', h)
vqe = vqe_nelder_mead(samples=samples, H=H, fatol=0, xatol=0)
tol_para = 1e-3
max_same_para = 3
callback = cb.restart(max_same_para, tol_para)
result = vqe_eig.smallest(H, qc, initial_params, vqe,
ansatz_, samples,
callback=callback, max_meas=max_meas)
result.correct = eig
return result
def file_from_id(identifier):
return f'size={identifier[0]}'
def metadata_from_id(identifier):
return {'description': 'Data for hero run.',
'identifier_description': ['size', 'hamiltonian_idx', 'V',
'max_meas', 'samples'],
'max_same_para_nm': 3,
'tol_para_nm': 1e-3,
'size': identifier[0]}
parallel.run(
simulate=simulate,
identifier_generator=identifier_generator,
input_functions=input_functions,
directory=directory,
version=version,
script_file=basename(__file__),
file_from_id=file_from_id,
metadata_from_id=metadata_from_id,
**run_kwargs
)
| 2,610 |
Control Structures/iterator.py
|
prathimacode-hub/PythonScripts
| 5 |
2170334
|
class iterator():
def __iter__(self):
self.a=1
return self
def __next__(self):
x=self.a
self.a+=1
return x
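# iter(s1) calls __iter__ (resetting a to 1) and each next() call returns the
# current value before incrementing it, so the three prints output 1, 2, 3.
# Note this iterator never raises StopIteration, i.e. it is infinite.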
s1=iterator()
myiter=iter(s1)
print(next(myiter))
print(next(myiter))
print(next(myiter))
| 250 |
like_and_retweet.py
|
james-flynn-ie/py-twitter-bot
| 2 |
2169016
|
import create_api
import logging
import sys
import tweepy
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger()
class likeRTListener(tweepy.StreamListener):
logger.debug("Creating StreamListener")
def __init__(self, api):
self.api = api
self.me = api.me()
def on_status(self, tweet):
logger.info(f"Liking and RTing id# {tweet.id}")
if tweet.in_reply_to_status_id is not None or \
tweet.user.id == self.me.id:
# We don't do anything if a tweet is either:
# - Written by the Twitter account user.
# - A reply to one of the Twitter account user's tweets.
return
if not tweet.favorited:
try:
tweet.favorite()
except Exception:
logger.exception(f"Failed to Like id# {tweet.id}",
exc_info=True)
if not tweet.retweeted:
try:
tweet.retweet()
except Exception:
logger.exception(f"Failed to Retweet id# {tweet.id}",
exc_info=True)
def on_error(self, status):
logger.error(status)
def main(keywords):
api = create_api.main()
tweets_listener = likeRTListener(api)
stream = tweepy.Stream(api.auth, tweets_listener)
logger.info(f"Filtering by keywords {keywords}")
stream.filter(track=keywords, languages=["en"])
if __name__ == "__main__":
# Each keyword represents an 'OR' condition.
# Choose the list carefully as it can get spammy very quickly!
main(["DevOps", "SRE"])
| 1,650 |
spocktest/model.py
|
kjczarne/spocktest
| 1 |
2168509
|
from typing import Any, Callable, Literal, Tuple, Type, List, NewType, Union, Optional, Dict
Context = Union[Tuple, Any, None]
"""Context passed around from setup to test"""
ExpOutput = Union[Tuple, Any]
"""Expected Output may be a Tuple or a primitive.
This is what the `expected` function should return."""
ActualOutput = Union[Tuple, Any, None]
"""Actual output may also be None, if only print statements
were used and no return statement was used."""
PrintCallback = Optional[Callable]
"""Optional function to populate the simulated standard
output stream."""
Snippet = Callable[[PrintCallback], ActualOutput]
"""The snippet that should be returned from the closure.
This will be injected verbatim into documentation."""
SupportedAssertions = Literal[
'assertEqual',
'assertNotEqual'
]
"""Supported unittest assertion functions."""
SnippetLinesCollection = Dict[str, List[str]]
"""Intermediate Snippet Collection used by the parser"""
SnippetsCollection = Dict[str, str]
"""Snippet Collection returned by the parser"""
| 1,039 |
Segmentation/pipeline_trainer.py
|
Shumway82/U-Net-Segmentation
| 1 |
2169779
|
from trainer import *
#from Segmentation.trainer import *
from tfcore.interfaces.IPipeline_Trainer import *
from tfcore.utilities.preprocessing import Preprocessing
import gflags
import os
import sys
class Pipeline_Params(IPipeline_Trainer_Params):
""" Simple example for Pipeline_Params
"""
def __init__(self,
data_dir_y,
data_dir_x,
validation_dir_x,
validation_dir_y,
output_dir,
convert=True,
epochs=25,
batch_size=16,
shuffle=True,
cache_size=1,
interp='bicubic'):
super().__init__(data_dir_x=data_dir_x,
data_dir_y=data_dir_y,
validation_dir_x=validation_dir_x,
validation_dir_y=validation_dir_y,
output_dir=output_dir,
convert=convert,
epochs=epochs,
batch_size=batch_size,
shuffle=shuffle,
cache_size=cache_size,
interp=interp)
self.validation_dir_x = validation_dir_x
class Training_Pipeline(IPipeline_Trainer):
""" Simple example of inherent from IPipeline and to create an class
# Arguments
trainer: Implementation of meta class ITrainer
params: Implementation of meta class IPipeline_Params
pre_processing: Implementation of class Preprocessing
"""
def __init__(self, trainer, params, pre_processing):
super().__init__(trainer, params, pre_processing)
def get_element(self, idx):
try:
img_y = imageio.imread(self.files_y[idx])
except FileNotFoundError:
raise FileNotFoundError(' [!] File not found of data-set Y')
if not self.params.convert:
try:
img_x = imageio.imread(self.files_x[idx])
except FileNotFoundError:
raise FileNotFoundError(' [!] File not found of data-set X')
else:
img_x = img_y
if self.pre_processing is not None:
img_x, img_y = self.pre_processing.run(img_x, img_y)
return img_x, img_y
def set_validation(self):
if self.params.validation_dir_x is not None:
files_val_x = sorted(get_img_paths(self.params.data_dir_x))
if len(files_val_x) == 0:
raise FileNotFoundError(' [!] No files in validation data-set')
if self.params.validation_dir_y is not None:
files_val_y = sorted(get_img_paths(self.params.data_dir_y))
if len(files_val_y) == 0:
raise FileNotFoundError(' [!] No files in validation data-set')
if self.params.validation_dir_x is not None:
files_unknown_x = sorted(get_img_paths(self.params.validation_dir_x))
if len(files_unknown_x) == 0:
raise FileNotFoundError(' [!] No files in validation data-set')
batch_val_x = np.asarray([imageio.imread(file) for file in files_val_x])
batch_val_y = np.asarray([imageio.imread(file) for file in files_val_y])
batch_val_y = batch_val_y.reshape((batch_val_y.shape[0],batch_val_y.shape[1],batch_val_y.shape[2], 1))
batch_unknown_x = np.asarray([imageio.imread(file) for file in files_unknown_x])
try:
self.trainer.set_validation_set(np.asarray(batch_val_x), np.asarray(batch_val_y), np.asarray(batch_unknown_x))
except Exception as err:
print(' [!] Error in Trainer on set_validation():', err)
raise
# Flags to configure from shell
flags = gflags.FLAGS
gflags.DEFINE_string("config_path", '', "Path for config files")
gflags.DEFINE_string("dataset", "../Data/", "Dataset path")
gflags.DEFINE_integer("loss", 2, "Loss function index")
gflags.DEFINE_integer("gpu", 0, "GPU index")
def main():
flags(sys.argv)
    # Trainer_Params which inherits from ITrainer_Params
model_params = Trainer_Params(image_size=256,
params_path=flags.config_path,
loss=flags.loss,
gpu=flags.gpu)
    # Trainer which inherits from ITrainer
model_trainer = UNET_Trainer(model_params)
# Pre-processing Pipeline
pre_processing = Preprocessing()
pre_processing.add_function_xy(Preprocessing.Flip(direction=('horizontal', 'vertical')).function)
pre_processing.add_function_xy(Preprocessing.Rotate(steps=1).function)
    # Pipeline_Params which inherits from IPipeline_Params
pipeline_params = Pipeline_Params(data_dir_x=os.path.join(flags.dataset, 'train_X'),
data_dir_y=os.path.join(flags.dataset, 'train_Y'),
validation_dir_x=os.path.join(flags.dataset, 'test_X'),
validation_dir_y=os.path.join(flags.dataset, 'train_Y'),
batch_size=model_params.batch_size,
epochs=model_params.epoch,
convert=False,
shuffle=True,
output_dir=None)
    # Pipeline which inherits from IPipeline
pipeline = Training_Pipeline(trainer=model_trainer, params=pipeline_params, pre_processing=pre_processing)
pipeline.set_validation()
# Start Training
pipeline.run()
if __name__ == "__main__":
main()
| 5,624 |
login.py
|
mrxiong520/test007
| 0 |
2167421
|
print("张三编写的功能代码")
mun = 10
print("经理编写的功能代码")
num =20
print("张三第二次编写的内容")
num = 100
print("张三第三次编写内容")
num = 1000
print("经理第二次编写代码功能")
num=200
print("经理第三次编写代码功能")
num = 2000
print("经理第四次编写代码功能")
num = 20000
| 212 |
pymnn/pip_package/MNN/tools/mnn_fb/LRN.py
|
xhuan28/MNN
| 3 |
2170341
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: MNN
import flatbuffers
class LRN(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsLRN(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = LRN()
x.Init(buf, n + offset)
return x
# LRN
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# LRN
def RegionType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# LRN
def LocalSize(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# LRN
def Alpha(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# LRN
def Beta(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
def LRNStart(builder): builder.StartObject(4)
def LRNAddRegionType(builder, regionType): builder.PrependInt32Slot(0, regionType, 0)
def LRNAddLocalSize(builder, localSize): builder.PrependInt32Slot(1, localSize, 0)
def LRNAddAlpha(builder, alpha): builder.PrependFloat32Slot(2, alpha, 0.0)
def LRNAddBeta(builder, beta): builder.PrependFloat32Slot(3, beta, 0.0)
def LRNEnd(builder): return builder.EndObject()
| 1,824 |
samples/smoketest/smoke_test.py
|
lmcarreiro/azure-sdk-for-python
| 0 |
2170028
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from key_vault_secrets import KeyVaultSecrets
from key_vault_keys import KeyVaultKeys
from key_vault_certificates import KeyVaultCertificates
from storage_blob import StorageBlob
from event_hubs import EventHub
from cosmos_db import CosmosDB
print("")
print("==========================================")
print(" AZURE TRACK 2 SDKs SMOKE TEST")
print("==========================================")
KeyVaultSecrets().run()
KeyVaultKeys().run()
KeyVaultCertificates().run()
StorageBlob().run()
EventHub().run()
CosmosDB().run()
| 682 |
factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/notifications/api/views.py
|
kaka-lin/azure-intelligent-edge-patterns
| 176 |
2168364
|
"""App API views.
"""
import logging
from filters.mixins import FiltersMixin
from rest_framework import filters, status, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from ..models import Notification
from .serializers import NotificationSerializer
logger = logging.getLogger(__name__)
class NotificationViewSet(FiltersMixin, viewsets.ModelViewSet):
"""Notification ModelViewSet"""
queryset = Notification.objects.all()
serializer_class = NotificationSerializer
filter_backends = (filters.OrderingFilter,)
filter_mappings = {"id": "id"}
@action(detail=False, methods=["delete"])
def delete_all(self, request) -> Response:
"""delete_all.
Args:
request:
Returns:
Response: HTTP_204_NO_CONTENT
"""
noti_objs = self.queryset.all()
if noti_objs.exists():
noti_objs.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| 1,003 |
landmark/landmark_pb2.py
|
reasonsolo/mtcnn_caffe
| 0 |
2169078
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: landmark.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='landmark.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x0elandmark.proto\"3\n\x05\x44\x61tum\x12\x0b\n\x03img\x18\x01 \x01(\x0c\x12\x0b\n\x03pts\x18\x02 \x03(\x02\x12\x10\n\x08\x65ye_dist\x18\x03 \x01(\x02')
)
_DATUM = _descriptor.Descriptor(
name='Datum',
full_name='Datum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='img', full_name='Datum.img', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pts', full_name='Datum.pts', index=1,
number=2, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eye_dist', full_name='Datum.eye_dist', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=18,
serialized_end=69,
)
DESCRIPTOR.message_types_by_name['Datum'] = _DATUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Datum = _reflection.GeneratedProtocolMessageType('Datum', (_message.Message,), dict(
DESCRIPTOR = _DATUM,
__module__ = 'landmark_pb2'
# @@protoc_insertion_point(class_scope:Datum)
))
_sym_db.RegisterMessage(Datum)
# @@protoc_insertion_point(module_scope)
| 2,529 |
RepCRec/workflow.py
|
ryi06/RepCRec
| 1 |
2170391
|
'''
CSCI-GA 2434 Advanced Database Systems Final Project
Replicated Concurrency Control and Recovery
Team:
<NAME>--<EMAIL>
<NAME>--<EMAIL>
Author: <NAME>
'''
import re
from .transaction_manager import TransactionManager
from .site_manager import SiteManager
from .utils import *
class Workflow(object):
def __init__(self):
self.file_name = None
self.stdin = None
def run(self):
# Initialize database
self.site_manager = SiteManager()
self.transaction_manager = TransactionManager(self.site_manager)
# Process
self.process_instructions()
def process_instructions(self):
"""Process stdin or input file"""
if self.file_name is not None:
T = open(self.file_name, 'r')
else:
T = self.stdin
for line in T:
# if record.startswith("//") or record.startswith(" "):
# continue
if line and line[0].isalpha():
record = self.parse_instruction(line)
self.distribute_instruction(record)
if self.file_name is not None:
T.close()
def parse_instruction(self, record):
"""parse transaction instructions one line at a tine"""
record = record.split("//")[0].strip()
keyword, params, _ = re.split(r'\((.*)\)', record)
return (keyword, params)
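    # For example, "W(T1,x1,101) // comment" is parsed into keyword "W" and
    # params "T1,x1,101"; name2id() (from utils) presumably maps names such as
    # "T1" or "x2" to their numeric ids before they reach the transaction manager.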
def distribute_instruction(self, record):
"""Pass instruction to transaction_manager"""
keyword, params = record
self.transaction_manager.clear_deadlocks()
# begin(T1)
if keyword == 'begin':
self.transaction_manager.begin(name2id(params), params)
# beginRO(T2)
if keyword == 'beginRO':
self.transaction_manager.begin_RO(name2id(params), params)
# R(T2,x2)
elif keyword == 'R':
t, x = params.split(",")
self.transaction_manager.read(name2id(t), name2id(x))
# W(T1,x1,101)
elif keyword == 'W':
t, x, v = params.split(",")
self.transaction_manager.write(name2id(t), name2id(x), int(v))
# dump()/dump(i)/dump(xi)
elif keyword == 'dump':
if params == "":
self.transaction_manager.dump()
elif "x" in params:
self.transaction_manager.dump(indices=[name2id(params)])
else:
self.transaction_manager.dump(sites=[int(params)])
# end(T1)
elif keyword == 'end':
self.transaction_manager.end(name2id(params))
# recover(2)
elif keyword == 'recover':
self.transaction_manager.recover(int(params))
# fail(2)
elif keyword == 'fail':
self.transaction_manager.fail(int(params))
| 2,369 |
formations/migrations/0001_initial.py
|
Kgermando/catsr
| 0 |
2167275
|
# Generated by Django 3.1.2 on 2020-12-01 08:53
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Formation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titre_formation', models.CharField(max_length=200)),
('slug', models.SlugField(blank=True, help_text='Laissez ce champ vide', unique=True)),
('img_formation', models.ImageField(upload_to='formations_img/')),
('content_formation', models.TextField()),
('editeur', models.CharField(max_length=200)),
('created', models.DateTimeField()),
],
),
]
| 862 |
clustering.py
|
paprikachan/biotool
| 0 |
2169937
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tenxtools.clustering
~~~~~~~~~~~~~~~~~~~~
@Copyright: (c) 2018-09 by <NAME> (<EMAIL>).
@License: LICENSE_NAME, see LICENSE for more details.
"""
from sklearn import decomposition
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(color_codes=True)
plt.switch_backend('agg')
def pca(X, y=None, n_components=2):
pca = decomposition.PCA(n_components=n_components)
pca.fit(X)
X = pca.transform(X)
return X
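# Usage sketch, assuming X is an (n_samples, n_features) array or DataFrame:
#   X2 = pca(X, n_components=2)   # rows of X projected onto the first two PCs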
def draw_hierarchical_clustering(out_prefix, X, y=None,
method='average', metric='euclidean',
standard_scale=None, z_score=None):
if y is not None:
lut = dict(zip(y.unique(), "rb"))
row_colors = y.map(lut)
row_colors.index = X.index
sns.clustermap(X, row_colors=row_colors,
method=method, metric=metric,
standard_scale=standard_scale, z_score=z_score,
)
else:
sns.clustermap(X,
method=method, metric=metric,
standard_scale=standard_scale, z_score=z_score,
)
plt.suptitle('Hierarchical Clustering:' + method + '.' + metric)
plt.savefig(out_prefix + '_' + method + '_' + metric + '.png', format='png')
def draw_PC2(out_prefix, X, y):
plt.figure(figsize=(8, 6))
tumor = X[0:8]
normal = X[8:16]
plt.plot(tumor['PC1'], tumor['PC2'], '*r', label='Tumor')
plt.plot(normal['PC1'], normal['PC2'], '*b', label='Normal')
plt.legend(loc='upper right')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.suptitle('PC2 Cluster')
plt.savefig(out_prefix + '_PC2.png', format='png')
| 1,762 |
Web Server/notify.py
|
Asienwald/SCDF-x-IBM-Lifesavers-Innovation
| 0 |
2170332
|
#!/usr/bin/env python
# using SendGrid's Python Library
# https://github.com/sendgrid/sendgrid-python
# Check https://app.sendgrid.com/guide/integrate/langs/python for API Key
import os
from API_KEYS import *
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
apikey = SENDGRID_API_KEY
sg = SendGridAPIClient(apikey)  # or: SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
def notify(subject,content,fromEmail="<EMAIL>", toEmail="<EMAIL>"):
print("Notifying", fromEmail, "=>", toEmail)
message = Mail(
from_email=fromEmail,
to_emails=toEmail,
subject=subject,
html_content=content)
try:
#sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
print("Sending")
response = sg.send(message)
print("###Response###################################")
print(response.status_code)
print(response.body)
print(response.headers)
except Exception as e:
print("Error:",e)
if __name__ == '__main__':
#sms("helo")
notify("hello","123")
pass
| 1,070 |
exercises/migrations/0002_auto_20210219_2235.py
|
cclauss/Workout-progress-tracker
| 0 |
2169809
|
# Generated by Django 3.1.6 on 2021-02-19 22:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('body', '0001_initial'),
('exercises', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='exercise',
name='body_part',
field=models.ManyToManyField(related_name='body_part', to='body.BodyPart'),
),
migrations.AddField(
model_name='exercise',
name='tips',
field=models.TextField(blank=True, max_length=1000),
),
]
| 616 |
tests/app/handlers/test_total_mobile_handler.py
|
ONSdigital/blaise-totalmobile-client
| 0 |
2168831
|
from unittest import mock
from app.handlers.total_mobile_handler import submit_form_result_request_handler
@mock.patch("app.handlers.total_mobile_handler.update_case_telephone_number")
def test_submit_form_result_request_handler_passes_the_correct_parameters_to_update_case_telephone_number(
mock_update_case_telephone_number, submit_form_result_request_sample
):
# arrange
mock_request = mock.Mock()
mock_request.get_json.return_value = submit_form_result_request_sample
# act
submit_form_result_request_handler(mock_request)
# assert
mock_update_case_telephone_number.assert_called_with(
"DST2111Z", "1001011", "07000000000"
)
| 678 |
html-cca-converter/test_cdr.py
|
sujen1412/memex-scripts
| 0 |
2169679
|
#!/usr/bin/env python2.7
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Id$
#
# Author: mattmann
# Description: This program reads a Common Crawl Architecture dump
# directory as generated by Apache Nutch, e.g,. see:
# https://wiki.apache.org/nutch/CommonCrawlDataDumper
# and then uses that CBOR-encoded JSON data as a basis for posting
# the data to Elasticsearch using this simple schema:
#
#
# {
# url : <url of raw page>,
# timestamp: <timestamp for data when scraped, in epoch milliseconds>,
# team: <name of crawling team>,
# crawler: <name of crawler; each type of crawler should have a distinct name or reference>,
# raw_content: <full text of raw crawled page>,
# content_type: <IANA mimetype representing the crawl_data content>,
# crawl_data {
# content: <optional; used to store cleaned/processed text, etc>,
# images:[an array of URIs to the images present within the document],
# videos:[an array of URIs to the videos present within the document]
# }
# To call this program, do something like the following
#
# ./memex_cca_esindex.py -t "JPL" -c "Nutch 1.11-SNAPSHOT" -d crawl_20150410_cca/ -u https://user:pass@localhost:9200/ -i memex-domains -o stuff
#
# If you want verbose logging, turn it on with -v
from tika import parser
from elasticsearch import Elasticsearch
import json
import os
import cbor
import sys
import getopt
_verbose = False
_helpMessage = '''
Usage: memex_cca_esindex [-t <crawl team>] [-c <crawler id>] [-d <cca dir> [-u <url>] [-i <index>] [-o docType]
Operation:
-t --team
The name of the crawler team, e.g. "JPL"
-c --crawlerId
The identifier of the crawler, e.g., "Nutch 1.11-SNAPSHOT"
-d --dataDir
The directory where CCA CBOR JSON files are located.
-u --url
The URL to Elasticsearch. If you need auth, you can use RFC-1738 to specify the url, e.g., https://user:secret@localhost:443
-i --index
The Elasticsearch index, e.g., memex-domains, to index to.
-o --docType
The document type e.g., weapons, to index to.
'''
def list_files(dir):
r = []
subdirs = [x[0] for x in os.walk(dir)]
for subdir in subdirs:
files = os.walk(subdir).next()[2]
if (len(files) > 0):
for file in files:
r.append(subdir + "/" + file)
return r
def getContentType(ccaDoc):
for header in ccaDoc["response"]["headers"]:
if header == "Content-Type":
return ccaDoc["response"]["headers"][header]a
return "application/octet-stream"
def indexDoc(url, doc, index, docType):
print "Inexing "+doc["url"]+" to ES at: ["+url+"]"
es = Elasticsearch([url])
res = es.index(index=index, doc_type=docType, body=doc)
print(res['created'])
def esIndex(ccaDir, team, crawler, url, index, docType):
ccaJsonList = list_files(ccaDir)
print "Processing ["+str(len(ccaJsonList))+"] files."
procList=[]
failedList=[]
failedReasons=[]
CDRVersion = 2.0
for f in ccaJsonList:
ccaDoc = None
newDoc = {}
with open(f, 'r') as fd:
try:
c = fd.read()
# fix for no request body out of Nutch CCA
                c = c.replace("\"body\" : null", "\"body\" : \"null\"")
ccaDoc = json.loads(cbor.loads(c), encoding='utf8')
newDoc["url"] = ccaDoc["url"]
newDoc["timestamp"] = ccaDoc["imported"]
newDoc["team"] = team
newDoc["crawler"] = crawler
newDoc["raw_content"] = ccaDoc["response"]["body"]
newDoc["content_type"] = getContentType(ccaDoc)
parsed = parser.from_buffer(newDoc["raw_content"].encode("utf-8"))
newDoc["crawl_data"] = {}
newDoc["crawl_data"]["content"] = parsed["content"]
# CDR version 2.0 additions
newDoc["_id"] = ccaDoc["key"]
newDoc["extracted_metadata"] = parsed["metadata"]
newDoc["extracted_text"] = parsed["content"]
newDoc["version"] = CDRVersion
open("out","w").write(json.dumps(newDoc))
exit()
verboseLog("Indexing ["+f+"] to Elasticsearch.")
indexDoc(url, newDoc, index, docType)
procList.append(f)
except ValueError, err:
failedList.append(f)
failedReasons.append(str(err))
print "Processed "+str(len(procList))+" CBOR files successfully."
print "Failed files: "+str(len(failedList))
if _verbose:
for i in range(len(failedList)):
verboseLog("File: "+failedList[i]+" failed because "+failedReasons[i])
def verboseLog(message):
if _verbose:
print >>sys.stderr, message
class _Usage(Exception):
'''An error for problems with arguments on the command line.'''
def __init__(self, msg):
self.msg = msg
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:],'hvt:c:d:u:i:o:',['help', 'verbose', 'team=', 'crawlerId=', 'dataDir=', 'url=', 'index=', 'docType='])
except getopt.error, msg:
raise _Usage(msg)
if len(opts) == 0:
raise _Usage(_helpMessage)
team=None
crawlerId=None
dataDir=None
url=None
index=None
docType=None
for option, value in opts:
if option in ('-h', '--help'):
raise _Usage(_helpMessage)
elif option in ('-v', '--verbose'):
global _verbose
_verbose = True
elif option in ('-t', '--team'):
team = value
elif option in ('-c', '--crawlerId'):
crawlerId = value
elif option in ('-d', '--dataDir'):
dataDir = value
elif option in ('-u', '--url'):
url = value
elif option in ('-i', '--index'):
index = value
elif option in ('-o', '--docType'):
docType = value
if team == None or crawlerId == None or dataDir == None or url == None or index == None or docType == None:
raise _Usage(_helpMessage)
esIndex(dataDir, team, crawlerId, url, index, docType)
except _Usage, err:
print >>sys.stderr, sys.argv[0].split('/')[-1] + ': ' + str(err.msg)
return 2
if __name__ == "__main__":
sys.exit(main())
| 7,900 |
database.py
|
ArtemAvgustovich/tg_social_credit_bot
| 0 |
2169974
|
import os
import psycopg2
# DB settings and commands
DATABASE_URL = os.getenv('DATABASE_URL')
CREATE_TABLE = """
CREATE TABLE IF NOT EXISTS social_credit (
user_id BIGINT NOT NULL,
chat_id BIGINT NOT NULL,
username VARCHAR,
rating INTEGER,
PRIMARY KEY (user_id, chat_id)
);
"""
ADD_USER_RATING = """
INSERT INTO social_credit (user_id, chat_id, username, rating)
VALUES ({user_id}, {chat_id}, '{username}', {rating})
"""
SELECT_RATING = """
SELECT rating
FROM social_credit
WHERE user_id={user_id} AND chat_id={chat_id}
"""
CHANGE_RATING = """
UPDATE social_credit
SET rating = {rating}
WHERE user_id={user_id} AND chat_id={chat_id}
"""
SHOW_STATS = """
SELECT username, rating
FROM social_credit
WHERE chat_id={chat_id}
"""
connection = psycopg2.connect(DATABASE_URL, sslmode='require')
cursor = connection.cursor()
def setup_table():
cursor.execute(CREATE_TABLE)
connection.commit()
def change_rating(user_id, chat_id, username, delta):
cursor.execute(SELECT_RATING.format(user_id=user_id, chat_id=chat_id))
data = cursor.fetchone()
if data is None:
new_rating = delta
cursor.execute(ADD_USER_RATING.format(user_id=user_id, chat_id=chat_id, username=username, rating=new_rating))
else:
new_rating = data[0]+delta
cursor.execute(CHANGE_RATING.format(user_id=user_id, chat_id=chat_id, rating=new_rating))
connection.commit()
return new_rating
def chat_stats(chat_id):
cursor.execute(SHOW_STATS.format(chat_id=chat_id))
return cursor.fetchall()
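# Minimal usage sketch. It assumes DATABASE_URL points at a reachable Postgres
# instance; the chat/user ids and the username below are made up for illustration.
if __name__ == '__main__':
    setup_table()
    print(change_rating(user_id=42, chat_id=-1001, username='alice', delta=1))
    print(chat_stats(-1001))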
| 1,523 |
11ab.py
|
LaGamma/AOC
| 0 |
2168245
|
from aocd import submit
from copy import deepcopy
#-------------------------------Run once!---------------------
# get data from aocd
#import os
#os.system('del day11.txt')
#os.system('aocd 11 2020 >> day11.txt')
#--------------------------------------------------------------
# ---- simple cubic O(n^3) time complexity / constant O(1) space complexity algorithm ----
#DP = {}
#lines = [0]
#lines.extend(sorted([int(line.strip()) for line in open('day10.txt')]))
#lines.append(max(lines)+3)
def solveb(lines):
next_state, prev_state = None, None
generations = 0
changes = 1
while changes != 0:
changes = 0
prev_state = deepcopy(lines)
next_state = deepcopy(lines)
for i in range(len(lines)):
for j in range(len(lines[i])):
if prev_state[i][j] == '.':
continue
count = 0
for d in ((0,1), (0,-1), (1,0), (-1,0), (-1,-1), (1,1), (1,-1), (-1,1)):
look = [k for k in d]
while (0 <= i+look[0] < len(lines) and 0 <= j+look[1] < len(lines[i])):
if prev_state[i+look[0]][j+look[1]] in ('#', 'L'):
if prev_state[i+look[0]][j+look[1]] == '#':
count += 1
break
look[0] += d[0]
look[1] += d[1]
if count == 0 and prev_state[i][j] == 'L':
next_state[i][j] = '#'
changes += 1
elif count > 4 and prev_state[i][j] == '#':
next_state[i][j] = 'L'
changes += 1
generations += 1
lines = next_state
print(changes)
c = 0
for i in range(len(lines)):
for j in range(len(lines[i])):
if lines[i][j] == '#':
c += 1
return c
def solve(lines):
next_state, prev_state = None, None
generations = 0
changes = 1
while changes != 0:
changes = 0
prev_state = deepcopy(lines)
next_state = deepcopy(lines)
for i in range(len(lines)):
for j in range(len(lines[i])):
if prev_state[i][j] == '.':
continue
count = 0
for d in ((0,1), (0,-1), (1,0), (-1,0), (-1,-1), (1,1), (1,-1), (-1,1)):
if 0 <= i+d[0] < len(lines) and 0 <= j+d[1] < len(lines[i]):
if prev_state[i+d[0]][j+d[1]] == '#':
count += 1
if count == 0 and prev_state[i][j] == 'L':
next_state[i][j] = '#'
changes += 1
elif count > 3 and prev_state[i][j] == '#':
next_state[i][j] = 'L'
changes += 1
generations += 1
lines = next_state
print(changes)
c = 0
for i in range(len(lines)):
for j in range(len(lines[i])):
if lines[i][j] == '#':
c += 1
return c
if __name__ == '__main__':
# read in input (get_data() returns string)
lines = [[x for x in line.strip()] for line in open('day11.txt')]
#submit(sol1(i))
#submit(sol1(i), part="a", day=2, year=2020)
#print(dp(0))
print(solveb(lines))
| 2,737 |
charlie2/tools/audiowidget.py
|
sammosummo/Charlie2
| 5 |
2169807
|
"""Defines a Qt widget containing convenience methods for playing sounds.
"""
from logging import getLogger
from PyQt5.QtMultimedia import QSound
from .debugging import DebuggingWidget
from .paths import get_aud_stim_paths
logger = getLogger(__name__)
class AudioWidget(DebuggingWidget):
def __init__(self, parent=None) -> None:
"""Visual widget.
Not called directly. Serves as a base class for BaseTestWidget, providing
methods for drawing to the GUI.
"""
super(AudioWidget, self).__init__(parent)
inherit = (
"proband_id",
"test_name",
"language",
"fullscreen",
"computer_id",
"user_id",
"platform",
"resumable",
)
self.kwds = {k: v for k, v in self.parent().kwds.items() if k in inherit}
logger.debug(f"initialised {type(self)} with parent={parent}")
# stimuli paths
self.aud_stim_paths = get_aud_stim_paths(self.kwds["test_name"])
# silence
self.silence = QSound(self.aud_stim_paths["silence.wav"])
# 440 pip
self.pip = QSound(self.aud_stim_paths["440.wav"])
# feedback
self.correct = QSound(self.aud_stim_paths["correct.wav"])
self.incorrect = QSound(self.aud_stim_paths["incorrect.wav"])
self.feedback_sounds = [self.incorrect, self.correct]
# other sounds
self.test_over = QSound(self.aud_stim_paths["test_over.wav"])
self.new_block = QSound(self.aud_stim_paths["new_block.wav"])
def play_feeback(self, correct):
"""Play either the correct or incorrect sound."""
sound = self.feedback_sounds[correct]
if not sound.isFinished():
pass
else:
sound.play()
def play_pip(self, sleep):
"""Play a regular pip but optionally sleep while its playing."""
if not self.pip.isFinished():
pass
else:
self.pip.play()
if sleep:
while not self.pip.isFinished():
self.sleep(10)
| 2,117 |
Organizar/escritor.py
|
carlochess/proyectoComplejidad
| 0 |
2170414
|
from random import randint
from os import listdir
from os.path import isfile, join
archivos = sorted([ f for f in listdir("entrada") if isfile(join("entrada",f)) ])
nombre = int(archivos[-1])+1
f = open("entrada/"+str(nombre),'w')
nCajas = randint(2,6)
f.write(str(nCajas)+"\n")
vMaleta = randint(2,200)
pMaleta = randint(2,200)
f.write(str(vMaleta)+" "+str(pMaleta)+"\n")
for i in range(nCajas):
nCaja = i
vCaja = randint(1,vMaleta)
pCaja = randint(1,pMaleta)
f.write(str(nCaja)+" "+str(vCaja)+" "+str(pCaja)+"\n")
f.close()
| 533 |
python/same_structure_as.py
|
TechieHelper/Codewars
| 0 |
2168204
|
def same_structure_as(original, other):
    # recursively compare nesting structure: two lists match only if they have
    # the same length and their elements match pairwise; two non-lists match
    if isinstance(original, list) and isinstance(other, list):
        return len(original) == len(other) and all(
            same_structure_as(a, b) for a, b in zip(original, other))
    return not isinstance(original, list) and not isinstance(other, list)
print(same_structure_as([ 1, [ 1, 1 ] ], [ 2, [ 2, 2 ] ]))
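# Extra illustrative checks for the recursive comparison above (both are expected
# to print False):
print(same_structure_as([1, [1, 1]], [[2, 2], 2]))      # nesting differs
print(same_structure_as([1, [1, 1]], [2, [2, 2, 2]]))   # inner lengths differ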
| 173 |
August Long Challenge/triplets.py
|
dibyanshushekhardey/Coding-Practice
| 0 |
2170300
|
t = int(input())
for i in range(0, t):
n = int(input())
sum = 0
#c = 1
#b = c
#a = c
for c in range(1, n + 1):
for b in range(c, n+1, c):
for a in range(c, n+1, b):
if(a % b == c and b % c == 0):
sum += 1
print(sum)
| 300 |
Node.py
|
ze-phyr-us/tcviz
| 75 |
2170121
|
# tcviz 1.2
#
# Licensed under the terms of the MIT/X11 license.
# Copyright (c) 2009-2013 <NAME> <http://ze.phyr.us>
import textwrap
from Filter import Filter
from Id import Id
class Node:
def __init__(self, spec=None):
self._id = None
self._nodeType = None
self._parent = None
self._type = None
self._params = []
if spec is not None:
self.parseSpec(spec)
def parseSpec(self, spec):
spec = spec.split(' ')
self._nodeType = spec.pop(0)
self._type = spec.pop(0)
self._id = Id(spec.pop(0))
if spec.pop(0) == 'parent':
self._parent = Id(spec.pop(0))
else:
self._parent = Id("{}:".format(self._id._major)) if self._nodeType == 'class' else None
self._params = self._filterParams(spec)
def _filterParams(self, spec):
params = []
while spec:
item = spec.pop(0)
if item == 'leaf': # remove unwanted "leaf" specs
spec.pop(0)
else:
params.append(item)
return params
def getParent(self):
return self._parent
def getNodeSpec(self):
desc = '<br/>'.join(textwrap.wrap(' '.join(self._params), 30)) or ' '
label = '<font color="blue">%s</font><br/>%s<br/><font point-size="10">%s</font>' % (
self._id, self._type, desc)
shape = 'box' if self._nodeType == 'qdisc' else 'ellipse'
return '"%s" [ label = <%s>, shape = "%s" ];' % (self._id, label, shape)
def getEdgeSpec(self):
ret = ''
if self._parent:
ret = '"%s" -> "%s" [ arrowhead = "none", arrowtail = "normal", dir = "both"];' % (self._parent, self._id)
if self._nodeType == 'qdisc' and 'default' in self._params:
dcls_minor = self._params[self._params.index('default') + 1].lstrip('0x')
ret += '\n' + Filter(' {0}: default classid {0}:{1}'.format(self._id._major, dcls_minor)).getEdgeSpec()
return ret
| 2,028 |
labs/lab05/lab05Tests.py
|
tntptntp/CMPTGCS-20
| 0 |
2169361
|
# lab05Tests.py Tests for lab05, UCSB CS20, <NAME>, 04/26/2016
import unittest
from lab05Funcs import *
class TestLab05Functions(unittest.TestCase):
# tests for isList
def test_isList1(self):
self.assertEqual( isList(3), False)
def test_isList2(self):
self.assertEqual( isList([3]), True)
def test_isList3(self):
self.assertEqual( isList([5,10,15,20]), True)
def test_isList4(self):
self.assertEqual( isList("foo"), False)
def test_isList5(self):
self.assertEqual( isList(["John","Paul","Ringo","George"]), True)
def test_isList6(self):
self.assertEqual( isList([]), True)
# tests for largestInt
def test_largestInt_1(self):
self.assertEqual( largestInt([]), False)
def test_largestInt_2(self):
self.assertEqual( largestInt('foo'), False)
def test_largestInt_3(self):
self.assertEqual( largestInt([3,5,4.5,6]), False)
def test_largestInt_4(self):
self.assertEqual( largestInt([4]), 4)
def test_largestInt_5(self):
self.assertEqual( largestInt([-9,4,7,8,2]), 8)
# tests for indexOfLargestInt
def test_indexOfLargestInt_1(self):
self.assertEqual( indexOfLargestInt([]), False)
def test_indexOfLargestInt_2(self):
self.assertEqual( indexOfLargestInt('foo'), False)
def test_indexOfLargestInt_3(self):
self.assertEqual( indexOfLargestInt([3,5,4.5,6]), False)
def test_indexOfLargestInt_4(self):
self.assertEqual( indexOfLargestInt([40]), 0)
def test_indexOfLargestInt_5(self):
self.assertEqual( indexOfLargestInt([-90,40,70,80,20]), 3)
def test_indexOfLargestInt_6(self):
self.assertEqual( indexOfLargestInt([10,30,50,20,50]), 2)
# tests for indexOfSmallestInt
def test_indexOfSmallestInt_1(self):
self.assertEqual( indexOfSmallestInt([]), False )
def test_indexOfSmallestInt_2(self):
self.assertEqual( indexOfSmallestInt('foo'), False )
def test_indexOfSmallestInt_3(self):
self.assertEqual( indexOfSmallestInt([3,5,4.5,6]), False )
def test_indexOfSmallestInt_4(self):
self.assertEqual( indexOfSmallestInt([40]), 0 )
def test_indexOfSmallestInt_5(self):
self.assertEqual( indexOfSmallestInt([20,-90,40,70,80]), 1 )
def test_indexOfSmallestInt_6(self):
self.assertEqual( indexOfSmallestInt([50,30,10,30,50,10]), 2 )
def test_indexOfSmallestInt_7(self):
self.assertEqual( indexOfSmallestInt([50,30,10,30,50,-10]), 5 )
# tests for longestString
def test_longestString_1(self):
self.assertEqual( longestString([]), False)
def test_longestString_2(self):
self.assertEqual( longestString('foo'), False )
def test_longestString_3(self):
self.assertEqual( longestString(['foo']), 'foo' )
def test_longestString_4(self):
self.assertEqual( longestString(['bear','cat','dog','mouse']), 'mouse' )
def test_longestString_5(self):
self.assertEqual( longestString(['cat','wolf','bear','dog']), 'wolf' )
# tests for indexOfShortestString
def test_indexOfShortestString_1(self):
self.assertEqual( indexOfShortestString([]), False )
def test_indexOfShortestString_2(self):
self.assertEqual( indexOfShortestString('foo'), False )
def test_indexOfShortestString_3(self):
self.assertEqual( indexOfShortestString(['foo']), 0 )
def test_indexOfShortestString_4(self):
self.assertEqual( indexOfShortestString(['bear','cat','dog','mouse']), 1 )
# tests for smallestInt
def test_smallestInt_1(self):
self.assertEqual( smallestInt([]), False )
def test_smallestInt_2(self):
self.assertEqual( smallestInt('foo'), False )
def test_smallestInt_3(self):
self.assertEqual( smallestInt([40]), 40 )
def test_smallestInt_4(self):
self.assertEqual( smallestInt([1,2,3,4,5,6]), 1 )
def test_smallestInt_5(self):
self.assertEqual( smallestInt([50,30,10,30,50,10]), 10 )
def test_smallestInt_6(self):
self.assertEqual( smallestInt([20,-90,40,70,80]), -90 )
# End of tests for lab05
def runTestsWithPrefix(testFile,prefix):
"""
run only tests from testFile with a certain prefix
Example: runTestsWithPrefix("lab03Tests.py","test_isPrimaryColor")
"""
loader = unittest.TestLoader()
loader.testMethodPrefix = prefix
suite = loader.discover('.', pattern = testFile)
unittest.TextTestRunner(verbosity=2).run(suite)
# When you run this file, it runs either ALL the tests, or
# just some tests. It depends on which line you comment out (or not)
if __name__ == '__main__':
# To run ALL tests, uncomment the "unittest.main(exit=False)" line
unittest.main(exit=False)
# Uncomment "runTestsWithPrefix" line to run just SOME tests
# First parameter is name of file with tests
# Second parameter is prefix starting with test_
# such as test_FtoC or test_isString
# runTestsWithPrefix("lab05Tests.py","test_indexOfSmallestInt")
| 5,180 |
src/worker/celery/src/worker.py
|
furea2/ProofGame
| 0 |
2169437
|
import os
from celery import Celery
CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL")
CELERY_BACKEND_URL = os.getenv("CELERY_BACKEND_URL")
celery = Celery("celery", broker=CELERY_BROKER_URL, backend=CELERY_BACKEND_URL)
| 224 |
mailsend.py
|
Jacopx/automailsender
| 2 |
2169546
|
import smtplib
import datetime
import time
import getpass
hs=-1
ts=-1
ss=-1
print ('Choose the send time')
# Choose the send time
while hs>23 or hs<0:
hs = int(input('Hours: '))
while ts>59 or ts<0:
ts = int(input('Minute: '))
while ss>59 or ss<0:
ss = int(input('Second: '))
print ('The message will be sent at: {0}:{1}.{2}'.format(hs,ts,ss))
while True:
username = input('Sender mail (omit <EMAIL>): ')
username = ('<EMAIL>' % username)
password = getpass.getpass('Password for ' + username + ': ')
try:
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(username, password)
break
except:
print ('Wrong login, retry:')
print ('*---------------*')
print ('| Correct login |')
print ('*---------------*')
# Destination, Subject and Text
TO = input('Recipient: ')
SUBJECT = input('Subject: ')
TEXT = input('Text: ')
# Merging message
MSG = ("""From: {0}\nTo: {1}\nSubject: {2}\n\n{3}""".format(username, TO, SUBJECT, TEXT))
force=False
# Start countdown
while True:
isnow = datetime.datetime.now() #get time
if isnow.hour == hs and isnow.minute == ts and isnow.second == ss or force == True:
hold = datetime.datetime.now()
try: # try to send
server.sendmail(username, TO, MSG)
server.quit()
issent = datetime.datetime.now()
delay = issent - hold
print ('*----------------------------------------------------------------------*')
print ('| Successfully sent the mail at {0}:{1}.{2} with {3} s of delay |'.format(isnow.hour, isnow.minute, isnow.second, delay))
print ('*----------------------------------------------------------------------*\n')
break
except: # force mode in case of fail to send
print ('Failed to send mail from {0} at {1}:{2}.{3}\n FORCE SEND ACTIVATED!'.format(username, isnow.hour, isnow.minute, isnow.second))
force=True
else: # waiting for the right time
print ('Not sent yet, it is {0}:{1}.{2}'.format(isnow.hour, isnow.minute, isnow.second))
time.sleep(0.05)
| 2,158 |
apache-site-conf.py
|
btr1975/apache-py2-django
| 1 |
2170347
|
#!/usr/bin/env python
import os
import subprocess as sub
__author__ = '<NAME>'
__copyright__ = "Copyright (c) 2017, <NAME>"
__credits__ = None
__license__ = 'The MIT License (MIT)'
__status__ = 'prod'
__version_info__ = (1, 0, 1, __status__)
__version__ = '.'.join(map(str, __version_info__))
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
def create_conf(folder_name, dir_count):
"""
Function to create apache conf files for loaded sites
Currently only non-SSL Sites
:param
folder_name: The folder name of the site
dir_count: Count of dirs
:return:
"""
temp_list = list()
temp_list.append('<VirtualHost *:80>')
if os.environ.get('SITE_SERVER_NAME') and dir_count == 1:
temp_list.append(' ServerName %s' % (os.environ.get('SITE_SERVER_NAME'),))
else:
temp_list.append(' # ServerName www.example.com')
if os.environ.get('SITE_SERVER_ADMIN') and dir_count == 1:
temp_list.append(' ServerAdmin %s' % (os.environ.get('SITE_SERVER_ADMIN'),))
else:
temp_list.append(' ServerAdmin webmaster@localhost')
temp_list.append('')
temp_list.append(' Alias /media/ /DjangoSites/%s/media/' % (folder_name,))
temp_list.append('')
temp_list.append(' <Directory /DjangoSites/%s/media>' % (folder_name,))
temp_list.append(' Order deny,allow')
temp_list.append(' Require all granted')
temp_list.append(' </Directory>')
temp_list.append('')
temp_list.append(' Alias /static/ /DjangoSites/%s/static/site/' % (folder_name,))
temp_list.append('')
temp_list.append(' <Directory /DjangoSites/%s/static/site>' % (folder_name,))
temp_list.append(' Order deny,allow')
temp_list.append(' Require all granted')
temp_list.append(' </Directory>')
temp_list.append('')
temp_list.append(' WSGIScriptAlias / /DjangoSites/%s/apache/django.wsgi process-group=%s' % (folder_name,
folder_name))
temp_list.append(' WSGIDaemonProcess %s' % (folder_name,))
temp_list.append(' WSGIProcessGroup %s' % (folder_name,))
temp_list.append('')
temp_list.append(' <Directory /DjangoSites/%s/apache>' % (folder_name,))
temp_list.append(' Order deny,allow')
temp_list.append(' Require all granted')
temp_list.append(' </Directory>')
temp_list.append('')
temp_list.append(' # Available loglevels: trace8, ..., trace1, debug, info, notice, warn,')
temp_list.append(' # error, crit, alert, emerg.')
temp_list.append(' # It is also possible to configure the loglevel for particular')
temp_list.append(' # modules, e.g.')
temp_list.append(' LogLevel info')
temp_list.append('')
temp_list.append(' ErrorLog ${APACHE_LOG_DIR}/error.log')
temp_list.append(' CustomLog ${APACHE_LOG_DIR}/access.log combined')
temp_list.append('')
temp_list.append('</VirtualHost>')
temp_list.append('')
temp_list.append('# vim: syntax=apache ts=4 sw=4 sts=4 sr noet')
def output_file(orig_list, file_name):
"""
Function to output the file
:param orig_list:
:param file_name:
:return: file_name
"""
file_name = '%s.conf' % ('-'.join(file_name.split()).lower(), )
def add_line_break(list_line):
"""
Create a line break at the end of a string
Args:
list_line: string
Returns: A string with a line break
"""
list_line = ('%s\n' % (list_line,))
return list_line
write_file = open('/etc/apache2/sites-available/%s' % (file_name, ), "a")
for orig_list_line in orig_list:
write_file.write(add_line_break(str(orig_list_line)))
write_file.close()
return file_name
site_to_enable = output_file(temp_list, folder_name)
sub.call('a2ensite %s > /dev/null' % (site_to_enable,), stderr=sub.STDOUT, shell=True)
def count_dir():
"""
Function to count how many dirs in a dir
:return:
An integer of how many dirs
"""
temp_list_count = list()
for item_name in os.listdir(os.getcwd()):
if os.path.isdir(item_name):
temp_list_count.append(item_name)
return len(temp_list_count)
def main():
"""
Main script function
:return: None
"""
dir_count = count_dir()
if os.environ.get('SITE_DIRECTORY_NAME'):
create_conf(os.environ.get('SITE_DIRECTORY_NAME'), 1)
else:
for item_name in os.listdir(os.getcwd()):
if os.path.isdir(item_name):
create_conf(item_name, dir_count)
sub.call('a2dissite 000-default.conf > /dev/null', stderr=sub.STDOUT, shell=True)
main()
| 4,850 |
src/metaproj/rules/__init__.py
|
KGerring/metaproj
| 2 |
2170226
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# from __future__ import annotations # isort:skip
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from fixit.common.utils import import_to_namespace
rules = {}
import_to_namespace(locals(), __name__, __path__)
| 423 |
cannabis_reports/tests/test_apis_strains.py
|
kwaaak/python-cannabis-reports
| 13 |
2170309
|
# -*- coding: utf-8 -*-
# Copyright 2017-TODAY LasLabs Inc.
# License MIT (https://opensource.org/licenses/MIT).
from .api_common import recorder
from .api_product import ApiProductAbstract
from ..models.strain import Strain
class TestApisStrains(ApiProductAbstract):
"""Tests the Strains API endpoint."""
UID = 'VUJCJ4TYMG000000000000000'
def setUp(self):
super(TestApisStrains, self).setUp()
self.endpoint = self.api.Strains
@recorder.use_cassette()
def test_apis_strains_list(self):
"""It should parse the response and return the proper object."""
self._test_apis_objects_list(Strain)
@recorder.use_cassette()
def test_apis_strains_get(self):
"""It should return the proper singleton."""
self._test_apis_objects_get('<NAME>')
@recorder.use_cassette()
def test_apis_strains_get_user(self):
"""It should return the proper user singleton."""
self._test_apis_objects_get_user('David')
@recorder.use_cassette()
def test_apis_strains_get_review(self):
"""It should return the reviews."""
self._test_apis_objects_get_review()
@recorder.use_cassette()
def test_apis_strains_get_effects_flavors(self):
"""It should return the effect & flavor profile."""
self._test_apis_objects_get_effects_flavors()
@recorder.use_cassette()
def test_apis_strains_get_available(self):
"""It should return the menu items."""
self._test_apis_objects_get_available()
@recorder.use_cassette()
def test_apis_strains_search(self):
"""It should parse the response and return the proper objects."""
self._test_apis_objects_search('Blue', Strain)
@recorder.use_cassette()
def test_apis_strains_get_seed_company(self):
"""It should return the seed company."""
seed_company = self.api.Strains.get_seed_company(
'VUJCJ4TYMG000000000000000',
)
self.assertEqual(seed_company.name, 'Sensi Seeds')
@recorder.use_cassette()
def test_apis_strains_get_genetics(self):
"""It should return the parent strains."""
genetics = self.api.Strains.get_genetics('CYGU94JYKY000000000000000')
found_parent = False
for genetic in genetics:
self.assertIsInstance(genetic, Strain)
if genetic.name == 'Afghani No. 1':
found_parent = True
self.assertTrue(found_parent)
@recorder.use_cassette()
def test_apis_strains_get_children(self):
"""It should return the child strains."""
children = self.api.Strains.get_children('VUJCJ4TYMG000000000000000',
limit=self.LIMIT_PAGE)
found_child = False
for child in children:
self.assertIsInstance(child, Strain)
if child.name == '<NAME>':
found_child = True
self.assertTrue(found_child)
| 2,956 |
tests/book/ch08/demo_plane.py
|
xchaoinfo/pyhanlp
| 2,792 |
2168193
|
# -*- coding:utf-8 -*-
# Author:hankcs
# Date: 2018-07-29 23:24
# "Introduction to Natural Language Processing" (《自然语言处理入门》), Section 8.6: named entity recognition for custom domains
# Companion book: http://nlp.hankcs.com/book.php
# Discussion and Q&A: https://bbs.hankcs.com/
from tests.book.ch05.perceptron_cws import CWSTrainer
from tests.book.ch07.demo_hmm_pos import AbstractLexicalAnalyzer, PerceptronSegmenter
from tests.book.ch07.demo_perceptron_pos import PerceptronPOSTagger
from tests.book.ch08.demo_sp_ner import NERTrainer, os, PerceptronNERecognizer
from tests.test_utility import ensure_data
PLANE_ROOT = ensure_data("plane-re", "http://file.hankcs.com/corpus/plane-re.zip")
PLANE_CORPUS = os.path.join(PLANE_ROOT, 'train.txt')
PLANE_MODEL = os.path.join(PLANE_ROOT, 'model.bin')
if __name__ == '__main__':
trainer = NERTrainer()
trainer.tagSet.nerLabels.clear()  # do not recognize nr, ns, nt
trainer.tagSet.nerLabels.add("np")  # the goal is to recognize np
recognizer = PerceptronNERecognizer(trainer.train(PLANE_CORPUS, PLANE_MODEL).getModel())
# before NER prediction a word segmenter is needed, ideally trained on the same corpus
CWS_MODEL = CWSTrainer().train(PLANE_CORPUS, PLANE_MODEL.replace('model.bin', 'cws.bin')).getModel()
analyzer = AbstractLexicalAnalyzer(PerceptronSegmenter(CWS_MODEL), PerceptronPOSTagger(), recognizer)
print(analyzer.analyze("米高扬设计米格-17PF:米格-17PF型战斗机比米格-17P性能更好。"))
print(analyzer.analyze("米格-阿帕奇-666S横空出世。"))
| 1,290 |
intel_ecs_wiki_search.py
|
kundansaha82/WikiChatbot
| 0 |
2169722
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 13:25:29 2021
@author: kundankantisaha
"""
from elasticsearch import Elasticsearch
from os import popen
"""import logging"""
import subprocess
from lemma_tokenizer import Splitter as Splitter
from lemma_tokenizer import LemmatizationWithPOSTagger as LemmatizationWithPOSTagger
import json
import lemma_tokenizer
from wiki_reader import *
def start_cluster():
subprocess.Popen('C:\\Users\\kundansa\\Downloads\\elasticsearch-7.10.2\\bin\\elasticsearch.bat')
"""time.sleep(15)"""
def connect_elasticsearch():
es = None
es = Elasticsearch([{'host': '127.0.0.1', 'port': '9200'}])
if es.ping():
print('Yay Connect')
else:
print('Awww it could not connect!')
return es
source_link = ""
keywordoriginal = ""
start_cluster()
es = connect_elasticsearch()
looping_condition = True
while looping_condition:
found_terms = list()
input_string = input("Enter your query/ enter '-1' to exit :")
if ("-1" == input_string):
looping_condition = False
continue
lemma_tokenizer.input_string = input_string
splitter = Splitter()
lemmatization_using_pos_tagger = LemmatizationWithPOSTagger()
#step 1 split document into sentence followed by tokenization
tokens = splitter.split(input_string)
#step 2 lemmatization using pos tagger
lemma_pos_token = lemmatization_using_pos_tagger.pos_tag(tokens)
with open('input_tokens.json', 'w') as f:
for lemmaset in lemma_pos_token:
for i in range(len(lemmaset)):
data = {}
data["words"] = []
data["words"].append({
"Original Word": lemmaset[i][0],
"Lemmatized Word": lemmaset[i][1],
"POS Tag": lemmaset[i][2]
})
json.dump(data, f)
string_found_terms = ""
allowed_pos_tags = [["NNP"],["NNS"],["NN"],["VB"],["NNPS"],["CD"],["VBD"],["VBN"]]
for lemmaset in lemma_pos_token:
for i in range(len(lemmaset)):
if lemmaset[i][2] in allowed_pos_tags:
found_terms.append(lemmaset[i][1])
string_found_terms = ' '.join(found_terms)
search_param = {
"query": {
"simple_query_string" : {
"query": string_found_terms,
"fields": ["title", "heading"],
"default_operator": "and"
}
}
}
res = es.search(index="wikifinal", body=search_param)
"""print("%d documents found" % res['hits']['total'])"""
data = [doc for doc in res['hits']['hits']]
resulting_search = ""
for doc in data:
resulting_search = doc['_source']['heading']
original_search_phrase = doc['_source']['keywordoriginal']
print("")
print("%s" % original_search_phrase)
keywordoriginal = original_search_phrase
lemma_tokenizer.input_string = resulting_search
splitter = Splitter()
lemmatization_using_pos_tagger = LemmatizationWithPOSTagger()
tokens_out = splitter.split(resulting_search)
lemma_pos_token_out = lemmatization_using_pos_tagger.pos_tag(tokens_out)
with open('output_tokens.json', 'w') as f:
for lemmaset_out in lemma_pos_token_out:
for i in range(len(lemmaset_out)):
data = {}
data["words"] = []
data["words"].append({
"Original Word": lemmaset_out[i][0],
"Lemmatized Word": lemmaset_out[i][1],
"POS Tag": lemmaset_out[i][2]
})
json.dump(data, f)
source_link = (doc['_source']['link'])
print_con(source_link,keywordoriginal,string_found_terms)
print("%s" % source_link)
| 4,131 |
bot_files/src/slack_bot.py
|
cybera/twitter_slack_bot
| 2 |
2169776
|
import os
import time
import logging
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
def auth_slack():
return os.environ.get("SLACK_BOT_TOKEN")
def post_message_to_slack(slack_client, msg, attachments=None, channel_id=None):
try:
if channel_id:
slack_client.chat_postMessage(
channel=channel_id,
text=msg,
attachments=attachments,
)
else:
slack_client.chat_postMessage(
channel=os.environ.get("SLACK_CHANNEL_NAME"),
text=msg,
attachments=attachments,
)
except SlackApiError as e:
# You will get a SlackApiError if "ok" is False
assert e.response["ok"] is False
assert e.response["error"] # str like 'invalid_auth', 'channel_not_found'
print(f"Got an error: {e.response['error']}")
def slackbot(msg, attachments=None, channel_id=None):
slack_bot_token = auth_slack()
slack_client = WebClient(slack_bot_token)
post_message_to_slack(slack_client, msg, attachments, channel_id)
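# Usage sketch: posting a test message. This assumes SLACK_BOT_TOKEN (and, when no
# channel_id is passed, SLACK_CHANNEL_NAME) are set in the environment; the message
# text is purely illustrative.
if __name__ == "__main__":
    slackbot("Test message from the Twitter/Slack bot")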
| 1,126 |
add.py
|
shyed2001/Python_Programming
| 2 |
2169875
|
print("""c=5
while c<10:
c=c+1
print(c)
= """)
c=5
while c<10:
c=c+1
print(c)
print ("print (c) = " , c)
print(""" c=5
while c<10:
print(c)
c=c+1 """)
c=5
while c<10:
print(c)
c=c+1
print ("print (c) = " , c)
print(""" The code stops printing when
the value of c becomes 10, so the condition c<10 is no longer true.
""")
print(''' তুমি কি করছো
তুমি কি ভাল
চমৎকার গাড়ী
তুমি কোথায় থাকো
দেখা হবে
যখন আমি তোমাকে দেখি''' )
| 433 |
setup.py
|
mdsmith/pbcoretools
| 1 |
2170166
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, Extension, find_packages
import os.path
import sys
REQUIREMENTS_TXT = "requirements.txt"
if ("install" in sys.argv) and sys.version_info < (2, 7, 0):
print "pbcoretools requires Python 2.7"
sys.exit(-1)
globals = {}
execfile("pbcoretools/__init__.py", globals)
__VERSION__ = globals["__VERSION__"]
def _get_local_file(file_name):
return os.path.join(os.path.dirname(__file__), file_name)
def _get_requirements(file_name):
with open(file_name, 'r') as f:
reqs = [line for line in f if not line.startswith("#")]
return reqs
def _get_local_requirements(file_name):
return _get_requirements(_get_local_file(file_name))
setup(
name = 'pbcoretools',
version=__VERSION__,
author='<NAME>',
author_email='<EMAIL>',
description="Python CLI tools and add-ons for reading and writing PacBio® data files",
license=open('LICENSES.txt').read(),
packages = find_packages('.'),
package_dir = {'':'.'},
package_data = {'pbcoretools': ['data/*.h5', 'data/*.gff', 'data/*.fasta',
'data/*.fasta.fai', 'data/*.fofn', 'data/*.m4',
'data/*.fa', 'data/*.fa.fai',
'data/*.m5', 'data/*.bam', 'data/*.bam.bai', "data/*.bam.pbi",
'chemistry/resources/*.xml',
'data/datasets/*.*',
'data/datasets/yieldtest/*.*']
},
zip_safe = False,
entry_points = {"console_scripts": [
"dataset = pbcoretools.dataset:main",
'pbvalidate = pbcoretools.pbvalidate.main:main',
'bamSieve = pbcoretools.bamSieve:main',
]},
install_requires=_get_local_requirements(REQUIREMENTS_TXT),
test_requires=("pbtestdata",))
| 1,878 |
tests.py
|
imsweb/django-pzip-storage
| 0 |
2168380
|
import os
import tempfile
import unittest
from unittest.mock import MagicMock
import pzip
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from pzip_storage import PZipStorage, bad_keys, needs_encryption, needs_rotation
class CompressedStorageTests(unittest.TestCase):
def setUp(self):
self.storage = PZipStorage()
def test_improper_config(self):
with self.assertRaises(ImproperlyConfigured):
PZipStorage(keys=[])
# PZipStorage does not check callables at creation time.
storage = PZipStorage(keys=lambda: [])
with self.assertRaises(ImproperlyConfigured):
storage.save("testfile", ContentFile(b"test data"))
def test_round_trip(self):
plaintext = b"Hello world!"
name = self.storage.save("hello.txt", ContentFile(plaintext))
with self.storage.open(name) as f:
self.assertEqual(plaintext, f.read())
self.assertEqual(len(plaintext), self.storage.size(name))
self.assertTrue(self.storage.exists(name))
self.storage.delete(name)
self.assertFalse(self.storage.exists(name))
def test_multiple_keys(self):
plaintext = (
b"Answer to the Ultimate Question of Life, The Universe, and Everything."
)
keys = [b"first"]
handler = MagicMock()
needs_rotation.connect(handler, sender=PZipStorage)
storage = PZipStorage(keys=lambda: keys)
name = storage.save("testfile", ContentFile(plaintext))
keys.insert(0, b"second")
with storage.open(name) as f:
self.assertEqual(plaintext, f.read())
handler.assert_called_once_with(
signal=needs_rotation,
sender=PZipStorage,
storage=storage,
name=name,
key=b"first",
)
storage.delete(name)
def test_no_compression(self):
name = self.storage.save("test.jpg", ContentFile(b"JPEG data"))
with self.storage.open(name) as f:
self.assertIsInstance(f, pzip.PZip)
self.assertEqual(f.compression, pzip.Compression.NONE)
def test_unencrypted(self):
handler = MagicMock()
needs_encryption.connect(handler, sender=PZipStorage)
self.assertEqual(self.storage.size("unencrypted"), 11)
with self.storage.open("unencrypted") as f:
self.assertNotIsInstance(f, pzip.PZip)
self.assertEqual(f.read(), b"hello world")
handler.assert_called_once_with(
signal=needs_encryption,
sender=PZipStorage,
storage=self.storage,
name="unencrypted",
)
def test_bad_keys(self):
handler = MagicMock()
bad_keys.connect(handler, sender=PZipStorage)
with self.storage.open("encrypted" + PZipStorage.DEFAULT_EXTENSION) as f:
self.assertNotEqual(f.read(), b"unrecoverable data")
handler.assert_called_once_with(
signal=bad_keys,
sender=PZipStorage,
storage=self.storage,
name="encrypted" + PZipStorage.DEFAULT_EXTENSION,
)
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tempdir:
# Write a pre-existing unencrypted file to the storage root.
with open(os.path.join(tempdir, "unencrypted"), "wb") as f:
f.write(b"hello world")
# Write a pre-existing encrypted file (with a random key) to the storage root.
random_key = os.urandom(32)
with pzip.open(
os.path.join(tempdir, "encrypted" + PZipStorage.DEFAULT_EXTENSION),
"wb",
key=random_key,
) as f:
f.write(b"unrecoverable data")
# Set up Django settings to have a stable SECRET_KEY and MEDIA_ROOT.
settings.configure(SECRET_KEY=os.urandom(32), MEDIA_ROOT=tempdir)
unittest.main()
| 4,038 |
tests/test_ibdb.py
|
bendichter/brainrender
| 1 |
2169919
|
import pytest
from brainrender.atlases.custom_atlases.insects_brains_db import IBDB
from brainrender.scene import Scene
@pytest.fixture
def scene():
return Scene(
atlas=IBDB, # specify that we are using the insects brains databse atlas
atlas_kwargs=dict(
species="Schistocerca gregaria"
), # Specify which insect species' brain to use
)
def test_ibdb(scene):
print(scene.atlas.species_info)
# Add some brain regions in the mushroom body to the rendering
central_complex = [
"CBU-S2",
"CBU-S1",
"CBU-S3",
"NO-S3_left",
"NO-S2_left",
"NO-S2_right",
"NO-S3_right",
"NO_S1_left",
"NO-S1_right",
"NO-S4_left",
"NO-S4_right",
"CBL",
"PB",
]
scene.add_brain_regions(central_complex, alpha=1)
scene.add_brain_regions("PB", colors="red", use_original_color=False)
scene.add_brain_regions("PB", colors=["red"], use_original_color=False)
def test_root(scene):
scene.atlas.make_root_mesh()
| 1,068 |
utils/lr_scheduler.py
|
alwc/fashionAI-keypoints-detection-pytorch
| 7 |
2167351
|
class LRScheduler():
def __init__(self, base_lr, epochs=None, patience=3, factor=0.1, min_lr=1e-7, best_loss=float('inf')):
self.patience = patience
self.base_lr = base_lr
self.epochs = epochs # list
self.factor = factor
self.min_lr = min_lr
self.current_lr = base_lr
self.best_loss = best_loss
self.tolerence = 0
def update_by_rule(self, current_loss):
if current_loss <= self.best_loss:
self.best_loss = current_loss
self.tolerence = 0
else:
self.tolerence += 1
if self.tolerence >= self.patience:
tmp_lr = self.current_lr * self.factor
if tmp_lr >= self.min_lr:
self.current_lr = tmp_lr
self.tolerence = 0
else:
return None
return self.current_lr
def update_by_iter(self, current_epoch):
if current_epoch == self.epochs[-1]:
return None
p = 0
for k, e in enumerate(self.epochs[:-1]):
if current_epoch >= e:
p = k + 1
self.current_lr = self.base_lr * (self.factor**p)
return self.current_lr
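# Small self-contained demonstration of update_by_rule. The loss values below are
# made up for illustration and do not come from any real training run.
if __name__ == '__main__':
    scheduler = LRScheduler(base_lr=0.01, patience=2, factor=0.1, min_lr=1e-4)
    for val_loss in [0.9, 0.8, 0.85, 0.86, 0.87, 0.88]:
        lr = scheduler.update_by_rule(val_loss)
        print('loss=%.2f -> lr=%s' % (val_loss, lr))
        if lr is None:
            break  # the next decay would fall below min_lr; the caller should stop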
| 1,267 |
examples/ok-with-mkdocstrings/module.py
|
gwhitney/mkdocs-simple-plugin
| 15 |
2169580
|
"""md
## Python Version
You can put _markdown_ in triple-quoted strings in Python.
You can even combine it with mkdocstrings to automatically generate your source
documentation!
::: module.main
handler: python
rendering:
show_root_heading: true
show_source: false
heading_level: 3
"""
def main():
"""Test function which takes no parameters.
It says "Hello, world!"
"""
print("Hello, world!")
return 0
| 460 |
python/clean_history.py
|
bopopescu/Heimdallr
| 0 |
2170062
|
#!/usr/bin/env python
import os
import sys
import string
import time
import datetime
import traceback
import MySQLdb
import pymssql
import logging
import logging.config
logging.config.fileConfig("etc/logger.ini")
logger = logging.getLogger("clean_history")
path='./include'
sys.path.insert(0,path)
import functions as func
from multiprocessing import Process;
his_retention = func.get_config('common','his_retention')
######################################################################################################
# function clean_history_data
######################################################################################################
def clean_history_data():
try:
logger.info("Clean mysql history data start.")
func.mysql_exec("delete from mysql_status_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from mysql_dr_p_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from mysql_dr_s_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from mysql_bigtable_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
#func.mysql_exec("delete from mysql_slow_query_review_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
logger.info("Clean mysql history data finished.")
logger.info("Clean oracle history data start.")
func.mysql_exec("delete from oracle_status_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from oracle_tablespace_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from oracle_diskgroup_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from oracle_dg_p_status_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from oracle_dg_s_status_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from oracle_dg_process_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from oracle_redo where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from oracle_db_time where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from oracle_session where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from oracle_flashback where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
logger.info("Clean oracle history data finished.")
logger.info("Clean sqlserver history data start.")
func.mysql_exec("delete from sqlserver_status_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from sqlserver_mirror_p_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from sqlserver_mirror_s_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
logger.info("Clean sqlserver history data finished.")
logger.info("Clean os history data start.")
func.mysql_exec("delete from os_status_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from os_disk_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from os_diskio_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
func.mysql_exec("delete from os_net_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
logger.info("Clean os history data finished.")
logger.info("Clean alert history data start.")
func.mysql_exec("delete from alerts_his where create_time < date_add(now(), interval -%s day);" %(his_retention),'')
logger.info("Clean alert history data finished.")
except Exception, e:
logger.error('traceback.format_exc():\n%s' % traceback.format_exc())
#print 'traceback.print_exc():'; traceback.print_exc()
#print 'traceback.format_exc():\n%s' % traceback.format_exc()
func.mysql_exec("rollback;",'')
sys.exit(1)
finally:
pass
def main():
# Clean history data
logger.info("Clean history data start.")
clean_history_data()
logger.info("Clean history data finished.")
if __name__=='__main__':
main()
| 4,912 |
entsoe_client/Queries/Transmission/__init__.py
|
DarioHett/entsoe-client
| 1 |
2167485
|
from entsoe_client.Queries.Transmission.Transmission import (
Transmission,
ExpansionDismantlingProjects,
ForecastedCapacity,
OfferedCapacity,
FlowbasedParameters,
IntradayTransferLimits,
ExplicitAllocationInformationCapacity,
ExplicitAllocationInformationRevenueonly,
TotalCapacityNominated,
DayAheadPrices,
ImplicitAuctionCongestionIncome,
TotalCommercialSchedules,
PhysicalFlows,
CapacityAllocatedOutsideEU,
CommercialSchedules,
)
| 494 |
install/app_store/tk-framework-qtwidgets/v2.6.5/python/search_completer/search_result_delegate.py
|
JoanAzpeitia/lp_sg
| 0 |
2166881
|
# Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
# import the shotgun_model and view modules from the shotgun utils framework
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
shotgun_globals = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_globals")
views = sgtk.platform.current_bundle().import_module("views")
from .search_result_widget import SearchResultWidget
from .utils import CompleterPixmaps
class SearchResultDelegate(views.WidgetDelegate):
"""
Delegate which renders search match entries in the global
search completer.
"""
def __init__(self, view, text=None):
"""
:param view: The view where this delegate is being used
"""
super(SearchResultDelegate, self).__init__(view)
self._pixmaps = CompleterPixmaps()
self._text = text
self.selection_model = view.selectionModel()
if self.selection_model:
self.selection_model.selectionChanged.connect(self._on_selection_changed)
self.__current_index = None
def _on_selection_changed(self, selected, deselected):
"""
Signal triggered when someone changes the selection in the view.
:param selected: A list of the indexes in the model that were selected
:type selected: :class:`~PySide.QtGui.QItemSelection`
:param deselected: A list of the indexes in the model that were deselected
:type deselected: :class:`~PySide.QtGui.QItemSelection`
"""
# clean up
selected_indexes = selected.indexes()
if len(selected_indexes) > 0:
# get the currently selected model index
model_index = selected_indexes[0]
self.__current_index = model_index
def _create_widget(self, parent):
"""
Widget factory as required by base class. The base class will call this
when a widget is needed and then pass this widget in to the various callbacks.
:param parent: Parent object for the widget
"""
return SearchResultWidget(parent)
def _on_before_paint(self, widget, model_index, style_options):
"""
Called by the base class when the associated widget should be
painted in the view. This method should implement setting of all
static elements (labels, pixmaps etc) but not dynamic ones (e.g. buttons)
:param widget: The widget to operate on (created via _create_widget)
:param model_index: The model index to operate on
:param style_options: QT style options
"""
# note: local import to avoid cyclic dependencies
from .search_completer import SearchCompleter
widget.set_selected(model_index == self.__current_index)
mode = shotgun_model.get_sanitized_data(model_index, SearchCompleter.MODE_ROLE)
if mode == SearchCompleter.MODE_LOADING:
widget.set_text("Hold on, loading search results...")
widget.set_thumbnail(self._pixmaps.loading)
elif mode == SearchCompleter.MODE_NOT_ENOUGH_TEXT:
widget.set_text("Type at least %s characters..." % (
SearchCompleter.COMPLETE_MINIMUM_CHARACTERS,))
widget.set_thumbnail(self._pixmaps.keyboard)
elif mode == SearchCompleter.MODE_NOT_FOUND:
widget.set_text("Sorry, no matches found!")
widget.set_thumbnail(self._pixmaps.no_matches)
elif mode == SearchCompleter.MODE_RESULT:
self._render_result(widget, model_index)
else:
widget.set_text("Unknown mode!")
def _underline_search_term(self, matching):
"""
Generates a text string with the searched text underlined.
:param str matching: String that potentially matched the search term.
:returns: The exact same string with the search term underlined. If the search term
was not present, the string is returned as is.
"""
# Previous version of the API didn't take a text string in. If we don't have one,
# we can't highlight
if not self._text:
return matching
match_start = matching.lower().find(self._text.lower())
if match_start == -1:
return matching
match_end = match_start + len(self._text)
return "%s<span style='text-decoration:underline;'>%s</span>%s" % (
matching[: match_start], matching[match_start: match_end], matching[match_end:]
)
def sizeHint(self, style_options, model_index):
"""
Specify the size of the item.
:param style_options: QT style options
:param model_index: Model item to operate on
"""
return SearchResultWidget.calculate_size()
| 5,213 |
src/deeponto/bert/bert_args.py
|
KRR-Oxford/DeepOnto
| 6 |
2169841
|
# Copyright 2021 <NAME> (KRR-Oxford). All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for input arguments of a BERT model"""
from transformers import TrainingArguments
from typing import Optional, Union
import torch
class BERTArgs:
def __init__(
self,
bert_checkpoint: str,
output_dir: str,
num_epochs: float,
batch_size_for_training: int,
batch_size_for_prediction: int,
max_length: int,
device_num: int,
early_stop_patience: Optional[int], # if not specified, no early stopping is performed
resume_from_ckp: Optional[Union[bool, str]], # None; True; specific_checkpoint_dir
):
# basic arguments
self.bert_checkpoint = bert_checkpoint
# training arguments
self.output_dir = output_dir
self.num_epochs = num_epochs
self.batch_size_for_training = batch_size_for_training
self.batch_size_for_prediction = batch_size_for_prediction
self.max_length = max_length
self.device_num = device_num
self.early_stop_patience = early_stop_patience
self.early_stop = True if early_stop_patience else False
self.resume_from_ckp = resume_from_ckp
def generate_training_args(
self,
training_data_size: int,
metric_for_best_model: Optional[str] = None,
greater_is_better: Optional[bool] = None,
) -> TrainingArguments:
# regularizing the steps
epoch_steps = training_data_size // self.batch_size_for_training # total steps of an epoch
if torch.cuda.device_count() > 0:
epoch_steps = epoch_steps // torch.cuda.device_count() # to deal with multi-gpus case
# keep logging steps consistent even for small batch size
# report logging on every 0.02 epoch
logging_steps = int(epoch_steps * 0.02)
# eval on every 0.1 epoch
eval_steps = 5 * logging_steps
return TrainingArguments(
output_dir=self.output_dir,
# max_steps=eval_steps*4 + 1,
num_train_epochs=self.num_epochs,
per_device_train_batch_size=self.batch_size_for_training,
per_device_eval_batch_size=self.batch_size_for_training,
warmup_ratio=0.0,
weight_decay=0.01,
logging_steps=logging_steps,
logging_dir=f"{self.output_dir}/tensorboard",
eval_steps=eval_steps,
evaluation_strategy="steps",
do_train=True,
do_eval=True,
save_steps=eval_steps,
load_best_model_at_end=True,
save_total_limit=1,
metric_for_best_model=metric_for_best_model,
greater_is_better=greater_is_better,
)
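# Usage sketch (commented out): the checkpoint name, output directory and sizes are
# illustrative assumptions, not values used elsewhere in this project.
#   args = BERTArgs("bert-base-uncased", "./runs/exp1", num_epochs=3.0,
#                   batch_size_for_training=32, batch_size_for_prediction=64,
#                   max_length=128, device_num=0, early_stop_patience=None,
#                   resume_from_ckp=None)
#   training_args = args.generate_training_args(training_data_size=10000,
#                                               metric_for_best_model="accuracy",
#                                               greater_is_better=True)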
| 3,280 |
main.py
|
Enchan1207/CE_ImgSaver
| 0 |
2170384
|
# -*- coding: utf-8 -*-
#
# Twitter image downloader
#
from lib.UserHandle import UserHandle
from lib.DBQueue import DBQueue
from lib.Clawler import Clawler
from lib.Saver import Saver
from lib.config import PathConfig
from datetime import datetime
import time, threading, logging
endReq = False  # termination request flag
with open(PathConfig.PATH_LOGOUTPUT, "a") as f:
pass
logging.basicConfig(filename=PathConfig.PATH_LOGOUTPUT, level=logging.INFO)  # log output destination and level
#-- start the dequeue thread
def dequeueThread():
logging.info(str(int(datetime.now().timestamp())) + ": [main] start to dequeue")
queue4Dequeue = DBQueue()
queue4Dequeue.connect(PathConfig.PATH_DBNAME, usemySQL=True)
queue4Dequeue.deQueue(120)
dqthread = threading.Thread(target=dequeueThread)
dqthread.setDaemon(True)  # must be a daemon thread, otherwise it keeps running until the timeout
dqthread.start()
#-- start a thread that tracks users which have not been explored yet
def initRecord():
logging.info(str(int(datetime.now().timestamp())) + ": [main - InitRecord] start to init untracked user record")
clawler = Clawler(PathConfig.PATH_DBNAME)
uh = UserHandle()
target = uh.getUnTrackedUser()
while (not endReq):
if(len(target) > 0):
st = time.time()
clawler.update(target[0], 2)
logging.info(str(int(datetime.now().timestamp())) + ": [main - initRecord] track:" + target[0][1])
pt = time.time() - st  # time spent on processing
if((5 - pt) > 0):
time.sleep(5 - pt)
else:
time.sleep(10)
target = uh.getUnTrackedUser()
logging.info(str(int(datetime.now().timestamp())) + ": [main - initRecord] complete tracking new users.")
#-- start a thread that updates users whose records are already initialized
def updateUser():
logging.info(str(int(datetime.now().timestamp())) + ": [main - UpdateUser] start to update tracked user data")
clawler = Clawler(PathConfig.PATH_DBNAME)
uh = UserHandle()
target = uh.getNext()
#-- keeps running until endReq arrives; even with no update targets, periodically check the DB for target users
while (not endReq):
if(len(target) > 0):
st = time.time()
clawler.update(target[0], 0)
clawler.update(target[0], 1)
stat = clawler.getAPIStat()
logging.info(str(int(datetime.now().timestamp())) + ": [main - updateUser] update:" + str(target[0][1]) + " API Status: " + str(stat['remaining']) + "/" + str(stat['limit']))
pt = time.time() - st  # time spent on processing
if((3 - pt) > 0):
time.sleep(3 - pt)
else:
time.sleep(10)
target = uh.getNext()
if(endReq):
logging.info(str(int(datetime.now().timestamp())) + ": [main - updateUser] accepted endreq")
else:
logging.info(str(int(datetime.now().timestamp())) + ": [main - updateUser] complete update")
#-- start a thread that saves images
def saveImages():
logging.info(str(int(datetime.now().timestamp())) + ": [main - saveImage] save tracked image")
saver = Saver(PathConfig.PATH_DBNAME, PathConfig.PATH_IMGSAVE)
uh = UserHandle()
pre_endReq = False  # do not react to endReq directly
#-- fetch several images at a time and get their binaries
files = []
while (not pre_endReq):
images = uh.getImages(30)
logging.info(str(int(datetime.now().timestamp())) + ": [main - saveImage] found:" + str(len(images)) + " images.")
if(len(images) > 0):
for image in images:
st = time.time()
#-- fetch from the server and wait
files.append(saver.get(image))
logging.info(str(int(datetime.now().timestamp())) + ": [main - saveImage] get: " + image[5])
pt = time.time() - st  # time spent on processing
if((3 - pt) > 0):
time.sleep(3 - pt)
#-- save under generated file names (endReq is ignored here)
logging.info(str(int(datetime.now().timestamp())) + ": [main - saveImage] started to save " + str(len(files)) + " images...")
saver.save(files)
files = []
logging.info(str(int(datetime.now().timestamp())) + ": [main - saveImage] complete to save.")
else:
time.sleep(4)
#-- even if a termination request arrives, this loop itself does not stop
if(endReq and (not pre_endReq)):
logging.info(str(int(datetime.now().timestamp())) + ": [main - saveImage] saveImages has received(not ACCEPTED) endreq.")
pre_endReq = True
if endReq:
logging.info(str(int(datetime.now().timestamp())) + ": [main - saveImage] accepted endreq")
else:
logging.info(str(int(datetime.now().timestamp())) + ": [main - saveImage] image tracking completed")
return 0
#-- the main thread simply waits; one could build a GUI here, or an interface that sends commands to Command.py
updateThread = threading.Thread(target=updateUser)
updateThread.setDaemon(True)
saveThread = threading.Thread(target=saveImages)
saveThread.setDaemon(True)
initThread = threading.Thread(target=initRecord)
initThread.setDaemon(True)
logging.info("--- Start CE_ImgSaver:" + datetime.now().strftime('%Y年%m月%d日 %H:%M:%S') + "---")
initThread.start()
updateThread.start()
saveThread.start()
try:
n = 9
time.sleep(n * 60 * 60)  # wait for n hours
endReq = True
except KeyboardInterrupt:
print("Process end request has requested(not ACCEPTED). please wait other daemon threads...")
logging.info(str(int(datetime.now().timestamp())) + ": [main] **CAUTION:** endReq was requested(not accepted).")
endReq = True
initThread.join()
updateThread.join()
saveThread.join()
print("End request has accepted.")
exit(0)
| 5,398 |
views.py
|
meugenia1/djangoprojeto
| 0 |
2170060
|
from django.shortcuts import render
from django.http.response import HttpResponse
from django.template.context import RequestContext
from django.template.loader import LoaderOrigin
from coded.models import Curso
# Create your views here.
def artigo(request,ano):
return HttpResponse("ola mundo"+ano)
def home(request):
cursosdest= Curso.objects.all().filter(Destaque="Destaque")
cursos= Curso.objects.all()
categoria= Curso.objects.all().filter(Categoria="??")
context = {'cursosdest': cursosdest, 'cursos': cursos, 'categoria': categoria}
return render(request, 'index.html', context)
| 696 |
relentness/utils/files.py
|
zeionara/relentness
| 0 |
2168820
|
import os
def ensure_parent_folders_exist(filename: str):
folder_path = os.path.split(filename)[0]
if len(folder_path) > 0:
os.makedirs(folder_path, exist_ok=True)
| 182 |
xcltk/baf/plp/core.py
|
hxj5/celltk
| 0 |
2167477
|
# core.py
import math
import os
import pickle
import pysam
import sys
from .mcount import MCount
from .sam import check_read, sam_fetch
from .zfile import zopen, ZF_F_GZIP
def sp_region(reg, conf):
reg_ref_umi = {smp:set() for smp in conf.barcodes}
reg_alt_umi = {smp:set() for smp in conf.barcodes}
reg_oth_umi = {smp:set() for smp in conf.barcodes}
mcnt = MCount(conf.barcodes, conf)
for snp in reg.snp_list:
itr = sam_fetch(conf.sam, snp.chrom, snp.pos, snp.pos)
if not itr:
continue
if mcnt.add_snp(snp) < 0: # mcnt reset() inside.
return((-3, None, None, None, None))
for read in itr:
if check_read(read, conf) < 0:
continue
ret = mcnt.push_read(read)
if ret < 0:
if ret == -1:
return((-5, None, None, None, None))
continue
if mcnt.stat() < 0:
return((-7, None, None, None, None))
snp_cnt = sum(mcnt.tcount)
if snp_cnt < conf.min_count:
continue
snp_ref_cnt = mcnt.tcount[mcnt.base_idx[snp.ref]]
snp_alt_cnt = mcnt.tcount[mcnt.base_idx[snp.alt]]
snp_minor_cnt = min(snp_ref_cnt, snp_alt_cnt)
if snp_minor_cnt < snp_cnt * conf.min_maf:
continue
for smp, scnt in mcnt.cell_cnt.items():
for umi, ucnt in scnt.umi_cnt.items():
if not ucnt.allele:
continue
ale_idx = snp.get_region_allele_index(ucnt.allele)
if ale_idx == 0: # ref allele of the region.
reg_ref_umi[smp].add(umi)
elif ale_idx == 1: # alt allele of the region.
reg_alt_umi[smp].add(umi)
else:
reg_oth_umi[smp].add(umi)
reg_ref_cnt = {smp:0 for smp in conf.barcodes}
reg_alt_cnt = {smp:0 for smp in conf.barcodes}
reg_oth_cnt = {smp:0 for smp in conf.barcodes}
reg_dp_cnt = {smp:0 for smp in conf.barcodes}
for smp in conf.barcodes:
reg_ref_cnt[smp] = len(reg_ref_umi[smp])
reg_alt_cnt[smp] = len(reg_alt_umi[smp])
dp_umi = reg_ref_umi[smp].union(reg_alt_umi[smp]) # CHECK ME! theoretically no shared UMIs
reg_oth_umi[smp] = reg_oth_umi[smp].difference(dp_umi)
reg_oth_cnt[smp] = len(reg_oth_umi[smp])
reg_dp_cnt[smp] = len(dp_umi)
return((0, reg_ref_cnt, reg_alt_cnt, reg_oth_cnt, reg_dp_cnt))
# TODO: use clever IPC (Inter-process communication) instead of naive `raise Error`.
# NOTE:
# 1. bgzf errors when using pysam.AlignmentFile.fetch in parallel (with multiprocessing)
# https://github.com/pysam-developers/pysam/issues/397
def sp_count(thdata):
func = "sp_count"
conf = thdata.conf
thdata.ret = -1
conf.sam = pysam.AlignmentFile(conf.sam_fn, "r") # auto detect file format
reg_list = None
if thdata.is_reg_pickle:
with open(thdata.reg_obj, "rb") as fp:
reg_list = pickle.load(fp)
os.remove(thdata.reg_obj)
else:
reg_list = thdata.reg_obj
fp_reg = zopen(thdata.out_region_fn, "wt", ZF_F_GZIP, is_bytes = False)
fp_ad = zopen(thdata.out_ad_fn, "wt", ZF_F_GZIP, is_bytes = False)
fp_dp = zopen(thdata.out_dp_fn, "wt", ZF_F_GZIP, is_bytes = False)
fp_oth = zopen(thdata.out_oth_fn, "wt", ZF_F_GZIP, is_bytes = False)
m_reg = float(len(reg_list))
n_reg = 0
l_reg = 0
k_reg = 1
for reg_idx, reg in enumerate(reg_list):
if conf.debug > 0:
sys.stderr.write("[D::%s][Thread-%d] processing region '%s' ...\n" %
(func, thdata.idx, reg.name))
if reg.snp_list:
ret, reg_ref_cnt, reg_alt_cnt, reg_oth_cnt, reg_dp_cnt = sp_region(reg, conf)
if ret < 0:
raise ValueError("[%s] errcode %d" % (func, -9))
str_reg, str_ad, str_dp, str_oth = "", "", "", ""
for i, smp in enumerate(conf.barcodes):
nu_ad, nu_dp, nu_oth = -1, -1, -1
if reg_ref_cnt[smp] + reg_alt_cnt[smp] != reg_dp_cnt[smp]:
if conf.debug > 0:
msg = "[D::%s][Thread-%d] region '%s', sample '%s':\n" % (
func, thdata.idx, reg.name, smp)
msg += "\tduplicate UMIs: REF, ALT, DP_uniq (%d, %d, %d)!\n" % (
reg_ref_cnt[smp], reg_alt_cnt[smp], reg_dp_cnt[smp])
sys.stderr.write(msg)
if conf.no_dup_hap:
nu_share = reg_ref_cnt[smp] + reg_alt_cnt[smp] - reg_dp_cnt[smp]
nu_ad = reg_alt_cnt[smp] - nu_share
nu_dp = reg_dp_cnt[smp] - nu_share
else:
nu_ad = reg_alt_cnt[smp]
nu_dp = reg_ref_cnt[smp] + reg_alt_cnt[smp]
else:
nu_ad, nu_dp = reg_alt_cnt[smp], reg_dp_cnt[smp]
nu_oth = reg_oth_cnt[smp]
if nu_dp + nu_oth <= 0:
continue
if nu_ad > 0:
str_ad += "%d\t%d\t%d\n" % (k_reg, i + 1, nu_ad)
thdata.nr_ad += 1
if nu_dp > 0:
str_dp += "%d\t%d\t%d\n" % (k_reg, i + 1, nu_dp)
thdata.nr_dp += 1
if nu_oth > 0:
str_oth += "%d\t%d\t%d\n" % (k_reg, i + 1, nu_oth)
thdata.nr_oth += 1
if str_dp or str_oth:
fp_ad.write(str_ad)
fp_dp.write(str_dp)
fp_oth.write(str_oth)
fp_reg.write("%s\t%d\t%d\t%s\n" % (reg.chrom, reg.start, reg.end - 1, reg.name))
k_reg += 1
elif conf.output_all_reg:
fp_reg.write("%s\t%d\t%d\t%s\n" % (reg.chrom, reg.start, reg.end - 1, reg.name))
k_reg += 1
elif conf.output_all_reg:
fp_reg.write("%s\t%d\t%d\t%s\n" % (reg.chrom, reg.start, reg.end - 1, reg.name))
k_reg += 1
n_reg += 1
frac_reg = n_reg / m_reg
if frac_reg - l_reg >= 0.02 or n_reg == m_reg:
sys.stdout.write("[I::%s][Thread-%d] %d%% genes processed\n" %
(func, thdata.idx, math.floor(frac_reg * 100)))
l_reg = frac_reg
thdata.nr_reg = k_reg - 1
fp_reg.close()
fp_ad.close()
fp_dp.close()
fp_oth.close()
conf.sam.close()
thdata.conf = None # sam object cannot be pickled.
thdata.ret = 0
if thdata.out_fn:
with open(thdata.out_fn, "wb") as fp_td:
pickle.dump(thdata, fp_td)
return((0, thdata))
| 6,792 |
performance-storage-service/pss_project/api/models/rest/metadata/Metadata.py
|
cmu-db/noisepage-stats
| 23 |
2170460
|
from pss_project.api.models.rest.metadata.JenkinsMetadata import JenkinsMetadata
from pss_project.api.models.rest.metadata.GithubMetadata import GithubMetadata
from pss_project.api.models.rest.metadata.NoisePageMetadata import NoisePageMetadata
from pss_project.api.models.rest.metadata.EnvironmentMetadata import EnvironmentMetadata
class Metadata(object):
""" This class is the model of the all the metadata data as it is represented in the HTTP API
jenkins - all data relating to the job/build that reported the metrics
github - all github related info (i.e. branch, commit sha)
noisepage - all system specific metadata (i.e. DB version)
environment - all environment metadata relating to the conditions under which the metrics were gathered """
def __init__(self, jenkins, github, noisepage, environment):
self.jenkins = JenkinsMetadata(**jenkins)
self.github = GithubMetadata(**github)
self.noisepage = NoisePageMetadata(**noisepage)
self.environment = EnvironmentMetadata(**environment)
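# A hedged usage sketch, kept as a comment because the exact field names expected
# by the nested metadata classes are not shown in this file and are assumptions:
#
# payload = {
#     'jenkins': {...},      # job/build that reported the metrics
#     'github': {...},       # branch, commit sha, ...
#     'noisepage': {...},    # system-specific metadata such as the DB version
#     'environment': {...},  # conditions under which the metrics were gathered
# }
# metadata = Metadata(**payload)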
| 1,069 |
ModularityR.py
|
mahdi-zafarmand/SNA
| 0 |
2170041
|
from CommunitySearch import CommunitySearcher
from CommunityDetection import CommunityDetector
import utils
class ModularityRCommunityDiscovery(CommunitySearcher):
def __init__(self, graph):
# initialize the object.
super(ModularityRCommunityDiscovery, self).__init__('Modularity R', graph)
def reset(self):
# resets the object to prepare it for another use.
super(ModularityRCommunityDiscovery, self).reset()
def community_search(self, start_node): # no use for 'with_amend' in this algorithm.
# THE MAIN FUNCTION OF THE CLASS, finds all other nodes that belong to the same community as the start_node does.
self.set_start_node(start_node)
modularity_r = 0.0
neighbors = list(self.graph.neighbors(start_node))
self.exclude_ignored_nodes(neighbors)
T = len(neighbors)
while len(self.community) < self.graph.number_of_nodes() and len(self.shell) > 0:
delta_r = {} # key: candidate nodes from the shell set, value: total improved strength after a node joins.
delta_T = {} # key: candidate nodes from the shell set, value: delta T (based on notations of the paper).
for node in self.shell:
delta_r[node], delta_T[node] = self.compute_modularity((modularity_r, T), node)
new_node = utils.find_best_next_node(delta_r)
if delta_r[new_node] < CommunitySearcher.minimum_improvement:
break
modularity_r += delta_r[new_node]
T += delta_T[new_node]
self.update_sets_when_node_joins(new_node, change_boundary=True)
return sorted(self.community) # sort is only for a better representation, can be ignored to boost performance.
def compute_modularity(self, auxiliary_info, candidate_node):
R, T = auxiliary_info
neighbors_of_candidate = list(self.graph.neighbors(candidate_node))
self.exclude_ignored_nodes(neighbors_of_candidate)
x, y, z = 0, 0, 0
for neighbor in neighbors_of_candidate:
if neighbor in self.boundary:
x += 1
else:
y += 1
for neighbor in [node for node in neighbors_of_candidate if node in self.boundary]:
if self.should_leave_boundary(neighbor, candidate_node):
for node in self.graph.neighbors(neighbor):
if (node in self.community) and ((node in self.boundary) is False):
z += 1
return float(x - R * y - z * (1 - R)) / float(T - z + y), -z + y
def should_leave_boundary(self, possibly_leaving_node, neighbor_node):
# to find if 'possibly_leaving_node' should leave 'self.boundary' because of the agglomeration of 'neighbor_node'.
neighbors = set(self.graph.neighbors(possibly_leaving_node))
self.exclude_ignored_nodes(neighbors)
neighbors.discard(neighbor_node)
for neighbor in neighbors:
if (neighbor in self.community) is False:
return False
return True
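# A hedged usage sketch, kept as a comment: it assumes the searcher wraps a
# networkx-style graph (inferred only from the neighbors()/number_of_nodes() calls
# above) and that networkx is installed.
#
# import networkx as nx
# searcher = ModularityRCommunityDiscovery(nx.karate_club_graph())
# members = searcher.community_search(start_node=0)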
class ModularityRCommunityDetection(CommunityDetector):
# the class to detect all communities of a social network by applying Modularity R algorithm over and over.
def __init__(self, graph):
# initialize the object
super().__init__('Modularity R', graph)
self.local_searcher = ModularityRCommunityDiscovery(graph)
| 3,031 |
yandex_maps_tests/settings.py
|
66ru/yandex-maps
| 8 |
2170275
|
#coding: utf-8
import os, sys
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(PROJECT_ROOT, '..')))
DEBUG = True
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'db.sqlite'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite'
}
}
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, 'templates'),)
SECRET_KEY = '123'
# my key for the example.com domain
YANDEX_MAPS_API_KEY = "<KEY>"
INSTALLED_APPS=(
'yandex_maps',
'test_app',
# 'devserver',
'south',
)
| 595 |
on_message/camera_take_picture.py
|
JIIOryo/ams-client
| 1 |
2164881
|
import json
import sys
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from service.camera import take_picture
"""
# message
type: json str
-----
{
"cameras": [
{
"camera_id": "82900309bfb57e7c6173cad57daefba9",
}
]
}
"""
def camera_take_picture(message: str) -> None:
target_cameras = json.loads(message)['cameras']
for target_camera in target_cameras:
target_camera_id = target_camera['camera_id']
take_picture(target_camera_id)
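# A hedged usage sketch, kept as a comment since the camera_id must refer to a real
# configured camera; the payload follows the message schema documented above.
#
# camera_take_picture(json.dumps(
#     {"cameras": [{"camera_id": "82900309bfb57e7c6173cad57daefba9"}]}
# ))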
| 567 |
python/day-07/solve.py
|
jrrickerson/adventofcode2021
| 0 |
2166320
|
import statistics
import utils
def get_input_data(filename):
return [line.strip() for line in open(filename)]
def part_1(input_data):
positions = utils.parse_horizontal_positions(input_data[0])
spent_fuel = utils.distance_to_median(positions)
return spent_fuel
def part_2(input_data):
positions = utils.parse_horizontal_positions(input_data[0])
mean = round(statistics.mean(positions))
median = round(statistics.median(positions))
min_fuel = None
start, end = min(median, mean), max(median, mean)
# Check from median to mean just to be sure rounding errors aren't
# affecting the result
for center_point in range(start, end + 1):
spent_fuel = sum(
utils.nth_triangular_distances(positions, center_point))
if not min_fuel or spent_fuel < min_fuel:
min_fuel = spent_fuel
return min_fuel
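# A hedged sketch of the helper used above; utils.nth_triangular_distances is not
# shown in this file, so this is an assumption about its behaviour: moving a crab a
# distance d costs the d-th triangular number, d * (d + 1) / 2, units of fuel.
#
# def nth_triangular_distances(positions, center):
#     return [abs(p - center) * (abs(p - center) + 1) // 2 for p in positions]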
def main(input_file):
input_data = get_input_data(input_file)
part_1_result = part_1(input_data)
part_2_result = part_2(input_data)
solution = f"""
Part 1: {part_1_result}
Part 2: {part_2_result}
"""
return solution
if __name__ == "__main__":
print(
"Solving Puzzle for Day 7:",
"https://adventofcode.com/2021/day/7")
print(main("../puzzles/day-07.input"))
| 1,304 |
athenatools/migrations/0004_document.py
|
fakegit/AthenaTools
| 7 |
2169946
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-11-29 10:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('athenatools', '0003_auto_20181128_2020'),
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.ImageField(upload_to=b'', verbose_name=b'\xe6\x96\x87\xe4\xbb\xb6')),
('name', models.CharField(blank=True, max_length=255, verbose_name=b'\xe5\x90\x8d\xe7\xa7\xb0')),
('category', models.CharField(blank=True, max_length=255, verbose_name=b'\xe5\x88\x86\xe7\xb1\xbb')),
('keywords', models.CharField(blank=True, max_length=255, verbose_name=b'\xe5\x85\xb3\xe9\x94\xae\xe8\xaf\x8d')),
('remark', models.CharField(blank=True, max_length=255, verbose_name=b'\xe5\xa4\x87\xe6\xb3\xa8')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name=b'\xe5\x88\x9b\xe5\xbb\xba\xe6\x97\xb6\xe9\x97\xb4')),
],
options={
'verbose_name': '\u6587\u6863',
'verbose_name_plural': '\u6587\u6863',
},
),
]
| 1,384 |
sfm2latex/corpus/corpus.py
|
redmer/sfm2latex
| 0 |
2170239
|
import os
from ..SFMFile import File
from ..utils import fix_orthography
from .Example import Example
def collect_examples(read_file):
examples = list()
current = None
for mark, value in File(read_file):
# Reference
if 'ref' == mark: # we have a new example
if current is not None:
examples.append(current)
current = Example(value)
# Morpheme boundaries (per word)
elif 'mb' == mark:
current.mb = value
# Gloss English (per word)
elif 'ge' == mark:
current.ge = value
# Free translation (English)
elif 'ft' == mark:
current.ft = value
# Free comment
elif 'cmt' == mark:
current.cmt = value
return examples
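# A hedged sketch of the kind of SFM record consumed above (the marker names are
# taken from the branches in collect_examples; the example text itself is invented):
#
# \ref 001
# \mb morpheme-by-morpheme line
# \ge gloss line
# \ft free translation
# \cmt free comment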
def render(index, settings={}):
output = ''
for item in index:
output += item.render(settings) + '\n'
return output
def build(input_filename, settings={}):
# Get the lexemes from the SFM file
examples = collect_examples(input_filename)
# layout: {'aa' : {001.tex, 002.tex, ...}, 'ab': {001.tex, 002.tex, ...}, ... }
export = dict()
for e in examples:
if e.major() not in export:
export[e.major()] = dict()
export[e.major()][e.minor()] = e
for major, maj_values in export.items():
for minor, example in maj_values.items():
target_file = 'output/{major}/{minor}.tex'.format(
major=major,
minor=minor
)
os.makedirs(os.path.dirname(target_file), exist_ok=True)
out_file = open(target_file, 'w+')
out_file.write(example.render(settings))
out_file.close()
| 1,747 |
CODE/web.py
|
macanepa/cloud-savedata-manager
| 1 |
2169547
|
import mcutils as mc
from flask import Flask
from flaskwebgui import FlaskUI
from flask import render_template, request, redirect
import utilities
import google_api as ga
from pprint import pprint
import sys
import os
from packaging import version
import subprocess
if getattr(sys, 'frozen', False):
template_folder = os.path.join(sys._MEIPASS, 'templates')
static_folder = os.path.join(sys._MEIPASS, 'static')
app = Flask(__name__, template_folder=template_folder, static_folder=static_folder)
else:
app = Flask(__name__)
ui = FlaskUI(app, width=1100, height=550)
mc.ColorSettings.is_dev = False
mc.activate_mc_logger('info')
#utilities.initialize()
@app.route("/check_credentials", methods=['GET'])
def check_credentials():
if not utilities.check_credentials():
utilities.create_credentials(menu=False)
utilities.initialize()
return {'status': 'ok'}
return {'status': 'error'}
@app.route("/")
def index():
if not utilities.check_credentials():
return render_template("documentation.html")
utilities.initialize()
config = mc.get_dict_from_json(utilities.SETTINGS_PATH)
game_list = list(config.keys())
latest_version = utilities.get_latest_version()
new_version = None
if version.parse(latest_version) > version.parse(utilities.APP_VERSION):
new_version = latest_version
return render_template('index.html',
email=ga.get_user_info(),
games=game_list,
len=len(game_list),
APP_VERSION=utilities.APP_VERSION,
new_version=new_version)
@app.route("/documentation")
def documentation():
return render_template('documentation.html')
@app.route('/games', methods=['GET'])
def games():
if request.method == 'GET':
game_name = request.args.get('gameName')
config = mc.get_dict_from_json(utilities.SETTINGS_PATH)
if game_name in list(config.keys()):
return_dict = config[game_name]
return_dict['game_name'] = game_name
is_local = utilities.check_local(utilities.decrypt_path(return_dict['path']))
return_dict['path'] = utilities.decrypt_path(return_dict['path'])
return_dict['is_local'] = is_local
return return_dict
else:
return {'nice': False}
return {'nice': None}
@app.route('/upload_cloud', methods=['GET'])
def upload_cloud():
if request.method == 'GET':
config = mc.get_dict_from_json(utilities.SETTINGS_PATH)
print(request.args)
game_name = request.args.get('gameName')
print(game_name)
if game_name in list(config.keys()):
utilities.update_game(game_id=game_name,
menu=False)
return {'response': 'Ok'}
else:
return {'nice': False}
return {'nice': None}
@app.route('/download_cloud', methods=['GET'])
def download_cloud():
if request.method == 'GET':
config = mc.get_dict_from_json(utilities.SETTINGS_PATH)
print(request.args)
game_name = request.args.get('gameName')
print(game_name)
if game_name in list(config.keys()):
utilities.restore_game(game_id=game_name,
menu=False)
return redirect('') # refresh
else:
return {'nice': False}
return {'nice': None}
@app.route('/addGame', methods=['POST'])
def add_game():
if request.method == 'POST':
pprint(request.form)
data = request.form
game_name = data.get('game')
path = data.get('path')
if os.path.exists(path):
description = data.get('description')
data = {'name': game_name,
'path': path,
'description': description}
utilities.create_game_data(data=data, menu=False)
return {'nice': True}
return {'status': 'error'}
@app.route('/logout', methods=['GET'])
def logout():
if request.method == 'GET':
utilities.change_sync_account(menu=False)
return redirect('/')
return {'nice': False}
@app.route('/delete_cloud', methods=['GET'])
def delete_cloud():
if request.method == 'GET':
utilities.delete_cloud_savedata(game_name=request.args.get('gameName'),
menu=False)
return redirect('/')
return {'nice': False}
@app.route("/open_location", methods=['GET'])
def open_location():
if request.method == 'GET':
print(request.args)
path = request.args.get('path').strip()
if os.path.exists(path):
subprocess.call(f"explorer {path}", shell=True)
return {'status': 'ok'}
return {'status': 'error'}
ui.run()
| 4,865 |
rl_quad/utils/utils.py
|
vivekagra/Biplane-Quadrotor
| 8 |
2169376
|
"""
author: <NAME>
email: <EMAIL>
license: BSD
Please feel free to use and modify this, but keep the above information. Thanks!
"""
import numpy as np
from math import sin, cos, asin, atan2, sqrt
def RotToRPY_ZXY(R):
phi = asin(R[1,2])
theta = atan2(-R[0,2]/cos(phi),R[2,2]/cos(phi))
psi = atan2(-R[1,0]/cos(phi),R[1,1]/cos(phi))
return np.array([phi, theta, psi])
def RotToRPY_ZYX(R):
"""
Get euler angles from rotation matrix using ZYX convention
"""
theta = -asin(R[0,2])
phi = atan2(R[1,2]/cos(theta), R[2,2]/cos(theta))
psi = atan2(R[0,1]/cos(theta), R[0,0]/cos(theta))
return np.array([phi, theta, psi])
def RPYToRot_ZXY(phi, theta, psi):
"""
phi, theta, psi = roll, pitch , yaw
The euler angle convention used is ZXY. This means: first a rotation of psi-degrees
around Z axis, then rotation of phi-degress around X axis, and finally rotation of
theta-degrees around Y axis
"""
return np.array([[cos(psi)*cos(theta) - sin(phi)*sin(psi)*sin(theta), cos(theta)*sin(psi) + cos(psi)*sin(phi)*sin(theta), -cos(phi)*sin(theta)],
[-cos(phi)*sin(psi), cos(phi)*cos(psi), sin(phi)],
[cos(psi)*sin(theta) + cos(theta)*sin(phi)*sin(psi), sin(psi)*sin(theta) - cos(psi)*cos(theta)*sin(phi), cos(phi)*cos(theta)]])
def RPYToRot_ZYX(phi, theta, psi):
"""
phi, theta, psi = roll, pitch , yaw
The euler angle convention used is ZYX. This means: first a rotation of psi-degrees
around Z axis, then rotation of theta-degrees around Y axis, and finally rotation of
phi-degress around X axis
"""
return np.array([[cos(theta)*cos(psi), cos(theta)*sin(psi), -sin(theta)],
[-cos(phi)*sin(psi) + sin(phi)*sin(theta)*cos(psi), cos(phi)*cos(psi) + sin(phi)*sin(theta)*sin(psi), sin(phi)*cos(theta)],
[sin(phi)*sin(psi) + cos(phi)*sin(theta)*cos(psi), -sin(phi)*cos(psi) + cos(phi)*sin(theta)*sin(psi), cos(phi)*cos(theta)]])
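# A hedged consistency-check sketch (not part of the original module): a rotation
# built with RPYToRot_ZYX should be recovered by RotToRPY_ZYX up to float error.
#
# angles = np.array([0.1, -0.2, 0.3])   # phi, theta, psi
# assert np.allclose(RotToRPY_ZYX(RPYToRot_ZYX(*angles)), angles, atol=1e-9)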
def RotToQuat(R):
"""
ROTTOQUAT Converts a Rotation matrix into a Quaternion
from the following website, deals with the case when tr<0
http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/index.htm
takes in W_R_B rotation matrix
"""
tr = R[0,0] + R[1,1] + R[2,2]
if tr > 0:
S = sqrt(tr+1.0) * 2 # S=4*qw
qw = 0.25 * S
qx = (R[2,1] - R[1,2]) / S
qy = (R[0,2] - R[2,0]) / S
qz = (R[1,0] - R[0,1]) / S
    elif (R[0,0] > R[1,1]) and (R[0,0] > R[2,2]):
S = sqrt(1.0 + R[0,0] - R[1,1] - R[2,2]) * 2 # S=4*qx
qw = (R[2,1] - R[1,2]) / S
qx = 0.25 * S
qy = (R[0,1] + R[1,0]) / S
qz = (R[0,2] + R[2,0]) / S
elif R[1,1] > R[2,2]:
S = sqrt(1.0 + R[1,1] - R[0,0] - R[2,2]) * 2 # S=4*qy
qw = (R[0,2] - R[2,0]) / S
qx = (R[0,1] + R[1,0]) / S
qy = 0.25 * S
qz = (R[1,2] + R[2,1]) / S
else:
S = sqrt(1.0 + R[2,2] - R[0,0] - R[1,1]) * 2 # S=4*qz
qw = (R[1,0] - R[0,1]) / S
qx = (R[0,2] + R[2,0]) / S
qy = (R[1,2] + R[2,1]) / S
qz = 0.25 * S
q = np.sign(qw) * np.array([qw, qx, qy, qz])
return q
def writeNpArrayToFile(data):
with open('state.csv','a') as f:
np.savetxt(f, data, newline=",", fmt='%.2f')
f.write('\n')
def outputTraj(x,y,z):
output = []
output.append((x,y,z))
with open('traj.out', 'w') as fp:
fp.write('\n'.join('%s %s %s' % item for item in output))
def add_plots(ax,x,datas,lines,cols,labs,tit,xlab,ylab):
for (data, line, colr, labl) in zip(datas, lines, cols, labs):
ax.plot(x,data, linestyle = line, color = colr, label = labl)
ax.set_title(tit)
ax.set_xlabel(xlab)
ax.set_ylabel(ylab)
return ax
def saturate_scalar_minmax(value, max_value, min_value):
"""
    @description Saturation function for a scalar with defined maximum and minimum values.
See Q. Quan. Introduction to Multicopter Design (2017), Ch11.3, page 265 for reference
"""
mean = (max_value + min_value)/2.0
half_range = (max_value - min_value)/2.0
return saturate_vector_dg(value-mean, half_range) + mean
# saturation function for vectors
def saturate_vector_dg(v, max_value):
"""
@description saturation function for the magnitude of a vector with maximum magnitude
and guaranteed direction.
See Q. Quan. Introduction to Multicopter Design (2017), Ch. 10.2 for reference
"""
mag = np.linalg.norm(v)
if( mag < max_value):
return v
else:
return np.dot(v/mag,max_value) # return vector in same direction but maximum possible magnitude
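# A minimal sanity-check sketch (not part of the original module): scalar saturation
# clips into [min, max] while preserving the centre of the range, and vector
# saturation caps the magnitude while keeping the direction.
if __name__ == '__main__':
    assert saturate_scalar_minmax(1.7, max_value=1.0, min_value=-1.0) == 1.0
    assert np.allclose(saturate_vector_dg(np.array([3.0, 4.0]), 2.5), [1.5, 2.0])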
| 4,694 |
src/chordparser/music/notes.py
|
titus-ong/chordparser
| 3 |
2170222
|
class Note:
"""A class representing a musical note.
The `Note` class consists of notation A-G with optional unicode accidental symbols \u266d, \u266f, \U0001D12B, or \U0001D12A. It is created by the `NoteEditor`. When printed, only the `value` of the `Note` is displayed.
Parameters
----------
letter : str
The letter part of the `Note`'s notation. Consists of A-G.
symbol : str
The accidental part of the `Note`'s notation. Consists of the unicode characters \u266d, \u266f, \U0001D12B, or \U0001D12A. If there are no accidentals, it is an empty string.
Attributes
----------
letter : str
The letter part of the `Note`'s notation.
symbol : str
The accidental part of the `Note`'s notation.
"""
_flat = '\u266d'
_sharp = '\u266f'
_doubleflat = '\U0001D12B'
_doublesharp = '\U0001D12A'
_symbols = {
-1: _flat, -2: _doubleflat,
+1: _sharp, +2: _doublesharp,
0: '',
}
_symbol_signs = {
_flat: -1, _doubleflat: -2,
_sharp: 1, _doublesharp: 2,
'': 0,
}
_note_values = { # Basis: C = 0
'C': 0,
'D': 2,
'E': 4,
'F': 5,
'G': 7,
'A': 9,
'B': 11,
}
_notes_tuple = (
'C', 'D', 'E', 'F', 'G', 'A', 'B',
'C', 'D', 'E', 'F', 'G', 'A', 'B',
)
_sharp_tuple = (
('C', ''), ('C', '\u266f'), ('D', ''), ('D', '\u266f'), ('E', ''),
('F', ''), ('F', '\u266f'), ('G', ''), ('G', '\u266f'), ('A', ''),
('A', '\u266f'), ('B', ''),
)
_flat_tuple = (
('C', ''), ('D', '\u266d'), ('D', ''), ('E', '\u266d'), ('E', ''),
('F', ''), ('G', '\u266d'), ('G', ''), ('A', '\u266d'), ('A', ''),
('B', '\u266d'), ('B', ''),
)
def __init__(self, letter, symbol):
self.letter = letter
self.symbol = symbol
@property
def value(self):
"""str: The full notation of the `Note`."""
return self.letter + self.symbol
def num_value(self):
"""Return the `Note`'s numerical value (basis: C = 0).
The numerical value is based on the number of semitones above C.
Returns
-------
int
The numerical value.
Examples
--------
>>> NE = NoteEditor()
>>> d = NE.create_note("D")
>>> d.num_value()
2
"""
num = (self.letter_value() + self.symbol_value()) % 12
return num
def letter_value(self):
"""Return the `Note`'s letter as an integer value (basis: C = 0).
The value is based on the number of scale degrees above C.
Returns
-------
int
The letter's value.
Examples
--------
>>> NE = NoteEditor()
>>> d = NE.create_note("D")
>>> d.letter_value()
1
"""
return Note._note_values[self.letter]
def symbol_value(self):
"""Return the `Note`'s symbol as an integer value (basis: natural = 0).
The value is based on the number of semitones away from the natural `Note`.
Returns
-------
int
The symbol's value.
Examples
--------
>>> NE = NoteEditor()
>>> d_sharp = NE.create_note("D#")
>>> d_sharp.symbol_value()
1
"""
return Note._symbol_signs[self.symbol]
def accidental(self, value):
"""Change a `Note`'s accidental by specifying a `value` from -2 to 2.
        The range of `value`, [-2, 2], corresponds to the values a symbol can take, from doubleflat (-2) to doublesharp (2).
Parameters
----------
value : int
The accidental's integer value.
Raises
------
ValueError
If `value` is not in the range of [-2, 2].
Examples
--------
>>> NE = NoteEditor()
>>> d_sharp = NE.create_note("D#")
>>> d_sharp.accidental(-1)
D\u266d note
"""
if value not in range(-2, 3):
raise ValueError(
"Only integers between -2 and 2 are accepted"
)
self.symbol = Note._symbols[value]
return self
def shift_s(self, value):
"""Shift a `Note`'s accidental.
The `Note`'s `symbol_value()` must be in the range of [-2, 2] after the shift, which corresponds to the values a symbol can take from doubleflat (-2) to doublesharp (2).
Parameters
----------
value : int
The value of the shift in accidentals.
Raises
------
ValueError
If the `Note`'s `symbol_value()` is not in the range of [-2, 2] after the shift.
Examples
--------
>>> NE = NoteEditor()
>>> d_sharp = NE.create_note("D#")
>>> d_sharp.shift_s(-1)
D note
"""
value += self.symbol_value()
if value not in range(-2, 3):
raise ValueError(
"Only symbols up to doublesharps and doubleflats are accepted"
)
self.symbol = Note._symbols[value]
return self
def shift_l(self, value):
"""Shift a `Note`'s letter.
The `value` corresponds to the change in scale degree of the `Note`.
Parameters
----------
value : int
The value of the letter shift.
Examples
--------
>>> NE = NoteEditor()
>>> d_sharp = NE.create_note("D#")
>>> d_sharp.shift_l(3)
G\u266f note
"""
pos = (Note._notes_tuple.index(self.letter) + value) % 7
new_letter = Note._notes_tuple[pos]
self.letter = new_letter
return self
def transpose(self, semitones, letters):
"""Transpose a `Note` according to semitone and letter intervals.
Parameters
----------
semitones
The difference in semitones to the new transposed `Note`.
letters
The difference in scale degrees to the new transposed `Note`.
Examples
--------
>>> NE = NoteEditor()
>>> c = NE.create_note("C")
>>> c.transpose(6, 3)
F\u266f note
>>> c.transpose(0, 1)
G\u266d note
"""
new_val = (self.num_value() + semitones) % 12
self.shift_l(letters)
curr_val = self.num_value()
shift = (new_val - curr_val) % 12
shift = shift - 12 if shift > 6 else shift # shift downwards if closer
self.shift_s(shift)
return self
def transpose_simple(self, semitones, use_flats=False):
"""Transpose a `Note` according to semitone intervals.
Parameters
----------
semitones : int
The difference in semitones to the new transposed `Note`.
use_flats : boolean, Optional
Selector to use flats or sharps for black keys. Default False when optional.
Examples
--------
>>> NE = NoteEditor()
>>> c = NE.create_note("C")
>>> c.transpose_simple(6)
F\u266f note
        >>> c.transpose_simple(2, use_flats=True)
A\u266d note
"""
if use_flats:
note_list = Note._flat_tuple
else:
note_list = Note._sharp_tuple
self.letter, self.symbol = note_list[
(self.num_value() + semitones) % 12
]
return self
def __repr__(self):
return self.value + " note"
def __str__(self):
return self.value
def __eq__(self, other):
"""Compare between other `Notes` and strings.
Checks if the other `Note`'s value or the string is the same as this `Note`.
Parameters
----------
other
The object to be compared with.
Returns
-------
boolean
The outcome of the `value` comparison.
Examples
--------
>>> NE = NoteEditor()
>>> d = NE.create_note("D")
>>> d2 = NE.create_note("D")
>>> d_str = "D"
>>> d == d2
True
>>> d == d_str
True
Note that symbols are converted to their unicode characters when a `Note` is created.
>>> NE = NoteEditor()
>>> ds = NE.create_note("D#")
>>> ds_str = "D#"
>>> ds_str_2 = "D\u266f"
>>> ds == ds_str
False
>>> ds == ds_str_2
True
"""
if isinstance(other, Note):
return self.value == other.value
elif isinstance(other, str):
return self.value == other
else:
return NotImplemented
| 8,709 |
app/configuration/diskcache.py
|
FZJ-INM1-BDA/siibra-api
| 0 |
2170141
|
# Copyright 2018-2020 Institute of Neuroscience and Medicine (INM-1),
# Forschungszentrum Jülich GmbH
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import siibra
import os
import logging
logger = logging.getLogger(__name__)
CACHEDIR = siibra.retrieval.CACHE.folder
if os.environ.get('SIIBRA_API_DISABLE_CACHE'):
logger.warning('Not using caching')
def memoize(**kwargs):
def wrapper(func):
return func
return wrapper
else:
from diskcache import FanoutCache
    logger.warning('Using diskcache.FanoutCache')
def memoize(**kwargs):
cache = FanoutCache(CACHEDIR)
return cache.memoize(**kwargs)
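# A hedged usage sketch: decorating a function stores its results in the on-disk
# cache unless SIIBRA_API_DISABLE_CACHE is set, in which case memoize is a no-op.
# The expire value below is an illustrative assumption, not a project setting.
#
# @memoize(expire=24 * 60 * 60)
# def expensive_lookup(key: str):
#     ...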
| 1,150 |
reclinker/simulate_people.py
|
dave31415/reclinker
| 0 |
2169371
|
from faker import Faker
def stream_fake_people(n_max=None, seed=None):
fake = Faker('en-US')
if seed is not None:
Faker.seed(seed)
n = 0
while True:
person = {'first_name': fake.first_name(),
'last_name': fake.last_name(),
'ssn': fake.ssn(),
'address': fake.address(),
'date_of_birth': fake.date_of_birth().strftime('%Y%m%d'),
'phone_number': fake.phone_number()}
yield person
n += 1
if n_max is not None:
if n == n_max:
break
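# A minimal usage sketch: seeding makes the stream reproducible and n_max bounds it.
if __name__ == '__main__':
    people = list(stream_fake_people(n_max=3, seed=42))
    assert len(people) == 3
    print(people[0]['first_name'], people[0]['last_name'])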
| 607 |
src/webassets/script.py
|
daniel-werner/stelagifts
| 0 |
2170423
|
import os, sys
import time
import logging
from optparse import OptionParser
from webassets.loaders import PythonLoader
from webassets.bundle import BuildError
class CommandError(Exception):
pass
class CommandLineEnvironment():
"""Implements the core functionality for a command line frontend
to ``webassets``, abstracted in a way to allow frameworks to
integrate the functionality into their own tools, for example,
as a Django management command.
"""
def __init__(self, env, log):
self.environment = env
self.log = log
def invoke(self, command):
"""Invoke ``command``, or throw a CommandError.
This is essentially a simple validation mechanism. Feel free
to call the individual command methods manually.
"""
try:
function = self.Commands[command]
except KeyError, e:
raise CommandError('unknown command: %s' % e)
else:
return function(self)
def rebuild(self):
"""Rebuild all assets now.
"""
for bundle in self.environment:
# TODO: Both the build() and the watch() command (and possibly
# others in the future) need to go through the motions of
# looping over iterbuild(). Can be move this to the environment?
for to_build in bundle.iterbuild():
self.log.info("Building asset: %s" % to_build.output)
try:
to_build.build(force=True)
except BuildError, e:
self.log.error("Failed, error was: %s" % e)
def watch(self):
"""Watch assets for changes.
TODO: This should probably also restart when the code changes.
"""
_mtimes = {}
_win = (sys.platform == "win32")
def check_for_changes():
changed_bundles = []
for possibly_container in self.environment:
for bundle in possibly_container.iterbuild():
for filename in bundle.get_files():
filename = bundle.env.abspath(filename)
stat = os.stat(filename)
mtime = stat.st_mtime
if _win:
mtime -= stat.st_ctime
if _mtimes.get(filename, mtime) != mtime:
changed_bundles.append(bundle)
_mtimes[filename] = mtime
break
_mtimes[filename] = mtime
return changed_bundles
try:
self.log.info("Watching %d bundles for changes..." % len(self.environment))
while True:
changed_bundles = check_for_changes()
for bundle in changed_bundles:
self.log.info("Rebuilding asset: %s" % bundle.output)
bundle.build(force=True)
time.sleep(0.1)
except KeyboardInterrupt:
pass
def clean(self):
""" Delete generated assets.
TODO: Clean the cache?
"""
self.log.info('Cleaning generated assets...')
for bundle in self.environment:
if not bundle.output:
continue
file_path = self.environment.abspath(bundle.output)
if os.path.exists(file_path):
os.unlink(file_path)
self.log.info("Deleted asset: %s" % bundle.output)
# List of command methods
Commands = {
'rebuild': rebuild,
'watch': watch,
'clean': clean,
}
def main(argv, env=None):
"""Generic version of the command line utilities, not specific to
any framework.
TODO: Support -c option to load from YAML config file
"""
parser = OptionParser(usage="usage: %%prog [options] [%s]" % (
" | ".join(CommandLineEnvironment.Commands)))
parser.add_option("-v", dest="verbose", action="store_true",
help="be verbose")
parser.add_option("-q", action="store_true", dest="quiet",
help="be quiet")
if not env:
parser.add_option("-m", "--module", dest="module",
help="read environment from a Python module")
(options, args) = parser.parse_args(argv)
if len(args) != 1:
parser.print_help()
return 1
# Setup logging
log = logging.getLogger('webassets')
log.setLevel(logging.DEBUG if options.verbose else (
logging.WARNING if options.quiet else logging.INFO))
log.addHandler(logging.StreamHandler())
# Load the bundles we shall work with
if not env and options.module:
env = PythonLoader(options.module).load_environment()
if not env:
print "Error: No environment given or found. Maybe use -m?"
return 1
# Run the selected command
cmd = CommandLineEnvironment(env, log)
try:
return cmd.invoke(args[0])
except CommandError, e:
print e
return 1
def run():
sys.exit(main(sys.argv[1:]) or 0)
if __name__ == '__main__':
run()
| 5,133 |
server/completers/base.py
|
QualiTorque/torque-vs-code-extensions
| 4 |
2168136
|
from typing import List
from pygls.lsp.types.language_features.completion import (
CompletionItem,
CompletionParams,
)
from pygls.workspace import Workspace
from server.ats.trees.common import BaseTree
class Completer:
def __init__(
self, workspace: Workspace, params: CompletionParams, tree: BaseTree
) -> None:
self.workspace = workspace
self.params = params
self.tree = tree
def get_completions(self) -> List[CompletionItem]:
pass
| 498 |
012 Node Depths/Node_Depths_sandbox.py
|
Iftakharpy/AlgoExpert-Questions
| 3 |
2168771
|
# This file is initialized with a code version of this
# question's sample test case. Feel free to add, edit,
# or remove test cases in this file as you see fit!
import program
import unittest
class TestProgram(unittest.TestCase):
def test_case_1(self):
root = program.BinaryTree(1)
root.left = program.BinaryTree(2)
root.left.left = program.BinaryTree(4)
root.left.left.left = program.BinaryTree(8)
root.left.left.right = program.BinaryTree(9)
root.left.right = program.BinaryTree(5)
root.right = program.BinaryTree(3)
root.right.left = program.BinaryTree(6)
root.right.right = program.BinaryTree(7)
actual = program.nodeDepths(root)
self.assertEqual(actual, 16)
| 780 |
contingent/code/contingent/rendering.py
|
yunkai123/my-500lines-notes
| 0 |
2169677
|
"""与图类型相关的输出例程"""
def as_graphviz(graph):
"""渲染 contingent.Graph 成为 graphviz 代码
要将此例程的输出保存为图像,你可以将文本保存在名为 output.dot 的文件
中,然后运行:
$ dot -Tpng output.dot > output.png
"""
edges = graph.edges()
inputs = set(input for input, consequence in edges)
consequences = set(consequence for input, consequence in edges)
lines = ['digraph {', 'graph [rankdir=LR];']
append = lines.append
def node(task):
return '"{}"'.format(task)
append('node [fontname=Arial shape=rect penwidth=2 color="#DAB21D"')
append(' style=filled fillcolor="#F4E5AD"]')
append('{rank=same')
for task in graph.sorted(inputs - consequences):
append(node(task))
append('}')
append('node [shape=rect penwidth=2 color="#708BA6"')
append(' style=filled fillcolor="#DCE9ED"]')
append('{rank=same')
for task in graph.sorted(consequences - inputs):
append(node(task))
append('}')
append('node [shape=oval penwidth=0 style=filled fillcolor="#E8EED2"')
append(' margin="0.05, 0"]')
for task, consequence in edges:
    append('{} -> {}'.format(node(task), node(consequence)))
append('}')
return '\n'.join(lines)
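# A hedged usage sketch, kept as a comment since it needs a populated contingent.Graph:
#
# dot_source = as_graphviz(graph)
# with open('output.dot', 'w') as f:
#     f.write(dot_source)
# # then, as the docstring notes: dot -Tpng output.dot > output.png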
| 1,220 |