max_stars_repo_path (string, 4-182) | max_stars_repo_name (string, 6-116) | max_stars_count (int64, 0-191k) | id (string, 7) | content (string, 100-10k) | size (int64, 100-10k) |
---|---|---|---|---|---|
lacusCloud_app/lacus_middleware/lacus_node/implementation/registerNode.py
|
tavog96/distribuidosProyecto
| 0 |
2172595
|
from ...lacus_common.infrastructure.configFileController.configFileController import configFileController
from ...lacus_common.infrastructure.networkManagement.restClient import restClientController
def RegisterNode(trackerHostIP):
configFile=configFileController()
configFile.scanConfigFile()
restClient = restClientController(configFile.appTcpPort, trackerHostIP)
nodeInfo = {}
nodeInfo['ip'] = configFile.localHostIP
nodeInfo['uid'] = configFile.localHostUid
result = restClient.postRegisterNewNode(nodeInfo)
    if result != False:
return True
return False
| 602 |
django_dynamic_forms/migrations/0016_auto_20180726_2035.py
|
lalfaro1704/django_dynamic_forms
| 0 |
2171864
|
# Generated by Django 2.0.7 on 2018-07-27 00:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_dynamic_forms', '0015_dynamicattribute_tag_name'),
]
operations = [
migrations.AlterField(
model_name='dynamicattribute',
name='parameters',
field=models.ManyToManyField(blank=True, to='django_dynamic_forms.DynamicParameter', verbose_name='parameters'),
),
migrations.AlterField(
model_name='dynamicform',
name='parameters',
field=models.ManyToManyField(blank=True, to='django_dynamic_forms.DynamicParameter', verbose_name='parameters'),
),
]
| 730 |
mlp.py
|
Creling/DM-GAN
| 0 |
2172815
|
# coding=utf8
'''
Author: Creling
Date: 2021-11-29 23:15:19
LastEditors: Creling
LastEditTime: 2021-11-30 09:31:32
Description: file content
'''
import torch
from tqdm import tqdm
M, input_size, hidden_size, output_size = 32, 1000, 100, 10
learning_rate = 0.001
x = torch.rand((M, input_size))
y = torch.rand((M, output_size))
w1 = torch.rand((input_size, hidden_size), requires_grad=True)
w2 = torch.rand((hidden_size, hidden_size), requires_grad=True)
w3 = torch.rand((hidden_size, output_size), requires_grad=True)
b1 = torch.rand((1, hidden_size), requires_grad=True)
b2 = torch.rand((1, hidden_size), requires_grad=True)
b3 = torch.rand((1, output_size), requires_grad=True)
def model(x, w1, w2, b1, b2, w3, b3):
h1 = x.mm(w1) + b1
h1 = h1.clamp(min=0)
h2 = h1.mm(w2) + b2
h2 = h2.clamp(min=0)
output = h2.mm(w3) + b3
return output
def loss_fn(y_pred, y):
return (y - y_pred).pow(2).sum()
def train(x, w1, w2, b1, b2, w3, b3):
for i in tqdm(range(5000)):
output = model(x, w1, w2, b1, b2, w3, b3)
loss = loss_fn(output, y)
if i % 100 == 0:
print("")
print(loss.item())
loss.backward()
w1.data -= learning_rate * w1.grad.data
w2.data -= learning_rate * w2.grad.data
w3.data -= learning_rate * w3.grad.data
b1.data -= learning_rate * b1.grad.data
b2.data -= learning_rate * b2.grad.data
b3.data -= learning_rate * b3.grad.data
w1.grad.zero_()
w2.grad.zero_()
w3.grad.zero_()
b1.grad.zero_()
b2.grad.zero_()
b3.grad.zero_()
print(w1)
train(x, w1, w2, b1, b2, w3, b3)
print(w1)
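# Added note (not part of the original file): an equivalent training loop using
# torch.optim.SGD, which replaces the manual `w.data -= lr * w.grad.data`
# updates and the explicit grad zeroing above. Assumes the tensors defined above.
def train_with_optimizer(x, w1, w2, b1, b2, w3, b3, steps=5000):
    optimizer = torch.optim.SGD([w1, w2, w3, b1, b2, b3], lr=learning_rate)
    for i in range(steps):
        loss = loss_fn(model(x, w1, w2, b1, b2, w3, b3), y)
        optimizer.zero_grad()   # clear gradients from the previous step
        loss.backward()
        optimizer.step()        # in-place SGD update of all parameters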
| 1,679 |
InspectorMini.roboFontExt/lib/inspectorMini.py
|
mrecord/InspectorMini
| 0 |
2171905
|
# -*- coding: UTF-8 -*-
"""
A basic version of the Inspector.
Glyph | Width | Unicode
"""
#import vanilla
from vanilla import *
from mojo.events import addObserver, removeObserver
from defconAppKit.windows.baseWindow import BaseWindowController
from mojo.UI import OpenGlyphWindow
class inspectorMini(BaseWindowController):
def __init__(self):
# layout
self.windowWidth = 240
self.windowHeight = 80
self.windowHeightMax = self.windowHeight * 3.75
# # # # #
# Window
# # # # #
self.w = FloatingWindow((self.windowWidth, self.windowHeight), "inspectorMini", minSize=(self.windowWidth, self.windowHeight), maxSize=(300, self.windowHeightMax))
self.w.info = List((10, 10, -10, -28),
[],
columnDescriptions=[{"title": "Name", "editable":True}, {"title": "Width", "editable":True}, {"title": "Unicode", "editable":True}],
doubleClickCallback = self.selectGlyph,
)
self.w.clear = Button((10, -24, -10, 20), "clear", callback=self.clear)
# GO!
self.setUpBaseWindowBehavior()
self.w.open()
self.run()
# # # # # # # # #
# FUNCTIONS
# # # # # # # # #
def run(self):
addObserver(self, "setInfo", "currentGlyphChanged")
self.setInfo("hello")
def windowCloseCallback(self, sender):
removeObserver(self, "currentGlyphChanged")
super(inspectorMini, self).windowCloseCallback(sender)
def clear(self, sender):
self.w.info.set([])
self.w.resize(self.w.getPosSize()[2], self.windowHeight)
#self.setInfo(sender)
def uniName(self, sender, uniValue):
return '%s' % (format((uniValue), 'x').zfill(4).upper())
def setInfo(self, sender):
l = self.w.info.get()
if CurrentFont() != None:
if CurrentGlyph() != None:
g = ({"Name": CurrentGlyph().name, "Width": CurrentGlyph().width, "Unicode": ', '.join(map(str, [self.uniName(sender, x) for x in CurrentGlyph().unicodes]))})
if g in l:
l.remove(g)
l.append(g)
else:
for i in CurrentFont().selectedGlyphNames:
uni = [str(self.uniName(sender, x)) for x in CurrentFont()[i].unicodes]
g = ({"Name": i, "Width": CurrentFont()[i].width, "Unicode": ", ".join(uni)})
if g in l:
l.remove(g)
l.append(g)
self.w.info.set(l)
newHeight = self.windowHeight + (len(l)*18)
if newHeight > self.windowHeightMax:
newHeight = self.windowHeightMax
self.w.resize(self.w.getPosSize()[2], newHeight)
self.w.info.setSelection([len(l)-1])
self.w.info.scrollToSelection()
def selectGlyph(self, sender):
g = self.w.info.get()
if self.w.info.getSelection() != []:
gs = self.w.info.getSelection()[0]
if CurrentFont() != None:
OpenGlyphWindow(CurrentFont()[g[gs]["Name"]], newWindow=False)
if __name__ == "__main__":
inspectorMini()
| 2,730 |
pythonAlgorithm/highlevel/Largest Rectangle in Histogram.py
|
Sky-zzt/lintcodePractice
| 1 |
2171892
|
class Solution:
"""
@param height: A list of integer
@return: The area of largest rectangle in the histogram
    Maintain a monotonically increasing stack and push the elements one by one; before pushing, pop every element that is >= the current one.
    Each time a value is popped, we have found the first smaller value to its left (the new stack top) and the first smaller value to its right (the element about to be pushed),
    so (the width between those two positions) * (the popped height) is the largest rectangle whose lowest bar is the popped one.
    Because the width is computed from positions, the stack stores indices rather than values.
    """
    # Find the nearest smaller value on each side; the largest rectangle at position i is heights(i) * ((i - leftNearMinIndex) + (rightNearMin - i))
    # todo compare with Trapping Rain Water
    # todo rewrite this from scratch next time
def largestRectangleArea(self, heights):
        # todo nearest smaller value on each side, via a monotonic stack
        heights.insert(0, 0)
        leftmin, rightmin = self.MonotonousStack(heights + [0])  # todo append a 0 on the right to simplify handling
maxArea = 0
for i in range(len(heights)):
ans = 0
# if rightmin[i]==-1:
# ans+=heights[i]*(len(heights)-i)
# if leftmin[i]==-1:
# ans+=heights[i]*(i+1)
if i == len(heights) - 1:
ans = heights[i] * (rightmin[i] - leftmin[i])
else:
ans = heights[i] * (rightmin[i] - leftmin[i] + 1)
            # ans = heights[i] * ((i - leftmin[i]) + (rightmin[i] - i))  # todo this is just rightmin[i] - leftmin[i]
            maxArea = max(maxArea, ans)
        return maxArea
    # Cannot handle equal heights; to handle equal values, push pairs such as stack.append([3, 3]) instead
def MonotonousStack(self, heights):
stack = []
leftmin = [0] * len(heights)
rightmin = [0] * len(heights)
for i in range(len(heights)):
while len(stack) != 0 and heights[stack[-1]] > heights[i]:
idx = stack.pop()
rightmin[idx] = i
if len(stack) > 0: leftmin[idx] = stack[-1]
stack.append(i)
while len(stack) > 0:
idx = stack.pop()
if len(stack) > 0: leftmin[idx] = stack[-1]
# print(leftmin)
# print(rightmin)
return leftmin, rightmin
    def MonotonousStack1(self, heights):  # todo append a 0 (or another sentinel) to the right of heights to simplify handling
stack = []
heights.append(-1)
leftmin = [-1] * len(heights)
rightmin = [1] * len(heights)
for i in range(len(heights)):
while len(stack) != 0 and heights[stack[-1]] > heights[i]:
idx = stack.pop()
rightmin[idx] = heights[i]
if len(stack) > 0: leftmin[idx] = heights[stack[-1]]
stack.append(i)
print(leftmin)
print(rightmin)
return leftmin, rightmin
s = Solution()
s.MonotonousStack1([1, 2, 4])
a = [1, 2]
a.insert(1, 0)
print(a)
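# Added for reference (not from the original file): a compact version of the
# standard monotonic-stack solution that the docstring above describes, useful
# for checking the practice code.
def largest_rectangle_area(heights):
    stack = []                              # indices of bars, heights increasing
    best = 0
    for i, h in enumerate(heights + [0]):   # appended 0 flushes the stack at the end
        while stack and heights[stack[-1]] >= h:
            height = heights[stack.pop()]
            left = stack[-1] if stack else -1
            best = max(best, height * (i - left - 1))
        stack.append(i)
    return best
print(largest_rectangle_area([2, 1, 5, 6, 2, 3]))  # 10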
| 2,583 |
setup.py
|
kallewesterling/django-licensing
| 3 |
2171170
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-licensing',
version='1.0.2',
packages=['licensing'],
include_package_data=True,
license='Public Domain',
description='A Django model and data for adding licensing info to data.',
long_description=README,
url='http://github.com/editorsnotes/django-licensing',
download_url='http://github.com/editorsnotes/django-licensing/tarball/1.0.2',
author='<NAME>',
author_email='<EMAIL>',
keywords = ['django', 'licenses', 'licences'],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: Public Domain',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| 1,148 |
main.py
|
ege-kaya/booksync
| 0 |
2172983
|
import sys
import threading
from threading import *
import json
import socket
import netifaces as ni
import time
import select
import os
import bisect
import base64
from ebook import open_book
from datetime import datetime, timedelta
PORT = 12345
BUFFER_SIZE = 10240
HOSTNAME = socket.gethostname()
x = ni.gateways()
y = x['default'][2][1]
LOCAL_IP = ni.ifaddresses(y)[ni.AF_INET][0]['addr']
TYPE1_DICT_HEAD = {"type": 1, "name": HOSTNAME, "IP": LOCAL_IP}
TYPE2_DICT = {"type": 2, "name": HOSTNAME, "IP": LOCAL_IP}
TYPE2_JSTR = json.dumps(TYPE2_DICT).encode("utf-8")
ACKS = {}
CHARS = {}
RECEIVED = []
contacts = {}
contact_names = []
responded_stamps = []
READING_SPEED = .1
escape = True
def discover():
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
timestamp = int(time.time())
TYPE1_DICT = TYPE1_DICT_HEAD
TYPE1_DICT["ID"] = timestamp
TYPE1_JSTR = json.dumps(TYPE1_DICT).encode("utf-8")
for i in range(10):
s.sendto(TYPE1_JSTR, ('<broadcast>', PORT))
def print_char(char):
print_cyan(char)
def listen_udp():
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.bind(('', PORT))
result = select.select([s], [], [])
while True:
received = result[0][0].recv(BUFFER_SIZE)
decoded = received.decode("utf-8")
data_json = json.loads(decoded)
if data_json["type"] == 1:
if data_json["ID"] not in responded_stamps \
and data_json["IP"] != LOCAL_IP \
and data_json["name"] not in contact_names:
responded_stamps.append(data_json["ID"])
contacts[data_json["name"]] = data_json["IP"]
contact_names.append(data_json["name"])
destination_ip = data_json["IP"]
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.connect((destination_ip, PORT))
s.sendall(TYPE2_JSTR)
except:
pass
elif data_json["type"] == 4:
char = data_json["body"]
sender_name = data_json["name"]
sender_ip = contacts[sender_name]
timestamp = data_json["timestamp"]
time_to_show = datetime.strptime(data_json["time_to_show"], '%Y-%m-%d %H:%M:%S.%f')
if timestamp not in RECEIVED:
delay = (time_to_show - datetime.now()).total_seconds()
threading.Timer(delay, print_char, [char]).start()
RECEIVED.append(timestamp)
for i in range(10):
send_ack(sender_ip, timestamp)
elif data_json["type"] == 5:
timestamp = data_json["timestamp"]
ACKS[timestamp] = True
def send_ack(recipient_ip, timestamp):
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.sendto(type5_wrapper(timestamp), (recipient_ip, PORT))
def print_red(*message):
print('\033[91m' + " ".join(message) + '\033[0m')
def print_green(*message):
print('\033[92m' + " ".join(message) + '\033[0m')
def print_yellow(*message):
print('\033[93m' + " ".join(message) + '\033[0m')
def print_cyan(*message):
print('\033[96m' + " ".join(message) + '\033[0m', flush=True, end="")
def listen_tcp():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((LOCAL_IP, PORT))
while True:
s.listen()
received = b''
conn, addr = s.accept()
with conn:
while True:
data = conn.recv(BUFFER_SIZE)
received += data
if not data:
break
decoded = received.decode("utf-8")
data_json = json.loads(decoded)
if data_json["type"] == 2:
contacts[data_json["name"]] = data_json["IP"]
contact_names.append(data_json["name"])
elif data_json["type"] == 3:
print_red(data_json["name"] + ": " + data_json["body"])
def type3_wrapper(message):
msg_dict = {"type": 3, "name": HOSTNAME, "body": message}
msg_jstr = json.dumps(msg_dict).encode("utf-8")
return msg_jstr
def type4_wrapper(char, now):
timestamp = now.timestamp()
CHARS[timestamp] = char
time_to_show = now + timedelta(milliseconds=100)
msg_dict = {"type": 4, "name": HOSTNAME, "body": char, "timestamp": timestamp, "time_to_show": time_to_show}
msg_jstr = json.dumps(msg_dict, default=str).encode("utf-8")
return msg_jstr
def write(message, recipient):
global escape
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.connect((contacts["{}".format(recipient)], PORT))
s.sendall(type3_wrapper(message))
except (KeyError, ConnectionRefusedError):
contacts.pop(recipient)
contact_names.remove(recipient)
print_yellow("{} seems to have gone offline. Returning to the main menu.".format(recipient))
escape = False
return
def display_contacts():
if not contacts.keys():
print_yellow("There are no online contacts.")
return
for key in contacts.keys():
print_yellow(key)
def chat(recipient):
global escape
print_yellow("chatting with", recipient)
print_yellow("(type --exit to exit a chat)")
while escape:
msg = input()
if msg == "--exit":
return
else:
write(msg, recipient)
escape = True
def type5_wrapper(timestamp):
msg_dict = {"type": 5, "name": HOSTNAME, "timestamp": timestamp}
msg_jstr = json.dumps(msg_dict).encode("utf-8")
return msg_jstr
def read_book(book, recipient_ip):
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
for char in book:
timestamp = datetime.now()
msg = type4_wrapper(char, timestamp)
for i in range(10):
s.sendto(msg, (recipient_ip, PORT))
delay = (timestamp + timedelta(milliseconds=100) - datetime.now()).total_seconds()
threading.Timer(delay, print_char, [char]).start()
time.sleep(READING_SPEED)
def main_menu():
while True:
try:
print_green("What would you like to do?")
print_green("contacts: see online contacts")
print_green("chat: start a chat with a user")
print_green("quit: exit the program")
print_green("read: read with someone")
inp = input()
if inp == 'contacts':
display_contacts()
elif inp == 'quit':
try:
print_yellow("Goodbye.")
sys.exit()
except KeyError:
print_yellow("Goodbye.")
sys.exit()
elif inp == 'read':
print_green("Who would you like to read with?")
inp = input()
while inp not in contact_names:
print_yellow("Please enter the name of an online user, or type --exit to return to the main menu.")
inp = input()
if inp == '--exit':
break
if inp != '--exit':
recipient_ip = contacts[inp]
print_yellow("Please enter the absolute path of the epub file you would like to read.")
pathinp = input()
book = open_book(pathinp)
read_book(book, recipient_ip)
elif inp == 'chat':
print_green("Who would you like to chat with?")
inp = input()
while inp not in contact_names:
print_yellow("Please enter the name of an online user, or type --exit to return to the main menu.")
inp = input()
if inp == '--exit':
break
if inp != '--exit':
chat(inp)
while inp not in contact_names:
print_yellow("Please enter the name of an online user, or type --exit to return to the main menu.")
inp = input()
if inp == '--exit':
break
if inp != '--exit':
chat(inp)
else:
print_yellow("Invalid input.")
except KeyboardInterrupt:
main_menu()
def main():
listener_daemon = Thread(target=listen_tcp)
listener_daemon.setDaemon(True)
listener_daemon.start()
udp_listener_daemon = Thread(target=listen_udp)
udp_listener_daemon.setDaemon(True)
udp_listener_daemon.start()
discover()
main_menu()
if __name__ == "__main__":
main()
| 9,193 |
test/versioning_test.py
|
YoSTEALTH/Liburing
| 41 |
2171237
|
from liburing import skip_os
def test_skip_os():
assert not skip_os('5.1')
assert skip_os('10-10.0', 'Windows')
assert skip_os('15.3.0', 'Darwin')
assert not skip_os('5.11', 'linux')
assert not skip_os('5.11', 'LINUx')
assert skip_os('5.10004', 'LINUx')
| 280 |
lib/googlecloudsdk/command_lib/essential_contacts/util.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
| 2 |
2173001
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Arg parsing and other utilities for Essential Contacts commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.util.apis import arg_utils
from googlecloudsdk.core import properties
def GetContactName(args):
"""Returns the full contact name from the args or raises an exception."""
c = GetContactId(args)
p = GetParent(args)
return '{}/contacts/{}'.format(p, c)
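# Illustration with hypothetical values (not from the original file): with
# --project=my-project and CONTACT_ID=123456 this returns
# 'projects/my-project/contacts/123456'.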
def GetContactId(args):
_ValidateContact(args.CONTACT_ID)
return args.CONTACT_ID
def _ValidateContact(flag_value):
if not re.match('^[0-9]+$', flag_value):
raise exceptions.InvalidArgumentException('contact', flag_value)
def GetParent(args):
"""Returns the parent resource from args or the active gcloud project."""
if 0 == sum(bool(x) for x in (args.project, args.folder, args.organization)):
    # if neither project, folder, nor organization was specified, default to
    # the current project if available.
args.project = properties.VALUES.core.project.GetOrFail()
parent = None
if args.project:
_ValidateProject(args.project)
parent = 'projects/%s' % args.project
elif args.folder:
_ValidateFolder(args.folder)
parent = 'folders/%s' % args.folder
else:
_ValidateOrganization(args.organization)
parent = 'organizations/%s' % args.organization
return parent
def _ValidateProject(flag_value):
if not re.match('^[a-z0-9-]+$', flag_value):
raise exceptions.InvalidArgumentException('project', flag_value)
def _ValidateFolder(flag_value):
if not re.match('^[0-9]+$', flag_value):
raise exceptions.InvalidArgumentException('folder', flag_value)
def _ValidateOrganization(flag_value):
if not re.match('^[0-9]+$', flag_value):
raise exceptions.InvalidArgumentException('organization', flag_value)
def GetNotificationCategories(args, notification_category_enum_message):
if not args.notification_categories:
return []
return [
arg_utils.ChoiceToEnum(category_choice,
notification_category_enum_message)
for category_choice in args.notification_categories
]
| 2,830 |
multiese2.py
|
obrmmk/multiese-1
| 0 |
2172072
|
import re
import csv
import sys
import pegtree as pg
from naming import type_augmentation
from multiese2_test import test_code
from multiese2_da import multiese_da, encode_text_code
# naming
GRAMMAR = '''
Sentense = { (Block / . )* }
Block = {
{ (!LF .)+ #Code } LF
QUOTE LF
{ (!QUOTE (!LF .)* LF)+ #Doc }
QUOTE LF
#Pair
}
QUOTE = '\\'\\'\\'' _ / '"""' _
LF = '\\r'? '\\n' / !.
'''
parse_as_tree = pg.generate(pg.grammar(GRAMMAR))
# prefix
BEGIN = '([^A-Za-z0-9]|^)'
#END = ('(?![A-Za-z0-9\\[\\{]|$)')
END = ('(?![A-Za-z0-9]|$)')
VARPAT = re.compile(BEGIN+r'([a-z]+)(\d?)'+END)
PREFIX = {
's': ('文字列', ''),
'element': ('[文字列|オブジェクト|]', ''),
'obj': ('[オブジェクト|]', ''),
'alist': ('リスト', ''),
'atuple': ('タプル', ''),
'aset': ('セット', ''),
'adict': ('辞書', ''),
'ty': ('型', '型'),
'fin': ('[ファイル[入力|]|入力[|ストリーム]]', ''),
'fout': ('[ファイル[出力|]|出力[|ストリーム]]', ''),
'iterable': ('[[リスト|タプル|配列]|列|イテラブル|]', ''),
}
def _ta(name, number, prefixdic):
prefix, suffix = prefixdic.get(name, ('', ''))
if prefix == '' and suffix == '':
if name.endswith('func'):
prefix = '関数'
suffix = '関数'
if '|' not in prefix:
prefix = f'[{prefix}|]'
if suffix != '' and '|' not in suffix:
suffix = f'[{suffix}|]'
var = f'{name}{number}'
if suffix == '':
return var, f'{prefix}{var}'
return var, f'[{prefix}{var}|{var}{suffix}]'
def type_augmentation(doc, prefixdic):
names = [_ta(x[1], x[2], prefixdic) for x in VARPAT.findall(doc + ' ')]
doc = re.sub(VARPAT, r'\1@\2\3@', doc + ' ') # @s@
for old, new in names:
if old != new:
doc = doc.replace(f'@{old}@', new)
return doc.replace('@', '').strip()
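# Hypothetical illustration (not part of the original file): the lowercase
# variable `s` is wrapped with its optional Japanese prefix from PREFIX.
# print(type_augmentation('sを整数に変換する', PREFIX))
# expected: '[文字列|]sを整数に変換する'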
def _split(s):
if ';' in s:
return s.split(';')
if ',' in s and '|' not in s:
return [x.strip() for x in s.split(',')]
return s.split('|')
def read_settings(docs, settings):
ss = []
settings['option'] = {}
for line in docs:
if line.startswith('@'):
name, _, argument = line.strip().partition('(')
if argument.endswith(')'):
argument = argument[:-1]
if name == '@alt':
key, _, other = argument.partition('|')
if key in other:
argument = other
argument = argument.replace('_', '')
settings['alt'][key] = f'[{argument}]'
elif name == '@X':
settings['X'] = _split(argument)
elif name == '@Y':
settings['Y'] = _split(argument)
elif name == '@prefix':
t = _split(argument)
if len(t) == 2:
t.append('')
settings['prefix'][t[0]] = tuple(t[1:])
#print('@', settings['prefix'])
else:
settings['option'][name] = argument
else:
if line.count('[') != line.count(']') or line.count('{') != line.count('}'):
print('SyntaxError:', line)
ss.append(line)
return ss
def replace_with_rules(s, altdic, prefixdic):
s = type_augmentation(s, prefixdic)
for old, new in altdic.items():
if old in s:
#print('=>', old, new)
s = s.replace(old, new)
return s
def augment_doc(code, docs, altdic, prefixdic):
docs2 = []
for i, doc in enumerate(docs):
doc = replace_with_rules(doc, altdic, prefixdic)
docs2.append(doc)
return code, docs2
T5PREFIX = 'trans: '
def make_triple(ss, code, docs, settings):
option = settings['option']
altdic = settings['alt']
prefixdic = settings['prefix']
code, docs = augment_doc(code, docs, altdic, prefixdic)
test_with = option.get('@test', '$$')
result = test_code(code, test_with)
for doc in docs:
text = multiese_da(doc)
ss.append((T5PREFIX + doc, code, T5PREFIX + text, test_with, result))
print(encode_text_code(doc, code), result)
def scaleXY(ss, code, docs, settings):
if '__X__' not in code:
make_triple(ss, code, docs, settings)
return
for x, y in zip(settings['X'], settings['Y']):
codeX = code.replace('__X__', x)
docYs = [doc.replace('__Y__', y) for doc in docs]
make_triple(ss, codeX, docYs, settings)
def new_altdic():
return {
'に変換する': 'に[変換|]する',
'に設定する': '[に設定する|に変更する|に[セット|指定]する|にする]',
'に代入する': '[に[代入|]する|とする]',
'が_': '[が|は]',
'で_': '[で|として|を[用いて|使って]]',
'の中の': '[[|の][中|内]の|の]', 'の中に': '[[|の][中|内]に|に]', '中で': '[[の|][中|内]で|で]',
'全ての': '[全ての|すべての|全|]',
'の名前': '[名|の名前]',
'まとめて': '[まとめて|一度に|]',
'一つ': '[ひとつ|一つ]', '二つ': '[ふたつ|二つ]',
'1': '[一|1|1]', '2': '[二|2|2]', '3': '[三|3|3]',
'かどうか': '[か[|どうか][調べる||[確認|判定|テスト]する]|]',
'、': '[、|]',
'求める': '[求める|計算する|算出する]',
'見る': '[見る|確認する|調べる]',
'使う': '[使う|用いる|使用する]',
'得る': '[使う|見る|求める]',
'新たに': '[新しく|新たに|]',
'作る': '[[作る|作成する]|[|新規]生成する|[用意|準備]する]',
'作って': '[[作って|作成して]|[|新規]生成して|[用意|準備]して]',
'プリントする': '[表示する|出力する|プリントする]',
'コピーする': '[コピーする|複製する]'
}
def read_corpus(filename):
ss = []
settings = {'alt': new_altdic(), 'prefix': PREFIX.copy()}
with open(filename) as f:
tree = parse_as_tree(f.read())
for t in tree:
code = str(t[0]).strip()
docs = str(t[1]).splitlines()
docs = read_settings(docs, settings)
scaleXY(ss, code, docs, settings)
return ss
def main():
tuples = []
for file in sys.argv[1:]:
tuples.extend(read_corpus(file))
with open('kogi_trans.tsv', 'w') as f:
f = csv.writer(f, delimiter="\t")
for tuple in tuples:
f.writerow(tuple)
if __name__ == '__main__':
main()
| 5,929 |
app/helpers/cloudflare.py
|
NewShadesDAO/api
| 1 |
2171235
|
import json
import logging
from typing import List, Optional
import aiohttp
from aiohttp import FormData
from fastapi import UploadFile
from app.config import get_settings
logger = logging.getLogger(__name__)
CLOUDFLARE_IMAGES_URL = "https://api.cloudflare.com/client/v4/accounts/%s/images/v1"
async def upload_image_url(image_url, prefix: Optional[str] = "", metadata: Optional[dict] = None):
filename = image_url
async with aiohttp.ClientSession() as session:
async with session.get(image_url) as response:
if not response.ok:
response.raise_for_status()
content_type = response.headers.get("content-type", "")
content = await response.read()
image_data = await upload_content(
content=content, filename=filename, content_type=content_type, prefix=prefix, metadata=metadata
)
return image_data
async def upload_images(files: List[UploadFile], prefix: Optional[str] = "", metadata: Optional[dict] = None):
images = []
for file in files:
content = await file.read()
image_data = await upload_content(
content=content, filename=file.filename, content_type=file.content_type, prefix=prefix, metadata=metadata
)
images.append(image_data)
return images
async def upload_content(
content,
filename: str,
content_type: str,
metadata: Optional[dict] = None,
prefix: Optional[str] = "",
) -> dict:
settings = get_settings()
account_id = settings.cloudflare_account_id
url = CLOUDFLARE_IMAGES_URL % account_id
headers = {"Authorization": f"Bearer {settings.cloudflare_images_api_token}"}
if settings.testing:
if not metadata:
metadata = {"environment": "test"}
else:
metadata["environment"] = "test"
async with aiohttp.ClientSession(headers=headers) as session:
data = FormData()
if prefix:
filename = f"{prefix}.{filename}"
data.add_field("file", value=content, filename=filename, content_type=content_type)
if metadata:
data.add_field("metadata", value=json.dumps(metadata))
async with session.post(url, data=data) as resp:
if not resp.ok:
text = await resp.text()
logger.warning(f"problem storing file {filename}: {resp.status} {text}")
resp.raise_for_status()
json_resp = await resp.json()
result = json_resp["result"]
image_data = {"id": result["id"], "filename": result["filename"], "variants": result["variants"]}
return image_data
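# Hypothetical usage sketch (not part of the original file): the URL and prefix
# are illustrative, and valid Cloudflare credentials are assumed in get_settings().
if __name__ == "__main__":
    import asyncio
    image = asyncio.run(upload_image_url("https://example.com/avatar.png", prefix="avatars"))
    print(image["id"], image["variants"])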
| 2,676 |
codes/Classic_models/KNN.py
|
jswanglp/MyML
| 7 |
2171640
|
# KNN class
import numpy as np
class KNN:
def __init__(self, X_train, y_train, n_neighbors=3, p=2):
"""
parameter: n_neighbors 临近点个数, 最好选奇数
parameter: p 距离度量
"""
if n_neighbors % 2 == 0:
print('n_neighbors 最好为奇数!')
self.n = n_neighbors
self.p = p
self.X_train = X_train
self.y_train = y_train.flatten()
def predict(self, X):
        # take the first n points as the initial candidates
knn_list = []
for i in range(self.n):
dist = np.linalg.norm(X - self.X_train[i], ord=self.p)
knn_list.append((dist, self.y_train[i]))
        # scan the remaining points, keeping only the n nearest
for i in range(self.n, len(self.X_train)):
max_index = knn_list.index(max(knn_list, key=lambda x: x[0]))
dist = np.linalg.norm(X - self.X_train[i], ord=self.p)
if knn_list[max_index][0] > dist:
knn_list[max_index] = (dist, self.y_train[i])
        # predict the class by majority vote (labels are assumed to be -1/+1)
knn = np.array([k[-1] for k in knn_list])
return np.sign(knn.sum()) if knn.sum() != 0 else 1
def score(self, X_test, y_test):
y_test = y_test.flatten()
right_count = 0
for X, y in zip(X_test, y_test):
label = self.predict(X)
if label == y:
right_count += 1
return right_count / X_test.shape[0]
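# Hypothetical usage sketch (not part of the original file): a tiny two-class
# problem with labels in {-1, +1}, matching the np.sign-based vote above.
if __name__ == '__main__':
    X_train = np.array([[0.0, 0.0], [0.1, 0.2], [1.0, 1.0], [0.9, 1.1]])
    y_train = np.array([-1, -1, 1, 1])
    clf = KNN(X_train, y_train, n_neighbors=3)
    print(clf.predict(np.array([0.05, 0.1])))                 # expected -1
    print(clf.score(np.array([[1.0, 0.9]]), np.array([1])))   # expected 1.0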
| 1,375 |
cbv/management/commands/populate_cbv.py
|
classy-python/ccbv
| 24 |
2173027
|
import django
from django.conf import settings
from django.core.management.base import BaseCommand
from cbv.importer.importers import InspectCodeImporter
from cbv.importer.storages import DBStorage
class Command(BaseCommand):
args = ""
help = "Wipes and populates the CBV inspection models."
def handle(self, *args, **options):
module_paths = settings.CBV_SOURCES.keys()
importer = InspectCodeImporter(module_paths=module_paths)
DBStorage().import_project_version(
importer=importer,
project_name="Django",
project_version=django.get_version(),
)
| 632 |
zeva12can/bms12.py
|
sectioncritical/zeva12can
| 0 |
2172298
|
import struct
import time
import can
class BMS12(object):
"""Zeva BMS12 Communications Library.
This class provides an abstraction of the Zeva BMS-12 CAN protocol. An
instance of the class represents one Zeva BMS-12 unit on the bus.
This class provides many methods, but only a few are needed to get updates
from the BMS hardware. Here is a typical example:
::
# initialize a CAN bus
bus = can.interface.Bus(...)
# create a bms12 unit
unit1 = BMS12(1, shuntmv=3800, canbus=bus)
# check if unit is present
if not unit1.probe():
# process error
# update the voltage readings
unit1.update()
# read a cell voltage (in millivolts)
cell1 = unit1.cellmv[1]
# change the shunt voltage
unit1.shuntmv = 3700 # change to 3.7 V
# need to run update in order for new value to take effect
unit1.update() # will also update voltage readings
"""
def __init__(self, unit: int, shuntmv: int=0, canbus=None):
"""Create a new bms12 instance.
:param unit: the unit number (0-15)
:param shuntmv: the initial shunt voltage in millivolts
:param canbus: a :meth:`can.interface.Bus` object from the ``can`` module
        If ``shuntmv`` is 0 or not specified, then shunting is disabled. The
shunt level can be changed at any time using the property
:meth:`shuntmv`.
The ``canbus`` does not need to be specified to create the object, but
it must be set using the property :meth:`canbus` before any operations
can be performed.
"""
self._unit = unit
self._shuntlvl = shuntmv
self._bus = canbus
self._cellmv = [0] * 12
self._temps = [0, 0]
@property
def unit(self) -> int:
"""Get the unit number."""
return self._unit
@property
def shuntmv(self) -> int:
"""Get the current shunt level, in millivolts."""
return self._shuntlvl
@shuntmv.setter
def shuntmv(self, mv: int):
"""Set a new shunt level, in millivolts.
You need to update the BMS unit by using :meth:`send_query()` before
the new setting will take effect.
"""
self._shuntlvl = mv
@property
def canbus(self):
"""Get the current CAN bus object."""
return self._bus
@canbus.setter
def canbus(self, bus):
"""Set a new CAN bus object to use for communication."""
self._bus = bus
@property
def cellmv(self):
"""Get the list of 12 cell voltages, in millivolts.
You can add an index to get the value for a specific cell. For example
``bmsunit.cellmv[2]``, to get the third cell value. Valid indexes are
0-11.
"""
return self._cellmv
@property
def temperature(self):
"""Get the list of two temperatures, in C."""
return self._temps
def send_query(self):
"""Send query packet with shunt level to unit.
Sends the query message to the unit. This also sets the shunt level.
The shunt level should already be set using the `shuntmv` property.
"""
arbid = 300 + (self._unit * 10)
payload = struct.pack(">H", self._shuntlvl)
msg = can.Message(arbitration_id=arbid, is_extended_id=True, data=payload)
self._bus.send(msg)
def get_msgs(self):
"""Return a list of all pending received messsages.
This function is used when it is expected that messages are received on
the CAN bus (such as after a query). It will return a list of all
received can messages. The list may be empty if there are no messages
received.
"""
msgs =[]
while True:
msg = self._bus.recv(timeout=0.1)
if msg:
msgs = msgs + [msg]
else:
break
return msgs
@staticmethod
def unit_from_arbid(arbid):
"""Decode the unit number from an abritration ID.
:param arbid: a CAN arbitration ID used for BMS communication
:returns: the unit number or ``None`` if error
"""
# valid range to get a reasonable unit number
if arbid < 300 or arbid > 454:
return None
arbid -= 300
arbid = int(arbid / 10)
return arbid
@staticmethod
def type_from_arbid(arbid):
"""Decode the message type from the arbitration ID.
:param arbid: a CAN arbitration ID used for BMS communication
:returns: the message type or ``None`` if error
"""
# valid range to get a reasonable unit number
if arbid < 300 or arbid > 454:
return None
return int(arbid % 10)
@staticmethod
def decode_mv(mvbytes):
"""Return 4-tuple of millivolts from 8 byte input.
:param mvbytes: a bytes-like of length 8 from BMS reply message
:returns: 4-tuple with cell voltages as millivolts or ``None`` if error
This function converts the 8-bytes received from a BMS message and
converts it to millivolts. Each 8-byte message represents 4 cells so
4 voltages are returned as a tuple. The units are millivolts (int). If
the input is not a bytes-like of length 8, then ``None`` is returned.
"""
if not isinstance(mvbytes, (bytes, bytearray)):
return None
if len(mvbytes) != 8:
return None
return struct.unpack(">HHHH", mvbytes)
@staticmethod
def decode_temp(tempbytes):
"""Return 2-tuple of temperature from 2 byte input.
:param tempbytes: a bytes-like of length 2 from BMS reply temp message
:returns: 2-tuple with temperature in C or ``None`` if error
This function converts the 2-bytes received from a BMS temperature message
and converts it to temperature in C. Each 2-byte message represents 2
temperature sensors so 2 temperatures are returned as a tuple. If the
input is not a bytes-like of length 2, then ``None`` is returned.
"""
if not isinstance(tempbytes, (bytes, bytearray)):
return None
if len(tempbytes) != 2:
return None
return (int(tempbytes[0] - 40), int(tempbytes[1] - 40))
def decode_msg(self, msg):
"""Decode a message meant for this unit.
:param msg: the message to decode
This function will decode a message for this unit and will update the
object data values (voltages and temperatures) with the new decoded
data.
If the message is not for this unit, then it is ignored.
"""
arbid = msg.arbitration_id
unit = self.unit_from_arbid(msg.arbitration_id)
if unit != self._unit:
return
msgtype = self.type_from_arbid(msg.arbitration_id)
if msgtype == 0:
return
elif msgtype < 4:
if msg.dlc == 8:
mv = self.decode_mv(msg.data)
offset = (msgtype - 1) * 4
for idx in range(4):
self._cellmv[offset + idx] = mv[idx]
elif msgtype == 4:
if msg.dlc == 2:
temps = self.decode_temp(msg.data)
self._temps[0] = temps[0]
self._temps[1] = temps[1]
def probe(self):
"""Determine if unit is present on the CAN bus.
The CAN bus object must already be set with the `canbus` property.
This function sends a query message on the bus to this unit and checks
for an expected response. It returns ``True`` if the unit is present,
otherwise ``False``.
"""
self.send_query()
msgs = self.get_msgs()
return len(msgs) > 0
def update(self):
"""Query the unit on the bus and update values.
Sends a query on the bus for this unit and processes all reply
messages, decoding and storing the data values for cell voltage
and temperature sensors.
"""
self.send_query()
msgs = self.get_msgs()
for msg in msgs:
self.decode_msg(msg)
| 8,240 |
anuga/file_conversion/tests/test_grd2array.py
|
samcom12/anuga_core
| 136 |
2172618
|
from builtins import str
import unittest
import copy
import os
import numpy as num
from anuga.file_conversion.grd2array import grd2array
#Aux for fit_interpolate.fit example
def linear_function(point):
point = num.array(point)
return point[:,0]+3*point[:,1]
#return point[:,1]
def axes2points(x, y):
"""Generate all combinations of grid point coordinates from x and y axes
Args:
* x: x coordinates (array)
* y: y coordinates (array)
Returns:
* P: Nx2 array consisting of coordinates for all
grid points defined by x and y axes. The x coordinate
will vary the fastest to match the way 2D numpy
arrays are laid out by default ('C' order). That way,
the x and y coordinates will match a corresponding
2D array A when flattened (A.flat[:] or A.reshape(-1))
Note:
Example
x = [1, 2, 3]
y = [10, 20]
P = [[1, 10],
[2, 10],
[3, 10],
[1, 20],
[2, 20],
[3, 20]]
"""
import numpy
# Reverse y coordinates to have them start at bottom of array
y = numpy.flipud(y)
# Repeat x coordinates for each y (fastest varying)
X = numpy.kron(numpy.ones(len(y)), x)
# Repeat y coordinates for each x (slowest varying)
Y = numpy.kron(y, numpy.ones(len(x)))
# Check
N = len(X)
assert len(Y) == N
# Create Nx2 array of x and y coordinates
X = numpy.reshape(X, (N, 1))
Y = numpy.reshape(Y, (N, 1))
P = numpy.concatenate((X, Y), axis=1)
# Return
return P
class Test_grd2array(unittest.TestCase):
def test_grd2array_1(self):
""" Format of asc file
ncols 11
nrows 12
xllcorner 240000
yllcorner 7620000
cellsize 6000
NODATA_value -9999
"""
x0 = 0.0
y0 = 0.0
ncols = 11 # Nx
nrows = 12 # Ny
xllcorner = x0
yllcorner = y0
cellsize = 1.0
NODATA_value = -9999
#Create .asc file
        #txt_file = tempfile.mktemp(".asc")
        #from anuga.config import netcdf_float
root = 'test_asc_1'
txt_file = root+'.asc'
datafile = open(txt_file,"w")
datafile.write('ncols '+str(ncols)+"\n")
datafile.write('nrows '+str(nrows)+"\n")
datafile.write('xllcorner '+str(xllcorner)+"\n")
datafile.write('yllcorner '+str(yllcorner)+"\n")
datafile.write('cellsize '+str(cellsize)+"\n")
datafile.write('NODATA_value '+str(NODATA_value)+"\n")
x_ex = num.linspace(xllcorner, xllcorner+(ncols-1)*cellsize, ncols)
y_ex = num.linspace(yllcorner, yllcorner+(nrows-1)*cellsize, nrows)
points = axes2points(x_ex, y_ex)
#print points
#print x.shape, x
#print y.shape, y
datavalues = linear_function(points)
#print datavalues
datavalues = datavalues.reshape(nrows,ncols)
#print datavalues
#print datavalues.shape
for row in datavalues:
#print row
datafile.write(" ".join(str(elem) for elem in row) + "\n")
datafile.close()
#print quantity.vertex_values
#print quantity.centroid_values
x,y,Z = grd2array(txt_file)
#print x
#print y
#print Z
answer = [[ 0., 3., 6., 9., 12., 15., 18., 21., 24., 27., 30., 33.],
[ 1., 4., 7., 10., 13., 16., 19., 22., 25., 28., 31., 34.],
[ 2., 5., 8., 11., 14., 17., 20., 23., 26., 29., 32., 35.],
[ 3., 6., 9., 12., 15., 18., 21., 24., 27., 30., 33., 36.],
[ 4., 7., 10., 13., 16., 19., 22., 25., 28., 31., 34., 37.],
[ 5., 8., 11., 14., 17., 20., 23., 26., 29., 32., 35., 38.],
[ 6., 9., 12., 15., 18., 21., 24., 27., 30., 33., 36., 39.],
[ 7., 10., 13., 16., 19., 22., 25., 28., 31., 34., 37., 40.],
[ 8., 11., 14., 17., 20., 23., 26., 29., 32., 35., 38., 41.],
[ 9., 12., 15., 18., 21., 24., 27., 30., 33., 36., 39., 42.],
[ 10., 13., 16., 19., 22., 25., 28., 31., 34., 37., 40., 43.]]
#print quantity.vertex_values
assert num.allclose(Z, answer)
assert num.allclose(x,x_ex)
assert num.allclose(y,y_ex)
os.remove(root + '.asc')
def test_grd2array_2(self):
""" Format of asc file
ncols 11
nrows 12
xllcorner 240000
yllcorner 7620000
cellsize 6000
NODATA_value -9999
"""
x0 = 240000.0
y0 = 7620000.0
ncols = 11 # Nx
nrows = 12 # Ny
xllcorner = x0
yllcorner = y0
cellsize = 6000.0
NODATA_value = -9999
#Create .asc file
        #txt_file = tempfile.mktemp(".asc")
        #from anuga.config import netcdf_float
root = 'test_asc_2'
txt_file = root+'.asc'
datafile = open(txt_file,"w")
datafile.write('ncols '+str(ncols)+"\n")
datafile.write('nrows '+str(nrows)+"\n")
datafile.write('xllcorner '+str(xllcorner)+"\n")
datafile.write('yllcorner '+str(yllcorner)+"\n")
datafile.write('cellsize '+str(cellsize)+"\n")
datafile.write('NODATA_value '+str(NODATA_value)+"\n")
x_ex = num.linspace(xllcorner, xllcorner+(ncols-1)*cellsize, ncols)
y_ex = num.linspace(yllcorner, yllcorner+(nrows-1)*cellsize, nrows)
points = axes2points(x_ex, y_ex)
#print points
#print x_ex.shape, x_ex
#print y_ex.shape, y_ex
datavalues = linear_function(points)
#print datavalues
datavalues = datavalues.reshape(nrows,ncols)
#print datavalues
#print datavalues.shape
for row in datavalues:
#print row
datafile.write(" ".join(str(elem) for elem in row) + "\n")
datafile.close()
#print quantity.vertex_values
#print quantity.centroid_values
x,y,Z = grd2array(txt_file)
#print x
#print y
#print Z
answer = [[ 23100000., 23118000., 23136000., 23154000., 23172000., 23190000.,
23208000., 23226000., 23244000., 23262000., 23280000., 23298000.],
[ 23106000., 23124000., 23142000., 23160000., 23178000., 23196000.,
23214000., 23232000., 23250000., 23268000., 23286000., 23304000.],
[ 23112000., 23130000., 23148000., 23166000., 23184000., 23202000.,
23220000., 23238000., 23256000., 23274000., 23292000., 23310000.],
[ 23118000., 23136000., 23154000., 23172000., 23190000., 23208000.,
23226000., 23244000., 23262000., 23280000., 23298000., 23316000.],
[ 23124000., 23142000., 23160000., 23178000., 23196000., 23214000.,
23232000., 23250000., 23268000., 23286000., 23304000., 23322000.],
[ 23130000., 23148000., 23166000., 23184000., 23202000., 23220000.,
23238000., 23256000., 23274000., 23292000., 23310000., 23328000.],
[ 23136000., 23154000., 23172000., 23190000., 23208000., 23226000.,
23244000., 23262000., 23280000., 23298000., 23316000., 23334000.],
[ 23142000., 23160000., 23178000., 23196000., 23214000., 23232000.,
23250000., 23268000., 23286000., 23304000., 23322000., 23340000.],
[ 23148000., 23166000., 23184000., 23202000., 23220000., 23238000.,
23256000., 23274000., 23292000., 23310000., 23328000., 23346000.],
[ 23154000., 23172000., 23190000., 23208000., 23226000., 23244000.,
23262000., 23280000., 23298000., 23316000., 23334000., 23352000.],
[ 23160000., 23178000., 23196000., 23214000., 23232000., 23250000.,
23268000., 23286000., 23304000., 23322000., 23340000., 23358000.]]
#print quantity.vertex_values
assert num.allclose(Z, answer)
assert num.allclose(x,x_ex)
assert num.allclose(y,y_ex)
os.remove(root + '.asc')
#################################################################################
if __name__ == "__main__":
suite = unittest.makeSuite(Test_grd2array, 'test')
runner = unittest.TextTestRunner(verbosity=1)
runner.run(suite)
| 8,914 |
examples/run_back_health_posture.py
|
YasheshSavani/sense
| 0 |
2172285
|
#!/usr/bin/env python
"""
Run a back-health posture detector that notifies the user every 10 seconds to straighten their back if needed.
Usage:
run_back_health_posture.py --custom_classifier=PATH
[--camera_id=CAMERA_ID]
[--path_in=FILENAME]
[--path_out=FILENAME]
[--title=TITLE]
[--use_gpu]
run_back_health_posture.py (-h | --help)
Options:
--custom_classifier=PATH Path to the custom classifier to use
--path_in=FILENAME Video file to stream from
--path_out=FILENAME Video file to stream to
--title=TITLE This adds a title to the window display
"""
import json
import os
import time
from docopt import docopt
import numpy as np
import torch
import sense.display
from sense.downstream_tasks.nn_utils import LogisticRegression
from sense.downstream_tasks.nn_utils import Pipe
from sense.downstream_tasks.postprocess import PostprocessClassificationOutput
from sense.loading import build_backbone_network
from sense.loading import load_backbone_model_from_config
from sense.loading import update_backbone_weights
from sense.controller import Controller
global_timer = time.perf_counter()
class MyBackHealthController(Controller):
def display_prediction(self, img: np.ndarray, prediction_postprocessed: dict):
super().display_prediction(img, prediction_postprocessed)
global global_timer
local_timer = time.perf_counter()
        print(local_timer, global_timer, global_timer + 10)
if local_timer > global_timer + 10:
prediction, prob = prediction_postprocessed['sorted_predictions'][0]
print(prediction, prob)
if 'unhealthy' in prediction:
os.system("notify-send 'Warning!' 'Time to straighten your back!' -t 5000")
os.system("zenity --error --text='Time to straighten your back!' --title='Warning!'")
global_timer = time.perf_counter()
if __name__ == "__main__":
# Parse arguments
args = docopt(__doc__)
camera_id = int(args['--camera_id'] or 0)
path_in = args['--path_in'] or None
path_out = args['--path_out'] or None
custom_classifier = args['--custom_classifier'] or None
title = args['--title'] or None
use_gpu = args['--use_gpu']
# Load backbone network according to config file
backbone_model_config, backbone_weights = load_backbone_model_from_config(custom_classifier)
# Load custom classifier
checkpoint_classifier = torch.load(os.path.join(custom_classifier, 'best_classifier.checkpoint'))
# Update original weights in case some intermediate layers have been finetuned
update_backbone_weights(backbone_weights, checkpoint_classifier)
# Create backbone network
backbone_network = build_backbone_network(backbone_model_config, backbone_weights)
with open(os.path.join(custom_classifier, 'label2int.json')) as file:
class2int = json.load(file)
INT2LAB = {value: key for key, value in class2int.items()}
gesture_classifier = LogisticRegression(num_in=backbone_network.feature_dim,
num_out=len(INT2LAB))
gesture_classifier.load_state_dict(checkpoint_classifier)
gesture_classifier.eval()
# Concatenate feature extractor and met converter
net = Pipe(backbone_network, gesture_classifier)
postprocessor = [
PostprocessClassificationOutput(INT2LAB, smoothing=4)
]
display_ops = [
sense.display.DisplayFPS(expected_camera_fps=net.fps,
expected_inference_fps=net.fps / net.step_size),
sense.display.DisplayTopKClassificationOutputs(top_k=1, threshold=0.1),
]
display_results = sense.display.DisplayResults(title=title, display_ops=display_ops)
# Run live inference
controller = MyBackHealthController(
neural_network=net,
post_processors=postprocessor,
results_display=display_results,
callbacks=[],
camera_id=camera_id,
path_in=path_in,
path_out=path_out,
use_gpu=use_gpu
)
controller.run_inference()
| 4,214 |
testes e exercícios/exercicios/script_076.py
|
LightSnow17/exercicios-Python
| 0 |
2167408
|
lista = ('Pão', 1.50, 'Lápis', 2.10, 'Carne', 8.30, 'Lapiseira', 7.80)
preço = aux1 = aux2 = 0
produto = ''
print('-' * 30)
print('Listagem de preço')
print('-' * 30)
for pos in range(1, len(lista), 2):
preço = lista[pos]
for pos2 in range(aux1, 1):
produto = lista[pos2+aux2]
aux2 += 2
print(f'{produto}......R$ {preço}')
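# Added sketch (not part of the original exercise): the same listing produced by
# pairing products and prices with slicing and zip.
for produto, preço in zip(lista[::2], lista[1::2]):
    print(f'{produto}......R$ {preço}')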
| 350 |
parse.py
|
Darlight/AFN_AFD
| 0 |
2172639
|
"""
Universidad del Valle de Guatemala
CC----
thompson.py
Purpose: ordering and consumption of the input tokens
"""
from lexer import Lexer
from token import Token
class Parser:
def __init__(self, pattern):
self.lexer = Lexer(pattern)
self.tokens = []
self.next_token = self.lexer.get_token()
def parse(self):
self.exp()
return self.tokens
def consume(self, name):
if self.next_token.name == name:
self.next_token = self.lexer.get_token()
elif self.next_token.name != name:
pass
    # precedence order: parentheses have priority #1
def primary(self):
if self.next_token.name == 'LEFT':
self.consume('LEFT')
self.exp()
self.consume('RIGHT')
elif self.next_token.name == 'CHAR':
self.tokens.append(self.next_token)
self.consume('CHAR')
    # This lets the parser read the expression starting from the first parenthesis
    # moreover,
def term(self):
self.factor()
if self.next_token.value not in ')|':
self.term()
self.tokens.append(Token('CONCAT', '\x08'))
def exp(self):
self.term()
if self.next_token.name == 'ALT':
t = self.next_token # t = token
self.consume('ALT')
self.exp()
self.tokens.append(t)
def factor(self):
self.primary()
        # The subdivisions for each NFA (AFN) are classified from here
if self.next_token.name in ['STAR', 'PLUS', 'QMARK']:
self.tokens.append(self.next_token)
self.consume(self.next_token.name)
| 1,671 |
stream/forms.py
|
Gavin-Kariuki/HyperStream
| 0 |
2170807
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.forms import fields
from stream.models import NewPost, Profile
class RegistrationForm(UserCreationForm):
barua = forms.EmailField()
class Meta:
model = User
        fields = ['username','first_name','email','password'] # primary attributes for the user are username, password, email, first_name and last_name
class UpdateUserForm(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username','email']
class UpdateProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['photo', 'bio']
class PostForm(forms.Form):
image = forms.ImageField()
image_name = forms.CharField()
image_caption = forms.CharField(widget=forms.Textarea())
class Meta:
model = NewPost
fields = ['image']
class CommentForm(forms.Form):
body = forms.CharField(widget = forms.TextInput(attrs = {"class": "form-control", "placeholder": "Add a comment"}))
| 1,092 |
backend/project/conversations/admin.py
|
winoutt/winoutt-django
| 0 |
2170358
|
# Models Imports
from .models import Chat, Message, LastMessage, ChatArchive
# Utility Imports
from django.contrib import admin
admin.site.register(Chat)
admin.site.register(Message)
admin.site.register(LastMessage)
admin.site.register(ChatArchive)
| 261 |
pyart/tests/base.py
|
Youddha/pyart
| 3 |
2171386
|
from __future__ import absolute_import
from unittest import TestCase as BaseTestCase
class TestCase(BaseTestCase):
pass
| 128 |
gosling/examples/between_link_pandas.py
|
gosling-lang/gos
| 32 |
2172586
|
"""
Between Links Using Pandas
==========================
"""
# category: skip
import gosling as gos
import pandas as pd
"""
Data Transform Using Pandas
"""
df = pd.read_csv(
"https://raw.githubusercontent.com/vigsterkr/circos/master/data/5/segdup.txt",
sep=" ",
header=0,
names=["id", "chr", "start", "end"]
)
# Use chromosome names that are interpretable in gos
df.chr = df.chr.apply(lambda x: x.replace("hs", "chr"))
# Select ids that occur exact two times
df = df[df.groupby("id")["id"].transform("size") == 2]
# Long to wide (i.e., "chr, start, end" --> "first_chr, first_start, first_end, second_chr, second_start, second_end")
df["cumcnt"] = df.groupby("id").cumcount()
df = pd.DataFrame(df.pivot(index="id", columns="cumcnt")[["chr", "start", "end"]].to_records())
df = df.rename(columns={
"('chr', 0)": "first_chr",
"('chr', 1)": "second_chr",
"('start', 0)": "first_start",
"('start', 1)": "second_start",
"('end', 0)": "first_end",
"('end', 1)": "second_end"
})
df_bg = df[(df.first_chr == 'chr1') | (df.second_chr == 'chr1')]
df_hl = df[(df.first_chr != 'chr1') & (df.second_chr != 'chr1')]
column_info = [
{"chromosomeField": "first_chr", "genomicFields": ["first_start", "first_end"]},
{"chromosomeField": "second_chr", "genomicFields": ["second_start", "second_end"]}
]
data_bg = df_bg.gos.csv(genomicFieldsToConvert=column_info)
data_hl = df_hl.gos.csv(genomicFieldsToConvert=column_info)
"""
Encoding
"""
def set_encoding(track):
return track.mark_withinLink().encode(
x=gos.Channel("first_start:G"),
xe=gos.Channel("second_end:G"),
opacity=gos.value(0.2)
)
gos.overlay(
set_encoding(gos.Track(data_bg)).encode(stroke=gos.value("lightgray")),
set_encoding(gos.Track(data_hl)).encode(stroke=gos.Channel("second_chr:N"))
).properties(
width=600,
height=200
)
| 1,886 |
tests/test_aeval.py
|
chris-chambers/aeval
| 0 |
2172891
|
from inspect import trace
from textwrap import dedent
import sys
import pytest
from aeval import __version__, aeval
def test_version():
assert __version__ == '0.1.1'
@pytest.mark.asyncio
async def test_simple_value():
scope = dict(items=[])
value = await aeval(dedent('''
10
'''), scope, None)
assert value == 10
@pytest.mark.asyncio
async def test_sync_for():
scope = dict(items=[])
await aeval(dedent('''
for i in range(3):
items.append(i)
'''), scope, None)
assert scope['items'] == [0, 1, 2]
@pytest.mark.asyncio
async def test_async_def():
scope = dict()
await aeval(dedent('''
async def foo():
await sleep(1)
'''), scope, None)
assert callable(scope['foo'])
@pytest.mark.asyncio
async def test_simple_await():
scope = dict()
await aeval(dedent('''
import asyncio
await asyncio.sleep(0)
'''), scope, None)
@pytest.mark.asyncio
async def test_async_for():
items = []
async def gen():
import asyncio
for i in range(3):
await asyncio.sleep(0)
yield i
scope = dict(items=items, gen=gen)
await aeval(dedent('''
async for i in gen():
items.append(i)
'''), scope, None)
assert items == [0, 1, 2]
@pytest.mark.asyncio
async def test_async_with():
import contextlib
@contextlib.asynccontextmanager
async def mgr():
import asyncio
await asyncio.sleep(0)
yield 7
scope = dict(mgr=mgr)
await aeval(dedent('''
async with mgr() as num:
x = num
'''), scope, None)
assert scope['x'] == 7
@pytest.mark.asyncio
async def test_await_in_for():
async def foo():
return 3
scope = dict(foo=foo)
await aeval(dedent('''
for _ in range(1):
x = await foo()
'''), scope, None)
assert scope['x'] == 3
@pytest.mark.asyncio
async def test_await_in_with():
import contextlib
@contextlib.contextmanager
def foo():
yield 1
scope = dict(foo=foo)
await aeval(dedent('''
with foo() as f:
import asyncio
await asyncio.sleep(0)
x = f
'''), scope, None)
assert scope['x'] == 1
@pytest.mark.asyncio
async def test_del():
scope = dict(x=1)
await aeval(dedent('''
del x
'''), scope, None)
assert 'x' not in scope
@pytest.mark.asyncio
async def test_exposed_annotated_name():
scope = dict()
await aeval(dedent('''
foo: int
foo = 7
'''), scope, None)
assert scope['foo'] == 7
@pytest.mark.asyncio
async def test_exposed_annotated_assign():
scope = dict()
await aeval(dedent('''
foo: int = 10
'''), scope, None)
assert scope['foo'] == 10
@pytest.mark.asyncio
async def test_exposed_aug_assign():
scope = dict(foo=1)
await aeval(dedent('''
foo += 10
'''), scope, None)
assert scope['foo'] == 11
@pytest.mark.asyncio
async def test_exposed_class():
scope = dict()
await aeval(dedent('''
class Foo():
...
'''), scope, None)
assert isinstance(scope['Foo'], type)
@pytest.mark.asyncio
async def test_unexposed_class_annotated_assign():
scope = dict()
await aeval(dedent('''
class Foo():
x: int
y: str = 'abc'
...
'''), scope, None)
assert scope['Foo'].__annotations__['x'] is int
assert scope['Foo'].__annotations__['y'] is str
assert scope['Foo'].y == 'abc'
@pytest.mark.asyncio
async def test_raise():
with pytest.raises(Exception):
await aeval(dedent('''
raise Exception('ha')
'''), dict(), None)
| 3,640 |
mutual_funds_dashboard/dashboard.py
|
ace-racer/shared-apps
| 0 |
2172482
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from typing import List
from datetime import datetime, timedelta
import streamlit as st
import joblib
import src.utils as utils
from src.india_mf_nav_obtainer import IndiaMFNavObtainer
india_mf_nav_obtainer = IndiaMFNavObtainer()
st.title("Indian Mutual funds dashboard")
# Get the name of the mutual fund from the user
st.subheader("Mutual fund details")
fund_name = st.text_input('Mutual fund name')
# Show the results sorted by score
funds_df = india_mf_nav_obtainer.fuzzy_search_mf_by_name(fund_name)
st.dataframe(funds_df)
# Get Id of the mutual fund to find
fund_id = st.text_input('Scheme code for the fund (from above table)')
fund_df = india_mf_nav_obtainer.get_historical_nav_for_mf(fund_id)
# Show the NAV values since inception
if fund_df is not None:
st.subheader('NAV values')
fund_df_transformed = utils.transform_mutual_fund_df(fund_df)
st.line_chart(fund_df_transformed['nav'])
# Returns for 1, 3 and 5 years
one_year_return = utils.get_annualized_returns_for_fund(fund_df, 1)
three_year_return = utils.get_annualized_returns_for_fund(fund_df, 3)
five_year_return = utils.get_annualized_returns_for_fund(fund_df, 5)
st.text(f'Annualized 1 year return: {one_year_return}%. 3 year return: {three_year_return}% and 5 year return {five_year_return}%.')
# Metrics - variance, SD, Min, max, average and median NAV values
metrics = utils.get_nav_metrics(fund_df, 3)
print(metrics)
| 1,543 |
tests/basic_engine_test.py
|
TinkerEdgeT/mendel-edgetpu
| 0 |
2172906
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import unittest
from . import test_utils
from edgetpu.basic import edgetpu_utils
from edgetpu.basic.basic_engine import BasicEngine
class TestBasicEnginePythonAPI(unittest.TestCase):
def testDebugInfo(self):
engine = BasicEngine(
test_utils.TestDataPath('mobilenet_v1_1.0_224_quant.tflite'))
# Check model's input format.
input_tensor_shape = engine.get_input_tensor_shape()
self.assertListEqual([1, 224, 224, 3], input_tensor_shape.tolist())
self.assertEqual(224 * 224 * 3, engine.required_input_array_size())
# Check model's output.
output_tensors_sizes = engine.get_all_output_tensors_sizes()
self.assertListEqual([1001], output_tensors_sizes.tolist())
self.assertEqual(1, engine.get_num_of_output_tensors())
self.assertEqual(1001, engine.get_output_tensor_size(0))
self.assertEqual(1001, engine.total_output_array_size())
# Check SSD model.
ssd_engine = BasicEngine(
test_utils.TestDataPath(
'mobilenet_ssd_v1_coco_quant_postprocess.tflite'))
# Check model's input format.
input_tensor_shape = ssd_engine.get_input_tensor_shape()
self.assertListEqual([1, 300, 300, 3], input_tensor_shape.tolist())
self.assertEqual(300 * 300 * 3, ssd_engine.required_input_array_size())
# Check model's output.
output_tensors_sizes = ssd_engine.get_all_output_tensors_sizes()
self.assertListEqual([80, 20, 20, 1], output_tensors_sizes.tolist())
self.assertEqual(4, ssd_engine.get_num_of_output_tensors())
self.assertEqual(80, ssd_engine.get_output_tensor_size(0))
self.assertEqual(20, ssd_engine.get_output_tensor_size(1))
self.assertEqual(20, ssd_engine.get_output_tensor_size(2))
self.assertEqual(1, ssd_engine.get_output_tensor_size(3))
self.assertEqual(121, ssd_engine.total_output_array_size())
def testRunInference(self):
for model in test_utils.GetModelList():
print('Testing model :', model)
engine = BasicEngine(test_utils.TestDataPath(model))
input_data = test_utils.GenerateRandomInput(
1, engine.required_input_array_size())
latency, ret = engine.RunInference(input_data)
self.assertEqual(ret.size, engine.total_output_array_size())
# Check debugging functions.
self.assertLess(math.fabs(engine.get_inference_time() - latency), 0.001)
raw_output = engine.get_raw_output()
self.assertEqual(ret.size, raw_output.size)
for i in range(ret.size):
if math.isnan(ret[i]) and math.isnan(raw_output[i]):
continue
self.assertLess(math.fabs(ret[i] - raw_output[i]), 0.001)
def testDevicePath(self):
all_edgetpu_paths = edgetpu_utils.ListEdgeTpuPaths(
edgetpu_utils.EDGE_TPU_STATE_NONE)
engine = BasicEngine(
test_utils.TestDataPath('mobilenet_v1_1.0_224_quant.tflite'),
all_edgetpu_paths[0])
self.assertEqual(engine.device_path(), all_edgetpu_paths[0])
if __name__ == '__main__':
unittest.main()
| 3,558 |
baseline_svr/baseline_svr.py
|
cliffrwong/quality_estimation
| 9 |
2171720
|
import pickle
import math
import numpy as np
import optunity
import optunity.metrics
import sklearn.svm
from sklearn.externals import joblib
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error
from scipy.stats import pearsonr
optimal_model_file = "optimal.pkl"
data_dir = '/Users/cliff/Documents/data/qe/'
train_features_file = 'task1_en-de_training.baseline17.features'
train_target_file = 'train.hter'
dev_features_file = 'task1_en-de_dev.baseline17_dev.features'
dev_target_file = 'dev.hter'
test_features_file = 'task1_en-de_test.baseline17.features'
test_target_file = 'test.hter'
train_features = np.loadtxt(data_dir+train_features_file, dtype=np.float32)
train_target = np.loadtxt(data_dir+train_target_file, dtype=np.float32)
dev_features = np.loadtxt(data_dir+dev_features_file, dtype=np.float32)
dev_target = np.loadtxt(data_dir+dev_target_file, dtype=np.float32)
features = np.vstack((train_features, dev_features))
target = np.hstack((train_target, dev_target))
# score function: twice iterated 10-fold cross-validated accuracy
@optunity.cross_validated(x=features, y=target, num_folds=10)
def svr_mse(x_train, y_train, x_test, y_test, logC, logGamma, logEpsilon):
model = sklearn.svm.SVR(C=10 ** logC, gamma=10 ** logGamma,
epsilon=10 ** logEpsilon).fit(x_train, y_train)
decision_values = model.predict(x_test)
return optunity.metrics.mse(y_test, decision_values)
# Model selection/Hyperparameter Optimization
def model_selection():
    num_particles = 30
    num_generations = 10
    hpConstraints = {'logC': [-4, 3], 'logGamma': [-6, 0], 'logEpsilon': [-2, 1]}
    # Box constraints are passed to the solver as keyword arguments.
    solver = optunity.solvers.ParticleSwarm.ParticleSwarm(num_particles,
                                                          num_generations,
                                                          max_speed=None,
                                                          phi1=2.0,
                                                          phi2=2.0,
                                                          **hpConstraints)
optimal_pars, _, _ = optunity.optimize(solver, svr_mse,
maximize = False, max_evals=300)
print(optimal_pars)
optimal_model = sklearn.svm.SVR(C=10 ** optimal_pars['logC'],
gamma=10 ** optimal_pars['logGamma'],
epsilon=10 ** optimal_pars['logEpsilon']
).fit(features, target)
joblib.dump(optimal_model, optimal_model_file)
# Get scores for test file.
# Should be Pearson’s r = 0.3510, MAE = 0.1353, RMSE = 0.1839
def test(features_file, target_file):
clf = joblib.load(optimal_model_file)
features = np.loadtxt(features_file, dtype=np.float32)
target = np.loadtxt(target_file, dtype=np.float32)
prediction = clf.predict(features)
print('Pearson\'s r:', pearsonr(target, prediction))
print('RMSE:', math.sqrt(mean_squared_error(target, prediction)))
print('MAE:', mean_absolute_error(target, prediction))
def main():
# Cross validation to optimize hyperparameters
model_selection()
# Score model on test set
# test(data_dir+test_features_file, data_dir+test_target_file)
if __name__ == "__main__":
main()
| 3,356 |
apps/mds_auth/permissions.py
|
schocco/mds-web
| 0 |
2172847
|
from django.contrib.auth.models import User, Permission, Group
from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import post_save
DEFAULT_GROUP_NAME = "default_user"
def get_or_create_default_group():
'''
Returns the default user group. Creates a new group with permissions
    for the UDHscale, UXCscale and Trail models if no such group exists.
'''
group, created = Group.objects.get_or_create(name=DEFAULT_GROUP_NAME)
if(created):
trail_ct = ContentType.objects.get(app_label="trails", model="trail")
udh_ct = ContentType.objects.get(app_label="muni_scales", model="udhscale")
uxc_ct = ContentType.objects.get(app_label="muni_scales", model="uxcscale")
udh_perms = Permission.objects.filter(content_type=udh_ct)
uxc_perms = Permission.objects.filter(content_type=uxc_ct)
trail_perms = Permission.objects.filter(content_type=trail_ct)
group.permissions.add(*udh_perms)
group.permissions.add(*uxc_perms)
group.permissions.add(*trail_perms)
group.save()
return group
def set_permissions(sender, **kw):
    if kw.pop("created", False):
        user = kw["instance"]
        default_group = get_or_create_default_group()
        user.groups.add(default_group)
        user.save()
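# Hedged wiring sketch (assumption, not present in this module): the post_save
# import above suggests this handler is connected to User creation elsewhere,
# typically along these lines:
#     post_save.connect(set_permissions, sender=User, dispatch_uid="mds_auth_default_group")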
| 1,356 |
setup.py
|
slaclab/pystand
| 0 |
2171262
|
import versioneer
from setuptools import (setup, find_packages)
setup(name= 'detrot',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author='SLAC National Accelerator Laboratory',
packages=find_packages(),
description='Python framework for manipulating the CXI detector stands',
classifiers=[
'Programming Language :: Python :: 3.4',
'Topic :: Robotics, EPICS'
],
)
| 458 |
heap/glibc/_2_12/__init__.py
|
dsanders11/gdb-heapanalyzer
| 1 |
2172919
|
""""glibc 2.12 specific heap implementation"""
from .. import BaseGlibcHeapAnalyzer
class GlibcHeapAnalyzer(BaseGlibcHeapAnalyzer):
def get_heap_description(self):
return "GNU libc 2.12 Heap Implementation"
| 221 |
scripts/disdownload.py
|
Bensmuth/DisWeb
| 0 |
2171843
|
import requests
import os
def urlspliter(index):  # removes the leading slash and creates the first-level directory; TODO: support deeper directory levels
print("splitter")
slash = 0
index = index[1:]
for y in range(0, len(index)):
if index[y] == "/":
slash = y
if not os.path.exists("scripts/WebServer/" + index[:slash]):
os.makedirs("scripts/WebServer/" + index[:slash])
return index
def pagegrab(host, index, x):
print("pagegrab")
url = ("http://" + str(host[x]) + "/" + str(index))
r = requests.get(url, allow_redirects=True)
open('scripts/WebServer/' + index, 'wb').write(r.content)
print("A Host was found at: " + url)
def hostgrab(host, x): ##get hosts file
print("hostgrab")
url = ("http://" + str(host[x]) + "/hosts.txt")
h = requests.get(url, allow_redirects=True)
open('files/hosts.txt', 'wb').write(h.content)
def download(index):
    hosts = open("files/hosts.txt", 'r')
    host = hosts.read().splitlines()  # one host per line; splitlines() drops the trailing newlines
    index = urlspliter(index)
    for x in range(len(host)):
        try:
            pagegrab(host, index, x)
            hostgrab(host, x)
        except Exception as e:
            print(e)
    hosts.close()
| 1,386 |
content_sync/api.py
|
mitodl/ocw-studio
| 2 |
2172710
|
""" Syncing API """
import logging
from typing import List, Optional
from django.conf import settings
from django.utils.module_loading import import_string
from content_sync import tasks
from content_sync.backends.base import BaseSyncBackend
from content_sync.decorators import is_publish_pipeline_enabled, is_sync_enabled
from content_sync.models import ContentSyncState
from content_sync.pipelines.base import BaseSyncPipeline
from websites.models import Website, WebsiteContent
log = logging.getLogger()
def upsert_content_sync_state(content: WebsiteContent):
""" Create or update the content sync state """
ContentSyncState.objects.update_or_create(
content=content, defaults=dict(current_checksum=content.calculate_checksum())
)
def get_sync_backend(website: Website) -> BaseSyncBackend:
""" Get the configured sync backend """
return import_string(settings.CONTENT_SYNC_BACKEND)(website)
def get_sync_pipeline(website: Website) -> BaseSyncPipeline:
""" Get the configured sync publishing pipeline """
return import_string(settings.CONTENT_SYNC_PIPELINE)(website)
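# Hedged note (assumption; the project settings are not shown here):
# CONTENT_SYNC_BACKEND and CONTENT_SYNC_PIPELINE are dotted import paths resolved
# via Django's import_string, e.g. "some_app.backends.SomeBackend" (hypothetical value).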
@is_sync_enabled
def sync_content(sync_state: ContentSyncState):
""" Sync a piece of content based on its sync state """
backend = get_sync_backend(sync_state.content.website)
backend.sync_content_to_backend(sync_state)
@is_sync_enabled
def create_website_backend(website: Website):
""" Create the backend for a website"""
tasks.create_website_backend.delay(website.name)
@is_publish_pipeline_enabled
def create_website_publishing_pipeline(website: Website):
""" Create the publish pipeline for a website"""
tasks.upsert_website_publishing_pipeline.delay(website.name)
@is_publish_pipeline_enabled
def unpause_publishing_pipeline(website: Website, version: str):
"""Unpause the publishing pipeline"""
pipeline = get_sync_pipeline(website)
pipeline.unpause_pipeline(version)
@is_sync_enabled
def update_website_backend(website: Website):
""" Update the backend content for a website"""
tasks.sync_website_content.delay(website.name)
@is_sync_enabled
def preview_website(website: Website):
""" Create a preview for the website on the backend"""
tasks.preview_website_backend.delay(website.name, website.draft_publish_date)
@is_sync_enabled
def publish_website(website: Website):
""" Publish the website on the backend"""
tasks.publish_website_backend.delay(website.name, website.publish_date)
def sync_github_website_starters(
url: str, files: List[str], commit: Optional[str] = None
):
""" Sync website starters from github """
tasks.sync_github_site_configs.delay(url, files, commit=commit)
| 2,698 |
openpype/hosts/maya/plugins/publish/validate_muster_connection.py
|
Tilix4/OpenPype
| 1 |
2172578
|
import os
import json
import appdirs
import pyblish.api
from openpype.lib import requests_get
from openpype.plugin import contextplugin_should_run
import openpype.api
import openpype.hosts.maya.api.action
class ValidateMusterConnection(pyblish.api.ContextPlugin):
"""
Validate Muster REST API Service is running and we have valid auth token
"""
label = "Validate Muster REST API Service"
order = pyblish.api.ValidatorOrder
hosts = ["maya"]
families = ["renderlayer"]
token = None
if not os.environ.get("MUSTER_REST_URL"):
active = False
actions = [openpype.api.RepairAction]
def process(self, context):
# Workaround bug pyblish-base#250
if not contextplugin_should_run(self, context):
return
        # Test that the environment is set (redundant, as this plugin shouldn't
        # be active otherwise).
try:
MUSTER_REST_URL = os.environ["MUSTER_REST_URL"]
except KeyError:
self.log.error("Muster REST API url not found.")
raise ValueError("Muster REST API url not found.")
# Load credentials
try:
self._load_credentials()
except RuntimeError:
self.log.error("invalid or missing access token")
assert self._token is not None, "Invalid or missing token"
# We have token, lets do trivial query to web api to see if we can
# connect and access token is valid.
params = {
'authToken': self._token
}
api_entry = '/api/pools/list'
response = requests_get(
MUSTER_REST_URL + api_entry, params=params)
assert response.status_code == 200, "invalid response from server"
assert response.json()['ResponseData'], "invalid data in response"
    def _load_credentials(self):
        """
        Load the Muster access token from the credentials file and set
        `self._token`; `MUSTER_REST_URL` is taken from the environment.
        .. todo::
            Show login dialog if access token is invalid or missing.
        """
        app_dir = os.path.normpath(
            appdirs.user_data_dir('pype-app', 'pype')
        )
        file_name = 'muster_cred.json'
        fpath = os.path.join(app_dir, file_name)
        with open(fpath, 'r') as file:
            muster_json = json.load(file)
        self._token = muster_json.get('token', None)
        if not self._token:
            raise RuntimeError("Invalid access token for Muster")
        self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")
        if not self.MUSTER_REST_URL:
            raise AttributeError("Muster REST API url not set")
@classmethod
def repair(cls, instance):
"""
Renew authentication token by logging into Muster
"""
api_url = "{}/muster/show_login".format(
os.environ["OPENPYPE_WEBSERVER_URL"])
cls.log.debug(api_url)
response = requests_get(api_url, timeout=1)
if response.status_code != 200:
cls.log.error('Cannot show login form to Muster')
raise Exception('Cannot show login form to Muster')
| 3,144 |
src/pyai/nn/layers/linear.py
|
lab-a1/pyai
| 0 |
2172812
|
from pyai import Tensor
from pyai.nn.layers.base import BaseLayer
import numpy as np
class Linear(BaseLayer):
"""
Equation: y = x*W + b
Shapes:
x: (batch_size, input_size)
y: (batch_size, output_size)
"""
def __init__(self, input_size: int, output_size: int) -> None:
super().__init__()
self.params["w"] = np.random.rand(input_size, output_size)
self.params["b"] = np.random.rand(output_size)
def forward(self, x: Tensor) -> Tensor:
self.x = x
return x @ self.params["w"] + self.params["b"]
def backward(self, gradients: Tensor) -> Tensor:
self.gradients["b"] = np.sum(gradients, axis=0)
self.gradients["w"] = self.x.T @ gradients
return gradients @ self.params["w"].T
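if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original layer):
    # one forward/backward pass on random data, checking the documented shapes.
    layer = Linear(input_size=4, output_size=2)
    x = np.random.rand(8, 4)                  # (batch_size, input_size)
    y = layer.forward(x)                      # (batch_size, output_size)
    grad_x = layer.backward(np.ones_like(y))  # gradient w.r.t. the input
    assert y.shape == (8, 2) and grad_x.shape == (8, 4)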
| 783 |
sameproject/objects/json_serializable_object.py
|
js-ts/fix-same-dataset-tests
| 0 |
2172386
|
from __future__ import annotations
from typing import Any
from abc import ABC
import json
class JSONSerializableObject(ABC):
"""Abstract class that provides a mechanism to convert to/from JSON format.
This must only be used for classes where all the fields are JSON serializable.
Ref: https://medium.com/python-pandemonium/json-the-python-way-91aac95d4041
"""
@classmethod
def to_dict(cls, obj: Any) -> dict:
"""Takes in a custom object and returns a dictionary representation of the object.
This dict representation includes metadata such as the object's module and class names.
"""
# Populate the dictionary with object metadata
obj_dict = {
"__class__": obj.__class__.__name__,
"__module__": obj.__module__
}
# Populate the dictionary with object properties
obj_dict.update(obj.__dict__)
return obj_dict
@classmethod
def from_dict(cls, obj_dict: dict) -> Any:
"""Takes in a dict and returns a custom object associated with the dict.
This function makes use of the "__module__" and "__class__" metadata in the dictionary to verify if the correct
dictionary is being provided.
"""
if "__class__" in obj_dict:
# Pop ensures we remove metadata from the dict to leave only the instance arguments
class_name = obj_dict.pop("__class__")
# Get the module name from the dict and import it
module_name = obj_dict.pop("__module__")
# We use the built in __import__ function since the module name is not yet known at runtime
module = __import__(module_name, fromlist=[None])
# Get the class from the module
class_ = getattr(module, class_name)
obj = class_()
for attribute_name, attribute_value in obj_dict.items():
setattr(obj, attribute_name, attribute_value)
return obj
else:
# Input is not of the appropriate type to be converted into a Step object
raise TypeError(f"Object cannot be converted to an object of type: {cls.__name__}")
@classmethod
def to_json(cls, obj):
json_obj = json.dumps(obj, default=cls.to_dict)
return json_obj
@classmethod
def from_json(cls, json_obj):
obj = json.loads(json_obj, object_hook=cls.from_dict)
return obj
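if __name__ == "__main__":
    # Hedged usage sketch (illustrative only, not part of the original module):
    # round-trip a minimal subclass through JSON via the metadata-aware hooks above.
    class Point(JSONSerializableObject):
        def __init__(self, x: int = 0, y: int = 0) -> None:
            self.x = x
            self.y = y
    serialized = JSONSerializableObject.to_json(Point(1, 2))
    restored = JSONSerializableObject.from_json(serialized)
    print(serialized, restored.x, restored.y)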
| 2,438 |
check-diff/check-diff.py
|
bgilbert/actions-lib
| 0 |
2172286
|
#!/usr/bin/python3
# Compare two directories (maybe only a subdirectory of them) and add
# GitHub annotations where they're different.
import argparse
import difflib
import io
import itertools
import os.path
import sys
def annotate_file(output, path, severity, message):
print(
f'::{severity} file={path},title=File::{message}',
file=output
)
def annotate_line(output, path, start_line, end_line, severity, message):
'''start_line is zero-indexed; end_line is zero-indexed and points to
the first line not matched.'''
if end_line == start_line + 1:
title = f'Line {start_line + 1}'
else:
title = f'Lines {start_line + 1}-{end_line}'
print(
f'::{severity} file={path},line={start_line + 1},endLine={end_line},title={title}::{message}',
file=output
)
def diff(canon_path, left_lines, right_lines, severity, output=sys.stdout):
seq = difflib.SequenceMatcher(a=left_lines, b=right_lines, autojunk=False)
ok = True
matching = seq.get_matching_blocks()
# Add sentinel at the beginning, corresponding to the sentinel at the
# end, to simplify handling of disjoint files where one of them is empty
matching.insert(0, difflib.Match(0, 0, 0))
for first, second in itertools.pairwise(matching):
left_end = first.a + first.size
right_end = first.b + first.size
left_start = second.a
right_start = second.b
if right_end != right_start and left_end != left_start:
annotate_line(output, canon_path, right_end, right_start, severity, 'Unexpected change')
ok = False
elif right_end != right_start:
annotate_line(output, canon_path, right_end, right_start, severity, 'Unexpected addition')
ok = False
elif left_end != left_start:
# message before the removal is a bit more obvious than after it
annotate_line(output, canon_path, right_start - 1, right_start, severity, 'Unexpected removal on next line')
ok = False
return ok
def recursive_diff(left_root, right_root, subpath, severity):
def handle_error(e):
raise e
subroot = os.path.join(right_root, subpath)
if os.path.isdir(subroot):
iter = os.walk(subroot, onerror=handle_error)
else:
iter = [(os.path.dirname(subroot), [], [os.path.basename(subroot)])]
# walk right tree
ok = True
for (dirpath, dirnames, filenames) in iter:
if os.path.relpath(dirpath, right_root) == '.git':
# stop descent and ignore
dirnames[:] = []
continue
for filename in filenames:
right_path = os.path.join(dirpath, filename)
canon_path = os.path.relpath(right_path, right_root)
left_path = os.path.join(left_root, canon_path)
try:
with open(left_path) as fh:
left = fh.readlines()
except FileNotFoundError:
annotate_file(sys.stdout, canon_path, severity, 'Unexpected file addition')
ok = False
continue
with open(right_path) as fh:
right = fh.readlines()
ok = diff(canon_path, left, right, severity) and ok
# check left tree for files missing from right
subroot = os.path.join(left_root, subpath)
for (dirpath, dirnames, filenames) in os.walk(subroot, onerror=handle_error):
if os.path.relpath(dirpath, left_root) == '.git':
# stop descent and ignore
dirnames[:] = []
continue
for filename in filenames:
left_path = os.path.join(dirpath, filename)
canon_path = os.path.relpath(left_path, left_root)
right_path = os.path.join(right_root, canon_path)
if not os.path.isfile(right_path):
annotate_file(sys.stdout, canon_path, severity, 'Unexpected file removal')
ok = False
return ok
def selftest_one(left, right, expected):
buf = io.StringIO()
diff('a/b/c', left, right, 'alert!', buf)
if buf.getvalue() != expected:
raise Exception(f'Selftest returned unexpected value:\n{buf.getvalue()}')
def selftest():
selftest_one(
['one', 'two', 'three', 'four', 'five', 'seven', 'eight', 'nine'],
['one', 'two', 'none', 'not', 'four', 'five', 'six', 'seven', 'nine'],
'''::alert! file=a/b/c,line=3,endLine=4,title=Lines 3-4::Unexpected change
::alert! file=a/b/c,line=7,endLine=7,title=Line 7::Unexpected addition
::alert! file=a/b/c,line=8,endLine=8,title=Line 8::Unexpected removal on next line
''')
# Check disjoint files
selftest_one(
['a', 'b', 'c'],
['d', 'e', 'f'],
'::alert! file=a/b/c,line=1,endLine=3,title=Lines 1-3::Unexpected change\n'
)
selftest_one(
['a', 'b', 'c'],
[],
'::alert! file=a/b/c,line=0,endLine=0,title=Line 0::Unexpected removal on next line\n'
)
selftest_one(
[],
['d', 'e', 'f'],
'::alert! file=a/b/c,line=1,endLine=3,title=Lines 1-3::Unexpected addition\n'
)
selftest_one(
[],
[],
'',
)
# Check EOF behavior
selftest_one(
['one', 'two', 'three', 'four'],
['one', 'two', 'five', 'six'],
'::alert! file=a/b/c,line=3,endLine=4,title=Lines 3-4::Unexpected change\n'
)
selftest_one(
['one', 'two'],
['one', 'two', 'three', 'four'],
'::alert! file=a/b/c,line=3,endLine=4,title=Lines 3-4::Unexpected addition\n'
)
selftest_one(
['one', 'two', 'three', 'four'],
['one', 'two'],
'::alert! file=a/b/c,line=2,endLine=2,title=Line 2::Unexpected removal on next line\n'
)
def main():
selftest()
parser = argparse.ArgumentParser(description='Compare genericized diffs and add GitHub annotations.')
parser.add_argument('basedir',
help='unmodified source tree (left side of comparison)')
parser.add_argument('patchdir', nargs='?', default='.',
help='modified source tree (right side of comparison)')
parser.add_argument('path', nargs='?', default='.',
help='file or subdirectory within tree')
parser.add_argument('--severity', default='warning',
choices=['notice', 'warning', 'error'],
help='annotation severity (default: warning)')
parser.add_argument('--selftest', action='store_true',
help='only run self-test')
args = parser.parse_args()
if args.selftest:
return 0
ok = recursive_diff(args.basedir, args.patchdir, args.path, args.severity)
return 0 if ok else 1
if __name__ == '__main__':
sys.exit(main())
| 6,744 |
Continuous_Optimization/optimizer.py
|
Rohit-Kundu/Pneumonia-Detection-Local-Search-aided-SCA
| 4 |
2173003
|
'''
The codes have been taken from the following repository:
https://github.com/7ossam81/EvoloPy
'''
from pathlib import Path
import optimizers.AbHCSCA as abhcsca
import benchmarks
import csv
import numpy
import time
import warnings
import os
import plot_convergence as conv_plot
warnings.simplefilter(action="ignore")
def selector(algo, func_details, popSize, Iter):
function_name = func_details[0]
lb = func_details[1]
ub = func_details[2]
dim = func_details[3]
if algo == "AbHCSCA":
x = abhcsca.AbHCSCA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
else:
x = None
return x
def run(optimizer, objectivefunc, NumOfRuns, params, export_flags):
# Select general parameters for all optimizers (population size, number of iterations) ....
PopulationSize = params["PopulationSize"]
Iterations = params["Iterations"]
# Export results ?
Export = export_flags["Export_avg"]
Export_convergence = export_flags["Export_convergence"]
Flag = False
Flag_details = False
    # CSV header for the convergence data
CnvgHeader = []
algo_names = '+'.join(optimizer) + "_"
results_directory = algo_names + str(time.strftime("%Y-%m-%d-%H-%M-%S")) + "/"
Path(results_directory).mkdir(parents=True, exist_ok=True)
for l in range(0, Iterations):
CnvgHeader.append("Iter" + str(l + 1))
for i in range(0, len(optimizer)):
for j in range(0, len(objectivefunc)):
convergence = [0] * NumOfRuns
executionTime = [0] * NumOfRuns
for k in range(0, NumOfRuns):
                func_details = benchmarks.getFunctionDetails(objectivefunc[j])
                x = selector(optimizer[i], func_details, PopulationSize, Iterations)
                convergence[k] = x.convergence
                executionTime[k] = x.executionTime  # assumes the solution object exposes executionTime (as in EvoloPy)
                optimizerName = x.optimizer
                objfname = x.objfname
if Export == True:
ExportToFile = results_directory + "experiment.csv"
with open(ExportToFile, "a", newline="\n") as out:
writer = csv.writer(out, delimiter=",")
if (
Flag == False
): # just one time to write the header of the CSV file
header = numpy.concatenate(
[["Optimizer", "objfname", "ExecutionTime"], CnvgHeader]
)
writer.writerow(header)
Flag = True
avgExecutionTime = float("%0.2f" % (sum(executionTime) / NumOfRuns))
avgConvergence = numpy.around(
numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2
).tolist()
a = numpy.concatenate(
[[optimizerName, objfname, avgExecutionTime], avgConvergence]
)
writer.writerow(a)
out.close()
if Export_convergence == True:
conv_plot.run(results_directory, optimizer, objectivefunc, Iterations)
print("Execution completed")
| 3,239 |
ambry_client/library.py
|
CivicKnowledge/ambry-client
| 0 |
2172076
|
""" Library Object for the Ambry Web Client
The Library is a subclass of the CLient, with more interfaces.
Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of
the Revised BSD License, included in this distribution as LICENSE.txt
"""
from . import Client
class Library(Client):
remotes_t = "{base_url}/config/remotes"
accounts_t = "{base_url}/config/accounts"
checkin_t = "{base_url}/bundles/{vid}/checkin"
checkout_t = "{base_url}/bundles/{vid}/checkout"
remove_t = "{base_url}/bundles/{ref}"
@property
def remotes(self):
"""Return a list of all of the remotes"""
return self._get(self.remotes_t)['remotes']
@remotes.setter
def remotes(self, new_remotes):
return self._put(self.remotes_t, data=new_remotes)['remotes']
@property
def accounts(self):
"""Return a list of all of the accounts, minus the secrets"""
# Decrypt the passwords, then re-encrypt them.
return self._get(self.accounts_t)['accounts']
@accounts.setter
def accounts(self, new_accounts):
"""Return a list of all of the accounts, minus the secrets"""
return self._put(self.accounts_t, data=new_accounts)['accounts']
def checkin(self, package, checkin_partitions=True, force=False, cb=None):
from ambry.orm.exc import NotFoundError
import os.path
if not os.path.exists(package.path):
raise NotFoundError("Database is not packaged. Create one by building, or run 'bambry package' " )
if cb:
def cb_one_arg(n):
cb('Uploading bundle', n)
else:
cb_one_arg = None
ds = package.package_dataset
self._post_file(package.path, self.checkin_t, vid=ds.identity.vid)
if False and package.library:
from ambry.orm import Bundle
bundle = Bundle(ds, package.library)
for p in bundle.partitions:
if force:
pass
# FIXME! If Force is false, check if the partition exists and don't upload it,
self._put_partition_fs(remote, p, cb=cb)
return self, package.path
def remove(self, ref, cb=None):
from ambry.orm.exc import NotFoundError
import os.path
return self._delete(self.remove_t, ref=ref)
def __str__(self):
return self._url
| 2,417 |
challenge/agoda_cancellation_estimator.py
|
roizhv22/IML.HUJI
| 0 |
2172383
|
from __future__ import annotations
from typing import NoReturn
import sklearn.tree
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.metrics import f1_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from IMLearn.base import BaseEstimator
import numpy as np
import pandas as pd
from IMLearn.metrics import misclassification_error
class AgodaCancellationEstimator(BaseEstimator):
"""
An estimator for solving the Agoda Cancellation challenge
"""
model = ""
def __init__(self) -> AgodaCancellationEstimator:
"""
Instantiate an estimator for solving the Agoda Cancellation challenge
Parameters
----------
Attributes
----------
"""
super().__init__()
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
Fit an estimator for given samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
Notes
-----
"""
self.model = sklearn.tree.DecisionTreeClassifier(max_depth=7)
self.model.fit(X,y)
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
        Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
return self.model.predict(X)
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under loss function
"""
        return 1 - f1_score(y, self.predict(X), average="macro")
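if __name__ == "__main__":
    # Hedged usage sketch (illustrative; assumes IMLearn's BaseEstimator exposes the
    # public fit/predict/loss wrappers around the private methods defined above).
    rng = np.random.default_rng(0)
    X_demo = rng.random((100, 5))
    y_demo = (X_demo[:, 0] > 0.5).astype(int)
    estimator = AgodaCancellationEstimator()
    estimator.fit(X_demo, y_demo)
    print("demo loss (1 - macro F1):", estimator.loss(X_demo, y_demo))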
| 2,387 |
contrib/goodies/dsl_interface.py
|
electronicvisions/pyplusplus
| 9 |
2171581
|
# Copyright 2004-2008 <NAME>.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#
# Authors:
# <NAME>
#
# Dictionary of this module. Useful for adding symbols
mod_dict = globals()
# Bring in the module builder and alias it
import pyplusplus.module_builder
ModuleBuilder = pyplusplus.module_builder.module_builder_t
set_logger_level = pyplusplus.module_builder.set_logger_level
# Bring in all call policy symbols
from pyplusplus.module_builder.call_policies import *
from pyplusplus.decl_wrappers import print_declarations
# Type traits
# - just import them all. This isn't pretty, but it will work for now
from pygccxml.declarations.type_traits import *
# cpptypes
# - import them all and leave them named X_t because they are "types" and
# this seems like a good way to keep that in mind.
# This may end up being a bad idea. I don't know yet, so for now we will
# try it and see what happens.
from pygccxml.declarations.cpptypes import *
from pygccxml.declarations.calldef import *
# Matchers
# - Bring in all matchers but rename then without the '_t' at the end
import pygccxml.declarations.matchers
#for n in ["matcher_base_t","or_matcher_t","and_matcher_t","not_matcher_t",
# "declaration_matcher_t","calldef_matcher_t","namespace_matcher_t",
# "variable_matcher_t","regex_matcher_t","access_type_matcher_t",
# "operator_matcher_t","custom_matcher_t","virtuality_type_matcher_t"]:
# mod_dict[n[:-2]] = pygccxml.declarations.matchers.__dict__[n]
from pygccxml.declarations import (or_matcher, and_matcher, not_matcher, declaration_matcher,
calldef_matcher, namespace_matcher, variable_matcher,
regex_matcher, access_type_matcher, operator_matcher,
custom_matcher, virtuality_type_matcher)
| 1,950 |
dlapp/apps/user_management/views.py
|
edv862/dlapp
| 0 |
2171857
|
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView
from django.contrib.auth.models import User
from .forms import UserRegisterForm
class UserRegisterView(CreateView):
model = User
template_name = 'user-register.html'
form_class = UserRegisterForm
success_url = reverse_lazy('home')
| 367 |
archive/SerialComPythonCUI.py
|
kayrlas/ArduinoSerialComPythonCUI
| 0 |
2172139
|
#! /usr/bin/env python3.7
# -*- coding: utf-8 -*-
# Created by kayrlas on Jul 18, 2019 (https://github.com/kayrlas)
# SerialComPythonCUI.py
import signal
import time
import threading
from serial import Serial
from serial.tools import list_ports
class SerialCom(object):
"""Class of serial communication"""
def __init__(self):
self.device = None # select_comport()
self.serial = None # open_comport()
self.thread_sread = None # start_thread() for serial_read
self.thread_swrite = None # start_thread() for serial_write
def select_comport(self) -> bool:
"""Select comports from a list and save to self.device"""
# making comports list
_ports = list_ports.comports()
_devices = [info for info in _ports]
# select comport
if len(_devices) == 0:
# No device
print("Device not found.")
return False
elif len(_devices) == 1:
# Only one device
print("Only found %s." % _devices[0])
self.device = _devices[0].device
return True
else:
# Some devices
print("Connected comports are as follows:")
for i in range(len(_devices)):
print("%d : %s" % (i, _devices[i]))
# Select device
inp_num = input("Input the number of your target port >> ")
if not inp_num.isdecimal():
print("%s is not a number!" % inp_num)
return False
elif int(inp_num) in range(len(_devices)):
self.device = _devices[int(inp_num)].device
return True
else:
print("%s is out of the number!" % inp_num)
return False
def open_comport(self, baudrate, timeout) -> bool:
"""After select_comport, open the comport"""
self.serial = Serial(baudrate=baudrate, timeout=timeout)
if self.device is None:
print("Device has not been specified yet! select_comport first.")
return False
else:
self.serial.port = self.device
inp_yn = input("Open %s ? [Yes/No] >> " % self.device).lower()
if inp_yn in ["y", "yes"]:
print("Opening...")
try:
self.serial.open()
except Exception as e:
print(e)
return False
else:
return True
elif inp_yn in ["n", "no"]:
print("Canceled.")
return False
else:
print("Oops, you didn't enter [Yes/No]. Please try again.")
return False
def serial_read(self):
"""Read line from serial port and print with time"""
_format = "%Y/%m/%d %H:%M:%S"
while self.serial.is_open:
_t1 = time.strftime(_format, time.localtime())
_recv_data = self.serial.readline()
if _recv_data != b'':
print(_t1 + " (RX) : " + _recv_data.strip().decode("utf-8"))
time.sleep(1)
def serial_write(self):
"""Write strings to serial port"""
_format = "%Y/%m/%d %H:%M:%S"
while self.serial.is_open:
_t1 = time.strftime(_format, time.localtime())
#_send_data = input(_t1 + " (TX) >> ")
_send_data = input()
self.serial.write(_send_data.encode("utf-8"))
time.sleep(1)
def start_thread(self):
"""Start serial communication thread"""
self.thread_sread = threading.Thread(target=self.serial_read)
self.thread_swrite = threading.Thread(target=self.serial_write)
self.thread_sread.start()
self.thread_swrite.start()
def close_comport(self):
self.serial.close()
def stop_thread(self):
self.thread_sread.join()
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal.SIG_DFL)
com = SerialCom()
if com.select_comport():
if com.open_comport(9600, 0.1):
com.start_thread()
| 4,078 |
sea/extensions.py
|
yangtt0509/sea
| 0 |
2171483
|
import abc
class AbstractExtension(metaclass=abc.ABCMeta):
@abc.abstractmethod
def init_app(self, app):
raise NotImplementedError
| 149 |
digital-twin/deviceManager/db/controllers/ServidorController.py
|
matbmoser/SOTA
| 0 |
2173015
|
from datetime import datetime
from datetime import datetime
from db.controllers.BaseController import BaseController
from db.controllers.UniversidadController import UniversidadController
class ServidorController(BaseController):
def __init__(self) -> None:
self.tableName = 'Servidor'
super().__init__()
self.externalTable = UniversidadController()
def add(self, serverid, socketKey, siglaUni):
time = str(datetime.now())
self.tipos = self.externalTable.getValues()
if(siglaUni not in self.tipos):
return None
self.conn.insertTableElement(elem=(serverid, socketKey, self.tipos[siglaUni], time, time), table=self.tableName)
return True
def deleteByServerId(self, serverid):
return self.conn.deleteTableElement(table=self.tableName, where="serverid='"+str(serverid)+"'")
def get(self, where):
return self.conn.fetchAll(table=self.tableName, where=where)
def getBySocketKey(self, socketKey):
return self.conn.fetchAll(table=self.tableName,where="socketKey='"+str(socketKey)+"'")
def getByServerId(self, serverid):
return self.conn.fetchAll(table=self.tableName,where="serverid='"+str(serverid)+"'")
def getById(self, id):
return self.conn.fetchAll(table=self.tableName,where="id="+str(id)+"")
def getAll(self):
return self.conn.fetchAll(table=self.tableName)
def getValues(self):
self.values = self.conn.getValueIdDict(id="id", value="serverid", table=self.tableName)
return self.values
def getValuesBySocketKey(self):
self.values = self.conn.getValueIdDict(id="id", value="socketKey", table=self.tableName)
return self.values
def update(self, where="all", *args, **kwargs):
if(kwargs["siglaUni"] != None):
self.tipos = self.externalTable.getValues()
if(kwargs["siglaUni"] not in self.tipos):
return None
idUniversidad = self.tipos[kwargs["siglaUni"]]
kwargs["idUniversidad"] = idUniversidad
ignore = ["siglaUni", "self.tipos", "self.externalTable"]
if where == "all":
where=None
setList = []
for var in kwargs:
if var in ignore:
continue
if(kwargs[var] != None): setList.append((var, kwargs[var]))
self.conn.updateTableElement(table=self.tableName, set=setList, where=where)
return True
| 2,548 |
spark_auto_mapper_fhir/value_sets/language_ability_mode.py
|
imranq2/SparkAutoMapper.FHIR
| 1 |
2172948
|
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class LanguageAbilityMode(GenericTypeCode):
"""
v3.LanguageAbilityMode
From: http://terminology.hl7.org/ValueSet/v3-LanguageAbilityMode in v3-codesystems.xml
A value representing the method of expression of the language. Example:
Expressed spoken, expressed written, expressed signed, received spoken,
received written, received signed. OpenIssue:
Description copied from Concept Domain of same name. Must be verified.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://terminology.hl7.org/CodeSystem/v3-LanguageAbilityMode
"""
codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/v3-LanguageAbilityMode"
class LanguageAbilityModeValues:
"""
Expressed signed
From: http://terminology.hl7.org/CodeSystem/v3-LanguageAbilityMode in v3-codesystems.xml
"""
ExpressedSigned = LanguageAbilityMode("ESGN")
"""
Expressed spoken
From: http://terminology.hl7.org/CodeSystem/v3-LanguageAbilityMode in v3-codesystems.xml
"""
ExpressedSpoken = LanguageAbilityMode("ESP")
"""
Expressed written
From: http://terminology.hl7.org/CodeSystem/v3-LanguageAbilityMode in v3-codesystems.xml
"""
ExpressedWritten = LanguageAbilityMode("EWR")
"""
Received signed
From: http://terminology.hl7.org/CodeSystem/v3-LanguageAbilityMode in v3-codesystems.xml
"""
ReceivedSigned = LanguageAbilityMode("RSGN")
"""
Received spoken
From: http://terminology.hl7.org/CodeSystem/v3-LanguageAbilityMode in v3-codesystems.xml
"""
ReceivedSpoken = LanguageAbilityMode("RSP")
"""
Received written
From: http://terminology.hl7.org/CodeSystem/v3-LanguageAbilityMode in v3-codesystems.xml
"""
ReceivedWritten = LanguageAbilityMode("RWR")
| 2,195 |
setup.py
|
sdtblck/image-dl
| 0 |
2171412
|
from setuptools import setup, find_packages
with open("README.md", "r") as f:
long_description = f.read()
with open("requirements.txt") as f:
requirements = [r.strip() for r in f.readlines()]
print(requirements)
name = 'image-dl'
setup(
name=name,
packages=find_packages(),
version='0.0.2',
license='MIT',
description='A fast and simple image downloader in python',
long_description=long_description,
long_description_content_type="text/markdown",
url=f'https://github.com/sdtblck/{name}',
author='<NAME>',
author_email='<EMAIL>',
    install_requires=requirements,
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10'
],
)
| 929 |
wowa/tracker/migrations/0011_character_realm.py
|
arruda/wowa
| 0 |
2172027
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tracker', '0010_character_renaming_real_realmname'),
]
operations = [
migrations.AddField(
model_name='character',
name='realm',
field=models.ForeignKey(related_name='characters', to='tracker.Realm', null=True),
preserve_default=True,
),
]
| 496 |
999Asciigen.py
|
Nycz-lab/999AsciiGen
| 0 |
2172831
|
from PIL import Image, ImageDraw, ImageFont
import random
import sys
def main():
filename = sys.argv[1]
def converter(filename, block=99, char = '9', char_size_mult=1.5):
img = Image.open(filename)
img = img.convert("RGBA")
pix = img.load()
print(img.size)
block_size_x = int(img.size[0] / block)
block_size_y = int(img.size[1] / block)
new_img = Image.new(mode = "RGBA", size=img.size, color=(0,0,0,0))
d1 = ImageDraw.Draw(new_img)
font = ImageFont.truetype(font="glitch.otf", size=int(block_size_x*char_size_mult), index=0, encoding="unic")
img.show()
for x in range(0, img.size[0], block_size_x):
for y in range(0, img.size[1], block_size_y):
R = pix[x,y][0]
G = pix[x,y][1]
B = pix[x,y][2]
A = pix[x,y][3]
if A > 0:
d1.text((x, y), char, fill =(R, G, B, A),font=font)
#print(x,"/",img.size[0], y,"/",img.size[1])
new_img.save(filename + ".ascii.png")
new_img.show()
if __name__ == "__main__":
if(len(sys.argv) > 4):
filename = sys.argv[1]
converter(filename, int(sys.argv[2]), sys.argv[3], float(sys.argv[4]))
elif(len(sys.argv) > 1):
filename = sys.argv[1]
converter(filename)
else:
print("Error wrong format")
print("usage: ", sys.argv[0], "[input_file: String] [block_size: int] [char: char/String] [char_size_mult: float/int]")
| 1,470 |
backend/tests/endpoints/test_category_route_integration.py
|
zelaznymarek/shopping-list
| 0 |
2171906
|
import pytest
def test_get_categories_returns_all(client, category_meat, token):
res = client.get('/categories', headers={'Authorization': f'Bearer {token}'})
assert len(res.json()) == 1
response_category = res.json()[0]
assert res.status_code == 200
assert response_category['id'] == category_meat.id
assert response_category['name'] == category_meat.name
@pytest.mark.parametrize('headers', [
{'Authorization': 'Bearer invalid'},
{'Authorization': 'invalid'},
{'X-Custom': 'Bearer invalid'},
{}
])
def test_get_categories_unavailable_for_unauthorised(client, headers):
res = client.get('/categories', headers=headers)
assert res.status_code == 401
def test_get_category_returns_one(client, category_meat, token):
res = client.get(f'/categories/{category_meat.id}', headers={'Authorization': f'Bearer {token}'})
response_category = res.json()
assert res.status_code == 200
assert response_category['id'] == category_meat.id
assert response_category['name'] == category_meat.name
def test_get_category_returns_not_found(client, token):
res = client.get(f'/categories/1', headers={'Authorization': f'Bearer {token}'})
assert res.status_code == 404
@pytest.mark.parametrize('headers', [
{'Authorization': 'Bearer invalid'},
{'Authorization': 'invalid'},
{'X-Custom': 'Bearer invalid'},
{}
])
def test_get_category_unavailable_for_unauthorised(client, headers):
res = client.get('/categories/1', headers=headers)
assert res.status_code == 401
def test_add_category(client, token):
category_data = {
'name': 'sweets'
}
res = client.post(
'/categories',
json=category_data,
headers={'Authorization': f'Bearer {token}'},
allow_redirects=True
)
assert res.status_code == 200
returned_category = res.json()
assert returned_category['id'] == 1
assert returned_category['name'] == category_data['name']
def test_add_category_returns_unprocessable_entity(client, token):
res = client.post(
'/categories',
json={},
headers={'Authorization': f'Bearer {token}'},
allow_redirects=True
)
assert res.status_code == 422
def test_add_category_returns_bad_request_if_category_exists(client, token):
category_data = {
'name': 'sweets'
}
client.post(
'/categories',
json=category_data,
headers={'Authorization': f'Bearer {token}'},
allow_redirects=True
)
res = client.post(
'/categories',
json=category_data,
headers={'Authorization': f'Bearer {token}'},
allow_redirects=True
)
assert res.status_code == 400
error_detail = res.json().get('detail')
assert error_detail == f'The category "{category_data.get("name")}" already exists in the system.'
@pytest.mark.parametrize('headers', [
{'Authorization': 'Bearer invalid'},
{'Authorization': 'invalid'},
{'X-Custom': 'Bearer invalid'},
{}
])
def test_add_category_unavailable_for_unauthorised(client, headers):
res = client.post('/categories', json={}, headers=headers, allow_redirects=True)
assert res.status_code == 401
def test_remove_category(client, category_meat, token):
res = client.delete(f'/categories/{category_meat.id}', headers={'Authorization': f'Bearer {token}'})
assert res.status_code == 200
assert res.json() is None
def test_remove_category_returns_not_found(client, token):
res = client.delete('/categories/1', headers={'Authorization': f'Bearer {token}'})
assert res.status_code == 404
@pytest.mark.parametrize('headers', [
{'Authorization': 'Bearer invalid'},
{'Authorization': 'invalid'},
{'X-Custom': 'Bearer invalid'},
{}
])
def test_remove_category_unavailable_for_unauthorised(client, headers):
res = client.delete('/categories/1', headers=headers)
assert res.status_code == 401
def test_update_category(client, category_meat, token):
category_to_update = {
'name': 'changed'
}
res = client.put(
f'/categories/{category_meat.id}',
json=category_to_update,
headers={'Authorization': f'Bearer {token}'}
)
assert res.status_code == 200
updated = res.json()
assert updated['id'] == category_meat.id
assert updated['name'] == category_to_update['name']
def test_update_category_returns_not_found(client, token):
category_to_update = {
'name': 'changed'
}
res = client.put(
'/categories/1',
json=category_to_update,
headers={'Authorization': f'Bearer {token}'}
)
assert res.status_code == 404
@pytest.mark.parametrize('headers', [
{'Authorization': 'Bearer invalid'},
{'Authorization': 'invalid'},
{'X-Custom': 'Bearer invalid'},
{}
])
def test_update_category_unavailable_for_unauthorised(client, headers):
category_to_update = {
'name': 'changed'
}
res = client.put('/categories/1', json=category_to_update, headers=headers)
assert res.status_code == 401
| 5,111 |
test.py
|
joshua2352-cmis/joshua2352-cmis-cs2
| 1 |
2172665
|
#PART 1: Terminology
#1) Give 3 examples of boolean expressions.
#a)3==3
#b)3>3
#c)3<3
#
#2) What does 'return' do?
#uses the given information and spits something back out
#
#
#
#3) What are 2 ways indentation is important in python code?
#a)it tells you where the function definition ends
#b)It is needed to run the function or it will get indent error thing
#
#
#PART 2: Reading
#Type the values for 9 of the 12 of the variables below.
#
#problem1_a)36
#problem1_b)1
#problem1_c)0
#problem1_d)5
#
#problem2_a)True
#problem2_b)True
#problem2_c)False
#problem2_d)False
#
#problem3_a)0.3
#problem3_b)0.5
#problem3_c)0.5
#problem3_d)0.5
#
#problem4_a)
#problem4_b)
#problem4_c)
#problem4_d)
import math
def main():
    print "type in three different numbers, decimals work too:"
    a = float(raw_input("A:"))
    b = float(raw_input("B:"))
    c = float(raw_input("C:"))
    if a == b or b == c or a == c:
        return "you did not follow instructions"
    if a > b and a > c:
        return "The largest number was " + str(a) + "."
    elif b > a and b > c:
        return "The largest number was " + str(b) + "."
    else:
        return "The largest number was " + str(c) + "."
| 1,587 |
12/problem1.py
|
muztanger/aoc2018
| 0 |
2171174
|
"""
--- Day 12: Subterranean Sustainability ---
The year 518 is significantly more underground than your history books implied.
Either that, or you've arrived in a
vast cavern network
under the North Pole.
After exploring a little, you discover a long tunnel that contains a row of
small pots as far as you can see to your left and right. A few of them contain
plants - someone is trying to grow things in these geothermally-heated caves.
The pots are numbered, with
0
https://adventofcode.com/2018/day/12
"""
# import aoc
import os
# import re
# import sys
# from operator import add
# from operator import mul
# from itertools import combinations
# from collections import Counter
import re
from pprint import pprint
debug = False
if debug:
lines = [
"initial state: #..#.#..##......###...###",
"",
"...## => #",
"..#.. => #",
".#... => #",
".#.#. => #",
".#.## => #",
".##.. => #",
".#### => #",
"#.#.# => #",
"#.### => #",
"##.#. => #",
"##.## => #",
"###.. => #",
"###.# => #",
"####. => #",
]
if os.path.exists('input_debug'):
with open('input_debug', 'r') as f:
lines = f.readlines()
else:
lines = []
with open('input', 'r') as f:
lines = f.readlines()
# print(len(lines[0]))
N = 180
state = "." * (N * 2)
init = lines[0][len("initial state: "):].strip()
state = state[:N] + init + state[len(init) + N:]
print(" " + "-" * N + "0" + "-" * (N - 1))
# print(state)
lines = lines[2:]
patterns = {}
for line in lines:
arr = re.split(" => ", line)
pattern = arr[0]
result = arr[1][0]
patterns[pattern] = result
print("{:2d}: {}".format(0, state))
for gen in range(20):
next = ["."] * (N * 2)
for i in range(N * 2 - 5):
key = "".join(state[i:i+5])
try:
if key in patterns:
# print(type(next[i + 1]))
# print(type(patterns[key]))
next[i + 2] = patterns[key]
except TypeError as e:
print(e)
raise
state = next
print("{:2d}: {}".format(gen + 1, "".join(state)))
s = 0
for i, x in enumerate(state):
if x == "#":
s += i - N
print(s)
# 2612 is too low
| 2,331 |
bookwyrm/views/list/list_item.py
|
mouse-reeve/fedireads
| 270 |
2172581
|
""" book list views"""
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, redirect
from django.utils.decorators import method_decorator
from django.views import View
from bookwyrm import forms, models
from bookwyrm.views.status import to_markdown
# pylint: disable=no-self-use
@method_decorator(login_required, name="dispatch")
class ListItem(View):
"""book list page"""
def post(self, request, list_id, list_item):
"""Edit a list item's notes"""
list_item = get_object_or_404(models.ListItem, id=list_item, book_list=list_id)
list_item.raise_not_editable(request.user)
form = forms.ListItemForm(request.POST, instance=list_item)
if form.is_valid():
item = form.save(commit=False)
item.notes = to_markdown(item.notes)
item.save()
else:
raise Exception(form.errors)
return redirect("list", list_item.book_list.id)
| 984 |
api.py
|
hariganesan/music-tracker
| 0 |
2172475
|
# <NAME> 12/21/13
# main file for track-music
import os
import logging
import json
import webapp2
#########################
# Static Handlers
#########################
# main routes to static pages
class MainPageHandler(webapp2.RequestHandler):
def get(self):
with open("templates/index.html") as index_file:
html = index_file.read()
self.response.write(html)
app = webapp2.WSGIApplication([
('/.*$', MainPageHandler)
], debug=True)
| 448 |
fix_mrc.py
|
milliams/proof
| 0 |
2170593
|
import sys
def fix_file(filename: str) -> None:
"""
Take an MRC file which was written by MotionCor2 and fix it so that it has
    the correct map ID string, i.e. a space character rather than a NUL character.
"""
with open(filename, "r+b") as f:
f.seek(208)
map_id = f.read(4)
if map_id == b"MAP\x00":
f.seek(211)
f.write(b"\x20")
if __name__ == "__main__":
filenames = sys.argv[1:]
for filename in filenames:
fix_file(filename)
| 502 |
models.py
|
dkumazaw/mobilenets-tpu
| 12 |
2172870
|
# This project incorporates material from the project listed above, and it
# is accessible under their original license terms (Apache License 2.0)
# ==============================================================================
"""Creates the ConvNet"""
import re
import tensorflow as tf
import numpy as np
from defs import GlobalParams
import model_def
def build_model(images, model_name, training, override_params=None):
"""A helper functiion to creates a ConvNet model and returns predicted logits.
Args:
images: input images tensor.
model_name: string, the model name (either MobileNetV3Large or MobileNetV3Small).
training: boolean, whether the model is constructed for training.
override_params: A dictionary of params for overriding. Fields must exist in
        GlobalParams.
Returns:
logits: the logits tensor of classes.
endpoints: the endpoints for each layer.
Raises:
When model_name specified an undefined model, raises NotImplementedError.
When override_params has invalid fields, raises ValueError.
"""
assert isinstance(images, tf.Tensor)
global_params = GlobalParams(
batch_norm_momentum=0.99,
batch_norm_epsilon=1e-3,
dropout_rate=0.2,
data_format='channels_last',
num_classes=1000,
depth_multiplier=None,
depth_divisor=8,
min_depth=None)
if override_params:
# ValueError will be raised here if override_params has fields not included
# in global_params.
global_params = global_params._replace(**override_params)
if model_name.lower() == 'mobilenetv3small':
with tf.variable_scope(model_name):
model = model_def.MobileNetV3Small(global_params)
logits = model(images, training=training)
elif model_name.lower() == 'mobilenetv3large':
with tf.variable_scope(model_name):
model = model_def.MobileNetV3Large(global_params)
logits = model(images, training=training)
else:
raise NotImplementedError
logits = tf.identity(logits, 'logits')
return logits, model.endpoints
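if __name__ == '__main__':
    # Hedged usage sketch (not part of the original project): builds the small
    # variant on a dummy TF1-style placeholder input and prints the logits shape.
    dummy_images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
    logits, endpoints = build_model(
        dummy_images, 'mobilenetv3small', training=False,
        override_params={'num_classes': 10})
    print(logits.shape)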
| 2,174 |
multiplayer/client.py
|
adkinsj/Hi-Snake
| 0 |
2170864
|
import asyncore
import asynchat
import socket
import json
import hashlib
import controllers
import event_manager
import events
import view
import tkinter
import tkinter.messagebox as messagebox
class Login:
def __init__(self, caller):
self._caller = caller
self._login = tkinter.Tk()
self._login.title("Login")
self._login.geometry('190x70')
self._center(self._login)
self._usernameLabel = tkinter.Label(self._login, text = "Username:")
self._userEntry = tkinter.Entry(self._login)
self._passwordLabel = tkinter.Label(self._login, text = "Password:")
self._passEntry = tkinter.Entry(self._login, show="*")
self._connectButton = tkinter.Button(self._login, text = "Connect", command = self._connect)
self._usernameLabel.grid(row = 0, column = 0)
self._userEntry.grid(row = 0, column = 1)
self._passwordLabel.grid(row = 1, column = 0)
self._passEntry.grid(row = 1, column = 1)
self._connectButton.grid(row = 2, column = 1)
self._passEntry.bind('<Return>', self._connect)
self._username = ""
self._login.mainloop()
def _center(self, toplevel):
toplevel.update_idletasks()
w = toplevel.winfo_screenwidth()
h = toplevel.winfo_screenheight()
size = tuple(int(_) for _ in toplevel.geometry().split('+')[0].split('x'))
x = w/2 - size[0]/2
y = h/2 - size[1]/2
toplevel.geometry("%dx%d+%d+%d" % (size + (x, y)))
def _connect(self, *args):
self._username = self._userEntry.get()
self._caller.push(bytes("LOGIN_ATTEMPT " + json.dumps(dict([("username", self._username),
("password", hashlib.sha512(bytes(self._passEntry.get(), 'UTF-8')).hexdigest())])) + "\n", 'UTF-8'))
self._login.destroy()
def get_user(self):
return self._username
def _login_fail(self):
messagebox.showerror("Error", "Wrong Password!")
return True
class Lobby:
def __init__(self, caller):
self._caller = caller
self._lobby = tkinter.Tk()
self._lobby.title("Lobby")
self._lobby.geometry('200x150')
self._center(self._lobby)
self._welcomeLabel = tkinter.Label(self._lobby, text = "Welcome " + caller._username + "!")
self._create = tkinter.Button(self._lobby, text = "Create", command = self._create)
self._join = tkinter.Button(self._lobby, text = "Join", command = self._join)
self._entry = tkinter.Entry(self._lobby)
self._message = tkinter.Label(self._lobby, text = "Create a new game \n or join an existing game \n by typing in a username!")
self._welcomeLabel.pack()
self._create.pack()
self._entry.pack()
self._join.pack()
self._message.pack()
self._lobby.mainloop()
def _center(self, toplevel):
toplevel.update_idletasks()
w = toplevel.winfo_screenwidth()
h = toplevel.winfo_screenheight()
size = tuple(int(_) for _ in toplevel.geometry().split('+')[0].split('x'))
x = w/2 - size[0]/2
y = h/2 - size[1]/2
toplevel.geometry("%dx%d+%d+%d" % (size + (x, y)))
def _create(self):
self._create.config(text = "Start", command = self._start)
self._join.config(state = 'disable')
self._caller.push(bytes("NEW_GAME" + "\n", 'UTF-8'))
self._message.config(text = "New game created \n press start when ready to play!")
def _start(self):
self._caller.push(bytes("GAME_START" + "\n", 'UTF-8'))
self._lobby.destroy()
def _join(self):
self._caller.push(bytes("JOIN_GAME " + self._entry.get() + "\n", 'UTF-8'))
self._lobby.destroy()
class Client(asynchat.async_chat):
def __init__(self, host, port, eventManager):
asynchat.async_chat.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((host, port))
self._event_manager = eventManager
self._event_manager.register_listener(self)
self.set_terminator(b'\n')
self._received_data = ""
self._pygame_view = None
self._login_screen = True
self._username = ""
def handle_connect(self):
login = Login(self)
self._username = login.get_user()
def collect_incoming_data(self, data):
self._received_data += data.decode('UTF-8')
def found_terminator(self):
        self._received_data = self._received_data.strip('\n')
split_string = self._received_data.split(' ', 1)
key = split_string[0]
if key == "UPDATE":
data = split_string[1]
self._event_manager.post(events.ServerUpdateReceived(data))
elif key == "LOGIN_REQUEST":
login = Login(self)
self._username = login.get_user()
elif key == "LOGIN_FAIL":
print("LOGIN FAILED")
root = tkinter.Tk()
root.withdraw()
messagebox.showerror("Error", "Wrong Password!")
root.destroy()
login = Login(self)
self._username = login.get_user()
elif key == "LOGIN_SUCCESS":
self._pygame_view = view.PygameView(self._event_manager)
lobby = Lobby(self)
elif key == "USER_CREATED":
self._pygame_view = view.PygameView(self._event_manager)
lobby = Lobby(self)
elif key == "GAME_OVER":
self._event_manager.post(events.GameOverEvent())
self._received_data = ""
def notify(self, event):
if isinstance(event, events.QuitEvent):
self.push(bytes("QUIT\n",'UTF-8'))
self.close()
elif isinstance(event, events.MoveEvent):
print("Message: {} sent".format(event.get_direction()))
self.push(bytes("MOVE " + json.dumps(dict([("username", self._username),
("direction", event.get_direction())])) + "\n", 'UTF-8'))
elif isinstance(event, events.RestartEvent):
self.push(bytes("RESTART\n", 'UTF-8'))
def check_ip_addr(ip_addr):
try:
socket.inet_pton(socket.AF_INET, ip_addr)
return True
except socket.error:
return False
def main():
try:
eventManager = event_manager.EventManager()
ip_addr = ""
while not check_ip_addr(ip_addr):
ip_addr = input("Please input server's IP address: ")
port = ""
while not port.isdigit():
port = input("Please input server's port: ")
controller = controllers.Controller(eventManager)
client = Client(ip_addr, int(port), eventManager)
asyncore.loop(timeout=1)
except:
pass
if __name__ == "__main__":
main()
| 6,835 |
tests/test_pim.py
|
klauer/pcdsdevices
| 1 |
2172044
|
import logging
import pytest
from unittest.mock import Mock
from ophyd.device import Component as Cpt
from ophyd.signal import Signal
from ophyd.sim import make_fake_device
from pcdsdevices.areadetector.detectors import PCDSDetector
from pcdsdevices.pim import PIM, PIMMotor
from conftest import HotfixFakeEpicsSignal
logger = logging.getLogger(__name__)
# OK, we have to screw with the class def here. I'm sorry. It's ophyd's fault
# for checking an epics signal value in the __init__ statement.
for comp in (PCDSDetector.image, PCDSDetector.stats):
plugin_class = comp.cls
plugin_class.plugin_type = Cpt(Signal, value=plugin_class._plugin_type)
@pytest.fixture(scope='function')
def fake_pim():
FakePIM = make_fake_device(PIMMotor)
FakePIM.state.cls = HotfixFakeEpicsSignal
pim = FakePIM('Test:Yag', name='test')
pim.state.sim_put(0)
pim.state.sim_set_enum_strs(['Unknown'] + PIMMotor.states_list)
return pim
@pytest.mark.timeout(5)
def test_pim_stage(fake_pim):
logger.debug('test_pim_stage')
pim = fake_pim
# Should return to original position on unstage
pim.move('OUT', wait=True)
assert pim.removed
pim.stage()
pim.move('IN', wait=True)
assert pim.inserted
pim.unstage()
assert pim.removed
pim.move('IN', wait=True)
assert pim.inserted
pim.stage()
pim.move('OUT', wait=True)
assert pim.removed
pim.unstage()
assert pim.inserted
@pytest.mark.timeout(5)
def test_pim_det():
logger.debug('test_pim_det')
FakePIM = make_fake_device(PIM)
FakePIM('Test:Yag', name='test', prefix_det='potato')
FakePIM('Test:Yag', name='test')
@pytest.mark.timeout(5)
def test_pim_subscription(fake_pim):
logger.debug('test_pim_subscription')
pim = fake_pim
cb = Mock()
pim.subscribe(cb, event_type=pim.SUB_STATE, run=False)
pim.state.sim_put(2)
assert cb.called
| 1,899 |
Mobile Devices UI/Message App UI/config.py
|
WithSJ/UI-Templates-for-Kivy
| 5 |
2172718
|
# Set Global variables name
global APP_NAME
global COMPANY_NAME
APP_NAME = "Pigeon"
COMPANY_NAME = "<EMAIL>"
| 109 |
setup.py
|
samuelstanton/upcycle
| 0 |
2169333
|
#!/usr/bin/env python
from os import path
from setuptools import find_packages, setup
AUTHOR = "<NAME>"
NAME = "upcycle"
PACKAGES = find_packages()
REQUIREMENTS_FILE = "requirements.txt"
REQUIREMENTS_PATH = path.join(path.dirname(path.abspath(__file__)), REQUIREMENTS_FILE)
with open(REQUIREMENTS_FILE) as f:
requirements = f.read().splitlines()
requirements.append("python-randomnames @ git+https://github.com/concentricsky/python-randomnames.git@master")
setup(
name=NAME,
version='0.0.1',
description='Reusable code snippets',
author=AUTHOR,
author_email='<EMAIL>',
url='https://github.com/samuelstanton/upcycle',
install_requires=requirements,
packages=PACKAGES,
)
| 695 |
rot13/rot13.py
|
bradagy/rot13
| 0 |
2173042
|
#!/usr/bin/env python3
import string
def encrypt(text, n):
in_tab = string.ascii_lowercase
out_tab = in_tab[n % 26:] + in_tab[:n % 26]
trans_tab = str.maketrans(in_tab, out_tab)
return text.translate(trans_tab)
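# For example, encrypt("abc", 13) returns "nop"; characters outside the lowercase
# alphabet (digits, spaces, uppercase) are left unchanged by the translation table.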
def rot13():
while True:
user_input = input('Please enter the text here: ')
if not user_input.isalpha():
print('The input you entered was not correct. Numbers are also '
'not accepted. Please try again.')
continue
else:
print(f"The encryption is {encrypt(user_input, 13)}.")
break
rot13()
| 619 |
third_party/VeriNet/example/example.py
|
nathzi1505/DNNV
| 33 |
2170717
|
import numpy as np
import torch
import gurobipy as grb
import torch.nn as nn
from src.neural_networks.verinet_nn import VeriNetNN
from src.algorithm.verinet import VeriNet
from src.data_loader.input_data_loader import load_neurify_mnist
from src.data_loader.nnet import NNET
from src.algorithm.verification_objectives import LocalRobustnessObjective
from src.algorithm.verinet_util import Status
def create_input_bounds(image: np.array, eps: int):
"""
Creates the l-infinity input bounds from the given image and epsilon
"""
input_bounds = np.zeros((*image.shape, 2), dtype=np.float32)
input_bounds[:, 0] = image - eps
input_bounds[:, 1] = image + eps
return input_bounds
def local_robustnes_nnet():
"""
An example run of the local robustness verification objective using nnet.
"""
print("\nRunning example run for local robustness verification objective:")
# Load the nnet and convert to VeriNetNN (Found in src/neural_networks/verinet_nn.py
nnet = NNET("../data/models_nnet/neurify/mnist24.nnet")
model = nnet.from_nnet_to_verinet_nn()
# Initialize the solver
solver = VeriNet(model, max_procs=20)
# Load the image and use the predicted class as correct class
image = load_neurify_mnist("../data/mnist_neurify/test_images_100/", img_nums=[0]).reshape(-1)
correct_class = int(model(torch.Tensor(image)).argmax(dim=1))
for eps in [8, 15]:
# Create the input bounds
input_bounds = create_input_bounds(image, eps)
input_bounds = nnet.normalize_input(input_bounds)
# Initialize the verification objective and solve the problem
objective = LocalRobustnessObjective(correct_class, input_bounds, output_size=10)
solver.verify(objective, timeout=3600, no_split=False, verbose=False)
# Store the counter example if Unsafe. Status enum is defined in src.algorithm.verinet_util
if solver.status == Status.Unsafe:
_ = solver.counter_example
print("")
print(f"Statistics for epsilon = {eps}:")
print(f"Verification results: {solver.status}")
print(f"Branches explored: {solver.branches_explored}")
print(f"Maximum depth reached: {solver.max_depth}")
def verinet_nn_example():
"""
An example run of how to use the VeriNetNN class to create a neural network instead of reading from nnet file.
The VeriNetNN class accepts a list of layers arg in init. The forward function should not be modified and it
is assumed that each object in the layers list is applied sequentially.
"""
print("\nRunning example run with custom VeriNetNN and the local robustness verification objective:")
torch.manual_seed(0)
layers = [nn.Linear(2, 2),
nn.ReLU(),
nn.Linear(2, 2),
nn.ReLU(),
nn.Linear(2, 2)]
model = VeriNetNN(layers)
input_bounds = np.array([[-10, 10],
[-10, 10]])
solver = VeriNet(model, max_procs=20)
objective = LocalRobustnessObjective(correct_class=1, input_bounds=input_bounds, output_size=2)
solver.verify(objective, timeout=3600, no_split=False, verbose=False)
# Store the counter example if Unsafe. Status enum is defined in src.algorithm.verinet_util
if solver.status == Status.Unsafe:
_ = solver.counter_example
print("")
print(f"Verification results: {solver.status}")
print(f"Branches explored: {solver.branches_explored}")
print(f"Maximum depth reached: {solver.max_depth}")
if __name__ == '__main__':
# Get the "Academic license" print from gurobi at the beginning
grb.Model()
local_robustnes_nnet()
verinet_nn_example()
| 3,728 |
examples/speech_semantics/datasets/prepare_cgn.py
|
qmeeus/fairseq
| 0 |
2172753
|
#!/usr/bin/env python
import os
from pathlib import Path
import pandas as pd
from bs4 import BeautifulSoup
import gzip
import argparse
import torch
import torchaudio
import torch.nn.functional as F
from logger import logger
"""
Prepare the CGN dataset and extract spoken sentences representations (either the raw waveform, the logmel
or the MFCC) with their written transcription. The script expects a CSV file as input that contains the
paths to the audio recordings (WAV) and orthographic transcriptions (XML).
"""
def parse_args():
parser = argparse.ArgumentParser("Converts raw audio files to torch tensors")
parser.add_argument("--input-file", type=Path,
help="File that contains all the paths to the audiofiles and transcriptions")
parser.add_argument("--data-dir", type=Path,
help="Path from which the paths in input-file descend")
parser.add_argument("--dest-dir", type=Path,
help="Where to save the data")
parser.add_argument("--features", type=str, default="MFCC",
help="The kind of features, one of MFCC or logmels")
parser.add_argument("--n-features", type=int, default=40,
help="Number of mel filterbanks")
parser.add_argument("--max-sequence-length", type=int, default=0,
help="Pad or truncate sequence (0 = no padding)")
return parser.parse_args()
def common_path(path_a, path_b):
"""
Compute the deepest common path between path_a and path_b
:path_a,path_b: pathlib.PurePath objects
returns pathlib.PurePath object that represent the deepest common subpath
"""
return Path(*os.path.commonprefix([path_a.parts, path_b.parts]))
def spoken_sentence_generator(audiofile, textfile,
min_sequence_length=0,
max_sequence_length=0,
feature_type="MFCC",
n_features=40,
**options):
"""
Generator to extract sentences from a text transcription of audio file and spoken sentences
features from the corresponding audio file. The features can be one of None (raw waveform),
MFCC or logmel.
:textfile: the path to the text file
:audiofile: the path to the audio file
    :min_sequence_length: int, minimum number of milliseconds for an utterance to be valid
    :max_sequence_length: int, the length of the sequence
    :feature_type: str, "MFCC", "logmel", or anything else to keep the raw waveform
    :n_features: int, number of mel filterbanks / MFCC coefficients
    :options: extra keyword arguments passed on to the torchaudio transform
    returns a generator of tuples (sentence_id, features, speaker, sentence) where features is a
torch.Tensor, speaker is a string identifier and sentence is a text.
"""
# logger.debug(f"Loading {textfile}")
with gzip.open(textfile, encoding='latin-1', mode='rt') as f:
tree = BeautifulSoup(f, "lxml")
# Load the first channel of the wav file
waveform, sample_rate = torchaudio.load(audiofile)
waveform = waveform[None, :1, :]
logger.debug(f"{audiofile} loaded: size: {tuple(waveform.size())} rate: {sample_rate}")
options.update({"sample_rate": sample_rate})
if feature_type.lower() == "mfcc":
options.update({"n_mfcc": n_features})
f = torchaudio.transforms.MFCC(**options)
elif feature_type == "logmel":
        options.update({"n_mels": n_features})
f = torchaudio.transforms.MelSpectrogram(**options)
else:
f = None
sentence_id = 0
for utterance in tree.find_all("tau"):
speaker = utterance.get("s")
start = float(utterance.get("tb"))
end = float(utterance.get("te"))
if (end - start) * 1000 < min_sequence_length:
logger.debug(f"Ignoring utterance {sentence_id} (length: {end-start:.2f})")
continue
sentence = " ".join([word.get("w") for word in utterance.find_all("tw")])
logger.debug(f"Found utterance from {start} to {end}: {sentence}")
start, end = (int(t * sample_rate) for t in (start, end))
feats = waveform[:, :, start:end]
logger.debug(f"Truncating signal from {start} to {end}, size: {tuple(feats.size())}")
if f is not None:
feats = f(feats)
yield sentence_id, feats, speaker, sentence
sentence_id += 1
def pad_sequence(seq, maxlen):
"""
Pad / truncate the last axis of a sequence to the specified length
:seq: torch.Tensor, the sequence to be padded
:maxlen: int, the length of the sequence
returns torch.Tensor, the padded sequence
"""
padding = [0] * (seq.ndim - 1) + [maxlen - seq.size(-1)]
return F.pad(seq, padding)
def filter_dataframe(dataframe, exclude=False, **filters):
"""
Apply filters to a pandas.DataFrame. Can either include or exclude the given values
:dataframe: pd.DataFrame, data to be filtered
:exclude: boolean, whether to include (default) or exclude the given values
:filters: key-value pairs, key is the same of a column in the dataframe and value can be
one value or a list of values to include/exclude
returns a filtered dataframe
"""
for key, value in filters.items():
if type(value) is list:
mask = dataframe[key].isin(value)
else:
mask = dataframe[key] == value
if exclude:
mask = ~mask
dataframe = dataframe[mask]
return dataframe
def generate_data_from_file(filename, root=None, include=None, exclude=None, **options):
"""
Generator to bulk create the datasets from a CSV with 4 columns:
- comp (comp-[a-z]): the component to which the file exists (see CGN)
- lang ([nl|vl]): the language of the recording
- name (f[n|v]\d{6}): the identifier of the recording
- audio (path-like): the path to the audio recording (wav)
- text (path-like): the path to the orthographical retranscription (skp.gz)
:filename: the path to the csv file
:root: the path from which the filenames should be considered. (None = same dir as filename)
:include,exclude: dict-like, key-value pairs to filter the csv (keys must be one of comp/name)
:options: additional options to pass to spoken_sentence_generator
returns a generator of tuples (comp, lang, name, sentence_id, features, speaker, sentence) where
features is a torch.Tensor, speaker is a string identifier and sentence is a text.
"""
paths = pd.read_csv(filename)
assert all(col in paths for col in ("comp", "lang", "name", "audio", "text")), "Invalid CSV"
assert len(paths), "Empty CSV"
logger.debug(f"Loaded {filename} with {len(paths)} target recordings")
for flag, filters in enumerate([include, exclude]):
if filters:
paths = filter_dataframe(paths, exclude=flag, **filters)
assert len(paths), "No more results, filters might be too stricts."
logger.debug(f"{len(paths)} target files remaining after filtering")
for _, comp, lang, name, audiofile, textfile in paths.itertuples():
if root is not None:
audiofile, textfile = (Path(root, fn) for fn in (audiofile, textfile))
for retval in spoken_sentence_generator(audiofile, textfile, **options):
yield tuple([comp, lang, name] + list(retval))
def main():
args = parse_args()
# OPTIONS
# determines how files are found and which to include
INPUT_FILE = args.input_file
ROOT = args.data_dir
INCLUDE_FILTERS = None
EXCLUDE_FILTERS = None
# where to save the files
SAVE_DIRECTORY = args.dest_dir
TEXT_OUTPUT_FILE = "sentences.txt"
# to be passed to spoken_sentence_generator
FEATURES_OPTS = {
'min_sequence_length': 2000,
'max_sequence_length': args.max_sequence_length,
'feature_type': args.features,
'n_features': args.n_features,
}
# Sanity check
if not INPUT_FILE.exists():
raise FileNotFoundError(INPUT_FILE)
os.makedirs(SAVE_DIRECTORY, exist_ok=True)
with open(Path(SAVE_DIRECTORY, TEXT_OUTPUT_FILE), 'w') as txtfile:
for comp, lang, name, sent_id, feats, spkr, sent in generate_data_from_file(
INPUT_FILE, ROOT, INCLUDE_FILTERS, EXCLUDE_FILTERS, **FEATURES_OPTS):
output_file = Path(comp, lang, f"{name}.{sent_id:06d}.pt")
output_path = Path(SAVE_DIRECTORY, output_file)
txtfile.write(f"{output_file}\t{sent}\n")
os.makedirs(output_path.parent, exist_ok=True)
torch.save(feats, output_path)
txtfile.flush()
if __name__ == '__main__':
main()
| 8,739 |
global_var/global_param.py
|
spiderkiller13/elevator_gateway
| 3 |
2173036
|
import os
import yaml
from global_var.global_logger import logger
from pprint import pprint, pformat
# Load parameters
f = open(os.path.join(os.path.dirname(__file__), "../param.yaml") ,'r')
params_raw = f.read()
f.close()
param_dict = yaml.safe_load(params_raw)
# Load parameters
table = param_dict['table']
AMR_URI = param_dict['AMR_URI']
AMR_MQTT_NAME = param_dict['AMR_MQTT_NAME']
CLIENT_NAME = param_dict['CLIENT_NAME']
BROKER_IP = param_dict['BROKER_IP']
NOTIFY_MAX_RETRY_TIME = param_dict['NOTIFY_MAX_RETRY_TIME']
REC_TIMEOUT = param_dict['REC_TIMEOUT']
IS_VERBOSE = param_dict['IS_VERBOSE']
IS_SIMULATION = param_dict['IS_SIMULATION']
DOOR_OPEN_LIMIT_TIME = param_dict['DOOR_OPEN_LIMIT_TIME']
WAIT_REACH_LIMIT_TIME = param_dict['WAIT_REACH_LIMIT_TIME']
SLIENCE_MIN_COUNTER = param_dict['SLIENCE_MIN_COUNTER']
FLOOR_LED_CONFIRMATION_MAX_TIME = param_dict['FLOOR_LED_CONFIRMATION_MAX_TIME']
FLOOR_LED_CONFIRMATION_MIN_TIME = param_dict['FLOOR_LED_CONFIRMATION_MIN_TIME']
WAIT_DOOR_LIMIT = param_dict['WAIT_DOOR_LIMIT']
is_using_rss = param_dict['is_using_rss']
IS_USING_MQTT = param_dict['IS_USING_MQTT']
IS_USING_HTTP = param_dict['IS_USING_HTTP']
CORP_ID = param_dict['CORP_ID']
CORP_SECRET = param_dict['CORP_SECRET']
AGENT_ID = param_dict['AGENT_ID']
ENABLE_VERIFY_DOOR_STATUS = param_dict['ENABLE_VERIFY_DOOR_STATUS']
CLOSE_EYE_WAIT_DOOR_SEC = param_dict['CLOSE_EYE_WAIT_DOOR_SEC']
IS_USING_XBEE = param_dict['IS_USING_XBEE']
XBEE_HOST_IP = param_dict['XBEE_HOST_IP']
# Print out parameters
logger.info("[param_loader] table = " + pformat(table))
logger.info("[param_loader] AMR_URI = " + str(AMR_URI))
logger.info("[param_loader] AMR_MQTT_NAME = " + str(AMR_MQTT_NAME))
logger.info("[param_loader] AMR_MQTT_NAME = " + str(CLIENT_NAME))
logger.info("[param_loader] BROKER_IP = " + str(BROKER_IP))
logger.info("[param_loader] NOTIFY_MAX_RETRY_TIME = " + str(NOTIFY_MAX_RETRY_TIME))
logger.info("[param_loader] REC_TIMEOUT = " + str(REC_TIMEOUT))
logger.info("[param_loader] IS_VERBOSE = " + str(IS_VERBOSE))
logger.info("[param_loader] IS_SIMULATION = " + str(IS_SIMULATION))
logger.info("[param_loader] DOOR_OPEN_LIMIT_TIME = " + str(DOOR_OPEN_LIMIT_TIME))
logger.info("[param_loader] WAIT_REACH_LIMIT_TIME = " + str(WAIT_REACH_LIMIT_TIME))
logger.info("[param_loader] SLIENCE_MIN_COUNTER = " + str(SLIENCE_MIN_COUNTER))
logger.info("[param_loader] FLOOR_LED_CONFIRMATION_MAX_TIME = " + str(FLOOR_LED_CONFIRMATION_MAX_TIME))
logger.info("[param_loader] FLOOR_LED_CONFIRMATION_MIN_TIME = " + str(FLOOR_LED_CONFIRMATION_MIN_TIME))
logger.info("[param_loader] WAIT_DOOR_LIMIT = " + str(WAIT_DOOR_LIMIT))
logger.info("[param_loader] is_using_rss = " + str(is_using_rss))
logger.info("[param_loader] IS_USING_MQTT = " + str(IS_USING_MQTT))
logger.info("[param_loader] IS_USING_HTTP = " + str(IS_USING_HTTP))
logger.info("[param_loader] IS_USING_XBEE = " + str(IS_USING_XBEE))
logger.info("[param_loader] CORP_ID = " + str(CORP_ID))
logger.info("[param_loader] CORP_SECRET = " + str(CORP_SECRET))
logger.info("[param_loader] AGENT_ID = " + str(AGENT_ID))
logger.info("[param_loader] ENABLE_VERIFY_DOOR_STATUS = " + str(ENABLE_VERIFY_DOOR_STATUS))
logger.info("[param_loader] CLOSE_EYE_WAIT_DOOR_SEC = " + str(CLOSE_EYE_WAIT_DOOR_SEC))
logger.info("[param_loader] XBEE_HOST_IP = " + str(XBEE_HOST_IP))
| 3,308 |
image_rotate/image_rotate.py
|
WangHongshuo/Image_Algorithms
| 5 |
2172966
|
import cv2 as cv
import numpy as np
import math
def rotateKernel(x,y,cosine,sine):
x1 = x * cosine - y * sine
y1 = x * sine + y * cosine
return (x1, y1)
def iRotateKernel(x,y,cosine,sine):
x1 = x * cosine + y * sine
y1 = -x * sine + y * cosine
return (x1, y1)
# Linear (bilinear) interpolation
def linear(x,y,src):
srcX = math.floor(x)
srcY = math.floor(y)
if(srcX >= 0 and srcX < src.shape[1] and srcY >= 0 and srcY < src.shape[0]):
u = x - srcX
v = y - srcY
srcX1 = min(srcX + 1,src.shape[1] - 1)
srcY1 = min(srcY + 1,src.shape[0] - 1)
res = 0
# f(sX+u,sY+v) = (1-u)(1-v)f(sX,sY) + (1-u)vf(sX,sY+1) + u(1-v)f(sX+1,sY) + uvf(sX+1,sY+1)
res = res + (1 - u)*(1-v)*src[srcY][srcX]
res = res + (1 - u)*v*src[srcY1][srcX]
res = res + u*(1 - v)*src[srcY][srcX1]
res = res + u*v*src[srcY1][srcX1]
for i in range(0,len(res)):
res[i] = min(res[i],255)
return res
else:
return (-1,)
def rotateFunc(image,center,angle,isExpand,method):
    ## OpenCV indexes as (row, col), which corresponds to image coordinates (y, x)
    ## The rotation formulas below use (x, y) coordinates
theta = -angle / 180 * math.pi
cosine = math.cos(theta)
sine = math.sin(theta)
    # Rotation center (a, b); translate the origin to (a, b)
a = center[1] # x - col
b = center[0] # y - row
srcRow = image.shape[0] # row - height - y
srcCol = image.shape[1] # col - width - x
    # Top-left corner
x1 = -a
y1 = b
    # Top-right corner
x2 = srcCol - 1 - a
y2 = b
    # Bottom-right corner
x3 = srcCol -1 - a
y3 = b - srcRow + 1
    # Bottom-left corner
x4 = -a
y4 = b - srcRow + 1
    ## Rotate the corner points about the origin (a, b) and derive the size of the rotated image
(x1, y1) = rotateKernel(x1,y1,cosine,sine)
(x2, y2) = rotateKernel(x2,y2,cosine,sine)
(x3, y3) = rotateKernel(x3,y3,cosine,sine)
(x4, y4) = rotateKernel(x4,y4,cosine,sine)
if (isExpand == 1):
dstRow = round(max(abs(y1 - y3),abs(y2 - y4))) # row - height - y
dstCol = round(max(abs(x1 - x3),abs(x2 - x4))) # col - width - x
else:
dstRow = srcRow
dstCol = srcCol
dst = np.zeros((dstRow,dstCol,image.shape[2]),image.dtype)
    # Center of the rotated image
if(isExpand == 1):
c = dstCol // 2
d = dstRow // 2
else:
c = a
d = b
f1 = -c * cosine + d * sine + a
f2 = -c * sine - d * cosine + b
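    # Backward mapping: rotate each destination pixel (x, y) and add the offset
    # (f1, f2) to locate the corresponding source pixel, so the output has no holes.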
    for x in range(0,dstCol):
        for y in range(0,dstRow):
(srcX ,srcY) = rotateKernel(x,y,cosine,sine)
srcX = srcX + f1
srcY = srcY + f2
# 0 - nearest, 1 - linear
if(method == 1):
pixelVal = linear(srcX,srcY,image)
srcX = math.floor(srcX)
srcY = math.floor(srcY)
if(not(len(pixelVal) == 1 and pixelVal[0] <= 0)):
dst[y][x] = pixelVal
else:
srcX = round(srcX)
srcY = round(srcY)
if(srcX >= 0 and srcX < srcCol and srcY >= 0 and srcY < srcRow):
dst[y][x] = image[srcY][srcX]
return dst
input = cv.imread("H://lena.jpg")
# @fn Image rotation
# @param image input image
# @param center rotation center (row, col)
# @param angle rotation angle in degrees, clockwise positive
# @param isExpand 0 - keep the original image size, 1 - expand the canvas to fit
# @param method 0 - nearest, 1 - linear
# @return the rotated image
res = rotateFunc(input,(input.shape[0] // 2, input.shape[1] // 2),45,1,1)
cv.imshow("input",input)
cv.imshow("rotated",res)
cv.waitKey(0)
| 3,472 |
models_temp.py
|
WesGtoX/gestao-clientes
| 0 |
2171246
|
# ./manage.py inspectdb > models_temp.py
#
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models
class Minhatabela(models.Model):
nome = models.TextField()
salario = models.FloatField()
class Meta:
managed = False
db_table = 'MinhaTabela'
class Tabela1(models.Model):
nome = models.TextField()
salario = models.FloatField()
class Meta:
managed = False
db_table = 'Tabela1'
class Tabela2(models.Model):
nome = models.TextField()
salario = models.FloatField()
class Meta:
managed = False
db_table = 'Tabela2'
class Tabela3(models.Model):
nome = models.TextField()
salario = models.FloatField()
class Meta:
managed = False
db_table = 'Tabela3'
class AccountEmailaddress(models.Model):
verified = models.BooleanField()
primary = models.BooleanField()
user = models.ForeignKey('AuthUser', models.DO_NOTHING)
email = models.CharField(unique=True, max_length=254)
class Meta:
managed = False
db_table = 'account_emailaddress'
class AccountEmailconfirmation(models.Model):
created = models.DateTimeField()
sent = models.DateTimeField(blank=True, null=True)
key = models.CharField(unique=True, max_length=64)
email_address = models.ForeignKey(AccountEmailaddress, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'account_emailconfirmation'
class AuthGroup(models.Model):
name = models.CharField(unique=True, max_length=150)
class Meta:
managed = False
db_table = 'auth_group'
# Unable to inspect table 'auth_group_permissions'
# The error was: list index out of range
class AuthPermission(models.Model):
content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING)
codename = models.CharField(max_length=100)
name = models.CharField(max_length=255)
class Meta:
managed = False
db_table = 'auth_permission'
unique_together = (('content_type', 'codename'),)
class AuthUser(models.Model):
password = <PASSWORD>(max_length=128)
last_login = models.DateTimeField(blank=True, null=True)
is_superuser = models.BooleanField()
username = models.CharField(unique=True, max_length=150)
first_name = models.CharField(max_length=30)
email = models.CharField(max_length=254)
is_staff = models.BooleanField()
is_active = models.BooleanField()
date_joined = models.DateTimeField()
last_name = models.CharField(max_length=150)
class Meta:
managed = False
db_table = 'auth_user'
# Unable to inspect table 'auth_user_groups'
# The error was: list index out of range
# Unable to inspect table 'auth_user_user_permissions'
# The error was: list index out of range
class ClientesDocumento(models.Model):
num_doc = models.CharField(max_length=50)
class Meta:
managed = False
db_table = 'clientes_documento'
class ClientesPerson(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
age = models.IntegerField()
salary = models.DecimalField(max_digits=10, decimal_places=5) # max_digits and decimal_places have been guessed, as this database handles decimal fields as float
bio = models.TextField()
photo = models.CharField(max_length=100, blank=True, null=True)
doc = models.ForeignKey(ClientesDocumento, models.DO_NOTHING, unique=True, blank=True, null=True)
class Meta:
managed = False
db_table = 'clientes_person'
class DashboardUserdashboardmodule(models.Model):
title = models.CharField(max_length=255)
module = models.CharField(max_length=255)
app_label = models.CharField(max_length=255, blank=True, null=True)
user = models.PositiveIntegerField()
column = models.PositiveIntegerField()
order = models.IntegerField()
settings = models.TextField()
children = models.TextField()
collapsed = models.BooleanField()
class Meta:
managed = False
db_table = 'dashboard_userdashboardmodule'
class DjangoAdminLog(models.Model):
action_time = models.DateTimeField()
object_id = models.TextField(blank=True, null=True)
object_repr = models.CharField(max_length=200)
change_message = models.TextField()
content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING, blank=True, null=True)
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
action_flag = models.PositiveSmallIntegerField()
class Meta:
managed = False
db_table = 'django_admin_log'
class DjangoContentType(models.Model):
app_label = models.CharField(max_length=100)
model = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'django_content_type'
unique_together = (('app_label', 'model'),)
class DjangoMigrations(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_migrations'
class DjangoSession(models.Model):
session_key = models.CharField(primary_key=True, max_length=40)
session_data = models.TextField()
expire_date = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_session'
class DjangoSite(models.Model):
name = models.CharField(max_length=50)
domain = models.CharField(unique=True, max_length=100)
class Meta:
managed = False
db_table = 'django_site'
class JetBookmark(models.Model):
url = models.CharField(max_length=200)
title = models.CharField(max_length=255)
user = models.PositiveIntegerField()
date_add = models.DateTimeField()
class Meta:
managed = False
db_table = 'jet_bookmark'
class JetPinnedapplication(models.Model):
app_label = models.CharField(max_length=255)
user = models.PositiveIntegerField()
date_add = models.DateTimeField()
class Meta:
managed = False
db_table = 'jet_pinnedapplication'
class ProdutosProduto(models.Model):
descricao = models.CharField(max_length=100)
preco = models.DecimalField(max_digits=10, decimal_places=5) # max_digits and decimal_places have been guessed, as this database handles decimal fields as float
class Meta:
managed = False
db_table = 'produtos_produto'
class SocialaccountSocialaccount(models.Model):
provider = models.CharField(max_length=30)
uid = models.CharField(max_length=191)
last_login = models.DateTimeField()
date_joined = models.DateTimeField()
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
extra_data = models.TextField()
class Meta:
managed = False
db_table = 'socialaccount_socialaccount'
unique_together = (('provider', 'uid'),)
class SocialaccountSocialapp(models.Model):
provider = models.CharField(max_length=30)
name = models.CharField(max_length=40)
client_id = models.CharField(max_length=191)
key = models.CharField(max_length=191)
secret = models.CharField(max_length=191)
class Meta:
managed = False
db_table = 'socialaccount_socialapp'
class SocialaccountSocialappSites(models.Model):
socialapp = models.ForeignKey(SocialaccountSocialapp, models.DO_NOTHING)
site = models.ForeignKey(DjangoSite, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'socialaccount_socialapp_sites'
unique_together = (('socialapp', 'site'),)
class SocialaccountSocialtoken(models.Model):
token = models.TextField()
token_secret = models.TextField()
expires_at = models.DateTimeField(blank=True, null=True)
account = models.ForeignKey(SocialaccountSocialaccount, models.DO_NOTHING)
app = models.ForeignKey(SocialaccountSocialapp, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'socialaccount_socialtoken'
unique_together = (('app', 'account'),)
class VendasItemdopedido(models.Model):
desconto = models.DecimalField(max_digits=10, decimal_places=5) # max_digits and decimal_places have been guessed, as this database handles decimal fields as float
produto = models.ForeignKey(ProdutosProduto, models.DO_NOTHING)
venda = models.ForeignKey('VendasVenda', models.DO_NOTHING)
quantidade = models.FloatField()
class Meta:
managed = False
db_table = 'vendas_itemdopedido'
class VendasVenda(models.Model):
numero = models.CharField(max_length=7)
valor = models.DecimalField(max_digits=10, decimal_places=5) # max_digits and decimal_places have been guessed, as this database handles decimal fields as float
desconto = models.DecimalField(max_digits=10, decimal_places=5) # max_digits and decimal_places have been guessed, as this database handles decimal fields as float
nfe_emitida = models.BooleanField()
pessoa = models.ForeignKey(ClientesPerson, models.DO_NOTHING, blank=True, null=True)
impostos = models.DecimalField(max_digits=10, decimal_places=5) # max_digits and decimal_places have been guessed, as this database handles decimal fields as float
class Meta:
managed = False
db_table = 'vendas_venda'
| 9,768 |
dataset.py
|
zhoufengfan/heavy-weight-network
| 0 |
2173029
|
from torch.utils.data import Dataset
import torch
class Dataset2(Dataset):
def __init__(self, item_of_single_class=10, noise_scope_list=None, data_vector_dim=20, k=2):
if noise_scope_list is None:
noise_scope_list = [1, 1, 1, 1, 1, 1, 1]
self.noise_scope_list = noise_scope_list
self.item_of_single_class = item_of_single_class
self.dataset_list = []
self.data_vector_dim = data_vector_dim
self.k = k
self.creat_dataset_list()
def __getitem__(self, item):
return self.dataset_list[item][0], self.dataset_list[item][1]
def __len__(self):
return self.item_of_single_class * len(self.noise_scope_list)
def creat_dataset_list(self):
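        # Class i contributes `item_of_single_class` vectors centred at (i - 0.5) * k,
        # perturbed by uniform noise scaled by noise_scope_list[i]; the label is i.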
self.dataset_list = []
for i, noise_scope in enumerate(self.noise_scope_list):
for j in range(self.item_of_single_class):
self.dataset_list.append(
[(i - 0.5) * self.k + noise_scope * torch.rand(self.data_vector_dim), i])
| 1,021 |
plotter.py
|
abcd-source/CarND-PID-Control-Project
| 0 |
2173069
|
import matplotlib.pyplot as plt
import csv
hand_tuned_data = []
with open('results-hand-tuned.csv', newline='') as f:
reader = csv.DictReader(f)
for r in reader:
if (None not in r.values()):
hand_tuned_data.append(r)
optimized_data = []
with open('results-optimized.csv', newline='') as f:
reader = csv.DictReader(f)
for r in reader:
if (None not in r.values()):
optimized_data.append(r)
plt.subplot(2,1,1)
plt.title("Comparison Between Hand Tuned and Optimized Coefficients")
plt.scatter([float(d['dt']) for d in hand_tuned_data], [float(d['cte']) for d in hand_tuned_data], label="Hand Tuned")
plt.scatter([float(d['dt']) for d in optimized_data], [float(d['cte']) for d in optimized_data], label="Optimized")
plt.legend()
plt.ylabel("Cross Track Error")
plt.subplot(2,1,2)
plt.scatter([float(d['dt']) for d in hand_tuned_data], [float(d['cte_sum']) for d in hand_tuned_data], label="Hand Tuned")
plt.scatter([float(d['dt']) for d in optimized_data], [float(d['cte_sum']) for d in optimized_data], label="Optimized")
plt.legend()
plt.ylabel("Average Cross Track Error")
plt.xlabel("Elapsed Time (seconds)")
plt.show()
| 1,185 |
pyLogStd.py
|
LolexUK/pyStdLogLib
| 0 |
2171992
|
import logging, sys
logging_enabled = False
old_input = input
old_print = print
old_stderr = sys.stderr.write
def stderr_write(string=""):
old_stderr(string)
if logging_enabled:
logging.error(string)
sys.stderr.write = stderr_write
def input(string=""):
string_in = old_input(string)
if logging_enabled:
logging.info("STRING IN " + string_in)
return string_in
logging.basicConfig(level=logging.DEBUG, filename='./OUT.txt') ### Print does not get overridden for some reason and sys.stdout.write needs redirecting. Perhaps redirect print function to sys.stdout.write func call with "\n" (or whatever sep) chosen. Also need to account for file being supplied :face_in_hands:
def print(*objects, sep=" ", end="\n", file=sys.stdout, flush=False):
# type: (object, string, string, file_object, boolean) -> None
old_print(*objects, sep = sep, end=end, file=file, flush=flush)
if logging_enabled:
        for obj in objects:  ## Necessary else the logging module will not convert the tuple into the relevant strings
            logging.info(obj)
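# Minimal usage sketch (added for illustration; not part of the original module).
# Import the wrappers explicitly and flip the flag to start mirroring console I/O
# into ./OUT.txt:
#
#   import pyLogStd
#   from pyLogStd import print, input
#   pyLogStd.logging_enabled = True
#   print("hello")              # echoed to stdout and logged
#   name = input("name? ")      # the entered text is logged as "STRING IN <text>"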
| 1,093 |
scripts/scripting_utils.py
|
uerceg/unity_sdk
| 111 |
2171703
|
##
## Various utility methods.
##
import os, shutil, glob, time, sys, platform, subprocess
# ------------------------------------------------------------------
# Windows specific paths.
nuget_dir = 'C:/nuget'
devenv_dir = 'C:/Program Files (x86)/Microsoft Visual Studio 14.0/Common7/IDE'
def set_log_tag(t):
global TAG
TAG = t
# ------------------------------------------------------------------
# Colors for terminal (does not work in Windows).
CEND = '\033[0m'
CBOLD = '\33[1m'
CITALIC = '\33[3m'
CURL = '\33[4m'
CBLINK = '\33[5m'
CBLINK2 = '\33[6m'
CSELECTED = '\33[7m'
CBLACK = '\33[30m'
CRED = '\33[31m'
CGREEN = '\33[32m'
CYELLOW = '\33[33m'
CBLUE = '\33[34m'
CVIOLET = '\33[35m'
CBEIGE = '\33[36m'
CWHITE = '\33[37m'
CBLACKBG = '\33[40m'
CREDBG = '\33[41m'
CGREENBG = '\33[42m'
CYELLOWBG = '\33[43m'
CBLUEBG = '\33[44m'
CVIOLETBG = '\33[45m'
CBEIGEBG = '\33[46m'
CWHITEBG = '\33[47m'
CGREY = '\33[90m'
CRED2 = '\33[91m'
CGREEN2 = '\33[92m'
CYELLOW2 = '\33[93m'
CBLUE2 = '\33[94m'
CVIOLET2 = '\33[95m'
CBEIGE2 = '\33[96m'
CWHITE2 = '\33[97m'
CGREYBG = '\33[100m'
CREDBG2 = '\33[101m'
CGREENBG2 = '\33[102m'
CYELLOWBG2 = '\33[103m'
CBLUEBG2 = '\33[104m'
CVIOLETBG2 = '\33[105m'
CBEIGEBG2 = '\33[106m'
CWHITEBG2 = '\33[107m'
# ------------------------------------------------------------------
# File system methods.
def copy_file(sourceFile, destFile):
debug('Copying from {0} to {1}'.format(sourceFile, destFile))
shutil.copyfile(sourceFile, destFile)
def copy_files(fileNamePattern, sourceDir, destDir):
for file in glob.glob(sourceDir + '/' + fileNamePattern):
debug('Copying from {0} to {1}'.format(file, destDir))
shutil.copy(file, destDir)
def remove_files(fileNamePattern, sourceDir, log=True):
for file in glob.glob(sourceDir + '/' + fileNamePattern):
if log:
debug('Deleting ' + file)
os.remove(file)
def rename_file(fileNamePattern, newFileName, sourceDir):
for file in glob.glob(sourceDir + '/' + fileNamePattern):
debug('Renaming file {0} to {1}'.format(file, newFileName))
os.rename(file, sourceDir + '/' + newFileName)
def clear_dir(dir):
shutil.rmtree(dir)
os.mkdir(dir)
def recreate_dir(dir):
if os.path.exists(dir):
shutil.rmtree(dir)
os.mkdir(dir)
def remove_dir_if_exists(path):
    if os.path.exists(path):
        shutil.rmtree(path)
def change_dir(dir):
os.chdir(dir)
def check_submodule_dir(platform, submodule_dir):
if not os.path.isdir(submodule_dir) or not os.listdir(submodule_dir):
        error('Submodule [{0}] folder empty.'.format(platform))
        error('Did you forget to run \'git submodule update --init --recursive\' ?')
exit()
# ------------------------------------------------------------------
# Debug messages methods.
def debug(msg):
if not is_windows():
print(('{0}[{1}][INFO]:{2} {3}').format(CBOLD, TAG, CEND, msg))
else:
print(('[{0}][INFO]: {1}').format(TAG, msg))
def debug_green(msg):
if not is_windows():
print(('{0}[{1}][INFO]:{2} {3}{4}{5}').format(CBOLD, TAG, CEND, CGREEN, msg, CEND))
else:
print(('[{0}][INFO]: {1}').format(TAG, msg))
def debug_blue(msg):
if not is_windows():
print(('{0}[{1}][INFO]:{2} {3}{4}{5}').format(CBOLD, TAG, CEND, CBLUE, msg, CEND))
else:
print(('[{0}][INFO]: {1}').format(TAG, msg))
def error(msg):
if not is_windows():
print(('{0}[{1}][ERROR]:{2} {3}{4}{5}').format(CBOLD, TAG, CEND, CRED, msg, CEND))
else:
print(('[{0}][ERROR]: {1}').format(TAG, msg))
# ------------------------------------------------------------------
# Execution and platform methods.
def is_windows():
    return platform.system().lower() == 'windows'
def execute_command(cmd_params, log=True):
if log:
debug_blue('Executing ' + str(cmd_params))
subprocess.call(cmd_params)
def xcode_build_debug(target):
    execute_command(['xcodebuild', '-target', target, '-configuration', 'Debug', '-UseModernBuildSystem=NO', 'clean', 'build'])
def xcode_build_release(target):
    execute_command(['xcodebuild', '-target', target, '-configuration', 'Release', '-UseModernBuildSystem=NO', 'clean', 'build'])
def gradle_make_sdk_jar_debug():
execute_command(['./gradlew', 'clean', 'adjustCoreJarDebug'])
def gradle_make_sdk_jar_release():
execute_command(['./gradlew', 'clean', 'adjustCoreJarRelease'])
def gradle_make_test_jar_debug():
execute_command(['./gradlew', 'clean', ':test-library:adjustTestLibraryJarDebug'])
def gradle_make_test_jar_release():
execute_command(['./gradlew', 'clean', ':test-library:adjustTestLibraryJarRelease'])
def gradle_make_test_library_aar_debug():
execute_command(['./gradlew', 'clean', ':test-library:assembleDebug'])
def gradle_make_test_library_aar_release():
execute_command(['./gradlew', 'clean', ':test-library:assembleRelease'])
def gradle_make_test_options_aar_debug():
execute_command(['./gradlew', 'clean', ':test-options:assembleDebug'])
def gradle_make_test_options_aar_release():
execute_command(['./gradlew', 'clean', ':test-options:assembleRelease'])
def gradle_make_oaid_jar_release():
execute_command(['./gradlew', 'clean', ':sdk-plugin-oaid:adjustOaidAndroidJar'])
def nuget_restore(project_path):
execute_command(['{0}/nuget.exe'.format(nuget_dir), 'restore', project_path])
def devenv_build(solution_path, configuration='Release'):
execute_command(['{0}/devenv.exe'.format(devenv_dir), solution_path, '/build', configuration])
| 5,586 |
backend/routers/users.py
|
okynas/Sworkout
| 0 |
2168878
|
from typing import List
from config.middleware import get_current_user
from fastapi import APIRouter, Depends, status
import database
from schema import UserView, UserCreate, UserUpdate
from sqlalchemy.orm import Session
from repository import UserRepository
router = APIRouter(
prefix="/users",
tags=['Users']
)
get_db = database.get_db
# get all
@router.get("/", response_model=List[UserView])
def show_all(db: Session = Depends(get_db), get_current_user: UserView = Depends(get_current_user)):
return UserRepository.get_all(db)
@router.get("/id/{id}" , response_model = UserView)
def show_one(id: int, db: Session = Depends(get_db) , get_current_user: UserView = Depends(get_current_user)):
return UserRepository.get_one_user(id, db)
@router.get("/username/{username}" , response_model = UserView)
def show_one_by_username(username: str, db: Session = Depends(get_db), get_current_user: UserView = Depends(get_current_user)):
return UserRepository.get_one_user_by_username(username, db)
# delete
@router.delete("/me", status_code=status.HTTP_204_NO_CONTENT)
def delete(db: Session = Depends(get_db), get_current_user: UserView = Depends(get_current_user)):
return UserRepository.destroy(get_current_user, db)
| 1,248 |
test_conanRunner.py
|
odant/test_versions_conan_packages
| 0 |
2173010
|
#!/usr/bin/env python
import unittest
from removeAll import removeAll
from pathlib import Path
from conans import tools
from conanRunner import conanRunner
currentDir = Path.cwd()
conanHome = currentDir / "FAKE_CONAN_HOME"
class Test_conanRunner(unittest.TestCase):
def setUp(self):
removeAll(conanHome)
conanHome.mkdir()
def test_1_conanRunner_normal(self):
print("\n")
with tools.environment_append({"CONAN_USER_HOME": str(conanHome)}):
args = ["help"]
result = conanRunner(args)
self.assertFalse(not result)
print("\nOutput 'conan help':")
for s in result:
print(s)
def test_2_conanRunner_bad(self):
print("\n")
with tools.environment_append({"CONAN_USER_HOME": str(conanHome)}):
args = ["bad_command"]
self.assertRaises(Exception, conanRunner, args)
if __name__ == "__main__":
unittest.main()
| 973 |
saleor/rest/serializers/account/newsletter_subscription.py
|
Chaoslecion123/Diver
| 0 |
2172977
|
from django.apps import apps
from rest_flex_fields import FlexFieldsModelSerializer
__all__ = [
'NewsletterSubscriptionSerializer',
]
NewsletterSubscription = apps.get_model(*'account.NewsletterSubscription'.split())
class NewsletterSubscriptionSerializer(FlexFieldsModelSerializer):
"""Serializer for :model:`account.NewsletterSubscription`:
`**Fields:**`
01. `customer` : `OneToOneField` [:model:`account.User`]
02. `email` : `CharField`
03. `id` : `AutoField`
04. `is_active` : `BooleanField`
`**Reverse Fields:**`
"""
class Meta:
model = NewsletterSubscription
fields = [
# Fields
'customer',
'email',
'id',
'is_active',
# Reverse Fields
]
read_only_fields = []
# def create(self, validated_data):
# return super().create(validated_data)
# def update(self, instance, validated_data):
# return super().update(instance, validated_data)
| 1,116 |
fabfile/testbeds/testbed_vcenter_esxi.py
|
GaryGaryWU/contrail_fabric_util
| 0 |
2172637
|
from fabric.api import env
#Management ip addresses of hosts in the cluster
host1 = '[email protected]'
controller = '[email protected]'
#openstack = '[email protected]'
#host2 = '[email protected]'
#External routers if any
#for eg.
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = []
#Autonomous system number
router_asn = 64512
#Host from which the fab commands are triggered to install and provision
host_build = '[email protected]'
#Role definition of the hosts.
env.roledefs = {
# 'all': [host1,host2],
'all': [host1],
'cfgm': [controller],
'openstack': [controller],
'control': [controller],
'compute': [host1],
'collector': [controller],
'webui': [controller],
'database': [controller],
'build': [controller],
}
#Openstack admin password
env.openstack_admin_password = '<PASSWORD>'
env.ostypes = {
host1:'ubuntu'
}
#Hostnames
env.hostnames = {
# 'all': ['nodec22', 'nodeg17']
'all': ['nodec22']
}
env.password = '<PASSWORD>'
#Passwords of each host
env.passwords = {
host1: 'c0ntrail123',
controller: 'c0ntrail123',
# host2: 'c0ntrail123',
host_build: 'secret',
}
vcenter = {
'server':'10.84.24.111',
'username': 'admin',
'password': '<PASSWORD>!',
'datacenter': 'kiran_dc',
'cluster': 'kiran_cluster',
'dv_switch': { 'dv_switch_name': 'kiran_dvswitch',
'nic': 'vmnic1',
},
'dv_port_group': { 'dv_portgroup_name': 'kiran_portgroup',
'number_of_ports': '3',
},
}
compute_vm = {
host1: { 'esxi': {'esx_ip': '10.84.24.61',
'esx_username': 'root',
'esx_password': '<PASSWORD>',
'esx_uplink_nic': 'vmnic0',
'esx_fab_vswitch' : 'vSwitch0',
'esx_fab_port_group' : 'contrail-fab-pg',
'esx_ssl_thumbprint' : "62:49:C2:D4:F7:3A:AF:0F:DE:01:FB:52:7C:36:03:B2:33:CC:DC:EE",
},
'server_mac' : "00:50:56:00:BA:BA",
'server_ip': "10.84.24.222",
'esx_vm_name' : "ContrailVM",
#'esx_datastore' : "/vmfs/volumes/b4s4-root/",
'esx_datastore' : "/vmfs/volumes/datastore1/",
#'esx_vmdk' : '/cs-shared/contrail_fcs_images/v1.10/ubuntu/havana/ContrailVM-disk1.vmdk',
'esx_vmdk' : '/users/kirand/vmware_integ/ContrailVM-disk1.vmdk',
'vm' : "ContrailVM",
'vmdk' : "ContrailVM-disk1",
'vm_deb' : '/cs-shared/contrail_fcs_images/v1.10/ubuntu/havana/contrail-install-packages_1.10-34~havana_all.deb',
'esx_vm_vswitch': 'vSwitch1',
'esx_vm_port_group' : 'contrail-vm-pg',
'server_id' : 'contrail-vm',
'password' : '<PASSWORD>',
'domain' : 'englab.juniper.net',
},
}
#OPTIONAL BONDING CONFIGURATION
#==============================
#Inferface Bonding
#bond= {
# host1 : { 'name': 'bond0', 'member': ['p2p0p0','p2p0p1','p2p0p2','p2p0p3'], 'mode':'balance-xor' },
#}
#OPTIONAL SEPARATION OF MANAGEMENT AND CONTROL + DATA
#====================================================
#Control Interface
#control = {
# host1 : { 'ip': '192.168.10.1/24', 'gw' : '192.168.10.254', 'device':'eth0' },
#}
#Data Interface
#data = {
# host1 : { 'ip': '172.16.17.32/24', 'gw' : '172.16.58.3', 'device':'bond0' },
#}
#To disable installing contrail interface rename package
#env.interface_rename = False
#To use existing service_token
#service_token = 'your_token'
#Specify keystone IP
#keystone_ip = '1.1.1.1'
#Specify Region Name
#region_name = 'RegionName'
#To enable multi-tenancy feature
#multi_tenancy = True
#To enable haproxy feature
#haproxy = True
#To Enable prallel execution of task in multiple nodes
#do_parallel = True
env.test_repo_dir='/homes/vjoshi/node22-17/test'
env.mail_from='<EMAIL>'
env.mail_to='<EMAIL>'
env.log_scenario='Single-Node Sanity'
| 3,932 |
test/test_add_contact.py
|
Byelenka/studying_python
| 0 |
2173062
|
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_contact(app):
old_contacts = app.contact.get_contact_list()
contact = Contact(firstname="First", middlename="Middle", lastname="Last", nickname="Nickname",
title="Title", company="Company", address="address", home_phone="123", mobile_phone="222",
work_phone="333", fax="444", email="<EMAIL>", email2="<EMAIL>", email3="<EMAIL>",
homepage="google.com", bday="1", bmonth="January", byear="1999",
address2="secondary address", phone2="456", notes="some text")
app.contact.create(contact)
new_contacts = app.contact.get_contact_list()
assert len(old_contacts) + 1 == len(new_contacts)
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
def test_add_empty_contact(app):
old_contacts = app.contact.get_contact_list()
contact = Contact(firstname="", middlename="", lastname="", nickname="",
title="", company="", address="", home_phone="", mobile_phone="",
work_phone="", fax="", email="", email2="", email3="",
homepage="", bday="", bmonth="-", byear="",
address2="", phone2="", notes="")
app.contact.create(contact)
new_contacts = app.contact.get_contact_list()
assert len(old_contacts) + 1 == len(new_contacts)
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
| 1,694 |
util/layer.py
|
silentspring2/Network_Calculator
| 3 |
2172105
|
##############################################################################################
# Created by <NAME> at 2018-12/18
# MIT License
# Contain definition of different layer
##############################################################################################
import math
import numpy as np
import util.util as utils
# Basic Layer Class
# All Layer inherit from this
class Layer:
def __init__(self, name='layer', input_k=0, input_channel=0):
self.input_k = utils.make_double(input_k)
self.input_channel = input_channel
self.name = name
# Pass the output structure of former layer to the latter one as input structure
def inherit(self, prev):
self.input_k = prev.output_k
self.input_channel = prev.output_channel
# Re-initialize input parameters
def reinit(self, input_k, input_channel):
self.input_k = utils.make_double(input_k)
self.input_channel = input_channel
# Pooling layer
class PoolLayer(Layer):
def __init__(self, name='pooling', input_k=0, input_channel=0, \
kernel=1, stride=1, padding=0, dilation=1):
super(PoolLayer, self).__init__(name, input_k, input_channel)
self.output_channel = self.input_channel
self.kernel = utils.make_double(kernel)
self.stride = utils.make_double(stride)
self.padding = utils.make_double(padding)
self.dilation = utils.make_double(dilation)
self.params = 0
# Calculation Formula comes from PyTorch documentation
# https://pytorch.org/docs/stable/nn.html?highlight=pool#torch.nn.MaxPool2d
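    # For each axis: out = floor((in + 2*padding - dilation*(kernel - 1) - 1) / stride + 1)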
def calculate_output(self):
self.output_k = [1,1]
self.output_k[0] = int(math.floor((self.input_k[0]-self.dilation[0]*(self.kernel[0]-1)\
+2*self.padding[0]-1)/float(self.stride[0]) + 1.0))
self.output_k[1] = int(math.floor((self.input_k[1]-self.dilation[1]*(self.kernel[1]-1)\
+2*self.padding[1]-1)/float(self.stride[1]) + 1.0))
self.output_channel = self.input_channel
# Compare to convolution and fully-connected layers, pooling layer can be ignored
def calculate_FLOPs(self):
self.add_ops = 0
self.times_ops = 0
self.FLOPs = 0
def calculate_all(self):
self.calculate_output()
self.calculate_FLOPs()
# Convolutional Layer, current version only contain 2d
# 1d and 3d will be added later
class ConvLayer(Layer):
def __init__(self, name='conv', input_k=0, input_channel=0, kernel=1,\
output_channel=0, stride=1, padding=0, dilation=1, bias=True):
super(ConvLayer, self).__init__(name, input_k, input_channel)
self.kernel = utils.make_double(kernel)
self.output_channel = output_channel
self.stride = utils.make_double(stride)
self.padding = utils.make_double(padding)
self.dilation = utils.make_double(dilation)
self.bias = bias
# Calculation Formula comes from PyTorch documentation
# https://pytorch.org/docs/stable/nn.html?highlight=conv#torch.nn.Conv2d
def calculate_output(self):
self.output_k = [1,1]
self.output_k[0] = int(math.floor((self.input_k[0]-self.dilation[0]*(self.kernel[0]-1)\
+2*self.padding[0]-1)/float(self.stride[0]) + 1.0))
self.output_k[1] = int(math.floor((self.input_k[1]-self.dilation[1]*(self.kernel[1]-1)\
+2*self.padding[1]-1)/float(self.stride[1]) + 1.0))
def calculate_parameters(self):
self.params = float(np.multiply.reduce(self.kernel)) * self.input_channel * self.output_channel
if self.bias == True:
self.params += self.output_channel
def calculate_FLOPs(self):
self.times_ops = float(np.multiply.reduce(self.kernel)) * self.input_channel *\
np.multiply.reduce(self.output_k) * self.output_channel
if self.bias == True:
self.add_ops = self.times_ops
else:
self.add_ops = (float(np.multiply.reduce(self.kernel)) * self.input_channel - 1) *\
np.multiply.reduce(self.output_k) * self.output_channel
self.FLOPs = self.times_ops + self.add_ops
def calculate_all(self):
self.calculate_output()
self.calculate_parameters()
self.calculate_FLOPs()
# Fully-Connected Layer
class FCLayer(Layer):
def __init__(self, name='FC', input_k=0, input_channel=0, output_k = 1):
super(FCLayer, self).__init__(name, input_k, input_channel)
self.output_k = output_k
def calculate_parameters(self):
self.params = 2 * np.multiply.reduce(self.input_k) * self.input_channel * self.output_k
def calculate_FLOPs(self):
self.times_ops = self.params
self.add_ops = self.params
self.FLOPs = self.times_ops + self.add_ops
def calculate_all(self):
self.calculate_parameters()
self.calculate_FLOPs()
# Concatenation, defined as layer for utility
class ConcateLayer(Layer):
def __init__(self, name='ConcateLayer', layer_list=[]):
self.output_k = layer_list[0].output_k
self.output_channel = 0
for i,l in enumerate(layer_list):
self.output_channel += l.output_channel
#Qprint(self.output_channel)
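# Minimal usage sketch (added for illustration; not part of the original module).
# Layers are chained by hand: build one, inherit() its output shape into the next,
# then call calculate_all() to fill in output size, parameter count and FLOPs.
#
#   conv = ConvLayer('conv1', input_k=224, input_channel=3,
#                    kernel=3, output_channel=64, stride=1, padding=1)
#   conv.calculate_all()
#   pool = PoolLayer('pool1', kernel=2, stride=2)
#   pool.inherit(conv)
#   pool.calculate_all()
#   fc = FCLayer('fc', output_k=10)
#   fc.inherit(pool)
#   fc.calculate_all()
#   print(conv.params, conv.FLOPs, pool.output_k, fc.FLOPs)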
| 4,900 |
kattis/tenkinds.py
|
calebclark/competition
| 0 |
2173028
|
#!/usr/bin/env python
# 10 Kinds of People
from math import *
from sys import *
grid = []
paths = []
getset = {}
def getkind(p):
return grid[p[0]][p[1]]
def recadd(p, id, lastkind="42"):
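    # Recursive flood fill: label every cell reachable from p through cells of the
    # same kind with the component id, so each query becomes a simple id comparison.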
if p in getset:
#print (p,1)
return
if (p[0] not in range(rows)) or (p[1] not in range(cols)):
#print (p,2)
return
kind = getkind(p)
#print ("kind",kind)
if (lastkind == "42") or (kind == lastkind):
getset[p] = id
recadd((p[0]+1,p[1]),id,kind)
recadd((p[0]-1,p[1]),id,kind)
recadd((p[0],p[1]+1),id,kind)
recadd((p[0],p[1]-1),id,kind)
#line0 = stdin.readline().split()
#rows = int(line0[0])
#cols = int(line0[1])
#don't be a scrub
rows,cols=map(int,stdin.readline().split())
for i in range(rows):
grid.append(list(stdin.readline()))
n = int(stdin.readline())
setnum = 0
for i in range(n):
data = map(int,stdin.readline().split())
paths.append([(data[0]-1,data[1]-1),(data[2]-1,data[3]-1)])
for i in range(rows):
for j in range(cols):
p = (i,j)
recadd(p,setnum)
setnum += 1
for [p1, p2] in paths:
    if getset[p1] == getset[p2]:
        if getkind(p1) == "1":
            print "decimal"
        else:
            print "binary"
    else:
        print "neither"
| 1,329 |
configs/bc.py
|
rjgpinel/rlbc
| 43 |
2171600
|
from sacred import Ingredient
from sacred.settings import SETTINGS
model_ingredient = Ingredient('model')
dataset_ingredient = Ingredient('dataset', ingredients=[model_ingredient])
train_ingredient = Ingredient('train', ingredients=[dataset_ingredient])
collect_ingredient = Ingredient('collect', ingredients=[model_ingredient])
SETTINGS.CONFIG.READ_ONLY_CONFIG = False
@model_ingredient.config
def cfg_model():
# name of the model (will be saved in "$RLBC_MODELS/name")
name = ''
# name of the architecture
archi = 'resnet_18_narrow32'
# mode, flat or skills
mode = 'flat'
# number of frames taken as input
num_frames = 3
# number of scalar signals taken as input
num_signals = 0
# dimension of signal
dim_signal = 7
# type of conv layers normalization
normalization = 'batchnorm'
# model input type
input_type = 'depth'
# model action space
action_space = 'tool_lin'
# timesteps of actions in the future to predict
steps_action = (1, 10, 20, 30)
# number of skill heads
num_skills = 1
# device to load the model on
device = 'cuda'
# flag to resume training
resume = True
# flag to resume training using stored optimizer
load_optimizer = True
# epoch to resume training
epoch = 'current'
@dataset_ingredient.config
def cfg_dataset():
# name of the dataset (will be saved in "$RLBC_DATA/name")
name = ''
# max number of demos to train on
max_demos = None
# number of cameras used during data loading
num_cameras = 1
# name of data augmentation to apply
image_augmentation = ''
# name of the signals to load
signal_keys = ['target_position']
# list of signals dimension
signal_lengths = [2]
# flag to load mask, needed for data augmentation
load_masks = True
@train_ingredient.config
def cfg_train():
# gripper loss coefficient
lam_grip = 0.1
# master pretraining loss coefficient
lam_master = 0.0
# mini-batch size
batch_size = 64
# optimizer learning rate
learning_rate = 1e-3
# number of epochs to train the model
epochs = 101
# number of workers to load data
workers = 16
# number of epochs between two evaluations
eval_interval = 4
# proportion of the dataset withold for evaluation
eval_proportion = 0.05
# first epoch to start evaluation
eval_first_epoch = 0
@collect_ingredient.config
def cfg_collect():
# folder to save data or report (will be saved in "$RLBC_DATA/folder")
folder = ''
# agent type: script, bc or rl
agent = 'script'
# dir of the pickle file containing pre-recorded demos if the agent is replay
replay_dir = ''
# database type: demos, video or evaluation
db_type = 'demos'
# environment to record or evaluate on
env = 'UR5-PickCamEnv-v0'
# starting seed
seed = 0
# number of episodes to record
episodes = 1
# override environment max number of steps
max_steps = -1
# skills timescale or a list of them
timescale = 60
# list of skills sequence
skill_sequence = []
# first epoch to evaluate:
first_epoch = None
# last epoch to evaluate
last_epoch = None
# epochs interval between two evaluations
iter_epoch = 2
# number of workers to use
workers = 1
# flag to rewrite the dataset
rewrite = True
# flag to record trajectories even when they are failed
# used for skills data collection
record_failed = False
# flag for skill data collection
skill_collection = False
# flag to use dask
dask = False
# flag to render the environment, used for debug
render = False
# flag to stop data collection when the environment returns done
enforce_stop_when_done = False
# flag to overlay attention maps on top of depth maps
attention_maps = False
# flag to add data augmentation to collected images
# the augmentation flag is used only for BC agent. RL agent reads the RL args
image_augmentation = ''
| 4,051 |
bit/find_missing_number.py
|
javyxu/algorithms-python
| 8 |
2172886
|
def find_missing_number(nums):
"""Returns the missing number from a sequence of unique integers
in range [0..n] in O(n) time and space. The difference between
consecutive integers cannot be more than 1. If the sequence is
already complete, the next integer in the sequence will be returned.
>>> find_missing_number(i for i in range(0, 10000) if i != 1234)
1234
>>> find_missing_number([4, 1, 3, 0, 6, 5, 2])
7
"""
missing = 0
for i, num in enumerate(nums):
missing ^= num
missing ^= i + 1
return missing
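# Hedged addition (not part of the original file): run the doctests above when
# this module is executed directly, e.g. `python find_missing_number.py -v`.
if __name__ == "__main__":
    import doctest
    doctest.testmod()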
| 571 |
test/Access/test_md5size.py
|
gareth-j/acquire
| 21 |
2170261
|
from Acquire.Access import get_filesize_and_checksum
import pytest
import os
from hashlib import md5
def _get_size(filename):
"""Return the file size in bytes"""
return os.path.getsize(filename)
def _get_md5(filename):
"""Return the MD5 checksum of the passed file"""
    with open(filename, "rb") as f:
        data = f.read()
    return md5(data).hexdigest()
def test_md5size():
# test by calculating sizes and md5s of all files in
# the current directory
for filename in os.listdir("."):
if os.path.isfile(filename):
            (filesize, filemd5) = get_filesize_and_checksum(filename)
            checksize = _get_size(filename)
            checkmd5 = _get_md5(filename)
            assert filesize == checksize
            assert filemd5 == checkmd5
| 781 |
youtube.py
|
kikeelectronico/comments-to-speech
| 0 |
2172952
|
from gtts import gTTS
import os
import pytchat
youtube_id = os.environ['YOUTUBE_ID']
youtube = pytchat.create(video_id=youtube_id)
def speech(text):
t2s = gTTS(text=text, lang='es', slow=False)
t2s.save("youtube.mp3")
os.system("mpg321 --stereo youtube.mp3")
if __name__ == '__main__':
while True:
for comment in youtube.get().sync_items():
speech(str(comment.author.name) + ' dice ' + str(comment.message) + ' desde YouTube')
print(str(comment.author.name) + ' dice ' + str(comment.message) + ' desde YouTube')
| 570 |
tests/test_config.py
|
pytask-dev/pytask-stata
| 1 |
2172829
|
import pytest
from pytask import main
from pytask_stata.config import _nonnegative_nonzero_integer
@pytest.mark.end_to_end
def test_marker_is_configured(tmp_path):
session = main({"paths": tmp_path})
assert "stata" in session.config
assert "stata" in session.config["markers"]
@pytest.mark.unit
@pytest.mark.parametrize("x, expected", [(None, None), (1, 1), ("1", 1), (1.5, 1)])
def test_nonnegative_nonzero_integer(x, expected):
assert _nonnegative_nonzero_integer(x) == expected
@pytest.mark.unit
@pytest.mark.parametrize(
"x, expectation",
[
(
-1,
pytest.raises(ValueError, match="'stata_check_log_lines' must be greater"),
),
(
"-1",
pytest.raises(ValueError, match="'stata_check_log_lines' must be greater"),
),
(0, pytest.raises(ValueError, match="'stata_check_log_lines' must be greater")),
(
"0",
pytest.raises(ValueError, match="'stata_check_log_lines' must be greater"),
),
(
"1.5",
pytest.raises(ValueError, match="'stata_check_log_lines' must be a"),
),
],
)
def test_nonnegative_nonzero_integer_raises_error(x, expectation):
with expectation:
_nonnegative_nonzero_integer(x)
| 1,303 |
setup.py
|
cdfbdex/hciVisualGesture
| 0 |
2172493
|
from setuptools import setup, find_packages
def parse_requirements(filename):
""" load requirements from a pip requirements file """
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
setup(name='hcivisualgesture',
version='0.1.0',
description='HCI based on Computer Vision',
url='',
author='Project: Asistente Virtual (Unicatolica Lumen Gentium)',
author_email='<EMAIL>',
license='BSD (3-clause)',
packages=find_packages(),
install_requires=parse_requirements('requirements.txt'),
zip_safe=False)
| 640 |
carmesi/tenant/views.py
|
RedGranatum/Carmesi
| 0 |
2172754
|
# Django
from rest_framework import serializers, status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny, IsAuthenticated
# Models Serializers
from tenant.models import Client
# Selectors
# Services
from tenant.services import(
cliente_crear
)
from nucleo.services.email import email_enviar_prealta_cliente
from nucleo.services.token import token_verification_email_new_client
class RegistroListadoApi(APIView):
permission_classes = (AllowAny,)
class OutputSerializer(serializers.ModelSerializer):
class Meta:
model = Client
fields = ('schema_name','email','owner_name')
def get(self, request):
clientes = Client.objects.listado_clientes()
serializer = self.OutputSerializer(clientes, many=True)
return Response(serializer.data)
class RegistroApi(APIView):
permission_classes = (AllowAny,)
class InputSerializer(serializers.Serializer):
email = serializers.EmailField()
owner_name = serializers.CharField()
def post(self, request):
serializer = self.InputSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
email_enviar_prealta_cliente(**serializer.validated_data)
return Response(serializer.data,status=status.HTTP_200_OK)
class RegistroVericarNuevoClienteApi(APIView):
permission_classes = (AllowAny,)
class InputSerializer(serializers.Serializer):
token = serializers.CharField()
def post(self, request):
serializer = self.InputSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
payload = token_verification_email_new_client(**serializer.validated_data)
return Response(payload, status=status.HTTP_200_OK)
class RegistroCrearNuevoClienteApi(APIView):
permission_classes = (AllowAny,)
class InputSerializer(serializers.Serializer):
token = serializers.CharField(required=True)
client_name = serializers.CharField(required=True,max_length=500)
password = serializers.CharField(required=True, write_only=True)
class OutputSerializer(serializers.ModelSerializer):
class Meta:
model = Client
fields = ('schema_name','email','owner_name')
def post(self, request):
serializer = self.InputSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
        # to obtain the domain_url, request.META['HTTP_HOST'] can also be used
serializer.validated_data['domain_url'] = request.tenant.domain_url
client = cliente_crear(**serializer.validated_data)
serializer_out = self.OutputSerializer(client)
return Response(serializer_out.data, status=status.HTTP_200_OK)
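# Hedged wiring sketch (assumption; the project's real urls.py is not shown here):
#   from django.urls import path
#   urlpatterns = [
#       path('registro/', RegistroApi.as_view()),
#       path('registro/listado/', RegistroListadoApi.as_view()),
#       path('registro/verificar/', RegistroVericarNuevoClienteApi.as_view()),
#       path('registro/crear/', RegistroCrearNuevoClienteApi.as_view()),
#   ]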
| 2,826 |
PaycomUz/migrations/0001_initial.py
|
Sardor99/PaycomUz
| 5 |
2170367
|
# Generated by Django 2.1.2 on 2018-11-18 15:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='StatusTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_id', models.CharField(max_length=255)),
('request_id', models.CharField(max_length=255)),
('account_id', models.IntegerField()),
('account_type', models.CharField(blank=True, max_length=255, null=True)),
('amount', models.DecimalField(decimal_places=2, max_digits=10)),
('state', models.IntegerField(blank=True, null=True)),
('time', models.CharField(max_length=255)),
('date', models.DateTimeField(auto_now_add=True)),
('error', models.TextField(blank=True, default='None', max_length=255, null=True)),
('status', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='PaycomUz.StatusTransaction')),
],
),
]
| 1,555 |
src/common/logger.py
|
muxer-dev/event-pipeline
| 1 |
2172649
|
"""This logger is setup to replace the default logging, so that all
custom logging is stored in json format.
Example:
Code:
logger.info('Event Action')
Default output:
[INFO] 2019-01-22 11:53:42,670Z 5e2ae11b-0dd3-4c5b-8d84-bdfb3dc1a5a2' Event Action
Logger output:
{
"levelname": "INFO",
"asctime": "2019-01-01 00:00:00,000",
"msecs": 500.0000000000000,
"aws_request_id": "5e2ae11b-0dd3-4c5b-8d84-bdfb3dc1a5a2'",
"message": "Event Action"
}
"""
import logging
import os
from pythonjsonlogger import jsonlogger
from src.common.exceptions import InvalidLogLevel
# This is the default pattern aws lambdas use,
# so all logging information is available in json object.
pattern = "[%(levelname)-8s]\t%(asctime)s.%(msecs)dZ\t%(aws_request_id)s\t%(message)s\n"
logger = logging.getLogger()
formatter = jsonlogger.JsonFormatter(pattern)
for h in logger.handlers:
h.setFormatter(formatter)
log_level = os.environ.get("LOG_LEVEL", "INFO")
log_values = [name for name in logging._levelToName.values()]
if log_level not in log_values:
message = f"Invalid log level set: {log_level}"
raise InvalidLogLevel(message)
logger.setLevel(log_level)
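# Hedged usage sketch (assumption: inside AWS Lambda the runtime injects
# `aws_request_id` into each record; outside Lambda that field is simply absent):
#   logger.info("Event Action")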
| 1,262 |
pygeoid/reduction/atmosphere.py
|
ioshchepkov/pygeoid
| 21 |
2172974
|
"""Calculate atmospheric correction for the gravity anomalies.
"""
import os
import numpy as np
from typing import Callable
from scipy.interpolate import interp1d
from scipy.integrate import trapz
import astropy.units as u
from pygeoid.constants import G
@u.quantity_input
def ussa76_density(alt_arr: u.km = 0.0 * u.km) -> u.kg / u.m**3:
"""Return atmospheric density from USSA76 model.
Refer to the following document (2 doc codes) for details of this model:
NOAA-S/T 76-1562
NASA-TM-X-74335
All assumptions are the same as those in the source documents.
Derived from: https://github.com/mattljc/atmosphere-py
Parameters
----------
alt_arr : ~astropy.units.Quantity
Altitude above sea level.
"""
# Constants
g0 = 9.80665 * u.m / u.s**2
Rstar = 8.31432e3 * u.newton * u.m / u.kilomole / u.K
# Model Parameters
altitude_max = 84852 * u.m
base_alt = np.array([0.0, 11.0, 20.0, 32.0, 47.0, 51.0, 71.0]) * u.km
base_lapse = np.array([-6.5, 0.0, 1.0, 2.8, 0.0, -2.8, -2.0]) * u.K / u.km
base_temp = np.array([288.15, 216.65, 216.650, 228.650, 270.650, 270.650,
214.650]) * u.K
base_press = np.array([1.01325e3, 2.2632e2, 5.4748e1, 8.6801, 1.1090,
6.6938e-1, 3.9564e-2]) * u.mbar
M0 = 28.9644 * u.kg / u.kilomole
# Initialize Outputs
alt_arr = np.atleast_1d(alt_arr)
dens_arr = np.zeros(alt_arr.size) * u.kg / u.m**3
for idx in range(alt_arr.size):
alt = alt_arr[idx]
if alt > altitude_max:
msg = 'Altitude exceeds the model: h > hmax = {} m'.format(
altitude_max)
raise ValueError(msg)
# Figure out base height
if alt <= 0.0:
base_idx = 0
elif alt > base_alt[-1]:
base_idx = len(base_alt) - 1
else:
base_idx = np.searchsorted(base_alt, alt, side='left') - 1
alt_base = base_alt[base_idx]
temp_base = base_temp[base_idx]
lapse_base = base_lapse[base_idx]
press_base = base_press[base_idx]
temp = temp_base + lapse_base * (alt_arr[idx] - alt_base)
if lapse_base == 0.0:
press = press_base * \
np.exp(-g0 * M0 * (alt_arr[idx] -
alt_base) / Rstar / temp_base)
else:
press = press_base * \
(temp_base / temp) ** (g0 * M0 / Rstar / lapse_base)
dens_arr[idx] = press * M0 / Rstar / temp
return dens_arr
@u.quantity_input
def iag_atm_corr_sph(density_function: Callable[[u.Quantity], u.Quantity],
height: u.m, height_max: u.m, samples=1e4) -> u.mGal:
r"""Return atmospheric correction to the gravity anomalies by IAG approach.
This function numerically integrates samples from density function by
trapezoidal rule. The spherical layering of the atmosphere is considered.
IAG approach:
g_atm = G*M(r) / r**2
inf
/
M(r) = 4*pi*| rho(r) * r**2 dr
/
r
Parameters
----------
density_function : callable
The `density_funtion` is called for all height samples to calculate
density of the atmosphere.
height : ~astropy.units.Quantity
Height above sea level.
height_max : ~astropy.units.Quantity
Maximum height of the atmosphere layer above sea level.
samples : float
Number of samples for integration. Default is 1e4.
    Returns
    -------
    ~astropy.units.Quantity
Atmospheric correction.
"""
Rearth = 6378e3 * u.m
r2 = (Rearth + height)**2
    hinf = np.linspace(height, height_max, int(samples))
density = density_function(hinf) * r2
M = 4 * np.pi * trapz(density.to('kg / m').value,
hinf.to('m').value) * u.kg
gc = (G * M / r2)
return gc
@u.quantity_input
def grs80_atm_corr_interp(height: u.m, kind: str = 'linear') -> u.mGal:
"""Return GRS 80 atmospheric correction, in mGal.
Interpolated from the table data [1]_.
Note: If height < 0 m or height > 40000 m, then correction is extrapolated
Parameters
----------
height : ~astropy.units.Quantity
Height above sea level.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'zero', 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of zeroth, first, second or third order) or as an
integer specifying the order of the spline interpolator to use.
Default is 'linear'.
Returns
-------
~astropy.units.Quantity
Atmospheric correction.
References
----------
.. [1] <NAME>. (1980). Geodetic reference system 1980.
Bulletin Géodésique, 54(3), 395-405
"""
fname = os.path.join(os.path.dirname(__file__),
'data/IAG_atmosphere_correction_table.txt')
table_heights, corr = np.loadtxt(fname, unpack=True, delimiter=',',
skiprows=4, dtype=float)
interp = interp1d(table_heights * 1000, corr, kind=kind,
fill_value='extrapolate', assume_sorted=True)
return interp(height.to('m').value) * u.mGal
@u.quantity_input
def wenzel_atm_corr(height: u.m) -> u.mGal:
"""Return atmospheric correction by Wenzel, in mGal.
Parameters
----------
height : ~astropy.units.Quantity
Height above sea level.
Returns
-------
~astropy.units.Quantity
Atmospheric correction.
References
----------
.. [1] <NAME>., 1985, Hochauflosende Kugelfunktionsmodelle fur des
Gravitationspotential der Erde [1]: Wissenschaftliche arbeiten der
Fachrichtung Vermessungswesen der Universitat Hannover, 137
"""
height = height.to('m').value
return (0.874 - 9.9e-5 * height + 3.56e-9 * height**2) * u.mGal
@u.quantity_input
def pz90_atm_corr(height: u.m) -> u.mGal:
"""Return PZ-90 atmospheric correction, in mGal.
Parameters
----------
height : ~astropy.units.Quantity
Height above sea level.
Returns
-------
~astropy.units.Quantity
Atmospheric correction.
"""
height = height.to('km').value
return 0.87 * np.exp(-0.116 * (height)**(1.047)) * u.mGal
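if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): compare two of the
    # closed-form corrections at 1 km above sea level.
    h = 1000 * u.m
    print('Wenzel:', wenzel_atm_corr(h))
    print('PZ-90: ', pz90_atm_corr(h))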
| 6,397 |
src/blip_sdk/extensions/artificial_intelligence/intents/uri_templates.py
|
mirlarof/blip-sdk-python
| 2 |
2170947
|
class UriTemplates:
"""Entities uri templates."""
INTENTIONS = '/intentions'
INTENTION = '/intentions/{0}'
INTENTION_ANSWERS = '/intentions/{0}/answers'
INTENTION_ANSWER = '/intentions/{0}/answers/{1}'
INTENTION_QUESTIONS = '/intentions/{0}/questions'
INTENTION_QUESTION = '/intentions/{0}/questions/{1}'
| 339 |
evo/influx_utils.py
|
andrew-blake/evohome-utils
| 1 |
2171396
|
from datetime import datetime
from influxdb import InfluxDBClient
from influxdb.client import InfluxDBClientError
try:
from influx_config import (
db_name,
influx_host,
influx_port,
influx_user,
influx_password,
write_to_influx,
)
except ImportError:
print("Please configure influx_config.py")
exit(1)
def log_to_influx(zone_details):
influx_client = InfluxDBClient(
influx_host, influx_port, influx_user, influx_password, db_name
)
data = []
ts = datetime.utcnow()
ts = ts.replace(microsecond=0)
for zone in zone_details:
temp_actual = zone["temp"]
temp_target = zone["setpoint"]
zone_name = zone["name"]
record_actual, record_target, record_delta = prep_record(
ts, zone_name, temp_actual, temp_target
)
if record_actual:
data.append(record_actual)
if record_target:
data.append(record_target)
if record_delta:
data.append(record_delta)
print("%s : %s (%s, %s)" % (ts, zone_name, temp_actual, temp_target))
try:
if write_to_influx:
influx_client.write_points(data)
except InfluxDBClientError as e:
print(e)
def prep_record(time, zone, actual, target):
record_actual = None
record_target = None
record_delta = None
if actual is not None and actual != "":
try:
record_actual = {
"measurement": "zone_temp.actual",
"tags": {
"zone": zone,
},
"time": time,
"fields": {"value": float(actual)},
}
except Exception as e:
print(e)
if target is not None and target != "":
try:
record_target = {
"measurement": "zone_temp.target",
"tags": {
"zone": zone,
},
"time": time,
"fields": {"value": float(target)},
}
except Exception as e:
print(e)
if record_actual is not None and record_target is not None:
record_delta = {
"measurement": "zone_temp.delta",
"tags": {
"zone": zone,
},
"time": time,
"fields": {"value": float(actual) - float(target)},
}
return record_actual, record_target, record_delta
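# Hedged usage sketch (assumed zone payload shape, inferred from the keys read above):
#   log_to_influx([{"name": "Kitchen", "temp": 19.5, "setpoint": 20.0}])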
| 2,484 |
submodules/qdpy/qdpy/metrics.py
|
JiangZehua/control-pcgrl3D
| 0 |
2171938
|
# This file is part of qdpy.
#
# qdpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# qdpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with qdpy. If not, see <http://www.gnu.org/licenses/>.
"""TODO"""
import numpy as np
#from scipy.spatial.distance import euclidean
#from itertools import starmap
from typing import Sequence, Callable, Tuple
from qdpy.phenotype import *
from qdpy.base import *
########### METRICS ########### {{{1
#@jit(nopython=True)
def features_distances(individual: IndividualLike, container: Sequence, dist: Union[str, Callable] = "euclidean") -> Sequence:
distances = np.zeros(len(container))
ind_features = individual.features
if isinstance(dist, str):
if dist == "euclidean":
for i, other in enumerate(container):
other_features = other.features
for j in range(len(ind_features.values)):
distances[i] += pow(ind_features.values[j] - other_features.values[j], 2.)
distances = np.power(distances, 1./2.)
else:
raise ValueError(f"Unknown `dist` type: '{dist}'.")
else:
for i, ind in enumerate(container):
distances[i] = dist(ind_features, ind.features)
return distances
def novelty(individual: IndividualLike, container: Sequence, k: int = 1, dist: Union[str, Callable] = "euclidean", ignore_first: bool = False, default_novelty: float = 0.1) -> float:
"""Returns the novelty score of ``individual`` in ``container``.
Novelty is defined as the average distance to the ``k``-nearest neighbours of ``individual``. If ``container`` is empty, return ``default_novelty``."""
if len(container) == 0:
return default_novelty
n_k = min(len(container), k)
distances: Sequence = features_distances(individual, container, dist)
if ignore_first:
nearest_neighbours_dists: Sequence = sorted(distances)[1:n_k+1]
else:
nearest_neighbours_dists = sorted(distances)[:n_k]
return np.mean(nearest_neighbours_dists)
def novelty_nn(individual: IndividualLike, container: Sequence, k: int = 1, nn_size: int = 1, dist: Union[str, Callable] = "euclidean", ignore_first: bool = False, default_novelty: float = 0.1) -> Tuple[float, Sequence]:
"""Returns the novelty score of ``individual`` in ``container`` and the indexes of its ``nn_size`` nearest neighbours.
Novelty is defined as the average distance to the ``k``-nearest neighbours of ``individual``. If ``container`` is empty, return ``default_novelty``."""
if len(container) == 0:
return default_novelty, []
n_k = min(len(container), k)
n_nn_size = min(len(container), nn_size)
distances: Sequence = features_distances(individual, container, dist)
idx_container = list(range(len(container)))
if ignore_first:
nearest_neighbours_dists: Sequence = sorted(distances)[1:n_k+1]
nn: Sequence = sorted(zip(distances, idx_container))[1:n_nn_size+1]
else:
nearest_neighbours_dists = sorted(distances)[:n_k]
nn = sorted(zip(distances, idx_container))[:n_nn_size]
novelty: float = np.mean(nearest_neighbours_dists)
nearest_neighbours_idx: Sequence
_, nearest_neighbours_idx = tuple(zip(*nn))
return novelty, nearest_neighbours_idx
def novelty_local_competition(individual: IndividualLike, container: Sequence, k: int = 1, dist: Union[str, Callable] = "euclidean", ignore_first: bool = False, default_novelty: float = 0.1, default_local_competition: float = 1.0) -> Tuple[float, float]:
"""Returns the novelty and normalised local competition scores of ``individual`` in ``container``.
Novelty is defined as the average distance to the ``k``-nearest neighbours of ``individual``.
Local competition is defined as the number of ``k``-nearest neighbours of ``individual`` that are outperformed by ``individual``. This value is normalised by ``k`` to be in domain [0., 1.].
If ``container`` is empty, return ``default_novelty`` and ``default_local_competition``."""
if len(container) == 0:
return default_novelty, default_local_competition
distances: Sequence = features_distances(individual, container, dist)
nearest_neighbours_dists: Sequence
nearest_neighbours: Sequence
if ignore_first:
nn: Sequence = sorted(zip(distances, container))[1:k+1]
else:
nn = sorted(zip(distances, container))[:k]
nearest_neighbours_dists, nearest_neighbours = tuple(zip(*nn))
novelty: float = np.mean(nearest_neighbours_dists)
local_competition: float = sum((individual.fitness.dominates(ind.fitness) for ind in nearest_neighbours)) / float(k)
return novelty, local_competition
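# Hedged sketch (assumption): any object exposing a `features` attribute works when a
# custom `dist` callable is supplied, sidestepping the qdpy Features machinery, e.g.
#   class _P:  # minimal stand-in for IndividualLike
#       def __init__(self, f): self.features = f
#   pop = [_P((0., 0.)), _P((1., 1.)), _P((2., 2.))]
#   print(novelty(_P((0.5, 0.5)), pop, k=2,
#                 dist=lambda a, b: sum((x - y) ** 2 for x, y in zip(a, b)) ** 0.5))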
# MODELINE "{{{1
# vim:expandtab:softtabstop=4:shiftwidth=4:fileencoding=utf-8
# vim:foldmethod=marker
| 5,292 |
default_profile/util/timer.py
|
farisachugthai/dynamic_ipython
| 5 |
2173072
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
===================================
Timer --- Create a timer decorator.
===================================
Largely this module was simply practice on writing decorators.
Might need to review logging best practices. I don't want the logger from
this module to emit anything, but it seems tedious to place that burden
on any module that imports from here.
.. seealso::
:mod:`cProfile`
:mod:`pstats`
:mod:`timeit`
:magic:`timeit`
"""
import datetime
import functools
import logging
from os import scandir
from runpy import run_path
import time
from timeit import Timer
from IPython.core.getipython import get_ipython
# noinspection PyProtectedMember
from IPython.core.magics.execution import _format_time as format_delta
logging.basicConfig(level=logging.INFO)
def timer(func):
"""Print the runtime of the decorated function.
Utilizes `time.perf_counter`.
.. todo:: Begin using the :mod:`timeit` module.
There are more specialized ways of profiling things in
other modules; however, this works for a rough estimate.
Parameters
----------
func : function
Function to profile
Returns
-------
value : float
Output of function :func:`time.perf_counter()`.
"""
@functools.wraps(func)
def wrapper_timer(*args, **kwargs):
start_time = time.perf_counter()
value = func(*args, **kwargs)
end_time = time.perf_counter()
run_time = end_time - start_time
logging.info(f"Finished {func.__name__!r} in {run_time:.4f} secs")
return value
return wrapper_timer
# class ModuleTimer()
# I mean while we're practicing decorators throw this in the mix
def debug(func):
"""Print the function signature and return value"""
@functools.wraps(func)
def wrapper_debug(*args, **kwargs):
args_repr = [repr(a) for a in args] # 1
kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()] # 2
signature = ", ".join(args_repr + kwargs_repr) # 3
print(f"Calling {func.__name__}({signature})")
value = func(*args, **kwargs)
print(f"{func.__name__!r} returned {value!r}") # 4
return value
return wrapper_debug
def exc_timer(statement, setup=None):
"""A non-decorator implementation that uses `timeit`."""
t = Timer(stmt=statement, setup=setup) # outside the try/except
try:
return t.timeit()
except Exception: # noqa E722
t.print_exc()
class ArgReparser:
"""Class decorator that echoes out the arguments a function was called with."""
def __init__(self, func):
"""Initialize the reparser with the function it wraps."""
self.func = func
def __call__(self, *args, **kwargs):
print("entering function " + self.func.__name__)
i = 0
for arg in args:
print("arg {0}: {1}".format(i, arg))
i = i + 1
return self.func(*args, **kwargs)
def time_dir(directory=None):
"""How long does it take to exec(compile(file)) every file in the startup dir?"""
if directory is None:
directory = get_ipython().startup_dir
result = []
for i in scandir("."):
if i.name.endswith(".py"):
file = i.name
print(file)
print(time.time())
start_time = time.time()
exec(compile(open(file).read(), "timer", "exec"))
end = time.time()
diff = end - start_time
print(f"{diff}")
result.append((file, diff))
return result
class LineWatcher:
"""Class that implements a basic timer.
Registers the `start` and `stop` methods with the IPython events API.
"""
    def __init__(self):
        """Define the class's start_time attribute."""
        self.start_time = time.time()
    def start(self, info=None):
        """Reset `start_time`; registered as the IPython ``pre_run_cell`` callback."""
        self.start_time = time.time()
        return self.start_time
def __repr__(self):
return f"{self.__class__.__name__} {self.start_time}"
    def stop(self, result=None):
"""Determine the difference between start time and end time."""
stop_time = time.time()
diff = abs(stop_time - self.start_time)
print("time: {}".format(format_delta(diff)))
return diff
def load_ipython_extension(ip=None, line_watcher=None):
"""Initialize a `LineWatcher` and register start and stop with IPython."""
if ip is None:
ip = get_ipython()
if ip is None:
return
if line_watcher is None:
line_watcher = LineWatcher()
ip.events.register("pre_run_cell", line_watcher.start)
ip.events.register("post_run_cell", line_watcher.stop)
def unload_ipython_extension(ip=None, line_watcher=None):
if ip is None:
ip = get_ipython()
if ip is None:
return
if line_watcher is None:
line_watcher = LineWatcher()
ip.events.unregister("pre_run_cell", line_watcher.start)
ip.events.unregister("post_run_cell", line_watcher.stop)
| 5,023 |
setup.py
|
wuchangsheng951/NOTIONPY
| 0 |
2170951
|
from setuptools import setup, find_packages
setup(
name='nopynotion',
version='0.2.5',
packages=['nopynotion'],
description='a warpper for notion to update data',
author='<NAME>',
author_email='<EMAIL>',
install_requires=[
'requests',
],
url='https://github.com/wuchangsheng951/nopynotion',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
# entry_points='''
# [console_scripts]
# gas=gpugo:main
# ''',
)
| 632 |
009HistogramByGroup.py
|
AlbertZhaoz/albertpython
| 1 |
2173037
|
import pandas as pd
import matplotlib.pyplot as plt
books = pd.read_excel('./TestExcel/output009.xlsx')
print(books)
books.sort_values(by=2021, inplace=True, ascending=False)
books.plot.bar(x='Field', y=[2020, 2021],color=['orange','blue'])
plt.title('National Books', fontsize=16, fontweight='bold')
plt.xlabel('Field', fontweight='bold')
plt.ylabel('Number', fontweight='bold')
# Customize the x-axis label style: rotate 45 degrees, right-align the label ends
ax = plt.gca()
ax.set_xticklabels(books.Field, rotation=45, ha='right')
# Fix the figure's left and bottom margins
figures = plt.gcf()
figures.subplots_adjust(left=0.2,bottom=0.42)
# plt.tight_layout()
plt.show()
| 588 |
resources/lib/borntogrill/kodi_notification_handler.py
|
BornToGrill/nfo-watch-status-updater
| 0 |
2172962
|
# -*- coding: utf-8 -*-
from resources.lib.borntogrill import kodi_utils
from resources.lib.borntogrill.kodi_json_rpc import VideoLibrary
from resources.lib.borntogrill.kodi_nfo_updater import update_nfo
from resources.lib.borntogrill.kodi_monitor import MonitorMethod
import logging
import os
import xbmcaddon # pylint: disable=import-error
ADDON = xbmcaddon.Addon()
ADDON_NAME = ADDON.getAddonInfo('name')
ADDON_ID = ADDON.getAddonInfo('id')
logger = logging.getLogger(ADDON_ID)
class VideoInfo():
def __init__(self, id, type, playcount):
self.id = id
self.type = type
self.playcount = playcount
class KodiNotificationHandler():
def __init__(self, monitor):
self.monitor = monitor
self.monitor.on(
MonitorMethod.VIDEO_LIBRARY_ON_UPDATE,
self.on_video_library_update
)
def run_till_abort(self, wait_time):
self.monitor.run_till_abort(wait_time)
@staticmethod
def _video_info_from_notification(msg):
        if 'item' not in msg or 'playcount' not in msg:
            return None
        item = msg['item']
        playcount = msg['playcount']
        if 'id' not in item or 'type' not in item:
return None
id = item['id']
video_type = item['type']
return VideoInfo(id, video_type, playcount)
def on_video_library_update(self, obj):
logger.info("Video library updated: %s", str(obj))
video_info = self._video_info_from_notification(obj)
if video_info is None:
logger.warn('Could not parse video info from update notification')
return
fetch_strategies = {
u'movie': {
'fetch': VideoLibrary.get_movie_details,
'file_path': lambda x: x['moviedetails']['file']
},
u'episode': {
'fetch': VideoLibrary.get_episode_details,
'file_path': lambda x: x['episodedetails']['file']
}
}
try:
strategy = fetch_strategies[video_info.type]
video_details = strategy['fetch'](video_info.id)
video_file_path = strategy['file_path'](video_details)
nfo_file_path = video_file_path.replace(os.path.splitext(video_file_path)[1], '.nfo')
except:
error_message = 'Failed to get video info'
logger.exception(error_message)
            kodi_utils.notification(ADDON_NAME, error_message)
            return
try:
update_nfo(nfo_file_path, video_info.playcount)
except IOError:
error_message = 'Failed to update NFO. File could not be found'
logger.exception(error_message)
kodi_utils.notification(ADDON_NAME, error_message)
except:
error_message = 'Failed to update NFO. Check logs for more information'
logger.exception(error_message)
kodi_utils.notification(ADDON_NAME, error_message)
| 3,019 |
components/jk_bms_ble/binary_sensor.py
|
magnetus26/esphome-jk-bms
| 0 |
2170801
|
import esphome.codegen as cg
from esphome.components import binary_sensor
import esphome.config_validation as cv
from esphome.const import CONF_ICON, CONF_ID
from . import CONF_JK_BMS_BLE_ID, JkBmsBle
DEPENDENCIES = ["jk_bms_ble"]
CODEOWNERS = ["@syssi"]
CONF_CHARGING = "charging"
CONF_DISCHARGING = "discharging"
CONF_BALANCING = "balancing"
ICON_CHARGING = "mdi:battery-charging"
ICON_DISCHARGING = "mdi:power-plug"
ICON_BALANCING = "mdi:battery-heart-variant"
BINARY_SENSORS = [
CONF_CHARGING,
CONF_DISCHARGING,
CONF_BALANCING,
]
CONFIG_SCHEMA = cv.Schema(
{
cv.GenerateID(CONF_JK_BMS_BLE_ID): cv.use_id(JkBmsBle),
cv.Optional(CONF_CHARGING): binary_sensor.BINARY_SENSOR_SCHEMA.extend(
{
cv.GenerateID(): cv.declare_id(binary_sensor.BinarySensor),
cv.Optional(CONF_ICON, default=ICON_CHARGING): cv.icon,
}
),
cv.Optional(CONF_DISCHARGING): binary_sensor.BINARY_SENSOR_SCHEMA.extend(
{
cv.GenerateID(): cv.declare_id(binary_sensor.BinarySensor),
cv.Optional(CONF_ICON, default=ICON_DISCHARGING): cv.icon,
}
),
cv.Optional(CONF_BALANCING): binary_sensor.BINARY_SENSOR_SCHEMA.extend(
{
cv.GenerateID(): cv.declare_id(binary_sensor.BinarySensor),
cv.Optional(CONF_ICON, default=ICON_BALANCING): cv.icon,
}
),
}
)
async def to_code(config):
hub = await cg.get_variable(config[CONF_JK_BMS_BLE_ID])
for key in BINARY_SENSORS:
if key in config:
conf = config[key]
sens = cg.new_Pvariable(conf[CONF_ID])
await binary_sensor.register_binary_sensor(sens, conf)
cg.add(getattr(hub, f"set_{key}_binary_sensor")(sens))
| 1,826 |
Python/biopsy/families.py
|
JohnReid/biopsy
| 0 |
2172344
|
#
# Copyright <NAME> 2006
#
from graph import *
def read_families_file( f ):
"""Reads a paralog file generated from ensembl_homologies.pl
subtypes can contain a list of paralog subtypes we are interested in
Yields sequences of genes that form families"""
if isinstance( f, str ): f = open( f, 'r' )
for l in f:
yield l.strip().split(',')
if '__main__' == __name__:
family_graph = graph_generate(
read_families_file( 'C:/Data/Ensembl/mouse_families.txt' )
)
graph_print_info( family_graph )
print graph_are_connected(
family_graph,
graph_vertex( family_graph, 'ENSMUSG00000000103' ),
graph_vertex( family_graph, 'ENSMUSG00000049576' )
)
print graph_are_connected(
family_graph,
graph_vertex( family_graph, 'ENSMUSG00000000001' ),
graph_vertex( family_graph, 'ENSMUSG00000000149' )
)
print graph_are_connected(
family_graph,
graph_vertex( family_graph, 'ENSMUSG00000049576' ),
graph_vertex( family_graph, 'ENSMUSG00000000149' )
)
| 1,116 |
data.py
|
angelikakw/predicting-the-movie-genre
| 0 |
2172785
|
import pandas as pd
import re
import os
FOOTNOTE_RE = re.compile(r'\[[0-9]+\]')
NUMBER_RE = re.compile(r'[0-9]+')
NEW_LINE_RE = re.compile(r'\r\n')
NEW_LINE_2_RE = re.compile(r'\n\n')
def read_data(file_name):
"""Reading and limiting data to the 100 most common genres"""
if not os.path.isfile(file_name):
raise ValueError("No file")
if not file_name[-3:] == 'csv':
raise ValueError("No csv")
data = pd.read_csv(file_name)
genre_counts = data[data['Genre'] != 'unknown']['Genre'].value_counts()
popular_genre = []
    for name, count in genre_counts.items():
if count > 100:
popular_genre.append(name)
bools = []
for elem in data['Genre']:
if elem in popular_genre:
bools.append(True)
else:
bools.append(False)
popular_genre_with_plot = data[bools]
popular_genre_with_plot_rnd = popular_genre_with_plot.sample(frac=1)
return popular_genre_with_plot_rnd
def clean(plot):
plot = re.sub(
FOOTNOTE_RE,
'',
plot
)
plot = re.sub(
NEW_LINE_RE,
' ',
plot
)
plot = re.sub(
NEW_LINE_2_RE,
' ',
plot
)
plot = re.sub(
NUMBER_RE,
' ',
plot
)
return plot.replace('\'', '')
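# Hedged usage sketch (assumes a CSV of movie plots with 'Genre' and 'Plot' columns;
# the actual file name is project specific):
#   movies = read_data('movie_plots.csv')
#   movies['Plot'] = movies['Plot'].apply(clean)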
| 1,332 |
ARRAYS/Easy/Richest Customer Wealth/Code.py
|
HassanRahim26/LEETCODE
| 3 |
2172805
|
#PROBLEM LINK:- https://leetcode.com/problems/richest-customer-wealth/submissions/
class Solution:
def maximumWealth(self, accounts: List[List[int]]) -> int:
accounts = [sum(wealth) for wealth in accounts]
return max(accounts)
| 248 |
Packs/CommonScripts/Scripts/WordTokenizeTest/WordTokenizeTest.py
|
diCagri/content
| 799 |
2173076
|
import demistomock as demisto
from CommonServerPython import *
import nltk
import re
from html.parser import HTMLParser
from html import unescape
html_parser = HTMLParser()
CLEAN_HTML = (demisto.args().get('cleanHtml', 'yes') == 'yes')
REMOVE_LINE_BREAKS = (demisto.args().get('removeLineBreaks', 'yes') == 'yes')
TOKENIZE_TYPE = demisto.args().get('type', 'word')
TEXT_ENCODE = demisto.args().get('zencoding', 'utf-8')
HASH_SEED = demisto.args().get('hashWordWithSeed')
REMOVE_HTML_PATTERNS = [
re.compile(r"(?is)<(script|style).*?>.*?(</\1>)"),
re.compile(r"(?s)<!--(.*?)-->[\n]?"),
re.compile(r"(?s)<.*?>"),
re.compile(r" "),
re.compile(r" +")
]
def clean_html(text):
if not CLEAN_HTML:
return text
cleaned = text
for pattern in REMOVE_HTML_PATTERNS:
cleaned = pattern.sub(" ", cleaned)
return unescape(cleaned).strip()
def tokenize_text(text):
if not text:
return ''
text = text.lower()
if TOKENIZE_TYPE == 'word':
word_tokens = nltk.word_tokenize(text)
elif TOKENIZE_TYPE == 'punkt':
word_tokens = nltk.wordpunct_tokenize(text)
else:
raise Exception("Unsupported tokenize type: %s" % TOKENIZE_TYPE)
if HASH_SEED:
word_tokens = map(str, map(lambda x: hash_djb2(x, int(HASH_SEED)), word_tokens))
return (' '.join(word_tokens)).strip()
def remove_line_breaks(text):
if not REMOVE_LINE_BREAKS:
return text
return text.replace("\r", "").replace("\n", "")
def main():
text = demisto.args()['value']
if type(text) is not list:
text = [text]
result = list(map(remove_line_breaks, map(tokenize_text, map(clean_html, text))))
if len(result) == 1:
result = result[0]
demisto.results({
'Contents': result,
'ContentsFormat': formats['json'] if type(result) is list else formats['text'],
'EntryContext': {
'WordTokenizeOutput': result
}
})
# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
main()
| 2,089 |
all_functions/configs/proxy_scraper/hide_my_python-master/hide_my_python.py
|
Heroku-elasa/-heroku-buildpack-python-ieee-new
| 0 |
2172207
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
#
# HideMyPython! - A parser for the free proxy list on HideMyAss!
#
# This file contains the main function of the HideMyPython! script.
# It parses the arguments, creates a database, and save the proxies.
#
# Copyright (C) 2013 <NAME> <useless (at) utouch (dot) fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import arguments
import parser
import database
def main():
# We create an argument parser
arg_parser = arguments.create_argument_parser()
# We parse the arguments
args = arg_parser.parse_args(sys.argv[1:])
arguments.process_arguments(args, arg_parser)
# If the verbose mode is on, we display the arguments
if args.verbose:
arguments.print_arguments(args)
# We open the database file where the proxies will be stored
connection, cursor = database.initialize_database(args.database_file)
try:
# We generate the proxies
for proxy in parser.generate_proxy(args):
# And we store them in the database
database.insert_in_database(cursor, proxy)
except KeyboardInterrupt:
if args.verbose:
print('')
print('[warn] received interruption signal')
# We save the changes made to the database, and close the file
connection.commit()
connection.close()
return 0
if __name__ == '__main__':
main()
#~/app-root/runtime/srv/python/bin/python hide_my_python.py -p 80 8080 443 -o ..//..//configs//sites_proxy//all_proxies_list//scraped_list.txt
| 2,050 |
students/k3342/laboratory_works/Shaidullina_Regina/laboratory_work_1/leaderboard/urls.py
|
TonikX/ITMO_ICT_-WebProgramming_2020
| 10 |
2172039
|
from django.contrib import admin
from django.urls import path
from leaderboard import views
from django.contrib.auth.views import LoginView #, LogoutView
urlpatterns = [
path('', views.main, name='main'),
path('leaderboard/', views.leaderboard_view, name='leaderboard'),
path('comments/', views.comments, name='comments'),
path('register/', views.reg, name='register'),
path('login/', LoginView.as_view(), name='login'),
path('logout/', views.LogoutFormView.as_view(), name='logout'),
]
| 495 |
utils/R.py
|
liangzimiao/miyubot
| 0 |
2171531
|
import os
from urllib.parse import urljoin
from urllib.request import pathname2url
from nonebot import logger
from nonebot.adapters.onebot.v11 import MessageSegment
from PIL import Image
from utils import pic2b64
import utils
from configs.path_config import PCR_PATH
# When the QQ client and the bot do not run on the same machine, the http protocol can be used
RES_PROTOCOL = 'file'
# Resource directory; must be readable and writable. On Windows mind backslash escaping
RES_DIR = PCR_PATH
# Required when using the http protocol; in principle this url should point to the RES_DIR directory
RES_URL = 'http://127.0.0.1:5000/static/'
RES_DIR = os.path.expanduser(RES_DIR)
assert RES_PROTOCOL in ('http', 'file', 'base64')
class ResObj:
def __init__(self, res_path):
res_dir = os.path.expanduser(RES_DIR)
fullpath = os.path.abspath(os.path.join(res_dir, res_path))
if not fullpath.startswith(os.path.abspath(res_dir)):
raise ValueError('Cannot access outside RESOUCE_DIR')
self.__path = os.path.normpath(res_path)
@property
def url(self):
"""资源文件的url,供Onebot(或其他远程服务)使用"""
return urljoin(RES_URL, pathname2url(self.__path))
@property
def path(self):
"""资源文件的路径,供Hoshino内部使用"""
return os.path.join(RES_DIR, self.__path)
@property
def exist(self):
return os.path.exists(self.path)
class ResImg(ResObj):
@property
def cqcode(self) -> MessageSegment:
if RES_PROTOCOL == 'http':
return MessageSegment.image(self.url)
elif RES_PROTOCOL == 'file':
return MessageSegment.image(f'file:///{os.path.abspath(self.path)}')
else:
try:
return MessageSegment.image(utils.pic2b64(self.open()))
except Exception as e:
logger.exception(e)
                return MessageSegment.text('[image error]')
def open(self) -> Image:
try:
return Image.open(self.path)
except FileNotFoundError:
            logger.error(f'Missing image resource: {self.path}')
raise
def get(path, *paths):
return ResObj(os.path.join(path, *paths))
def img(path, *paths):
return ResImg(os.path.join('img', path, *paths))
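# Hedged usage sketch (assumes an image exists under RES_DIR/img/; the file name is hypothetical):
#   avatar = img('chara', 'icon', '1000.png')
#   if avatar.exist:
#       msg = avatar.cqcode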
| 2,052 |