max_stars_repo_path (stringlengths 4-182) | max_stars_repo_name (stringlengths 6-116) | max_stars_count (int64 0-191k) | id (stringlengths 7-7) | content (stringlengths 100-10k) | size (int64 100-10k) |
---|---|---|---|---|---|
motor_skills/cip/MjGraspHead.py
|
babbatem/motor_skills
| 0 |
2024970
|
import time, copy
import pathlib
import pickle
import numpy as np
from mujoco_py import cymj, MjViewer
from scipy.spatial.transform import Rotation as R
import motor_skills.core.mj_control as mjc
ARMDOF=6
GRIPPERDOF=6
DOF=ARMDOF+GRIPPERDOF
PREGRASP_STEPS = 500
PREGRASP_GOAL = [0, -0.00, -0.04]
GRASP_STEPS = 500
MAX_FINGER_DELTA=1.3
parent_dir_path = str(pathlib.Path(__file__).parent.absolute())
GPD_POSES_PATH = parent_dir_path + "/../envs/mj_jaco/assets/MjJacoDoorGrasps"
class MjGraspHead(object):
"""
executes a grasp in MuJoCo with the kinova j2s6s300
"""
def __init__(self, sim, viewer=None):
super(MjGraspHead, self).__init__()
self.sim = sim
self.viewer = viewer
# % compute indices and per timestep delta q
self.delta = np.zeros(DOF)
self.finger_joint_idxs = []
for i in range(1,4):
base_idx = cymj._mj_name2id(self.sim.model, 3,"j2s6s300_joint_finger_" + str(i))
tip_idx = cymj._mj_name2id(self.sim.model, 3,"j2s6s300_joint_finger_tip_" + str(i))
self.finger_joint_idxs.append(base_idx)
self.finger_joint_idxs.append(tip_idx)
self.delta[base_idx] = MAX_FINGER_DELTA/GRASP_STEPS
self.delta[tip_idx] = MAX_FINGER_DELTA/GRASP_STEPS
def pregrasp(self, sim):
"""
approaches object.
Moves to PREGRASP_GOAL in ee coordinates with constant orientation.
"""
# % compute ee pose
obj_type = 1 # 3 for joint, 1 for body
body_idx = cymj._mj_name2id(self.sim.model, obj_type,"j2s6s300_link_6")
ee_frame_goal_homog = np.append(PREGRASP_GOAL, 1)
cur_quat = copy.deepcopy(self.sim.data.body_xquat[body_idx])
rot_mat = R.from_quat([cur_quat[1],
cur_quat[2],
cur_quat[3],
cur_quat[0]])
trans_mat = np.zeros([4,4])
trans_mat[:3,:3] = rot_mat.as_dcm()
trans_mat[3,:3] = 0
trans_mat[3,3] = 1
trans_mat[:3,3] = self.sim.data.body_xpos[body_idx]
world_goal = np.matmul(trans_mat, ee_frame_goal_homog)[:3]
for t in range(PREGRASP_STEPS):
self.sim.data.ctrl[:] = mjc.ee_reg2(world_goal,
self.sim.data.body_xquat[body_idx],
self.sim,
body_idx,
kp=np.eye(3)*300, kv=None, ndof=12)
self.sim.forward()
self.sim.step()
if self.viewer is not None:
self.viewer.render()
def execute(self, sim):
"""
implements a naive grasping strategy.
closes fingers until they make contact, at which point they stop.
this by no means ensures grasp stability, but it ought to be okay for grabbing cylinders.
"""
# % approach
self.pregrasp(sim)
# % reset the door
for i in range(2):
sim.data.qpos[DOF+i]=0.0
# % close fingers
new_pos = copy.deepcopy(self.sim.data.qpos[:DOF])
for t in range(GRASP_STEPS):
new_pos += self.delta
# % see which sensors are reporting force
touched = np.where(self.sim.data.sensordata[:6] != 0.0)[0]
# % if they are all in contact, we're done
if len(touched) == 6:
break
# % otherwise, compute new setpoints for those which are not in contact
current_pos = self.sim.data.qpos
for touch_point in touched:
new_pos[self.finger_joint_idxs[touch_point]] = current_pos[self.finger_joint_idxs[touch_point]]
# % compute torque and step
self.sim.data.ctrl[:] = mjc.pd([0] * DOF, [0] * DOF, new_pos, self.sim, ndof=DOF, kp=np.eye(DOF)*300)
self.sim.forward()
self.sim.step()
if self.viewer is not None:
self.viewer.render()
def seed_properly(seed_value=123):
import os
os.environ['PYTHONHASHSEED']=str(seed_value)
import random
random.seed(seed_value)
import numpy as np
np.random.seed(seed_value)
def plan_and_grasp_test():
from motor_skills.envs.mj_jaco import MjJacoDoorImpedanceCIP
from motor_skills.cip.pbplannerWrapper import pbplannerWrapper
seed = 122
while True:
seed+=1
seed_properly(seed)
debug = True
mp = pbplannerWrapper(debug=True)
grasp_file = open(GPD_POSES_PATH, 'rb')
grasp_qs = pickle.load(grasp_file)
env = MjJacoDoorImpedanceCIP(vis=True)
env.reset()
# random state sample
s = mp.planner.validityChecker.sample_state()
env.sim.data.qpos[:6] = s
env.sim.step()
# random grasp candidate
idx = np.random.randint(len(grasp_qs))
g = grasp_qs[idx]
mp.plan(s, g)
mp.execute(env)
# % reset door
for i in range(2):
env.sim.data.qpos[DOF+i]=0.0
# TODO: make sure we got there
# % grasp
head = MjGraspHead(env.sim)
head.execute(env.sim)
# % hover at ee pose after.
obj_type = 1 # 3 for joint, 1 for body
body_idx = cymj._mj_name2id(env.sim.model, obj_type, "j2s6s300_link_6")
xpos = env.sim.data.body_xpos[body_idx]
xquat = env.sim.data.body_xquat[body_idx]
for t in range(100):
env.sim.data.ctrl[:] = mjc.ee_reg2(xpos,
xquat,
env.sim,
body_idx,
kp=np.eye(3)*300, kv=None, ndof=12)
env.sim.step()
if debug:
env.render()
def grasp_only_test():
from motor_skills.envs.mj_jaco import MjJacoDoor
seed = 122
while True:
seed+=1
seed_properly(seed)
debug = True
grasp_file = open(GPD_POSES_PATH, 'rb')
grasp_qs = pickle.load(grasp_file)
env = MjJacoDoor(vis=True)
env.reset()
idx = np.random.randint(len(grasp_qs))
s = grasp_qs[idx]
env.sim.data.qpos[:6] = s
env.sim.data.qpos[6:12] = [0.0]*6
env.sim.data.qvel[:6]=[0.0]*6
full_qpos = copy.deepcopy(env.sim.data.qpos[:12])
torque = mjc.pd(None, [0.0]*12, full_qpos, env.sim, ndof=12, kp=np.eye(12)*300)
env.sim.data.ctrl[:]=torque
env.sim.step()
env.render()
time.sleep(2.0)
# % reset door
for i in range(2):
env.sim.data.qpos[DOF+i]=0.0
# % grasp
head = MjGraspHead(env.sim)
head.execute(env.sim)
# % hover at ee pose after.
obj_type = 1 # 3 for joint, 1 for body
body_idx = cymj._mj_name2id(env.sim.model, obj_type, "j2s6s300_link_6")
xpos = env.sim.data.body_xpos[body_idx]
xquat = env.sim.data.body_xquat[body_idx]
for t in range(100):
env.sim.data.ctrl[:] = mjc.ee_reg2(xpos,
xquat,
env.sim,
body_idx,
kp=np.eye(3)*300, kv=None, ndof=12)
env.sim.step()
if debug:
env.render()
if __name__ == '__main__':
# plan_and_grasp_test()
grasp_only_test()
| 6,200 |
test/test_either.py
|
brunolange/fun
| 3 |
2025434
|
import unittest
from exos.monads.either import Left, Right
class TestEither(unittest.TestCase):
def test_left_right(self):
for value in [
10,
'10',
0,
Right('hello')
]:
self.assertEqual(Right(value).value, value)
self.assertEqual(Left(value).value, value)
def test_bind(self):
self.assertEqual(Right(6).bind(lambda x: x*7).value, 42)
self.assertEqual(Left(6).bind(lambda x: x*7).value, 6)
| 504 |
scripts/naive/nudge_boundaries.py
|
cadia-lvl/Ossian
| 1 |
2025339
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Project: Simple4All - January 2013 - www.simple4all.org
## Contact: <NAME> - <EMAIL>
## Contact: <NAME> - <EMAIL>
'''
When the input alignment
for a segment shows it to be less than the minimum HTS duration
of 25 ms (5ms shift * 5 states = 25ms), it is ignored by HINIT, so
a "0 examples" error is raised and training stops.
This is only an issue with small toy databases.
This script nudges phone boundaries as necessary to make sure that each monophone has at least
1 instance longer than minimum_duration (ms). This is only a temporary work-around.
'''
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import re
from .naive_util import *
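## Assumption: ms_to_htk (imported from naive_util above) converts milliseconds
## into HTK 100 ns time units, so the 25 ms minimum becomes ms_to_htk(25) == 250000.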
def main_work():
#################################################
# ======== Get stuff from command line ==========
def usage():
print("Usage: ...... ")
sys.exit(1)
# e.g.
try:
label_indir = sys.argv[1]
label_outdir = sys.argv[2]
mindur_ms = int(sys.argv[3])
except (IndexError, ValueError):
usage()
if not os.path.isdir(label_outdir):
os.makedirs(label_outdir)
#################################################
minimum_duration = ms_to_htk(mindur_ms)
## Get list of utterances for which utt files exist:
lab_list = sorted(os.listdir(label_indir))
## first pass -- find the problem segments
phones = {}
for labname in lab_list:
#print labname
#print " Apply voice method %s to utt %s"%(method_to_call, utt)
lab = readlist(os.path.join(label_indir, labname))
#print lab
lab = [re.split("[\s\-\+]+", line) for line in lab]
assert len(lab) == sum([len(line)==4 for line in lab]) ## assert all lines are 4 long
# lab = [(phone, int(end)-int(start)) for (start, end, phone, stuff) in lab]
for (start, end, phone, stuff) in lab:
length = int(end)-int(start)
if phone not in phones:
phones[phone] = 1 # 1 means a problem
if length >= minimum_duration:
phones[phone] = 0 # 0 means no problem
print(phones)
#
## 2nd pass -- fix 1st instance of the problem segments
for labname in lab_list:
ends = []
#print " Apply voice method %s to utt %s"%(method_to_call, utt)
lab = readlist(os.path.join(label_indir, labname))
lab = [re.split("[\s\-\+]+", line) for line in lab]
assert len(lab) == sum([len(line)==4 for line in lab]) ## assert all lines are 4 long
lab = [(phone, int(end), int(end)-int(start)) for (start, end, phone, stuff) in lab]
for i in range(len(lab)-1):
(phone, end, length) = lab[i]
if phones[phone] == 1:
if length < minimum_duration:
diff = minimum_duration - length
(next_phone, _next_end, next_length) = lab[i+1]
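# nudge: push this phone's end time into the following phone, but only
# if the neighbour has enough frames to spare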
if next_length >= diff:
end += diff
phones[phone] = 0 # 0 means no problem
ends.append(end)
## remake label with new times:
starts = [0] + ends[:-1]
lab = readlist(os.path.join(label_indir, labname))
lab = [re.split("\s+", line) for line in lab]
names = [name for (s,e,name) in lab]
f = open(os.path.join(label_outdir, labname), "w")
for (s,e,name) in zip(starts, ends, names):
f.write("%s %s %s\n"%(s,e,name))
f.close()
#
# ## 3rd pass -- checked all is fixed
fixed_phones = {}
phone_to_labs = {} ## to track which utts each phone appears in
for labname in lab_list:
#print " Apply voice method %s to utt %s"%(method_to_call, utt)
lab = readlist(os.path.join(label_outdir, labname))
lab = [re.split("[\s\-\+]+", line) for line in lab]
assert len(lab) == sum([len(line)==4 for line in lab]) ## assert all lines are 4 long
# lab = [(phone, int(end)-int(start)) for (start, end, phone, stuff) in lab]
for (start, end, phone, stuff) in lab:
length = int(end)-int(start)
if phone not in phone_to_labs:
phone_to_labs[phone] = []
if labname not in phone_to_labs[phone]:
phone_to_labs[phone].append(labname)
if phone not in fixed_phones:
fixed_phones[phone] = 1 # 1 means a problem
if length >= minimum_duration:
fixed_phones[phone] = 0 # 0 means no problem
## If all else has failed, we will just throw some utterances out to avoid bad phone lengths:
if 1 in fixed_phones.values():
bad_utts = []
for (key,val) in fixed_phones.items():
if val==1:
bad_utts.extend(phone_to_labs[key])
bad_utts = sorted(set(bad_utts)) ## unique it
print('Warning -- phone lengths are problematic: remove the bad utterances: %s'%(" ".join(bad_utts)))
for utt_name in bad_utts:
os.remove(os.path.join(label_outdir, utt_name))
else:
print("phones fixed OK")
if __name__=="__main__":
main_work()
| 5,453 |
LFAutomation/Python/exposure_acquire.py
|
sliakat/SpeReadPy
| 2 |
2024486
|
# Import the .NET class library
import clr
# Import python sys module
import sys
# Import os module
import os
# Import System.IO for saving and opening files
from System.IO import *
# Import C compatible List and String
from System import String
from System.Collections.Generic import List
# Add needed dll references
sys.path.append(os.environ['LIGHTFIELD_ROOT'])
sys.path.append(os.environ['LIGHTFIELD_ROOT']+"\\AddInViews")
clr.AddReference('PrincetonInstruments.LightFieldViewV5')
clr.AddReference('PrincetonInstruments.LightField.AutomationV5')
clr.AddReference('PrincetonInstruments.LightFieldAddInSupportServices')
# PI imports
from PrincetonInstruments.LightField.Automation import Automation
from PrincetonInstruments.LightField.AddIns import CameraSettings
from PrincetonInstruments.LightField.AddIns import DeviceType
def set_value(setting, value):
# Check for existence before setting
# gain, adc rate, or adc quality
if experiment.Exists(setting):
experiment.SetValue(setting, value)
def device_found():
# Find connected device
for device in experiment.ExperimentDevices:
if (device.Type == DeviceType.Camera):
return True
# If connected device is not a camera inform the user
print("Camera not found. Please add a camera and try again.")
return False
# Create the LightField Application (true for visible)
# The 2nd parameter forces LF to load with no experiment
auto = Automation(True, List[String]())
# Get experiment object
experiment = auto.LightFieldApplication.Experiment
if device_found():
#Set exposure time
set_value(CameraSettings.ShutterTimingExposureTime, 20.0)
# Acquire image
experiment.Acquire()
| 1,808 |
datasets/KSDD2/download_and_extract.py
|
Luckygyana/Fabric-Defect-Detection
| 0 |
2025006
|
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile
if __name__ == "__main__":
zipurl = "http://go.vicos.si/kolektorsdd2"
with urlopen(zipurl) as zipresp:
with ZipFile(BytesIO(zipresp.read())) as zfile:
zfile.extractall(".")
| 290 |
setup.py
|
leelew/HRSEPP
| 1 |
2025536
|
import os
from codecs import open
from setuptools import find_packages, setup
# pwd
here = os.path.abspath(os.path.dirname(__file__))
# readme
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# required packages
with open(os.path.join(here, 'requirements.txt'), encoding='utf-8') as f:
install_requires = f.read().splitlines()
# set up
setup(
name='HRSEPP',
version='1.0.0',
description='HRSEPP is a Python library',
license='MIT',
long_description=long_description,
url='https://github.com/leelew/HRSEPP',
author='<NAME>',
author_email='<EMAIL>',
classifiers=[
'Development Status :: 3 - Alpha', # 4 - Beta; 5 - Production/Stable
'Intended Audience :: Developers', # registered users
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3 :: Only',
],
keywords='machine learning models, deep learning models',
package_dir={'': 'HRSEPP'},
packages=find_packages(where='HRSEPP'),
python_requires='>=3.6, <4',
install_requires=install_requires,
include_package_data=False,
)
| 1,252 |
apps/note/views.py
|
sunmlight/Utools
| 0 |
2022653
|
from django.shortcuts import render
from django.views import generic
from django.http import HttpResponseRedirect, JsonResponse, HttpResponse
# Create your views here.
class Index(generic.View):
def get(self, request):
return HttpResponse("note")
| 260 |
bot/__main__.py
|
under735/botkaca
| 0 |
2023298
|
from os.path import join as os_path_join
from pyrogram import Client, Message, MessageHandler, Filters, CallbackQueryHandler
from bot import CONFIG, COMMAND, LOCAL, LOGGER
from bot.handlers import (
start_message_handler,
password_handler,
wrong_room_handler,
help_message_handler,
leech_handler,
cancel_leech_handler,
leech_list_handler
)
# Initialize bot
app = Client(
"Bot",
bot_token=CONFIG.BOT_TOKEN,
api_id=CONFIG.API_ID,
api_hash=CONFIG.API_HASH,
workdir=os_path_join(CONFIG.ROOT, CONFIG.WORKDIR),
workers=343
)
app.set_parse_mode("html")
# register /start handler
app.add_handler(
MessageHandler(
start_message_handler.func,
filters=Filters.command(COMMAND.START)
)
)
if CONFIG.BOT_PASSWORD:
# register /pass handler
app.add_handler(
MessageHandler(
password_handler.func,
filters = Filters.command(COMMAND.PASSWORD)
)
)
# take action on unauthorized chat room
app.add_handler(
MessageHandler(
wrong_room_handler.func,
filters = lambda msg: msg.chat.id not in CONFIG.CHAT_ID
)
)
# register /help handler
app.add_handler(
MessageHandler(
help_message_handler.func,
filters=Filters.command("help")
)
)
# register /leech handler
app.add_handler(
MessageHandler(
leech_handler.func,
filters=Filters.command("leech")
)
)
# register /cancel handler
app.add_handler(
MessageHandler(
cancel_leech_handler.func,
filters=Filters.command("cancel")
)
)
# register /list handler
app.add_handler(
MessageHandler(
leech_list_handler.func,
filters=Filters.command(COMMAND.LEECH_LIST)
)
)
# cancel button handler
app.add_handler(
CallbackQueryHandler(
cancel_leech_handler.func,
filters=lambda query: query.data.startswith(COMMAND.CANCEL_LEECH)
)
)
# forward any message to leech handler
@app.on_message(filters=Filters.private)
async def default_message_handler(client : Client, message : Message):
message.text = "/" + "leech@Leech2vid_bot" + " " + message.text
return await leech_handler.func(client, message)
if __name__ == '__main__':
app.run()
| 2,266 |
py/test/selenium/webdriver/common/proxy_tests.py
|
davidgonzalezbarbe/Selenium
| 0 |
2024136
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.common.proxy import Proxy, ProxyType
class TestProxy(object):
MANUAL_PROXY = {
'httpProxy': 'some.url:1234',
'ftpProxy': 'ftp.proxy',
'noProxy': 'localhost, foo.localhost',
'sslProxy': 'ssl.proxy:1234',
'socksProxy': 'socks.proxy:65555',
'socksUsername': 'test',
'socksPassword': '<PASSWORD>',
}
PAC_PROXY = {
'proxyAutoconfigUrl': 'http://pac.url:1234',
}
AUTODETECT_PROXY = {
'autodetect': True,
}
def testCanAddManualProxyToDesiredCapabilities(self):
proxy = Proxy()
proxy.http_proxy = self.MANUAL_PROXY['httpProxy']
proxy.ftp_proxy = self.MANUAL_PROXY['ftpProxy']
proxy.no_proxy = self.MANUAL_PROXY['noProxy']
proxy.sslProxy = self.MANUAL_PROXY['sslProxy']
proxy.socksProxy = self.MANUAL_PROXY['socksProxy']
proxy.socksUsername = self.MANUAL_PROXY['socksUsername']
proxy.socksPassword = self.MANUAL_PROXY['socksPassword']
desired_capabilities = {}
proxy.add_to_capabilities(desired_capabilities)
proxy_capabilities = self.MANUAL_PROXY.copy()
proxy_capabilities['proxyType'] = 'MANUAL'
expected_capabilities = {'proxy': proxy_capabilities}
assert expected_capabilities == desired_capabilities
def testCanAddAutodetectProxyToDesiredCapabilities(self):
proxy = Proxy()
proxy.auto_detect = self.AUTODETECT_PROXY['autodetect']
desired_capabilities = {}
proxy.add_to_capabilities(desired_capabilities)
proxy_capabilities = self.AUTODETECT_PROXY.copy()
proxy_capabilities['proxyType'] = 'AUTODETECT'
expected_capabilities = {'proxy': proxy_capabilities}
assert expected_capabilities == desired_capabilities
def testCanAddPACProxyToDesiredCapabilities(self):
proxy = Proxy()
proxy.proxy_autoconfig_url = self.PAC_PROXY['proxyAutoconfigUrl']
desired_capabilities = {}
proxy.add_to_capabilities(desired_capabilities)
proxy_capabilities = self.PAC_PROXY.copy()
proxy_capabilities['proxyType'] = 'PAC'
expected_capabilities = {'proxy': proxy_capabilities}
assert expected_capabilities == desired_capabilities
def testCanNotChangeInitializedProxyType(self):
proxy = Proxy(raw={'proxyType': 'direct'})
with pytest.raises(Exception):
proxy.proxy_type = ProxyType.SYSTEM
proxy = Proxy(raw={'proxyType': ProxyType.DIRECT})
with pytest.raises(Exception):
proxy.proxy_type = ProxyType.SYSTEM
def testCanInitManualProxy(self):
proxy = Proxy(raw=self.MANUAL_PROXY)
assert ProxyType.MANUAL == proxy.proxy_type
assert self.MANUAL_PROXY['httpProxy'] == proxy.http_proxy
assert self.MANUAL_PROXY['ftpProxy'] == proxy.ftp_proxy
assert self.MANUAL_PROXY['noProxy'] == proxy.no_proxy
assert self.MANUAL_PROXY['sslProxy'] == proxy.sslProxy
assert self.MANUAL_PROXY['socksProxy'] == proxy.socksProxy
assert self.MANUAL_PROXY['socksUsername'] == proxy.socksUsername
assert self.MANUAL_PROXY['socksPassword'] == proxy.socksPassword
def testCanInitAutodetectProxy(self):
proxy = Proxy(raw=self.AUTODETECT_PROXY)
assert ProxyType.AUTODETECT == proxy.proxy_type
assert self.AUTODETECT_PROXY['autodetect'] == proxy.auto_detect
def testCanInitPACProxy(self):
proxy = Proxy(raw=self.PAC_PROXY)
assert ProxyType.PAC == proxy.proxy_type
assert self.PAC_PROXY['proxyAutoconfigUrl'] == proxy.proxy_autoconfig_url
def testCanInitEmptyProxy(self):
proxy = Proxy()
assert ProxyType.UNSPECIFIED == proxy.proxy_type
assert '' == proxy.http_proxy
assert '' == proxy.ftp_proxy
assert '' == proxy.no_proxy
assert '' == proxy.sslProxy
assert '' == proxy.socksProxy
assert '' == proxy.socksUsername
assert '' == proxy.socksPassword
assert proxy.auto_detect is False
assert '' == proxy.proxy_autoconfig_url
desired_capabilities = {}
proxy.add_to_capabilities(desired_capabilities)
proxy_capabilities = {}
proxy_capabilities['proxyType'] = 'UNSPECIFIED'
expected_capabilities = {'proxy': proxy_capabilities}
assert expected_capabilities == desired_capabilities
| 5,241 |
blender/arm/logicnode/animation/LN_animation_state.py
|
Lykdraft/armory
| 0 |
2025409
|
from arm.logicnode.arm_nodes import *
class AnimationStateNode(ArmLogicTreeNode):
"""Get information about the current animation of an object."""
bl_idname = 'LNAnimationStateNode'
bl_label = 'Animation State'
arm_version = 1
def init(self, context):
super(AnimationStateNode, self).init(context)
self.add_input('ArmNodeSocketObject', 'Object')
self.add_output('NodeSocketBool', 'Is Playing')
self.add_output('NodeSocketString', 'Action')
self.add_output('NodeSocketInt', 'Frame')
add_node(AnimationStateNode, category=PKG_AS_CATEGORY)
| 599 |
blurr/examples/text/multilabel_classification.py
|
warner-benjamin/blurr
| 0 |
2025546
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/99d_text-examples-multilabel.ipynb (unless otherwise specified).
__all__ = []
# Cell
import os
import datasets
from transformers import *
from fastai.text.all import *
from fastai.callback.hook import _print_shapes
from ...text.data.core import *
from ...text.modeling.core import *
from ...text.utils import *
from ...utils import *
logging.set_verbosity_error()
| 418 |
git_quality_check/indicators/commits/is_empty_body.py
|
gcattan/git-quality-check
| 0 |
2024461
|
from git_quality_check.utils import (
is_valid_log,
remove_header,
)
def is_empty_body(log: str):
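"""Return 1 if the log is invalid or its body is empty after stripping the header, else 0."""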
if not is_valid_log(log):
return 1
log = remove_header(log)
if not is_valid_log(log):
return 1
return 0
| 244 |
remove-ics-links.py
|
supersciencegrl/website-tools
| 1 |
2023956
|
__author__ = "<NAME>"
__copyright__ = "Copyright 2020"
__version__ = "1.0"
__email__ = "<EMAIL>"
__status__ = "Production"
import os
import time
from shutil import copy2
print(f'remove-ics-links.py v.{__version__}')
''' This script is modified from the mother script events-from-html.py '''
if os.path.isdir('C:\\Users\\Nessa\\Documents\\GitHub\\supersciencegrl.github.io'):
mydir = 'C:\\Users\\Nessa\\Documents\\GitHub\\supersciencegrl.github.io'
testdir = 'C:\\Users\\Nessa\\Documents\\GitHub\\website-tools'
else:
mydir = 'C:\\Users\\S1024501\\OneDrive - Syngenta\\Documents\\GitHub\\supersciencegrl.github.io'
testdir = 'C:\\Users\\S1024501\\OneDrive - Syngenta\\Documents\\GitHub\\website-tools'
os.chdir(mydir)
def check_html_is_list(html):
if type(html) is str:
htmllist = html.split('\n')
else:
htmllist = html
return htmllist
def updateevent(html):
htmllist = check_html_is_list(html)
event = []
for line in htmllist:
if line.lstrip().startswith('<tr class="covidrow'):
event = [line]
# Remove ics link
elif event and line.lstrip().startswith('<td class="columnb2'):
if '<a class="fa-ics"' in line:
newline = line.partition(' href="https://supersciencegrl.co.uk/')[0] + line.partition('@supersciencegrl.co.uk.ics"')[2]
event.append(newline)
else: # keep cells that have no ics link
event.append(line)
elif event and not line.lstrip().startswith('</tr>'): # For all other rows within the event
event.append(line)
elif event: # Row starting with '</tr>'
event.append(line)
return event
def updatehtml(html):
starttime = time.time()
htmllist = check_html_is_list(html)
html_out = []
firsteventfound = False
lasteventfound = False
eventcount = 0
for n, row in enumerate(htmllist):
if row.lstrip().startswith('<tr class="covidrow'):
eventcount += 1
firsteventfound = True
event = updateevent(htmllist[n:])
html_out = html_out + event
elif row.lstrip().startswith('</tbody>'):
lasteventfound = True
if not firsteventfound or lasteventfound:
html_out.append(row)
elif not row.lstrip():
html_out.append(row)
# Copy original file to testdir in case of corruption
copy2(os.path.join(mydir, inputfile), os.path.join(testdir, inputfile))
with open(os.path.join(mydir, inputfile), 'w') as fout:
fout.writelines(html_out)
endtime = time.time()
return f'{round(endtime - starttime, 4)} s', f'{eventcount} entries'
html_in = []
inputfile = 'online-old.html'
with open(os.path.join(mydir, inputfile), 'r') as fin:
for line in fin:
html_in.append(line)
| 2,807 |
benchmark.py
|
lxndrblz/ccl_chrome_indexeddb
| 0 |
2025261
|
import sys
import pathlib
import ccl_chromium_indexeddb
import time
def main(args):
start = time.time()
ldb_path = pathlib.Path(args[0])
wrapper = ccl_chromium_indexeddb.WrappedIndexDB(ldb_path)
for db_info in wrapper.database_ids:
db = wrapper[db_info.dbid_no]
print("------Database------")
print(f"db_number={db.db_number}; name={db.name}; origin={db.origin}")
print()
print("\t---Object Stores---")
for obj_store_name in db.object_store_names:
obj_store = db[obj_store_name]
print(f"\tobject_store_id={obj_store.object_store_id}; name={obj_store.name}")
try:
one_record = next(obj_store.iterate_records())
except StopIteration:
one_record = None
print()
end = time.time()
print("Elapsed time: {} seconds.".format(int(end-start)))
if __name__ == '__main__':
if len(sys.argv) < 2:
print(f"USAGE: {pathlib.Path(sys.argv[0]).name} <ldb dir path>")
sys.exit(1)
main(sys.argv[1:])
| 1,062 |
applications/physbam/physbam-lib/Scripts/Archives/pd/send/SERVER.py
|
schinmayee/nimbus
| 20 |
2025177
|
#!/usr/bin/python
from pd.common import CONFIG
from pd.common import SOCKET
import os
import time
import threading
import socket
# Host representation: dictionary having hostname, user
class SEND_SERVER:
def __init__(self):
#self.hosts_filename=CONFIG.hosts_filename
self.commands=["Register_Client","Users","Send_Text","Send_Picture"]
self.lookup_hosts=True
self.mutex=threading.Lock()
self.next_claim_id=1
self.users={}
self.clients={}
self.clientid_to_username={}
#self.Read_Host_List()
def Client_Connect(self,x):
self.clients[x.host]=x
def Client_Disconnect(self,x):
del self.clients[x.host]
if self.clientid_to_username.has_key(x.host):
user=self.clientid_to_username[x.host]
del self.clientid_to_username[x.host]
self.users[user].remove(x.host)
print "Unregistered user=%s client=%s"%(user,x.host)
def Registered(self,client_id):
return self.clientid_to_username.has_key(client_id)
def Register_Client(self,client_id,user,host):
if self.Registered(client_id): raise SOCKET.COMMAND_EXCEPTION("Connection already registered for user %s"%self.clientid_to_username[client_id])
if not self.users.has_key(user): self.users[user]=[]
self.clientid_to_username[client_id]=user
self.users[user].append(client_id)
print "Registered user=%s client=%s"%(user,client_id)
def Send(self,client_id,users,data):
if not self.Registered(client_id): raise SOCKET.COMMAND_EXCEPTION("Your client is not registered")
not_found_users=[]
users_and_clientids=[]
for user in users:
if not self.users.has_key(user) or len(self.users.keys())==0: not_found_users.append(user)
else: users_and_clientids.extend(map(lambda x: (user,x),self.users[user]))
if len(not_found_users)>0: raise SOCKET.COMMAND_EXCEPTION("No registration for users: %s"%",".join(not_found_users))
for user,clientid in users_and_clientids:
client=self.clients[clientid]
client.queueWrite((-100,None,("SEND",(self.clientid_to_username[client_id],client_id),users_and_clientids,data)))
def Send_Text(self,client_id,users,message):
return self.Send(client_id,users,("MESSAGE",message))
def Send_Picture(self,client_id,users,picture):
return self.Send(client_id,users,("PICTURE",picture))
def Users(self,client):
return self.users
if __name__ == "__main__":
server=SEND_SERVER()
SOCKET.SERVER(socket.gethostbyname(CONFIG.pdsend_server_host),CONFIG.pdsend_server_port,server) #,(CONFIG.server_private_key_file,CONFIG.server_certificate_file,CONFIG.ca_certificate_file))
| 2,809 |
alembic/versions/9599db59caaa_create_table_for_neural_net_and_gbm_.py
|
Windact/ml_api
| 0 |
2025235
|
"""Create table for neural net and gbm persistence
Revision ID: 9599db59caaa
Revises:
Create Date: 2021-06-23 15:22:05.295203
"""
from alembic import op
import sqlalchemy as sa
# For JSONB datatype
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '9599db59caaa'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"gradient_boosting_model_predictions",
sa.Column("id", sa.Integer(), nullable=False,primary_key=True),
sa.Column("user_id", sa.String(length=36), nullable=False),
sa.Column(
"datetime_captured",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=True,
),
sa.Column("model_version", sa.String(length=36), nullable=False),
sa.Column("inputs", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column("outputs", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_gradient_boosting_model_predictions_datetime_captured"),
"gradient_boosting_model_predictions",
["datetime_captured"],
unique=False,
)
op.create_table(
"neural_net_model_predictions",
sa.Column("id", sa.Integer(), nullable=False,primary_key=True),
sa.Column("user_id", sa.String(length=36), nullable=False),
sa.Column(
"datetime_captured",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=True,
),
sa.Column("model_version", sa.String(length=36), nullable=False),
sa.Column("inputs", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column("outputs", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_neural_net_model_predictions_datetime_captured"),
"neural_net_model_predictions",
["datetime_captured"],
unique=False,
)
def downgrade():
op.drop_index(
op.f("ix_neural_net_model_predictions_datetime_captured"),
table_name="neural_net_model_predictions",
)
op.drop_table("neural_net_model_predictions")
op.drop_index(
op.f("ix_gradient_boosting_model_predictions_datetime_captured"),
table_name="gradient_boosting_model_predictions",
)
op.drop_table("gradient_boosting_model_predictions")
| 2,536 |
so_pip/__main__.py
|
matthewdeanmartin/so_pip
| 3 |
2025548
|
# noinspection PyPep8
"""so_pip/StackOverflow Pip
Not associated with PyPA, nor StackOverflow.
Usage:
so_pip vendorize <name> (--question=<question_id>|--answer=<answer_id>|--package=<package>) [--revision=<revision>] [options]
so_pip search <name> --query=<query> --tags=<tags> [--count=<count>] [options]
so_pip uninstall <names>... [options]
so_pip list [options]
so_pip freeze [options]
so_pip show <names>... [options]
so_pip (-h | --help)
so_pip --version
Options:
-h --help Show this screen.
-v --version Show version.
-c --count=<count> How many posts to get [default: 2].
-o --output=<output> Folder for packages. Defaults to /output
-q --question=<question_id> Stackoverflow question id
-a --answer=<answer_id> Stackoverflow answer id
-r --revision=<revision> Revision id for answer.
--package=<package> Question or answer id in random_name format
--all-in-one Combine all code into one module
--verbose Show logging
--quiet No informational logging
"""
import logging
import sys
import docopt
from random_names.make_name import number_from_name
from so_pip import _version as meta
from so_pip import settings as settings
from so_pip.commands import freeze as freeze
from so_pip.commands import list_all as list_all
from so_pip.commands import search as search
from so_pip.commands import show as show
from so_pip.commands import uninstall as uninstall
from so_pip.commands import vendorize as vendorize
# Do these need to stick around?
LOGGERS = []
LOGGER = logging.getLogger(__name__)
def main() -> int:
"""Get the args object from command parameters"""
arguments = docopt.docopt(__doc__, version=f"so_pip {meta.__version__}")
LOGGER.debug(arguments)
output_folder = arguments["--output"]
settings.OUTPUT_FOLDER = output_folder
package_name = arguments["--package"]
id_by_package_name = 0
if package_name:
try:
id_by_package_name = number_from_name(package_name)
except TypeError:
print(f"{package_name} can't be converted to a question or answer id.")
print("Please check meta data and re-run with a numeric id")
return -1
if arguments["--quiet"]:
settings.QUIET = True
if arguments["--verbose"]:
# root logger, all modules
for root in ("so_pip", "__main__", "urllib3"):
logger = logging.getLogger(root)
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
logger.addHandler(handler)
LOGGERS.append(logger)
if arguments["vendorize"]:
prefix = arguments["<name>"] or ""
question = arguments["--question"]
answer = arguments["--answer"]
# why is this sometimes <revision>?
revision_string = arguments.get("--revision", None)
# will people expect 0 based revisions...
if not revision_string:
revision_string = arguments.get("<revision>", None)
if revision_string is not None:
revision = int(revision_string)
else:
revision = None
all_in_one = arguments["--all-in-one"]
if not question and not answer and not id_by_package_name:
print("Must specify --question or --answer identifier")
return -1
if not output_folder:
print("No --output folder specified")
return -1
if question:
packages_made = vendorize.import_so_question(
prefix, question, output_folder, all_in_one
)
elif answer:
packages_made = vendorize.import_so_answer(
prefix, answer, output_folder, revision
)
elif id_by_package_name:
# HACK: Should check if that id is a q or a.
if "_a_" in package_name:
packages_made = vendorize.import_so_answer(
prefix, id_by_package_name, output_folder, revision
)
else:
packages_made = vendorize.import_so_question(
prefix, id_by_package_name, output_folder, all_in_one
)
else:
raise TypeError("Need to specify a question or answer")
print(f"Vendorized {','.join(packages_made)} at {output_folder}")
elif arguments["uninstall"]:
packages = arguments["<names>"]
if not packages:
print("No packages specified to uninstall")
return -1
for package in packages:
uninstall.uninstall_package(output_folder, package)
print(
f"Uninstalled {','.join(packages)} from vendorized folder.\n"
f"If you also installed with pip you will need to uninstall with pip"
)
elif arguments["list"]:
if not output_folder:
print("Missing --output folder")
return -1
list_all.list_packages(output_folder)
elif arguments["freeze"]:
if not output_folder:
print("Missing --output folder")
return -1
freeze.freeze_environment(output_folder)
elif arguments["show"]:
packages = arguments["<names>"]
if not packages:
print("No packages specified to show")
return -1
for package in packages:
show.show(output_folder, package)
elif arguments["search"]:
prefix = arguments["<name>"]
if not prefix:
prefix = ""
query = arguments["--query"]
if not query:
print("--query required for search")
return -1
try:
count_str = arguments["--count"]
count = int(count_str)
except ValueError:
print(f"Can't convert {arguments.get('--count', '')} to a number")
return -1
# TODO: better way to do this with docopts
if arguments["--tags"]:
tags = arguments["--tags"].replace(",", ";").split(";")  # accept ';' or ',' separators
else:
tags = []
search.import_so_search(prefix, query, tags, output_folder, count)
else:
print("Don't recognize that command.")
return -1
return 0
if __name__ == "__main__":
sys.exit(main())
| 6,561 |
AI & Domains (ML etc) Library/Titanic_kaggle/pipeline2.py
|
hammad1201/Hacktoberfest-2021
| 27 |
2023979
|
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer
address = 'train.csv'
address2 = 'test.csv'
Data = pd.read_csv(address)
Test_data = pd.read_csv(address2)
features = ['Pclass', 'Sex', 'Fare']
y = Data.Survived
X = Data[features]
X_test = Test_data[features]
cat_col = [cat for cat in X.columns if X[cat].nunique() < 10 and X[cat].dtype == 'object']
num_col = [num for num in X.columns if X[num].dtype in ['int64', 'float64']]
# X_test shares the same column names, so the same column lists apply to it.
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, train_size=0.8, random_state=0)
num_trans = SimpleImputer(strategy='constant')
cat_trans = Pipeline(
steps=[
('imputer', SimpleImputer(strategy='most_frequent')),
('onehot', OneHotEncoder(handle_unknown='ignore'))
]
)
preprocessor = ColumnTransformer(
transformers=[
('num', num_trans, num_col),
('cat', cat_trans, cat_col)
]
)
model = RandomForestRegressor(n_estimators=100, max_depth=20, random_state=0)
mypip = Pipeline(
steps=[
('preprocessing', preprocessor),
('model', model)
]
)
mypip.fit(X_train, y_train)
preds = mypip.predict(X_test)
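# The regressor outputs continuous scores, so round them to 0/1 survival labels.
# (A classifier such as RandomForestClassifier would make this step unnecessary.)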
A = np.around(preds)
prediction = A.astype(int)
output = pd.DataFrame({'PassengerId' : Test_data.PassengerId, 'Survived' : prediction})
output.to_csv('Submission_pipeline.csv', index=False)
print("Your submission was successfully saved!")
| 2,015 |
cogs/garden.py
|
samuel-hunter/CSSBot_Py
| 6 |
2025256
|
import discord
from discord.ext import commands
import random
class TinyGarden(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.filler = u'\U0001F331' # 🌱seedling
self.flowers = [
u'\U0001F33A', # 🌺 hibiscus
u'\U0001F33B', # 🌻 sunflower
u'\U0001F33C', # 🌼 daisy
u'\U0001F337', # 🌷 tulip
u'\U0001F335', # 🌵 cactus
u'\U0001F339', # 🌹 rose
u'\U0001F338' # 🌸 cherry blossom
]
self.plants = [
u'\U0001F33F', # 🌿 herb
u'\U00002618', # ☘️ shamrock
u'\U0001F340', # 🍀 4-leaf clover
u'\U0001F333', # 🌳 deciduous tree
u'\U0001F332' # 🌲 evergreen tree
]
self.vegetables = [
u'\U0001F344', # 🍄 mushroom
u'\U0001F955', # 🥕 carrot
u'\U0001F345', # 🍅 tomato
u'\U0001F351', # 🍑 peach
u'\U0001F352', # 🍒 cherries
u'\U0001F353' # 🍓 strawberry
]
self.animals = [
u'\U0001F41D', # 🐝 bee
u'\U0001F41B', # 🐛 bug
u'\U0001F99A', # 🦚 peacock
u'\U0001F41E', # 🐞 ladybug
u'\U0001F98B', # 🦋 butterfly
u'\U0001F426', # 🐦 bird
u'\U0001F40C' # 🐌 snail
]
# ping command
@commands.cooldown(1, 30, commands.BucketType.user)
@commands.command(name='garden')
async def make_tiny_garden(self, ctx):
async with ctx.channel.typing():
# 8x8 grid: 64 choices
# 8-16 flowers
# 1-3 animals
# 5-10 vegetables
# 5-8 plants
#
# begin as single-dimension list, wrap when sending message
garden = [ u'' for i in range(64) ]
# add the planti bois
idx = 0
# flowers first
for i in range(random.randint(8, 16)):
garden[idx] = random.choice(self.flowers)
idx += 1
# then animals
for i in range(random.randint(1, 3)):
garden[idx] = random.choice(self.animals)
idx += 1
# vegertals
for i in range(random.randint(5, 10)):
garden[idx] = random.choice(self.vegetables)
idx += 1
# other green leafy things
for i in range(random.randint(5, 8)):
garden[idx] = random.choice(self.plants)
idx += 1
# fill remaining array with seedlings
for i in range(idx, 64):
garden[i] = self.filler
# shuffle and assemble garden
random.shuffle(garden)
for i in range(8):
garden[i * 8 + 7] = garden[i * 8 + 7] + '\n'
await ctx.send(''.join(garden))
def setup(bot):
bot.add_cog(TinyGarden(bot))
if __name__=='__main__':
import doctest
doctest.testmod()
| 3,082 |
lintcode/02IntArray/056TwoSum.py
|
zhaoxinlu/leetcode-algorithms
| 0 |
2025324
|
# -*- coding: utf-8 -*-
"""
Editor: <NAME>
School: BUPT
Date: 2018-03-03
Algorithm idea: Two Sum via a hash map
"""
class Solution:
"""
@param numbers: An array of Integer
@param target: target = numbers[index1] + numbers[index2]
@return: [index1 + 1, index2 + 1] (index1 < index2)
"""
def twoSum(self, numbers, target):
# write your code here
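# map each value's complement (target - value) to its index, so a later
# value that appears in the map completes the pair in O(1)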
numDict = {}
for i in range(len(numbers)):
if numbers[i] in numDict:
# the docstring promises 1-based indices
return [numDict[numbers[i]] + 1, i + 1]
else:
numDict[target - numbers[i]] = i
if __name__ == '__main__':
print(Solution().twoSum([2, 7, 11, 15], 18))
| 635 |
ml_mnist/nn/rbm.py
|
YashNita/MNIST_Challange_SOL
| 62 |
2025132
|
import numpy as np
import env
from base import BaseEstimator
from utils import RNG, print_inline, width_format, Stopwatch
from layers import FullyConnected
from activations import sigmoid
class RBM(BaseEstimator):
"""
Examples
--------
>>> X = RNG(seed=1337).rand(32, 256)
>>> rbm = RBM(n_hidden=100,
... k=4,
... batch_size=2,
... n_epochs=50,
... learning_rate='0.05->0.005',
... momentum='0.5->0.9',
... verbose=True,
... early_stopping=5,
... random_seed=1337)
>>> rbm
RBM(W=None, batch_size=2, best_W=None, best_epoch=None, best_hb=None,
best_recon=inf, best_vb=None, early_stopping=5, epoch=0, hb=None, k=4,
learning_rate='0.05->0.005', momentum='0.5->0.9', n_epochs=50,
n_hidden=100, persistent=True, random_seed=1337, vb=None, verbose=True)
"""
def __init__(self, n_hidden=256, persistent=True, k=1,
batch_size=10, n_epochs=10, learning_rate=0.1, momentum=0.9,
early_stopping=None, verbose=False, random_seed=None):
self.n_hidden = n_hidden
self.persistent = persistent
self.k = k # k in CD-k / PCD-k
self.batch_size = batch_size
self.n_epochs = n_epochs
self.learning_rate = learning_rate
self._learning_rate = None
self.momentum = momentum
self._momentum = None
self.early_stopping = early_stopping
self._early_stopping = self.early_stopping
self.verbose = verbose
self.random_seed = random_seed
self.W = None
self.vb = None # visible units bias
self.hb = None # hidden units bias
self.epoch = 0
self.best_W = None
self.best_vb = None
self.best_hb = None
self.best_epoch = None
self.best_recon = np.inf
self._dW = None
self._dvb = None
self._dhb = None
self._rng = None
self._persistent = None
self._initialized = False
super(RBM, self).__init__(_y_required=False)
def propup(self, v):
"""Propagate visible units activation upwards to the hidden units."""
z = np.dot(v, self.W) + self.hb
return sigmoid(z)
def sample_h_given_v(self, v0_sample):
"""Infer state of hidden units given visible units."""
h1_mean = self.propup(v0_sample)
h1_sample = self._rng.binomial(size=h1_mean.shape, n=1, p=h1_mean)
return h1_mean, h1_sample
def propdown(self, h):
"""Propagate hidden units activation downwards to the visible units."""
z = np.dot(h, self.W.T) + self.vb
return sigmoid(z)
def sample_v_given_h(self, h0_sample):
"""Infer state of visible units given hidden units."""
v1_mean = self.propdown(h0_sample)
v1_sample = self._rng.binomial(size=v1_mean.shape, n=1, p=v1_mean)
return v1_mean, v1_sample
def gibbs_hvh(self, h0_sample):
"""Performs a step of Gibbs sampling starting from the hidden units."""
v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
return v1_mean, v1_sample, h1_mean, h1_sample
def gibbs_vhv(self, v0_sample):
"""Performs a step of Gibbs sampling starting from the visible units."""
raise NotImplementedError()
def free_energy(self, v_sample):
"""Function to compute the free energy."""
raise NotImplementedError()
def update(self, X_batch):
# compute positive phase
ph_mean, ph_sample = self.sample_h_given_v(X_batch)
# decide how to initialize chain
if self._persistent is not None:
chain_start = self._persistent
else:
chain_start = ph_sample
# gibbs sampling
for step in range(self.k):
nv_means, nv_samples, \
nh_means, nh_samples = self.gibbs_hvh(chain_start if step == 0 else nh_samples)
# update weights
self._dW = self._momentum * self._dW + \
np.dot(X_batch.T, ph_mean) - np.dot(nv_samples.T, nh_means)
self._dvb = self._momentum * self._dvb +\
np.mean(X_batch - nv_samples, axis=0)
self._dhb = self._momentum * self._dhb +\
np.mean(ph_mean - nh_means, axis=0)
self.W += self._learning_rate * self._dW
self.vb += self._learning_rate * self._dvb
self.hb += self._learning_rate * self._dhb
# remember state if needed
if self.persistent:
self._persistent = nh_samples
return np.mean(np.square(X_batch - nv_means))
def batch_iter(self, X):
n_batches = len(X) // self.batch_size
for i in range(n_batches):
start = i * self.batch_size
end = start + self.batch_size
X_batch = X[start:end]
yield X_batch
if n_batches * self.batch_size < len(X):
yield X[end:]
def train_epoch(self, X):
mean_recons = []
for i, X_batch in enumerate(self.batch_iter(X)):
mean_recons.append(self.update(X_batch))
if self.verbose and i % max(1, len(X) // (self.batch_size * 16)) == 0:
print_inline('.')
if self.verbose: print_inline(' ')
return np.mean(mean_recons)
def _fit(self, X):
if not self._initialized:
layer = FullyConnected(self.n_hidden,
bias=0.,
random_seed=self.random_seed)
layer.setup_weights(X.shape)
self.W = layer.W
self.vb = np.zeros(X.shape[1])
self.hb = layer.b
self._dW = np.zeros_like(self.W)
self._dvb = np.zeros_like(self.vb)
self._dhb = np.zeros_like(self.hb)
self._rng = RNG(self.random_seed)
self._rng.reseed()
timer = Stopwatch(verbose=False).start()
for _ in range(self.n_epochs):
self.epoch += 1
if self.verbose:
print_inline('Epoch {0:>{1}}/{2} '.format(self.epoch, len(str(self.n_epochs)), self.n_epochs))
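# a learning_rate string 'S->F' requests an exponential anneal from S to F over the epochs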
if isinstance(self.learning_rate, str):
S, F = map(float, self.learning_rate.split('->'))
self._learning_rate = S + (F - S) * (1. - np.exp(-(self.epoch - 1.)/8.)) / (
1. - np.exp(-(self.n_epochs - 1.)/8.))
else:
self._learning_rate = self.learning_rate
if isinstance(self.momentum, str):
S, F = map(float, self.momentum.split('->'))
self._momentum = S + (F - S) * (1. - np.exp(-(self.epoch - 1)/4.)) / (
1. - np.exp(-(self.n_epochs - 1)/4.))
else:
self._momentum = self.momentum
mean_recon = self.train_epoch(X)
if mean_recon < self.best_recon:
self.best_recon = mean_recon
self.best_epoch = self.epoch
self.best_W = self.W.copy()
self.best_vb = self.vb.copy()
self.best_hb = self.hb.copy()
self._early_stopping = self.early_stopping
msg = 'elapsed: {0} sec'.format(width_format(timer.elapsed(), default_width=5, max_precision=2))
msg += ' - recon. mse: {0}'.format(width_format(mean_recon, default_width=6, max_precision=4))
msg += ' - best r-mse: {0}'.format(width_format(self.best_recon, default_width=6, max_precision=4))
if self.early_stopping:
msg += ' {0}*'.format(self._early_stopping)
if self.verbose:
print(msg)
if self._early_stopping == 0:
return
if self.early_stopping:
self._early_stopping -= 1
def _serialize(self, params):
for attr in ('W', 'best_W',
'vb', 'best_vb',
'hb', 'best_hb'):
if attr in params and params[attr] is not None:
params[attr] = params[attr].tolist()
return params
def _deserialize(self, params):
for attr in ('W', 'best_W',
'vb', 'best_vb',
'hb', 'best_hb'):
if attr in params and params[attr] is not None:
params[attr] = np.asarray(params[attr])
return params
if __name__ == '__main__':
# run corresponding tests
from utils.testing import run_tests
run_tests(__file__)
| 8,597 |
study/study_data_utils.py
|
sealuzh/benchmark-instability-prediction-replication-package
| 0 |
2023920
|
import numpy as np
import pandas as pd
def pivot_table_grouping(dataframe, index, columns, metrics, index_sort, columns_sort, aggfunc):
"""
We define a function to produce a pivot table that group by model, iterations, and threshold, applying an aggregation function.
"""
# Group into a pivot table.
result_df = dataframe.pivot_table(index=index, columns=columns, values=metrics, aggfunc=aggfunc)
# Sort the index.
if index_sort:
if len(index_sort) == 1:
result_df = result_df.reindex(index_sort[0])
elif len(index_sort) > 1:
for i, sorting in enumerate(index_sort):
result_df = result_df.reindex(sorting, level=i)
# Sort the columns.
if columns_sort:
if len(columns_sort) == 1:
result_df = result_df.reindex(columns_sort[0], axis=1)
if len(columns_sort) > 1:
for i, sorting in enumerate(columns_sort):
result_df = result_df.reindex(sorting, level=i, axis=1)
return result_df
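# Example with hypothetical column names:
# pivot_table_grouping(df, index=['model'], columns=['threshold'],
#                      metrics=['f1'], index_sort=None, columns_sort=None, aggfunc='median')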
def median_long_dataframe(dataframe, models, metrics):
"""
We define a utility function to prepare the data.
"""
# Compute the medians for each of the groups.
median_df = dataframe[dataframe['model'].isin(models)].groupby(['model', 'iterations', 'threshold', 'selector', 'sampler'], observed=True).median().drop(columns=['fold'])
# Transform the data from wide to long.
long_median_df = pd.melt(median_df.reset_index(), id_vars=['model', 'iterations', 'threshold', 'selector', 'sampler'], value_vars=metrics)
# Transform the "variable" label into categorical.
long_median_df['variable'] = pd.Categorical(long_median_df['variable'], categories=metrics)
# Return the dataframe.
return long_median_df
| 1,783 |
oops_fhir/r4/code_system/gender_status.py
|
Mikuana/oops_fhir
| 0 |
2024455
|
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["GenderStatus"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class GenderStatus:
"""
Gender status
This example value set defines a set of codes that can be used to
indicate the current state of the animal's reproductive organs.
Status: draft - Version: 4.0.1
Copyright None
http://hl7.org/fhir/animal-genderstatus
"""
neutered = CodeSystemConcept(
{
"code": "neutered",
"definition": "The animal has been sterilized, castrated or otherwise made infertile.",
"designation": [{"language": "nl", "value": "gesteriliseerd"}],
"display": "Neutered",
}
)
"""
Neutered
The animal has been sterilized, castrated or otherwise made infertile.
"""
intact = CodeSystemConcept(
{
"code": "intact",
"definition": "The animal's reproductive organs are intact.",
"designation": [{"language": "nl", "value": "intact"}],
"display": "Intact",
}
)
"""
Intact
The animal's reproductive organs are intact.
"""
unknown = CodeSystemConcept(
{
"code": "unknown",
"definition": "Unable to determine whether the animal has been neutered.",
"designation": [{"language": "nl", "value": "onbekend"}],
"display": "Unknown",
}
)
"""
Unknown
Unable to determine whether the animal has been neutered.
"""
class Meta:
resource = _resource
| 1,684 |
src/ingest/GFSA.py
|
openwfm/wrfxpy
| 23 |
2023813
|
from __future__ import absolute_import
from ingest.grib_source import GribError
from ingest.grib_reanalysis import GribReanalysis
from datetime import datetime
import pytz
class GFSA(GribReanalysis):
"""
The GFS (Global Forecast System) grib source as provided by NOMADS.
The NCEP operational Global Forecast System analysis grids are on a 0.5 global latitude longitude grid.
Model analysis runs occur at 00, 06, 12, and 18 UTC daily.
Grids include forecast time steps at a 3 hourly interval from 0 to 6.
"""
def __init__(self, arg):
super(GFSA, self).__init__(arg)
def vtables(self):
"""
Returns the variable tables that must be linked in for use with the GFS data source.
:return: a dictionary of variable tables
"""
return {'geogrid_vtable': 'GEOGRID.TBL',
'ungrib_vtable': 'Vtable.GFS',
'metgrid_vtable': 'METGRID.TBL.GFS'}
def namelist_keys(self):
"""
Returns the namelist keys that must be modified in namelist.input with GFS.
:return: a list of paths to local GRIB files
"""
# GFS requires that 'num_metgrid_soil_levels' is set to 4.
return { 'domains' : { 'num_metgrid_levels' : 34,
'num_metgrid_soil_levels': 4,
'p_top_requested': 10000 }}
def make_relative_url(self, utc_time):
"""
Build the relative URL of the GFS GRIB2 file, which is based on the UTC time.
:param utc_time: the UTC time
:return: the relative URL
"""
path_tmpl = '%04d%02d/%04d%02d%02d/gfsanl_4_%04d%02d%02d_%02d00_000.grb2'
year, mon, day, hour = utc_time.year, utc_time.month, utc_time.day, utc_time.hour
return path_tmpl % (year, mon, year, mon, day, year, mon, day, hour)
# instance variables
info_url = 'https://data.nodc.noaa.gov/cgi-bin/iso?id=gov.noaa.ncdc:C00634'
info = "Global Forecast System (GFS) Analysis"
remote_url = 'https://www.ncei.noaa.gov/data/global-forecast-system/access/historical/analysis'
period_hours = 6
id = "GFSA"
available_from_utc = datetime(2004,3,1,tzinfo=pytz.UTC)
available_to_utc = datetime.now(pytz.UTC)
# see also
# https://www.ncdc.noaa.gov/data-access/model-data/model-datasets/global-forcast-system-gfs
# https://developers.google.com/earth-engine/datasets/catalog/NOAA_GFS0P25
# https://catalog.data.gov/dataset/noaa-ncep-global-forecast-system-gfs-atmospheric-model
| 2,552 |
tools/display-sighandlers.py
|
blink1073/pexpect
| 0 |
2025590
|
#!/usr/bin/env python
# Displays all signals, their values, and their handlers.
from __future__ import print_function
import signal
FMT = '{name:<10} {value:<5} {description}'
# header
print(FMT.format(name='name', value='value', description='description'))
print('-' * (33))
for name, value in [(signal_name, getattr(signal, signal_name))
for signal_name in dir(signal)
if signal_name.startswith('SIG')
and not signal_name.startswith('SIG_')]:
handler = signal.getsignal(value)
description = {
signal.SIG_IGN: "ignored(SIG_IGN)",
signal.SIG_DFL: "default(SIG_DFL)"
}.get(handler, handler)
print(FMT.format(name=name, value=value, description=description))
| 751 |
progs/shell_sort.py
|
Breccia/s-py
| 0 |
2024532
|
#!/usr/local/anaconda3/bin/python
import sys
sys.path.insert(0, "../libs")
from spy_shell_sort import shell_sort
if __name__ == '__main__':
test_data = [23, 5, 1, 65, 3, 1, 20, 34,56,78,90,12]
print("Shell sort aka merge-xchng sort")
print("Length of given data: {0}, data={1}".format(len(test_data), test_data))
sorted_data = shell_sort(test_data)
print("Length of sorted data: {0}, data={1}".format(len(sorted_data), sorted_data))
| 444 |
fastseg/image/palette.py
|
NeelayS/walkability_ml
| 0 |
2024622
|
"""Various RGB palettes for coloring segmentation labels."""
cityscapes = (
128, 64, 128,  # road
244, 35, 232,  # sidewalk
70, 70, 70,  # building
102, 102, 156,  # wall
190, 153, 153,  # fence
153, 153, 153,  # pole
250, 170, 30,  # traffic light
220, 220, 0,  # traffic sign
107, 142, 35,  # vegetation
152, 251, 152,  # terrain
70, 130, 180,  # sky
220, 20, 60,  # person
255, 0, 0,  # rider
0, 0, 142,  # car
0, 0, 70,  # truck
0, 60, 100,  # bus
0, 80, 100,  # train
0, 0, 230,  # motorcycle
119, 11, 32,  # bicycle
)
all_palettes = {
"cityscapes": cityscapes,
}
| 605 |
plots/plot_fig8_cp_by_media_type.py
|
avalanchesiqi/youtube-crosstalk
| 10 |
2025589
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Which media types attract more cross-partisan comments?
Usage: python plot_fig8_cp_by_media_type.py
Input data files: data/video_meta.csv
Output image file: images/fig8_cp_by_media_type.pdf
Time: ~1M
"""
import up # go to root folder
import platform
import numpy as np
from collections import defaultdict
import pandas as pd
import pingouin as pg
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style(style='white')
from utils.helper import Timer
from utils.plot_conf import hide_spines, aaai_init_plot
def main():
timer = Timer()
timer.start()
left_channel_cp_dict = defaultdict(list)
right_channel_cp_dict = defaultdict(list)
cid_type_dict = {}
with open('data/video_meta.csv', 'r') as fin:
fin.readline()
for line in fin:
_, channel_id, media_leaning, media_type, _, num_comment, \
num_cmt_from_liberal, num_cmt_from_conservative, _ = line.rstrip().split(',', 8)
num_comment = int(num_comment)
num_cmt_from_liberal = int(num_cmt_from_liberal)
num_cmt_from_conservative = int(num_cmt_from_conservative)
if num_comment >= 10:
cid_type_dict[channel_id] = media_type
if media_leaning == 'L':
left_channel_cp_dict[channel_id].append(num_cmt_from_conservative / num_comment * 100)
elif media_leaning == 'R':
right_channel_cp_dict[channel_id].append(num_cmt_from_liberal / num_comment * 100)
example_left_channels = {'UCupvZG-5ko_eiXAupbDfxWw': -2, # CNN
'UCaXkIU1QidjPwiAYu6GcHjg': -3, # MSNBC
'UCBi2mrWuNuyYy4gbM6fU18Q': -4, # ABC News
}
for channel_id in example_left_channels:
print(channel_id, cid_type_dict[channel_id],
np.mean(left_channel_cp_dict[channel_id]), np.std(left_channel_cp_dict[channel_id]))
print('-' * 79)
example_right_channels = {'UCXIJgqnII2ZOINSWNOGFThA': 2, # Fox News
'UCe02lGcO-ahAURWuxAJnjdA': 3, # Timcast
'UCLoNQH9RCndfUGOb2f7E1Ew': 4, # The Next News Network
}
for channel_id in example_right_channels:
print(channel_id, cid_type_dict[channel_id],
np.mean(right_channel_cp_dict[channel_id]), np.std(right_channel_cp_dict[channel_id]))
print('-' * 79)
topic_list = []
party_list = []
cp_list = []
left_list = [0] * 4
right_list = [0] * 4
for channel_id, v in left_channel_cp_dict.items():
if len(v) >= 5:
party_list.append('Left')
cp_list.append(np.mean(left_channel_cp_dict[channel_id]))
if cid_type_dict[channel_id] == 'national':
topic_list.append('national')
left_list[0] += 1
elif cid_type_dict[channel_id] == 'local':
topic_list.append('local')
left_list[1] += 1
elif cid_type_dict[channel_id] == 'organization':
topic_list.append('organization')
left_list[2] += 1
elif cid_type_dict[channel_id] == 'independent':
topic_list.append('independent')
left_list[3] += 1
for channel_id, v in right_channel_cp_dict.items():
if len(v) >= 5:
party_list.append('Right')
cp_list.append(np.mean(right_channel_cp_dict[channel_id]))
if cid_type_dict[channel_id] == 'national':
topic_list.append('national')
right_list[0] += 1
elif cid_type_dict[channel_id] == 'local':
topic_list.append('local')
right_list[1] += 1
elif cid_type_dict[channel_id] == 'organization':
topic_list.append('organization')
right_list[2] += 1
elif cid_type_dict[channel_id] == 'independent':
topic_list.append('independent')
right_list[3] += 1
print()
print('num of left-leaning national, local, organization, independent media', left_list)
print('num of right-leaning national, local, organization, independent media', right_list)
print('total number of media that have at least 5 videos with at least 10 comments', sum(left_list) + sum(right_list))
print()
df = pd.DataFrame({'topic': topic_list, 'party': party_list, 'cp_list': cp_list})
for topic in ['national', 'local', 'organization', 'independent']:
for metric in ['cp_list']:
left = df[(df.topic == topic) & (df.party == 'Left')][metric]
right = df[(df.topic == topic) & (df.party == 'Right')][metric]
print(topic, metric)
print(np.median(left), np.median(right))
print(pg.mwu(left, right, tail='one-sided'))
print('-' * 79)
ax1 = aaai_init_plot(plt, profile='1x1')
sns.violinplot(x=df["topic"], y=df['cp_list'], hue=df["party"],
palette={"Right": "#e06666", "Left": "#6d9eeb"},
inner="quartile",
linewidth=1.5, cut=1.5,
ax=ax1, order=["national", "local", "organization", "independent"],
scale="area", split=True, width=0.7, hue_order=['Left', 'Right'])
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles=handles[0:], labels=['left channel', 'right channel'], loc='upper left', frameon=False, edgecolor='k', handlelength=1, handleheight=1)
ax1.set(xlabel=None)
ax1.set_yticks([0, 50, 100])
ax1.set_ylabel('%cross-talk')
ax1.set_ylim([-10, 102])
hide_spines(ax1)
timer.stop()
plt.tight_layout()
plt.savefig('images/fig8_cp_by_media_type.pdf', bbox_inches='tight')
if not platform.system() == 'Linux':
plt.show()
if __name__ == '__main__':
main()
| 5,975 |
multitest_transport/api/config_set_api.py
|
maksonlee/multitest_transport
| 0 |
2025388
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to provide config set APIs."""
# Non-standard docstrings are used to generate the API documentation.
import endpoints
from protorpc import message_types
from protorpc import messages
from protorpc import remote
from multitest_transport.api import base
from multitest_transport.models import build
from multitest_transport.models import config_set_helper
from multitest_transport.models import messages as mtt_messages
from multitest_transport.models import ndb_models
@base.MTT_API.api_class(resource_name='config_set',
path='config_sets')
class ConfigSetApi(remote.Service):
"""A handler for Config Set API."""
def _ConvertFromMessage(self, msg):
"""Convert a message to a config set info."""
return mtt_messages.Convert(
msg, ndb_models.ConfigSetInfo, from_cls=mtt_messages.ConfigSetInfo)
@base.ApiMethod(
endpoints.ResourceContainer(
message_types.VoidMessage,),
mtt_messages.BuildChannelList,
path='build_channels', http_method='GET',
name='build_channels')
def ListBuildChannels(self, request):
"""Fetches a list of build channels used for importing config sets."""
channels = []
for channel_id in config_set_helper.CONFIG_SET_BUILD_CHANNEL_IDS:
channels.append(build.GetBuildChannel(channel_id))
return mtt_messages.BuildChannelList(
build_channels=mtt_messages.ConvertList(
channels, mtt_messages.BuildChannel))
@base.ApiMethod(
endpoints.ResourceContainer(
message_types.VoidMessage,
include_remote=messages.BooleanField(1),
statuses=messages.EnumField(
ndb_models.ConfigSetStatus, 2, repeated=True),),
mtt_messages.ConfigSetInfoList,
path='/config_sets', http_method='GET', name='list')
def List(self, request):
"""Fetches a list of config sets.
Parameters:
include_remote: True to check remote config sets and determine the
imported config sets are updatable, False to only return imported
config sets
statuses: config set statuses to include
"""
imported_infos = config_set_helper.GetLocalConfigSetInfos()
remote_infos = []
if request.include_remote:
remote_infos = config_set_helper.GetRemoteConfigSetInfos()
info_message_list = config_set_helper.UpdateConfigSetInfos(imported_infos,
remote_infos)
if request.statuses:
info_message_list = [msg for msg in info_message_list
if msg.status in request.statuses]
return mtt_messages.ConfigSetInfoList(
config_set_infos=info_message_list)
@base.ApiMethod(
endpoints.ResourceContainer(mtt_messages.ConfigSetInfo),
mtt_messages.ConfigSetInfo,
path='latest_version',
http_method='POST',
name='latest_version')
def GetLatestVersion(self, request):
imported_info = self._ConvertFromMessage(request)
return config_set_helper.GetLatestVersion(imported_info)
@base.ApiMethod(
endpoints.ResourceContainer(
message_types.VoidMessage,
url=messages.StringField(1),
content=messages.StringField(2)),
mtt_messages.ConfigSetInfo,
path='import/{url}',
http_method='POST',
name='import')
def Import(self, request):
"""Downloads and imports a config set.
Parameters:
url: URL from which to download a config file
content: contents of a config file, only used if url is not provided
"""
content = (request.content if request.content else
config_set_helper.ReadRemoteFile(request.url))
return config_set_helper.Import(content)
@base.ApiMethod(
endpoints.ResourceContainer(message_types.VoidMessage,
url=messages.StringField(1)),
message_types.VoidMessage,
path='{url}',
http_method='DELETE',
name='delete')
def Delete(self, request):
"""Removes a config set and all associated objects (tests, etc).
Parameters:
url: the url of the config set to remove
"""
config_set_helper.Delete(request.url)
return message_types.VoidMessage()
| 4,779 |
examples/fourFn.py
|
klahnakoski/mo-parsing
| 1 |
2025578
|
# fourFn.py
#
# Demonstration of the mo_parsing module, implementing a simple 4-function expression parser,
# with support for scientific notation, and symbols for e and pi.
# Extended to add exponentiation and simple built-in functions.
# Extended test cases, simplified pushFirst method.
# Removed unnecessary expr.suppress() call (thanks <NAME>!), and added Group
# Changed fnumber to use a Regex, which is now the preferred method
# Reformatted to latest pyparsing features, support multiple and variable args to functions
#
# Copyright 2003-2019 by <NAME>
#
import math
import operator
from mo_parsing import *
from mo_parsing.helpers import delimited_list
from mo_parsing.utils import alphas, alphanums
exprStack = []
def push_first(toks):
exprStack.append(toks[0])
def push_unary_minus(toks):
for t in toks:
if t == "-":
exprStack.append("unary -")
else:
break
"""
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
"""
# use CaselessKeyword for e and pi, to avoid accidentally matching
# functions that start with 'e' or 'pi' (such as 'exp'); Keyword
# and CaselessKeyword only match whole words
e = CaselessKeyword("E")
pi = CaselessKeyword("PI")
# fnumber = Combine(Word("+-"+nums, nums) +
# Optional("." + Optional(Word(nums))) +
# Optional(e + Word("+-"+nums, nums)))
# or use provided number, but convert back to str:
# fnumber = number().add_parse_action(lambda t: str(t[0]))
fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?")
ident = Word(alphas, alphanums + "_$")
plus, minus, mult, div = map(Literal, "+-*/")
lpar, rpar = map(Suppress, "()")
addop = plus | minus
multop = mult | div
expop = Literal("^")
expr = Forward()
expr_list = delimited_list(Group(expr))
# add parse action that replaces the function identifier with a (name, number of args) tuple
fn_call = (ident + lpar - Group(expr_list) + rpar).add_parse_action(lambda t: (
(t[0], t[1].length()),
))
atom = (
addop[...]
+ (
(fn_call | pi | e | fnumber | ident).add_parse_action(push_first)
| Group(lpar + expr + rpar)
)
).add_parse_action(push_unary_minus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...",
# we get right-to-left exponents instead of left-to-right; that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor <<= atom + (expop + factor).add_parse_action(push_first)[...]
term = factor + (multop + factor).add_parse_action(push_first)[...]
expr <<= term + (addop + term).add_parse_action(push_first)[...]
bnf = expr
# map operator symbols to corresponding arithmetic operations
epsilon = 1e-12
opn = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"^": operator.pow,
}
fn = {
"sin": math.sin,
"cos": math.cos,
"tan": math.tan,
"exp": math.exp,
"abs": abs,
"trunc": lambda a: int(a),
"round": round,
"sgn": lambda a: -1 if a < -epsilon else 1 if a > epsilon else 0,
}
def evaluate_stack(s):
op, num_args = s.pop(), 0
if isinstance(op, tuple):
op, num_args = op
if op == "unary -":
return -evaluate_stack(s)
if op in opn:
# note: operands are pushed onto the stack in reverse order
op2 = evaluate_stack(s)
op1 = evaluate_stack(s)
return opn[op](op1, op2)
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op in fn:
# note: args are pushed onto the stack in reverse order
args = tuple(reversed([evaluate_stack(s) for _ in range(num_args)]))
return fn[op](*args)
elif op[0].isalpha():
raise Exception("invalid identifier '%s'" % op)
else:
# try to evaluate as int first, then as float if int fails
try:
return int(op)
except ValueError:
return float(op)
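# Hedged usage sketch (added, not in the original example): evaluate_stack()
# consumes the stack from the end, so "9 + 3 * 4" corresponds to the stack
# ['9', '3', '4', '*', '+'] built by the push_first parse actions above.
if __name__ == "__main__":
    demo_stack = ["9", "3", "4", "*", "+"]
    print(evaluate_stack(demo_stack))  # expected: 21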
| 4,141 |
Class1/Class1X8.py
|
GnetworkGnome/Class
| 0 |
2025340
|
# Import Modules
from ciscoconfparse import CiscoConfParse
# Parse Configuration File
cisco_config = CiscoConfParse("cisco_ipsec.txt")
# Search for Crypto Objects
crypto = cisco_config.find_objects(r"^crypto map CRYPTO")
# Print Crypto Objects
for i in crypto:
print(i.text)
for child in i.children:
print(child.text)
| 337 |
src/data/io/loader.py
|
AlanGanem/fastai-flow
| 0 |
2025130
|
import pandas as pd
import pickle
from pathlib import Path
def load_csv(path, encoding = 'utf-8', sep = ';'):
path = Path(r'{}'.format(path))
data = pd.read_csv(path, sep = sep, encoding = encoding)
return data
def save_csv(data, folder_path, file_name):
folder_path = Path(r'{}'.format(folder_path))
file_path = folder_path / file_name
data.to_csv(file_path)
return file_path
def load_object(path):
path = Path(path)
with (open(path, "rb")) as openfile:
obj = pickle.load(openfile)
return obj
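# Hedged counterpart sketch (added, not in the original module): persist an
# object with pickle, mirroring load_object() above.
def save_object(obj, path):
    path = Path(path)
    with open(path, "wb") as openfile:
        pickle.dump(obj, openfile)
    return path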
| 546 |
homecontrol/modules/tasmota_rf_adapter/module.py
|
jaipradeesh/HomeControl
| 0 |
2025431
|
"""Support for Tasmota RF devices"""
from contextlib import suppress
import json
from functools import reduce
import asyncio
from homecontrol.dependencies.entity_types import Item
class TasmotaRFAdapter(Item):
"""The TasmotaRFAdapter class"""
sending: asyncio.Event
async def init(self):
"""Initialise the adapter"""
self.sending = asyncio.Event()
self.sending.set()
@self.core.event_engine.register("mqtt_connected")
async def on_mqtt_connected(event, mqtt_adapter):
"""Handle connection"""
if mqtt_adapter == self.cfg["mqtt_adapter"]:
self.cfg["mqtt_adapter"].client.subscribe(
self.cfg["topic"] + "/tele/RESULT")
@self.core.event_engine.register("mqtt_message_received")
async def on_mqtt_message_received(event, mqtt_adapter, message):
"""Handle message"""
if mqtt_adapter == self.cfg["mqtt_adapter"]:
# pylint: disable=no-member
with suppress(json.decoder.JSONDecodeError):
data = json.loads(message.payload)
if data.get("RfReceived"):
code = data["RfReceived"].get("Data", 0)
bits = bin(int(code, 16))[2:][::2]
self.core.event_engine.broadcast(
"rf_code_received",
code=int(bits, 2),
length=len(bits))
async def send_code(self, code: int) -> None:
"""Send RF code"""
await self.sending.wait()
self.sending.clear()
binary = bin(int(code))[2:]
zero_padded = reduce(
lambda x, y: x + y, zip(["0"] * len(binary), binary))
data = "#" + hex(int("".join(zero_padded), 2))[2:]
self.cfg["mqtt_adapter"].client.publish(
self.cfg["topic"] + "/cmnd/RfCode", data)
self.core.loop.call_later(self.cfg["tx_interval"], self.sending.set)
async def stop(self):
"""Stops the adapter"""
self.cfg["mqtt_adapter"].client.unsubscribe(
self.cfg["topic"] + "/tele/RESULT")
| 2,167 |
mxm-python/src/main/jazz/lstm.py
|
MusicExMachina/MxM
| 11 |
2025542
|
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
import random
import collections
import time
start_time = time.time()
def elapsed(sec):
if sec<60:
return str(sec) + " sec"
elif sec<(60*60):
return str(sec/60) + " min"
else:
return str(sec/(60*60)) + " hr"
# Target log path
logs_path = "logs/testlog.txt"
writer = tf.summary.FileWriter(logs_path)
training_data_f = "data/test_dataset.txt"
songs = []
with open(training_data_f,"r") as reader:
    songs.append(reader.readline().split(','))
training_data = songs[0]  # the symbol sequence used for batching below
print("Loaded training data...")
def build_dataset(words):
    # Assign each unique symbol an integer id, most frequent first.
    count = collections.Counter(words).most_common()
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return dictionary, reverse_dictionary
dictionary, reverse_dictionary = build_dataset(training_data)
vocab_size = len(dictionary)
# Parameters
learning_rate = 0.001
training_iters = 50000
display_step = 1000
n_input = 3
n_hidden = 512
# tf Graph input
x = tf.placeholder("float", [None, n_input, 1])
y = tf.placeholder("float", [None, vocab_size])
# RNN output node weights and biases
weights = {
'out': tf.Variable(tf.random_normal([n_hidden, vocab_size]))
}
biases = {
'out': tf.Variable(tf.random_normal([vocab_size]))
}
def RNN(x, weights, biases):
x = tf.reshape(x, [-1, n_input])
x = tf.split(x,n_input,1)
rnn_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(n_hidden),rnn.BasicLSTMCell(n_hidden)])
outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)
return tf.matmul(outputs[-1], weights['out']) + biases['out']
pred = RNN(x, weights, biases)
# Loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost)
# Model evaluation
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as session:
session.run(init)
step = 0
offset = random.randint(0,n_input+1)
end_offset = n_input + 1
acc_total = 0
loss_total = 0
writer.add_graph(session.graph)
while step < training_iters:
# Generate a minibatch. Add some randomness on selection process.
if offset > (len(training_data)-end_offset):
offset = random.randint(0, n_input+1)
symbols_in_keys = [ [dictionary[ str(training_data[i])]] for i in range(offset, offset+n_input) ]
symbols_in_keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, 1])
symbols_out_onehot = np.zeros([vocab_size], dtype=float)
symbols_out_onehot[dictionary[str(training_data[offset+n_input])]] = 1.0
symbols_out_onehot = np.reshape(symbols_out_onehot,[1,-1])
_, acc, loss, onehot_pred = session.run([optimizer, accuracy, cost, pred], \
feed_dict={x: symbols_in_keys, y: symbols_out_onehot})
loss_total += loss
acc_total += acc
if (step+1) % display_step == 0:
print("Iter= " + str(step+1) + ", Average Loss= " + \
"{:.6f}".format(loss_total/display_step) + ", Average Accuracy= " + \
"{:.2f}%".format(100*acc_total/display_step))
acc_total = 0
loss_total = 0
symbols_in = [training_data[i] for i in range(offset, offset + n_input)]
symbols_out = training_data[offset + n_input]
symbols_out_pred = reverse_dictionary[int(tf.argmax(onehot_pred, 1).eval())]
print("%s - [%s] vs [%s]" % (symbols_in,symbols_out,symbols_out_pred))
step += 1
offset += (n_input+1)
print("Optimization Finished!")
print("Elapsed time: ", elapsed(time.time() - start_time))
print("Run on command line.")
print("\ttensorboard --logdir=%s" % (logs_path))
print("Point your web browser to: http://localhost:6006/")
while True:
prompt = "%s words: " % n_input
sentence = input(prompt)
sentence = sentence.strip()
words = sentence.split(' ')
if len(words) != n_input:
continue
try:
symbols_in_keys = [dictionary[str(words[i])] for i in range(len(words))]
for i in range(32):
keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, 1])
onehot_pred = session.run(pred, feed_dict={x: keys})
onehot_pred_index = int(tf.argmax(onehot_pred, 1).eval())
sentence = "%s %s" % (sentence,reverse_dictionary[onehot_pred_index])
symbols_in_keys = symbols_in_keys[1:]
symbols_in_keys.append(onehot_pred_index)
print(sentence)
    except KeyError:
        print("Word not in dictionary")
| 4,292 |
example-plugin/src/flake8_example_plugin/__init__.py
|
jmcgill298/flake8
| 0 |
2024906
|
"""Module for an example Flake8 plugin."""
from .on_by_default import ExampleOne
from .off_by_default import ExampleTwo
__all__ = (
'ExampleOne',
'ExampleTwo',
)
| 172 |
authentication/forms.py
|
hosseinmoghimi/waiter
| 1 |
2025293
|
from django import forms
class ResetPasswordForm(forms.Form):
username=forms.CharField(required=True,max_length=200)
old_password=forms.CharField(max_length=150, required=False)
new_password=forms.CharField(max_length=150, required=True)
class LoginAsUserForm(forms.Form):
username=forms.CharField(required=True,max_length=200)
class UploadProfileImageForm(forms.Form):
# profile_id=forms.IntegerField(required=True)
image=forms.ImageField(required=True)
class EditProfileForm(forms.Form):
# profile_id=forms.IntegerField(required=True)
first_name=forms.CharField(max_length=100, required=True)
last_name=forms.CharField(max_length=100, required=True)
email=forms.CharField(max_length=150, required=False)
bio=forms.CharField(max_length=150, required=False)
mobile=forms.CharField(max_length=150, required=False)
address=forms.CharField(max_length=150, required=False)
class LoginForm(forms.Form):
username=forms.CharField(max_length=50, required=True)
password=forms.CharField(max_length=150, required=True)
back_url=forms.CharField(max_length=150, required=False)
class RegisterForm(forms.Form):
username=forms.CharField(max_length=50, required=True)
password=forms.CharField(max_length=150, required=True)
first_name=forms.CharField(max_length=50, required=True)
last_name=forms.CharField(max_length=50, required=True)
mobile=forms.CharField(max_length=50, required=False)
address=forms.CharField(max_length=200, required=False)
bio=forms.CharField(max_length=50, required=False)
| 1,583 |
Drivers/DMM6500_DAQ6510/DMM6500_Python_Sockets_Driver/DAQ6510_04_4WResistance_Scan.py
|
398786172/keithley
| 31 |
2023615
|
#--------------------------------------------------------------------------------
# DESCRIPTION:
#   a. This example uses the Keithley DAQ6510 to perform 4-wire (Kelvin)
#      resistance scanning
#--------------------------------------------------------------------------------
import socket
import struct
import math
import time
import Keithley_DMM6500_Sockets_Driver as kei
#===== MAIN PROGRAM STARTS HERE =====
ipAddress1 = "192.168.1.165"
port = 5025
timeout = 20.0
myFile = "dmm_functions.tsp"
DAQ6510 = kei.DMM6500()
myID = DAQ6510.Connect(ipAddress1, 5025, 20000, 1, 1)
DAQ6510.echoCmd = 1
scanCount = 10
scanInterval = 1.0 # for this setup, limit to no less than 5s intervals
print(myID)
t1 = time.time()
DAQ6510.LoadScriptFile(myFile)
DAQ6510.SendCmd("do_beep(1.0, 3500)")
DAQ6510.Reset()
DAQ6510.SetFunction_4W_Resistance("107:109", DAQ6510.OCOMP.ON, DAQ6510.OLeadDetect.ON)
DAQ6510.SetMeasure_Range("107:109", DAQ6510.AutoRange.ON)
DAQ6510.SetMeasure_NPLC("107:109", 1.0)
DAQ6510.SetMeasure_AutoDelay("107:109", DAQ6510.DmmState.ON)
DAQ6510.SetMeasure_AutoZero("107:109", DAQ6510.DmmState.ON)
DAQ6510.SetMeasure_Count("107:109", 1)
DAQ6510.SetScan_BasicAttributes("107:109", scanCount, scanInterval)
DAQ6510.Init()
startIndex = 1
endIndex = 3
chanCnt = 3
targetCnt = scanCount * chanCnt
loopCnt = 1
accumCnt = DAQ6510.QueryCmd("print(defbuffer1.n)", 8)
while(endIndex < (targetCnt+1)):
myData = DAQ6510.GetScan_Data(chanCnt, startIndex, endIndex)
print("Scan {}: {}".format(loopCnt, myData))
myDataList = myData.split(",")
startIndex += chanCnt
endIndex += chanCnt
loopCnt += 1
DAQ6510.Disconnect()
t2 = time.time()
# Notify the user of completion and the test time achieved.
print("done")
print("{0:.6f} s".format(t2-t1))
input("Press Enter to continue...")
exit()
| 1,828 |
gen.py
|
finnbar/potion-gen
| 1 |
2025402
|
'''
Utilises the ingredient generators to build actual recipes.
Also uses the name generators to build a name.
'''
from random import choice, randrange
from ingredients import ingredients
from name import names
def generatePotion():
ingredientCount = randrange(4, 7)
recipeIngredients = []
while len(recipeIngredients) < ingredientCount:
newIngredient = choice(ingredients).create()
        if newIngredient not in recipeIngredients:
recipeIngredients.append(newIngredient)
name = choice(names).create()
return name + ":\n" + "\n".join(recipeIngredients)
if __name__ == '__main__':
for i in range(10):
print(generatePotion())
print("\n")
| 700 |
changjie.py
|
hktonylee/ChangJieFun
| 0 |
2025309
|
#!/bin/env python
# -*- coding: utf-8 -*-
import codecs
import sys, tty, termios
class InputMethod:
def __init__(self):
self._wordMap = {}
def add(self, stroke, character):
lst = self._wordMap.setdefault(stroke, [])
lst.append(character)
def get(self, stroke):
return self._wordMap[stroke]
WORDS = '!'
class InputMethod2:
def __init__(self):
self._wordMap = {}
def add(self, stroke, character):
d = self._wordMap
n = len(stroke)
for i in range(n):
d = d.setdefault(stroke[i], {})
d.setdefault(WORDS, []).append(character)
def get(self, stroke):
try:
d = self._wordMap
for ch in stroke:
d = d[ch]
return d[WORDS]
except KeyError:
return []
def get_generator(self, stroke):
d = self._wordMap
for ch in stroke:
d = d[ch]
for item in d[WORDS]:
yield item
for ch in d:
if ch != WORDS:
words = d[ch].get(WORDS, None)
if words is not None:
for item in words:
yield item
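# Hedged illustration (added, not in the original): InputMethod2 stores
# strokes in a nested-dict trie keyed by single letters, with WORDS ('!')
# marking the characters completed at each node. For example:
#   im = InputMethod2(); im.add('ab', u'X'); im.add('abc', u'Y')
#   im.get('ab') == [u'X']
#   list(im.get_generator('ab')) == [u'X', u'Y']   # exact match, then one-key extensions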
KEY_CTRL_C = '\x03'
KEY_DELETE = ['\x10', '\x7F']
KEY_ENTER = '\x0D'
KEY_BACKSPACE = '\x08'
KEY_ESCAPE = '\x1B'
class InteractivePrompt(object):
def __init__(self):
pass
def read_ch(self):
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(fd)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def run(self):
while True:
ch = self.read_ch()
if ch == KEY_CTRL_C and self.on_ctrl_c():
return
elif ch in KEY_DELETE:
self.on_delete()
elif ch == KEY_ENTER:
self.on_enter()
elif ch == KEY_ESCAPE:
self.on_escape()
else:
self.on_input(ch)
def on_ctrl_c(self):
sys.stdout.write('\n')
return True
def on_input(self, ch):
pass
def on_enter(self):
pass
def on_delete(self):
pass
def on_escape(self):
pass
class BasicInteractivePrompt(InteractivePrompt):
def on_input(self, ch):
sys.stdout.write(ch)
sys.stdout.write(' ')
sys.stdout.write(hex(ord(ch)))
sys.stdout.write(' ')
def on_enter(self):
sys.stdout.write('ENTER ')
def on_delete(self):
sys.stdout.write(KEY_BACKSPACE)
STATE_INPUT_STROKE = 1
STATE_SELECT_CHARACTER = 2
class ChangJieInteractivePrompt(InteractivePrompt):
def __init__(self):
super(ChangJieInteractivePrompt, self).__init__()
self._strokes = ""
self._state = STATE_INPUT_STROKE
self._selecting_characters = []
self._selecting_characters_length = 0
def on_ctrl_c(self):
sys.stdout.write('\n')
return True
def on_input(self, ch):
if self._state == STATE_INPUT_STROKE:
if ch == ' ':
self._input_stroke()
elif ch.isalpha():
                self._strokes += ch.lower()
sys.stdout.write(ch)
elif self._state == STATE_SELECT_CHARACTER:
if ch.isdigit():
n = (int(ch) - 1) % 10
if n < len(self._selecting_characters):
self._select_character(n)
elif ch == ' ':
self._select_character(0)
elif ch.isalpha():
self._select_character(0)
                self._strokes += ch.lower()
sys.stdout.write(ch)
def on_enter(self):
if self._state == STATE_INPUT_STROKE:
self._input_stroke()
elif self._state == STATE_SELECT_CHARACTER:
self._select_character(0)
def on_delete(self):
if self._state == STATE_INPUT_STROKE:
if len(self._strokes) > 0:
sys.stdout.write(KEY_BACKSPACE + ' ' + KEY_BACKSPACE)
self._strokes = self._strokes[:-1]
else:
self._clear_buffer(2)
elif self._state == STATE_SELECT_CHARACTER:
self._clear_select_characters()
self._state = STATE_INPUT_STROKE
def _select_character(self, n):
ch = self._selecting_characters[n]
self._clear_select_characters()
sys.stdout.write(ch)
self._state = STATE_INPUT_STROKE
def _clear_buffer(self, n, clear_before=True, clear_after=True):
if clear_before:
sys.stdout.write(KEY_BACKSPACE * n)
sys.stdout.write(' ' * n)
if clear_after:
sys.stdout.write(KEY_BACKSPACE * n)
def _clear_stroke(self):
if len(self._strokes) > 0:
self._clear_buffer(len(self._strokes))
self._strokes = ""
def _clear_select_characters(self):
self._clear_buffer(self._selecting_characters_length, clear_before=False)
self._selecting_characters_length = 0
self._selecting_characters = []
def _input_stroke(self):
if self._state == STATE_INPUT_STROKE:
characters = chang_jie.get(self._strokes)[:10]
if len(characters) >= 2:
self._clear_stroke()
sys.stdout.write(' ')
out = ""
for i, ch in enumerate(characters):
out += str((i + 1) % 10) + ' ' + ch + ' '
sys.stdout.write(out)
self._selecting_characters_length = len(out) + len(characters) + 1
sys.stdout.write(KEY_BACKSPACE * self._selecting_characters_length)
self._state = STATE_SELECT_CHARACTER
self._selecting_characters = characters
elif len(characters) == 1:
self._clear_stroke()
sys.stdout.write(characters[0])
def on_escape(self):
if self._state == STATE_INPUT_STROKE:
self._clear_stroke()
elif self._state == STATE_SELECT_CHARACTER:
self._clear_select_characters()
self._state = STATE_INPUT_STROKE
def load_chang_jie():
with open('cj5-21000', 'rb') as f:
content = f.read()
bom = codecs.BOM_UTF16_LE
assert content.startswith(bom)
content = content[len(bom):].decode('utf-16le')
content_started = False
for line in content.splitlines():
if line == '[Text]':
content_started = True
elif content_started:
stroke = line[1:]
character = line[0]
if len(character.encode('utf-8')) > 0:
chang_jie.add(stroke, character)
def print_stroke(stroke):
print stroke, ':\t',
lst = chang_jie.get(stroke)
for ch in lst:
print ch,
print
def print_list(lst):
for ch in lst:
print ch,
print
# chang_jie = InputMethod()
chang_jie = InputMethod2()
load_chang_jie()
prompt = ChangJieInteractivePrompt()
prompt.run()
# print_stroke('hqi')
# print_stroke('oan')
# print_stroke('janl')
# print_stroke('amyo')
# print_stroke('jd')
# print_stroke('doo')
# print_stroke('hapi')
# print_stroke('yg')
# print_stroke('o')
# print_stroke('cism')
# print_stroke('vfog')
# print_stroke('opd')
# print_stroke('djpn')
# print_stroke('dtbo')
# print_stroke('tod')
# print_stroke('vnd')
# print_stroke('oino')
# print list(chang_jie.get_generator('hq'))
# print 'hq :\t',
# print print_list(list(chang_jie.get_generator('hq')))
| 7,652 |
utils/boilerplate/fly.py
|
cfginn/sap-simulation-package
| 0 |
2024357
|
from pysapets.animal import Animal
import pysapets.constants as constants
import random
import logging
class Fly(Animal):
# base health and attack values
BASE_ATTACK = 2
BASE_HEALTH = 2
def __init__(self, addAttack = 0, addHealth = 0):
# lvl 1: Friend faints: Summon a 2/2 fly in its place
# lvl 2: Friend faints: Summon a 4/4 fly in its place
# lvl 3: Friend faints: Summon a 6/6 fly in its place
def _run_effect(self, friends):
pass
# create ability
self.ability = Animal.Ability(self, constants.FAINT, constants.EACH_FRIEND, _run_effect)
super().__init__(addAttack + self.BASE_ATTACK, addHealth + self.BASE_HEALTH, animalType = constants.FLY, tier = 6, ability=self.ability)
| 740 |
read_csv.py
|
Uqido/movies-suggestions-bot
| 0 |
2025411
|
import pandas as pd
import numpy as np
from ast import literal_eval
import csv
path = '../the-movies-dataset/'
def get_md():
md = pd.read_csv(path + 'final_metadata.csv', encoding='utf-8')
del md['useless']
md['id'] = md['id'].astype('int')
md['genres'] = md['genres'].fillna('[]').apply(literal_eval).apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else [])
    # 'year' is selected by get_most_poular() below, so compute it here
    md['year'] = pd.to_datetime(md['release_date'], errors='coerce').apply(lambda x: [str(x).split('-')[0]] if x != np.nan else [])
# md['year'] = md['year'].fillna('[]').apply(lambda x: [str(x)] if isinstance(x, int) else [])
return md
def get_titles():
md = get_md()
return [str(t) for t in md['title']]
def get_most_poular():
md = get_md()
vote_counts = md[md['vote_count'].notnull()]['vote_count'].astype('int')
vote_averages = md[md['vote_average'].notnull()]['vote_average'].astype('int')
C = vote_averages.mean()
m = vote_counts.quantile(0.95)
qualified = md[(md['vote_count'] >= m) & (md['vote_count'].notnull()) & (md['vote_average'].notnull())][['title', 'year', 'vote_count', 'vote_average', 'popularity', 'genres', 'poster_path']]
qualified['vote_count'] = qualified['vote_count'].astype('int')
qualified['vote_average'] = qualified['vote_average'].astype('int')
def weighted_rating(x):
v = x['vote_count']
R = x['vote_average']
return (v/(v+m) * R) + (m/(m+v) * C)
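    # Hedged worked example (added note): with m = 1000 and C = 5.0, a film
    # rated R = 8.0 across v = 3000 votes scores
    #   3000/4000 * 8.0 + 1000/4000 * 5.0 = 6.0 + 1.25 = 7.25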
qualified['wr'] = qualified.apply(weighted_rating, axis=1)
qualified = qualified.sort_values('wr', ascending=False).head(250)
movie = []
for i in qualified.head(7).values:
movie.append([str(i[0]), "https://image.tmdb.org/t/p/original/" + str(i[-2])])
return movie
def add_rating(userId, movie_title, rating):
md = get_md()
links = pd.read_csv(path + 'final_links.csv', encoding='utf-8')
del links['useless']
id_map = links[['movieId', 'tmdbId']]
links = links[links['tmdbId'].notnull()]['tmdbId'].astype('int')
smd = md[md['id'].isin(links)]
indices = pd.Series(smd.index, index=smd['title'])
def convert_int(x):
try:
return int(x)
        except (TypeError, ValueError):
return np.nan
id_map['tmdbId'] = id_map['tmdbId'].apply(convert_int)
id_map.columns = ['movieId', 'id']
id_map = id_map.merge(smd[['title', 'id']], on='id').set_index('title')
indices_map = id_map.set_index('id')
with open(path + 'smaller_final_ratings.csv', 'a') as csvfile:
fieldnames = ['useless', 'userId','movieId', 'rating']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
tmdbId = md.loc[md['title'] == movie_title]['id']
tmdbId = tmdbId.values[0]
movieId = indices_map['movieId'][tmdbId]
writer.writerow({'useless':0, 'userId':userId, 'movieId':movieId, 'rating':rating})
| 2,860 |
key.py
|
Phaugt/encryption
| 0 |
2024459
|
from cryptography.fernet import Fernet
key = Fernet.generate_key()
with open('secret.key', 'wb') as secret:
secret.write(key)
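# Hedged companion sketch (added, not in the original): load the saved key
# back and round-trip a message with Fernet.
with open('secret.key', 'rb') as secret:
    fernet = Fernet(secret.read())
token = fernet.encrypt(b'hello')
assert fernet.decrypt(token) == b'hello'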
| 132 |
qiskit_qudits/circuit/quditdelay.py
|
q-inho/QuditsTeam-1
| 1 |
2024101
|
# This code is from Qiskit Hackathon 2021 by the team
# Qiskit for high dimensional multipartite quantum states.
# It is a modified version of barrier.py from the original Qiskit-Terra code.
#
# Author: <NAME>
#
# (C) Copyright 2021 <NAME>, <NAME>, <NAME>, <NAME> and <NAME>.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
#
###############################################################################
#
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
#
###############################################################################
"""
Delay instruction on qudits.
"""
import numpy as np
from qiskit.circuit.exceptions import CircuitError
from qiskit.circuit import Delay
from qiskit.circuit.quantumregister import QuantumRegister
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .flexiblequditinstruction import FlexibleQuditInstruction
class QuditDelay(FlexibleQuditInstruction):
"""Do nothing and just delay/wait/idle for a specified duration."""
num_qudits = 1
def __init__(self, qudit_dimensions, duration=1, unit='dt'):
"""Create new delay instruction for qudits."""
if not isinstance(duration, (float, int)):
raise CircuitError('Unsupported duration type.')
if unit == 'dt' and not isinstance(duration, int):
raise CircuitError("Integer duration is required for 'dt' unit.")
if unit not in {'s', 'ms', 'us', 'ns', 'ps', 'dt'}:
raise CircuitError('Unknown unit %s is specified.' % unit)
super().__init__("delay", qudit_dimensions, 0, 0, params=[duration], unit=unit)
def _define(self):
"""Relay delay to each underlying qubit."""
q = QuantumRegister(self.num_qubits, 'q')
qc = QuantumCircuit(q, name=self.name)
rules = [
(Delay(self.params[0], self.unit), q[:], [])
]
for inst, qargs, cargs in rules:
qc._append(inst, qargs, cargs)
self.definition = qc
def c_if(self, classical, val):
raise CircuitError('Conditional Delay is not yet implemented.')
@property
def duration(self):
"""Get the duration of this delay."""
return self.params[0]
@duration.setter
def duration(self, duration):
"""Set the duration of this delay."""
self.params = [duration]
self._define()
def __array__(self, dtype=None):
"""Return the identity matrix."""
return np.identity(self.num_qubits, dtype=dtype)
def to_matrix(self) -> np.ndarray:
"""Return a Numpy.array for the unitary matrix. This has been
added to enable simulation without making delay a full Gate type.
Returns:
np.ndarray: matrix representation.
"""
return self.__array__(dtype=complex)
def __repr__(self):
"""Return the official string representing the delay."""
return "%s(duration=%s[unit=%s])" % \
(self.__class__.__name__, self.params[0], self.unit)
| 3,790 |
sdk/python/pulumi_aws_native/emr/get_instance_fleet_config.py
|
pulumi/pulumi-aws-native
| 29 |
2025075
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetInstanceFleetConfigResult',
'AwaitableGetInstanceFleetConfigResult',
'get_instance_fleet_config',
'get_instance_fleet_config_output',
]
@pulumi.output_type
class GetInstanceFleetConfigResult:
def __init__(__self__, id=None, target_on_demand_capacity=None, target_spot_capacity=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if target_on_demand_capacity and not isinstance(target_on_demand_capacity, int):
raise TypeError("Expected argument 'target_on_demand_capacity' to be a int")
pulumi.set(__self__, "target_on_demand_capacity", target_on_demand_capacity)
if target_spot_capacity and not isinstance(target_spot_capacity, int):
raise TypeError("Expected argument 'target_spot_capacity' to be a int")
pulumi.set(__self__, "target_spot_capacity", target_spot_capacity)
@property
@pulumi.getter
def id(self) -> Optional[str]:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="targetOnDemandCapacity")
def target_on_demand_capacity(self) -> Optional[int]:
return pulumi.get(self, "target_on_demand_capacity")
@property
@pulumi.getter(name="targetSpotCapacity")
def target_spot_capacity(self) -> Optional[int]:
return pulumi.get(self, "target_spot_capacity")
class AwaitableGetInstanceFleetConfigResult(GetInstanceFleetConfigResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetInstanceFleetConfigResult(
id=self.id,
target_on_demand_capacity=self.target_on_demand_capacity,
target_spot_capacity=self.target_spot_capacity)
def get_instance_fleet_config(id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInstanceFleetConfigResult:
"""
Resource Type definition for AWS::EMR::InstanceFleetConfig
"""
__args__ = dict()
__args__['id'] = id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:emr:getInstanceFleetConfig', __args__, opts=opts, typ=GetInstanceFleetConfigResult).value
return AwaitableGetInstanceFleetConfigResult(
id=__ret__.id,
target_on_demand_capacity=__ret__.target_on_demand_capacity,
target_spot_capacity=__ret__.target_spot_capacity)
@_utilities.lift_output_func(get_instance_fleet_config)
def get_instance_fleet_config_output(id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetInstanceFleetConfigResult]:
"""
Resource Type definition for AWS::EMR::InstanceFleetConfig
"""
...
| 3,260 |
examples/main.py
|
muhlba91/ledpi-controller
| 1 |
2025323
|
"""Flask application for the WS2801 controller."""
import argparse
from flask import Flask, jsonify, request
from ledpi_controller.controller import Controller
from ledpi_controller.yaml_processor import StateYamlProcessor, YamlProcessor
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", required=True)
parser.add_argument("-s", "--state", required=True)
args = parser.parse_args()
class Server:
def __init__(self, controller: Controller):
self.controller = controller
def get_state(self):
return {
"state": "on" if self.controller.is_on() else "off",
"rgb_color": self.controller.rgb_hex_color(),
"leds": self.controller.get_leds(),
"brightness": self.controller.brightness(),
}
def set_state(self, state: dict):
if "rgb_color" in state:
self.controller.set_rgb_color(state["rgb_color"])
if "brightness" in state:
self.controller.set_brightness(state["brightness"])
if "state" in state:
state = state["state"]
if state == "on":
self.controller.turn_on()
elif state == "off":
self.controller.turn_off()
def create_app(config, state_file):
leds = config.get("leds", 160)
state_processor = StateYamlProcessor(state_file)
controller = Controller(state_processor, leds)
server = Server(controller)
app = Flask(__name__)
@app.route("/api/v1/state", methods=["GET", "POST"])
def state_method():
if request.method == "POST":
json = request.get_json(force=True)
server.set_state(json)
return jsonify({"success": True, **server.get_state()})
return app
# main()
if __name__ == "__main__":
config = YamlProcessor(args.config).load()
create_app(config, args.state).run(
host="0.0.0.0", port=config.get("port", 80), debug=config.get("debug", False)
)
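# Hedged usage sketch (added, not in the original; JSON payload keys taken
# from Server.set_state above, port assumed to be the default 80):
#   curl http://localhost/api/v1/state
#   curl -X POST http://localhost/api/v1/state -d '{"state": "on"}'
# The GET returns the current state as JSON; the POST updates it.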
| 1,971 |
setup.py
|
agmcfarland/GeneGrouper
| 15 |
2023905
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="GeneGrouper",
version="1.0.2",
author="<NAME>",
author_email="<EMAIL>",
description="Find and cluster genomic regions containing a seed gene",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/agmcfarland/GeneGrouper",
project_urls={
"Bug Tracker": "https://github.com/agmcfarland/GeneGrouper/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
package_dir={"": "src"},
    packages=setuptools.find_packages(where="src", exclude=['docs', 'test_data']),  # exclude = ['*.egg-info']
entry_points={
'console_scripts': ['GeneGrouper = GeneGrouper.__main__:main'], },
package_data={'GeneGrouper' : ['Rscripts/*']},
python_requires=">=3.6")
# https://packaging.python.org/tutorials/installing-packages/#creating-virtual-environments
# https://packaging.python.org/tutorials/packaging-projects/
# https://trstringer.com/easy-and-nice-python-cli/
# https://github.com/pypa/sampleproject/blob/main/setup.py
# https://chriswarrick.com/blog/2014/09/15/python-apps-the-right-way-entry_points-and-scripts/
| 1,422 |
files/bin/bin/clock.py
|
erroneousboat/dotfiles
| 16 |
2023896
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##############################################################################
#
# clock
# -----
#
# This script prints an icon representation of the time of day.
#
# Dependencies: python3, nerd-fonts
#
# :authors: <NAME>
# :date: 07-01-2019
# :version: 0.1.0
#
##############################################################################
import datetime
def clock():
now = datetime.datetime.now().hour % 12
if now == 0:
return ""
elif now == 1:
return ""
elif now == 2:
return ""
elif now == 3:
return ""
elif now == 4:
return ""
elif now == 5:
return ""
elif now == 6:
return ""
elif now == 7:
return ""
elif now == 8:
return ""
elif now == 9:
return ""
elif now == 10:
return ""
elif now == 11:
return ""
else:
return ""
if __name__ == "__main__":
print(clock())
| 1,001 |
SignalGenerators/Python/RsSmw_ScpiPackage/RsSmw_SimpleRFsettings_Example.py
|
Rohde-Schwarz/examples
| 0 |
2025449
|
from RsSmw import *
RsSmw.assert_minimum_version('5.0.44')
smw = RsSmw('TCPIP::10.102.52.47::HISLIP')
# smw = RsSmw('TCPIP::10.112.1.179::5025::SOCKET', options='SelectVisa=SocketIo') # No VISA needed
print(f'Driver Info: {smw.utilities.driver_version}')
print(f'Instrument: {smw.utilities.idn_string}')
# Instrument options are properly parsed: duplicates are removed and the items are sorted (k-options first)
print(f'Instrument options: {",".join(smw.utilities.instrument_options)}')
# Driver's instrument status checking ( SYST:ERR? ) after each command (default value is True):
smw.utilities.instrument_status_checking = True
# The smw object uses the global HW instance one - RF out A
smw.repcap_hwInstance_set(repcap.HwInstance.InstA)
# Clone the smw object to the smw_rf2 and select the RF out B
smw_rf2 = smw.clone()
smw_rf2.repcap_hwInstance_set(repcap.HwInstance.InstB)
# Now we have two independent objects for two RF Outputs - smw and smw_rf2
# They share some common features of the instrument, like for example resetting
smw_rf2.utilities.reset()
smw.output.state.set_value(True)
smw.source.frequency.set_mode(enums.FreqMode.CW)
smw.source.power.level.immediate.set_amplitude(-20)
smw.source.frequency.fixed.set_value(223E6)
print(f'Channel 1 PEP level: {smw.source.power.get_pep()} dBm')
smw_rf2.output.state.set_value(False)
smw_rf2.source.frequency.set_mode(enums.FreqMode.SWEep)
smw_rf2.source.power.level.immediate.set_amplitude(-35)
smw_rf2.source.frequency.set_start(800E6)
smw_rf2.source.frequency.set_stop(900E6)
smw_rf2.source.frequency.step.set_mode(enums.FreqStepMode.DECimal)
smw_rf2.source.frequency.step.set_increment(10E6)
print(f'Channel 2 PEP level: {smw_rf2.source.power.get_pep()} dBm')
# Direct SCPI interface:
response = smw.utilities.query_str('*IDN?')
print(f'Direct SCPI response on *IDN?: {response}')
smw.close()
| 1,864 |
helper/list_cache.py
|
kurokobo/game-update-notifier
| 0 |
2024654
|
import json
import os
from tabulate import tabulate
def append_cache(table, platform, json_path):
_json = {}
if os.path.exists(json_path):
with open(json_path) as file:
_json = json.load(file)
_table = table
for _data in _json:
_result = _json[_data]
_row = [
platform,
_result["app"]["id"],
_result["app"]["name"],
_result["last_checked"],
_result["last_updated"],
_result["data"],
]
_table.append(_row)
return _table
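# Hedged shape note (added, not in the original): each latest_result.json is
# assumed to map an arbitrary key to an object of the form
#   {"app": {"id": ..., "name": ...},
#    "last_checked": ..., "last_updated": ..., "data": ...}
# which append_cache() flattens into one table row per entry.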
def main():
json_epicgames = "../cache/epicgames/latest_result.json"
json_msstore = "../cache/msstore/latest_result.json"
json_steam = "../cache/steam/latest_result.json"
current_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(current_path)
_header = ["Platform", "ID", "Name", "Last Checked", "Last Updated", "Data"]
_table = []
_table = append_cache(_table, "Epic Games", json_epicgames)
_table = append_cache(_table, "Microsoft Store", json_msstore)
_table = append_cache(_table, "Steam", json_steam)
print(tabulate(_table, _header))
if __name__ == "__main__":
main()
| 1,207 |
output/models/nist_data/atomic/any_uri/schema_instance/nistschema_sv_iv_atomic_any_uri_enumeration_1_xsd/nistschema_sv_iv_atomic_any_uri_enumeration_1.py
|
tefra/xsdata-w3c-tests
| 1 |
2023361
|
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-anyURI-enumeration-1-NS"
class NistschemaSvIvAtomicAnyUriEnumeration1Type(Enum):
HTTP_THEISTE_COM = "http://Theiste.com"
MAILTO_PROV_ORG = "mailto:@<EMAIL>"
FTP_H_COM = "ftp://h.com"
MAILTO_DEVIC_MANIPULATIONANDABILITYSPECIFICA_GOV = "mailto:<EMAIL>"
HTTP_WWW_SYSTEMSWEBI_TEROPERABI_ITYBEANDOF_HIC_EDU = "http://www.systemswebi.teroperabi.itybeandof.hic.edu"
GOPHER_CONFORMANCE_UP_COM = "gopher://Conformance.up.com"
TELNET_F_ORG = "telnet://f.org"
HTTP_WWW_ASSERIES_GOV = "http://www.asseries.gov"
TELNET_WIT_EDU = "telnet://wit.edu"
FTP_FTP_ATHECONSTIT_ENT_OASISRE_RIE_NET = "ftp://ftp.atheconstit.entOASISre.rie.net"
@dataclass
class NistschemaSvIvAtomicAnyUriEnumeration1:
class Meta:
name = "NISTSchema-SV-IV-atomic-anyURI-enumeration-1"
namespace = "NISTSchema-SV-IV-atomic-anyURI-enumeration-1-NS"
value: Optional[NistschemaSvIvAtomicAnyUriEnumeration1Type] = field(
default=None,
metadata={
"required": True,
}
)
| 1,163 |
quiz/views/launch.py
|
manikagarg/iQuiz
| 0 |
2022689
|
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from quiz.utils import lti
import quiz.utils.lti_validator as lti_validator
from . import manager
from . import student
@csrf_exempt
def index(request):
"""
The function is the entry point, it validates the POST request (if any) as LTI
compliant, and then return the appropriate view to the user.
"""
if request.method == "POST":
valid = lti_validator.validate_request(request)
if valid:
lti.save_launch_request_session(request) # Save the Launch Request Parameters to the session
if lti.is_student(request):
return student.index(request)
elif lti.is_manager(request):
return manager.index(request)
return render(request, "error.html") # If NOT valid, or role is neither student nor manager
else:
# it is not a POST (LTI) request, launch home page.
return render(request, 'home.html')
| 1,016 |
portal/urls.py
|
PedrinaBrasil/PortalLetrasPotengienses
| 0 |
2025373
|
from django.urls import path
from . import views
app_name = "portal"
urlpatterns = [
path('', views.IndexView.as_view(), name="index"),
path('obras', views.BooksListView.as_view(), name="obras"),
path('autores', views.AuthorsListView.as_view(), name="autores"),
]
| 277 |
tests/test_unit.py
|
gustavohenrique/django-splinter-example
| 2 |
2024783
|
from django.test import TestCase
from poll.models import Candidate
from poll import util
class UtilTest(TestCase):
def test_calculate_percentual_scores_from_two_candidates(self):
c1 = Candidate(id=1, score=6)
c2 = Candidate(id=2, score=4)
result = util.calculate_scores(c1, c2)
self.assertEquals(result.get('c1').get('id'), 1)
self.assertEquals(result.get('c1').get('score'), '60.00')
self.assertEquals(result.get('c2').get('id'), 2)
self.assertEquals(result.get('c2').get('score'), '40.00')
def test_should_return_percentual_zero_if_score_is_zero(self):
c1 = Candidate(id=1, score=0)
c2 = Candidate(id=2, score=0)
result = util.calculate_scores(c1, c2)
self.assertEquals(result.get('c1').get('id'), 1)
self.assertEquals(result.get('c1').get('score'), '0.00')
self.assertEquals(result.get('c2').get('id'), 2)
self.assertEquals(result.get('c2').get('score'), '0.00')
def test_should_return_empty_dict_when_args_is_none(self):
result = util.calculate_scores(None, None)
self.assertEquals(result, {})
class ViewTest(TestCase):
def test_create_Candidates_if_database_is_empty(self):
candidates = Candidate.objects.all()
self.assertEquals(len(candidates), 0)
response = self.client.get('/')
self.assertEquals(response.status_code, 200)
candidates = Candidate.objects.all()
self.assertEquals(len(candidates), 2)
def test_increment_the_score_and_redirect_to_index(self):
candidate = Candidate.objects.create(score=6)
response = self.client.get('/vote/%s/' % candidate.id)
self.assertEquals(response.status_code, 302)
self.assertEquals(Candidate.objects.get(pk=candidate.id).score, 7)
def test_should_return_404_if_id_doesnt_exists(self):
candidate = Candidate.objects.create(score=6)
response = self.client.get('/vote/99/')
self.assertEquals(response.status_code, 404)
self.assertEquals(Candidate.objects.get(pk=candidate.id).score, 6)
| 2,108 |
django_productline/features/staticfiles/settings.py
|
henzk/django-productline
| 5 |
2023583
|
from __future__ import unicode_literals
# refinement for django_productline.settings
def refine_INSTALLED_APPS(original):
return ['django.contrib.staticfiles'] + list(original)
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
introduce_STATIC_URL = '/static/'
# Additional locations of static files
introduce_STATICFILES_DIRS = [
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
]
# List of finder classes that know how to find static files in
# various locations.
introduce_STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
| 869 |
day09/part1.py
|
aajjbb/advent-of-code-2018
| 2 |
2025271
|
"""
Advent of Code 2018 - Day 09 solution.
"""
INPUT = input().split(' ')
PLAYERS = int(INPUT[0])
MARBLES = int(INPUT[6])
circle = [0]
current_pos = 0
players_score = [0 for _ in range(PLAYERS)]
for marble in range(1, MARBLES + 1):
current_player = (marble - 1) % PLAYERS
if marble % 23 != 0:
new_position = 1 + (current_pos + 1) % (len(circle))
circle.insert(new_position, marble)
current_pos = new_position
else:
players_score[current_player] += marble
removed_position = (current_pos - 7 + len(circle)) % len(circle)
players_score[current_player] += circle[removed_position]
circle.pop(removed_position)
current_pos = removed_position
print(max(players_score))
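# Hedged performance note (added, not in the original): list.insert() and
# list.pop() are O(n), so this is fine for part 1 but too slow for part 2's
# 100x marble count; the usual fix is a collections.deque with rotate(),
# giving O(1) per move.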
| 746 |
adbnx_adapter/adbnx_adapter/imdb_arangoDB_networkx_adapter.py
|
arangoml/networkx-adapter
| 13 |
2025239
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 09:51:47 2020
@author: <NAME>
"""
from adbnx_adapter.arangoDB_networkx_adapter import ArangoDB_Networkx_Adapter
import networkx as nx
class IMDBArangoDB_Networkx_Adapter(ArangoDB_Networkx_Adapter):
def create_networkx_graph(self, graph_name, graph_attributes, **query_options):
if self.is_valid_graph_attributes(graph_attributes):
g = nx.DiGraph()
for k, v in graph_attributes['vertexCollections'].items():
query = "FOR doc in %s " % (k)
cspl = [s + ':' + 'doc.' + s for s in v]
cspl.append('_id: doc._id')
csps = ','.join(cspl)
query = query + "RETURN { " + csps + "}"
cursor = self.db.aql.execute(query, **query_options)
for doc in cursor:
if k == "Users":
bip_key = 0
else:
bip_key = 1
g.add_node(doc['_id'], attr_dict=doc, bipartite=bip_key)
for k, v in graph_attributes['edgeCollections'].items():
query = "FOR doc in %s " % (k)
cspl = [s + ':' + 'doc.' + s for s in v]
cspl.append('_id: doc._id')
csps = ','.join(cspl)
query = query + "RETURN { " + csps + "}"
cursor = self.db.aql.execute(query, **query_options)
for doc in cursor:
g.add_edge(doc['_from'], doc['_to'])
return g
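    # Hedged shape illustration (added, not in the original; collection and
    # attribute names are hypothetical): graph_attributes is expected to look
    # like
    #   {'vertexCollections': {'Users': {'age'}, 'Movies': {'title'}},
    #    'edgeCollections': {'Ratings': {'_from', '_to'}}}
    # where each value names the document attributes to project in the AQL query.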
| 1,577 |
temp01.py
|
nesen2019/english
| 0 |
2024420
|
import os
import glob
import json
if __name__ == '__main__':
with open("word/3Kwords.md", "w") as f:
f.write(f"\n\n")
for i in range(1, 32):
f.write(f"### [Music_{i:02}](http://download.dogwood.com.cn/online/yaoniming3000/{i:02}.mp3), [text_{i:02}](./word/text/list{i:02}.txt)")
f.write(f"\n")
| 345 |
CSIKit/reader/readers/pico/ExtraInfoSegment.py
|
FredeJ/CSIKit
| 67 |
2024981
|
from CSIKit.util import stringops
import struct
class ExtraInfoSegment:
BOOLS = {
1: [
"hasLength",
"hasVersion",
"hasMacAddr_cur",
"hasMacAddr_rom",
"hasChansel",
"hasBMode",
"hasEVM",
"hasTxChainMask",
"hasRxChainMask",
"hasTxpower",
"hasCF",
"hasTxTSF",
"hasLastHWTxTSF",
"hasChannelFlags",
"hasTxNess",
"hasTuningPolicy",
"hasPLLRate",
"hasPLLRefDiv",
"hasPLLClkSel",
"hasAGC",
"hasAntennaSelection",
"hasSamplingRate",
"hasCFO",
"hasSFO",
"hasPreciseTxTiming",
]
}
def __init__(self, data: bytes, version: int):
VERSION_MAP = {
1: self.parseV1,
}
if version in VERSION_MAP:
# Parse data with relevant parser.
VERSION_MAP[version](data)
def parseV1(self, data: bytes):
pos = 0
self.featureCode = struct.unpack("I", data[:4])[0]
pos += 4
# bools = self.BOOLS[1]
# numBools = len(bools)
# boolVals = struct.unpack(numBools*"?", data[pos:pos+numBools])
#
# {setattr(self, boolKey, boolVal) for boolKey, boolVal in zip(bools, boolVals)}
#
# pos += numBools
self.length = struct.unpack("H", data[pos:pos + 2])[0]
pos += 2
self.version = struct.unpack("Q", data[pos:pos + 8])[0]
pos += 8
self.macaddr_rom = stringops.hexToMACString(data[pos:pos + 6].hex())
pos += 6
self.macaddr_cur = stringops.hexToMACString(data[pos:pos + 6].hex())
pos += 6
self.chansel = struct.unpack("I", data[pos:pos + 4])[0]
pos += 4
self.bmode = struct.unpack("B", data[pos:pos + 1])[0]
pos += 1
self.evm = struct.unpack(20*"B", data[pos:pos + 20])
pos += 20
self.txChainMask = struct.unpack("B", data[pos:pos + 1])[0]
pos += 1
self.rxChainMask = struct.unpack("B", data[pos:pos + 1])[0]
pos += 1
self.txpower = struct.unpack("B", data[pos:pos + 1])[0]
pos += 1
self.cf = struct.unpack("Q", data[pos:pos + 8])[0]
pos += 8
self.txTSF = struct.unpack("I", data[pos:pos + 4])[0]
pos += 4
self.lastHwTxTSF = struct.unpack("H", data[pos:pos + 2])[0]
pos += 2
self.channelFlags = struct.unpack("H", data[pos:pos + 2])[0]
pos += 2
self.tx_ness = struct.unpack("B", data[pos:pos + 1])[0]
pos += 1
        tuningpolicy = struct.unpack("B", data[pos:pos + 1])[0]
        pos += 1  # advance past the tuning-policy byte (missing in the original)
        if tuningpolicy == 30:
            self.tuningPolicy = "Chansel"
        elif tuningpolicy == 31:
            self.tuningPolicy = "FastCC"
        elif tuningpolicy == 32:
            self.tuningPolicy = "Reset"
        elif tuningpolicy == 33:
            self.tuningPolicy = "Default"
        else:
            self.tuningPolicy = "Unknown"
            print("invalid tuning policy.")
self.pll_rate = struct.unpack("H", data[pos:pos + 2])[0]
pos += 2
self.pll_refdiv = struct.unpack("B", data[pos:pos + 1])[0]
pos += 1
self.pll_clock_select = struct.unpack("B", data[pos:pos + 1])[0]
pos += 1
self.agc = struct.unpack("B", data[pos:pos + 1])[0]
pos += 1
self.ant_sel = struct.unpack("BBB", data[pos:pos + 3])
pos += 3
self.samplingRate = struct.unpack("Q", data[pos:pos + 8])[0]
pos += 8
self.cfo = struct.unpack("I", data[pos:pos + 4])[0]
pos += 4
self.sfo = struct.unpack("I", data[pos:pos + 4])[0]
pos += 4
self.preciseTxTiming = struct.unpack("d", data[pos:pos + 8])[0]
pos += 8
| 3,895 |
src/venim/pathmanager.py
|
michaelaye/venim
| 2 |
2025238
|
from pathlib import Path
from .config import config
storage_root = Path(config["venim_path"]).expanduser()
class PathManager:
def __init__(self, mission, instr):
self.mission = mission
self.instr = instr
@property
def instr_savedir(self):
return storage_root / self.mission / self.instr
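
# Hedged usage sketch (added, not in the original module; mission and
# instrument names are hypothetical):
#   pm = PathManager("VEX", "VIRTIS")
#   pm.instr_savedir  # -> <venim_path>/VEX/VIRTIS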
| 328 |
applications/physbam/physbam-lib/Scripts/Archives/pd/sim/SERVER.py
|
schinmayee/nimbus
| 20 |
2023281
|
#!/usr/bin/python
# Host representation: hostname, max_cpus, max_memory
# Session representation: id, label, username, created_date, state={active,inactive,done}, machine, claim_id, memory, cpus, user_status (where user_status is a dictionary)
from pd.common import CONFIG
from pd.common import SOCKET
import os
import mutex
import time
import threading
class SERVER:
def __init__(self):
self.session_directory=CONFIG.session_directory
self.sessions={}
self.next_id=1
self.server_id="pd/%s"%os.environ["HOSTNAME"]
self.hosts_client=client=SOCKET.CLIENT(CONFIG.pdhosts_server_host,CONFIG.pdhosts_server_port,
(CONFIG.client_private_key_file,CONFIG.client_certificate_file,CONFIG.ca_certificate_file))
print "our test %s"%self.hosts_client.Host_List()
self.Read_All_Sessions()
# Define RPC interface
self.mutex=threading.Lock()
self.commands=["Session_Info","Session_List","Create_Session","Activate_Session","Deactivate_Session","Label_Session","Update_State","Host_List","Update_Status","Remove_Status_If_Exists","Session_Directory"]
# PRIVATE ROUTINES
def Read_Session(self,session_id):
# read main status
try:
info=eval(open(os.path.join(self.session_directory,str(session_id),"etc","info.py")).read())
self.sessions[info["id"]]=info
except:
pass # skip sessions whose info.py is missing or unreadable
def Read_All_Sessions(self):
for directory in os.listdir(self.session_directory):
self.Read_Session(directory)
self.next_id=reduce(max,self.sessions.keys()+[self.next_id])+1
def Write_Session(self,session_id):
open(os.path.join(self.session_directory,str(session_id),"etc","info.py"),"w").write(repr(self.sessions[session_id]))
def Validate_Session_Id(self,session_id):
if type(session_id)!=int: raise SOCKET.COMMAND_EXCEPTION("Invalid session id")
elif not self.sessions.has_key(session_id): raise SOCKET.COMMAND_EXCEPTION("Invalid session id %d"%session_id)
# PUBLIC ROUTINES
def Session_Info(self,session_id):
self.Validate_Session_Id(session_id)
return self.sessions[session_id]
def Session_List(self):
return self.sessions
def Create_Session(self,username,memory,cpus):
session_id,directory=None,None
while 1:
session_id=self.next_id
self.next_id+=1
directory=os.path.join(self.session_directory,str(session_id))
if not os.path.exists(directory): break
etc_directory=os.path.join(directory,"etc")
info={"id":session_id, "label": "<unnamed>","username": username,"created_date":time.time(),"state":"inactive","machine":None,"claim_id":None,"memory":memory,"cpus":cpus,"user_status":{}}
os.umask(0)
os.mkdir(directory,01775) # create directory
os.mkdir(etc_directory,01775) # create etc directory
self.sessions[session_id]=info
self.Write_Session(session_id)
return info
def Activate_Session(self,session_id,desired_hostname):
self.Validate_Session_Id(session_id)
if self.sessions[session_id]["machine"]!=None:
raise SOCKET.COMMAND_EXCEPTION("session is already bound to machine %s"%self.sessions[session_id]["machine"])
if self.sessions[session_id]["state"]=="active":
raise SOCKET.COMMAND_EXCEPTION("session already activated but no machine: PANIC")
claim_id=self.hosts_client.Claim_Host(desired_hostname,self.server_id,self.sessions[session_id]["username"],self.sessions[session_id]["cpus"],self.sessions[session_id]["memory"])
self.sessions[session_id]["machine"]=desired_hostname
self.sessions[session_id]["claim_id"]=claim_id
self.sessions[session_id]["state"]="active"
self.Write_Session(session_id)
def Deactivate_Session(self,session_id,state):
self.Validate_Session_Id(session_id)
if state == "active": raise SOCKET.COMMAND_EXCEPTION("state must not be active")
if self.sessions[session_id]["state"]!="active": raise SOCKET.COMMAND_EXCEPTION("session already inactive")
if self.sessions[session_id]["machine"]==None: raise SOCKET.COMMAND_EXCEPTION("session is not bound to machine but session is inactive: PANIC")
self.hosts_client.Release_Host(self.sessions[session_id]["machine"],self.sessions[session_id]["claim_id"])
self.sessions[session_id]["claim_id"]=None
self.sessions[session_id]["machine"]=None
self.sessions[session_id]["state"]=state
self.Write_Session(session_id)
def Label_Session(self,session_id,label):
self.Validate_Session_Id(session_id)
self.sessions[session_id]["label"]=label
self.Write_Session(session_id)
def Update_State(self,session_id):
self.Validate_Session_Id(session_id)
self.Read_Session(session_id)
def Host_List(self):
return self.hosts_client.Host_List()
def Update_Status(self,session_id,key,value):
self.Validate_Session_Id(session_id)
self.sessions[session_id]["user_status"][key]=value
self.Write_Session(session_id)
def Remove_Status_If_Exists(self,session_id,key):
self.Validate_Session_Id(session_id)
try:
del self.sessions[session_id]["user_status"][key]
self.Write_Session(session_id)
except:
pass
def Session_Directory(self,session_id):
self.Validate_Session_Id(session_id)
directory=os.path.join(self.session_directory,str(session_id))
return directory
import socket
if __name__ == "__main__":
server=SERVER()
SOCKET.SERVER(socket.gethostbyname(CONFIG.pdsim_server_host),CONFIG.pdsim_server_port,server,
(CONFIG.server_private_key_file,CONFIG.server_certificate_file,CONFIG.ca_certificate_file))
| 5,927 |
src/annalist_root/annalist/models/recordtype.py
|
gklyne/annalist
| 18 |
2024677
|
"""
Annalist record type
A record type is represented by:
- an ID (slug)
- a URI
- a name/label
- a description
- ...
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2014, <NAME>"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import os
import os.path
import shutil
from django.conf import settings
from annalist import layout
from annalist.exceptions import Annalist_Error
from annalist.identifiers import ANNAL
from annalist import util
from annalist.models.entity import Entity
from annalist.models.entitydata import EntityData
class RecordType(EntityData):
_entitytype = ANNAL.CURIE.Type
_entitytypeid = layout.TYPE_TYPEID
_entityroot = layout.COLL_TYPE_PATH
_entityview = layout.COLL_TYPE_VIEW
_entityfile = layout.TYPE_META_FILE
def __init__(self, parent, type_id):
"""
Initialize a new RecordType object, without metadata (yet).
parent is the parent collection in which the type is defined.
type_id the local identifier for the record type
"""
super(RecordType, self).__init__(parent, type_id)
self._parent = parent
# log.debug("RecordType %s: dir %s"%(type_id, self._entitydir))
# log.debug("RecordType %s: uri %s"%(type_id, self._entityurl))
return
def _migrate_values(self, entitydata):
"""
Type definition entity format migration method.
The specification for this method is that it returns an entitydata value
which is a copy of the supplied entitydata with format migrations applied.
NOTE: implementations are free to apply migrations in-place. The resulting
entitydata should be exactly as the supplied data *should* appear in storage
to conform to the current format of the data. The migration function should
be idempotent; i.e.
x._migrate_values(x._migrate_values(e)) == x._migrate_values(e)
"""
# Convert representation of supertype URIs to use repeated property instead of
# reference to an RDF list.
if ANNAL.CURIE.supertype_uris in entitydata:
if isinstance(entitydata[ANNAL.CURIE.supertype_uris], list):
entitydata[ANNAL.CURIE.supertype_uri] = (
[ {'@id': st[ANNAL.CURIE.supertype_uri] }
for st in entitydata[ANNAL.CURIE.supertype_uris]
])
del entitydata[ANNAL.CURIE.supertype_uris]
# Return result
return entitydata
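# Hedged illustration of the migration above (property names abbreviated):
# {"supertype_uris": [{"supertype_uri": "ex:T1"}]}
# -> {"supertype_uri": [{"@id": "ex:T1"}]}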
def _migrate_filenames(self):
"""
Override EntityData method
"""
return None
def _post_update_processing(self, entitydata, post_update_flags):
"""
Post-update processing.
This method is called when an entity has been created or updated.
"""
self._parent.cache_add_type(self)
return entitydata
def _post_remove_processing(self, post_update_flags):
"""
Post-remove processing.
This method is called when an entity has been removed.
"""
self._parent.cache_remove_type(self.get_id())
return
# End.
| 3,408 |
sumTheOldNumber.py
|
Zo3i/codewarsPy
| 0 |
2024980
|
def row_sum_odd_numbers(n):
count = 0
first = 1
for i in range(n):
count = 2 * i
first += count
sum = 0
for j in range(0, i + 1):
sum += first + 2 * j
return sum
print(row_sum_odd_numbers(13))
# An expert's best-practice solution
def row_sum_odd_numbers(n):
#your code here
return n ** 3
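# Why this works: row n holds n consecutive odd numbers whose average is n**2,
# so the row sum is n * n**2 = n**3.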
| 353 |
examples.py/3D/Form/ShapeTransform.py
|
timgates42/processing.py
| 1,224 |
2024749
|
"""
Shape Transform
by <NAME>.
(Rewritten in Python by <NAME>.)
Illustrates the geometric relationship
between Cube, Pyramid, Cone and
Cylinder 3D primitives.
Instructions:
Up Arrow - increases points
Down Arrow - decreases points
'p' key toggles between cube/pyramid
"""
# constants
radius = 99
cylinderLength = 95
angleInc = PI / 300.0
# globals that can be changed by the user
pts = 12
isPyramid = False
def setup():
size(640, 360, OPENGL)
noStroke()
def draw():
background(170, 95, 95)
lights()
fill(255, 200, 200)
translate(width / 2, height / 2)
rotateX(frameCount * angleInc)
rotateY(frameCount * angleInc)
rotateZ(frameCount * angleInc)
dTheta = TWO_PI / pts
x = lambda j: cos(dTheta * j) * radius
y = lambda j: sin(dTheta * j) * radius
# draw cylinder tube
beginShape(QUAD_STRIP)
for j in range(pts + 1):
vertex(x(j), y(j), cylinderLength)
if isPyramid:
vertex(0, 0, -cylinderLength)
else:
vertex(x(j), y(j), -cylinderLength)
endShape()
#draw cylinder ends
beginShape()
for j in range(pts + 1):
vertex(x(j), y(j), cylinderLength)
endShape(CLOSE)
if not isPyramid:
beginShape()
for j in range(pts + 1):
vertex(x(j), y(j), -cylinderLength)
endShape(CLOSE)
"""
up/down arrow keys control
polygon detail.
"""
def keyPressed():
global pts, isPyramid
if key == CODED:
if keyCode == UP and pts < 90:
pts += 1
elif keyCode == DOWN and pts > 4:
pts -= 1
elif key == 'p':
isPyramid = not isPyramid
| 1,667 |
problems/1423.py
|
mengshun/Leetcode
| 0 |
2024309
|
"""
1423. 可获得的最大点数 难度: 中等
几张卡牌 排成一行,每张卡牌都有一个对应的点数。点数由整数数组 cardPoints 给出。
每次行动,你可以从行的开头或者末尾拿一张卡牌,最终你必须正好拿 k 张卡牌。
你的点数就是你拿到手中的所有卡牌的点数之和。
给你一个整数数组 cardPoints 和整数 k,请你返回可以获得的最大点数。
"""
def maxScore(cardPoints, k):
n = len(cardPoints)
def min_deal():
# take everything, then give back the minimum-sum window of the n - k cards left behind
sum_v = sum(cardPoints)
begin = n - k
t = sum(cardPoints[:begin])
min_v = t
for i in range(1, k+1):
t = t - cardPoints[i-1] + cardPoints[begin + i - 1]
min_v = min(min_v, t)
return sum_v - min_v
def huadong_deal():
sum_v = sum(cardPoints[:k])
res = sum_v
for i in range(1, k+1):
sum_v = sum_v - cardPoints[k-i] + cardPoints[n - i]
res = max(res, sum_v)
return res
if (n >> 1) > k:
# k is less than half the cards: slide a window over the k taken cards directly
return huadong_deal()
else:
# otherwise minimize the sum of the n - k cards that are left behind
return min_deal()
# print(maxScore([1,2,3,4,5,6,1], 3)) # 12
# print(maxScore([9,7,7,9,7,7,9], 7))
print(maxScore([96,90,41,82,39,74,64,50,30], 8)) # 536
| 1,087 |
execjs/_exceptions.py
|
d0ugal/PyExecJS
| 1 |
2024062
|
class Error(Exception):
pass
class RuntimeError(Error):
pass
class ProgramError(Error):
pass
class RuntimeUnavailableError(RuntimeError):
pass
| 165 |
natural_tammes.py
|
Tim024/NaturalTammes
| 0 |
2024131
|
import random
from math import radians, cos, sin, asin, sqrt
from pyproj import Geod
from fibonacci import *
# Maybe induce randomness in point movement?
def move_point_away(p1, p2, d, geoid): # d in km
# Moves p1 away from p2 by d, return new p1 coordinates
lat1, lon1 = p1
lat2, lon2 = p2
fwd_azimuth, back_azimuth, distance = geoid.inv(lon1, lat1, lon2, lat2)
# back_azimuth = back_azimuth + np.random.randn()/100
lng_new, lat_new, return_az = geoid.fwd(lon1, lat1, back_azimuth, d * 1000)
return lat_new, lng_new
def haversine(p1, p2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
lat1, lon1 = p1
lat2, lon2 = p2
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
r = 6371 # Radius of earth in kilometers.
return c * r
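# Quick sanity check: haversine((0, 0), (0, 1)) is roughly 111.2 km,
# one degree of longitude at the equator.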
def natural_tammes(n, init='fibonacci', step=1):
geoid = Geod(ellps='WGS84')
# Display n points randomly on map
points = []
# Init with fibo by default
if init == 'fibonacci':
points = fibonacci(n)
elif init == 'random':
for i in range(n):
lat = random.random() * 180 - 90
lon = random.random() * 360 - 180
points.append((lat, lon))
elif init == 'zeroes':
for i in range(n):
lat = random.random()
lon = random.random()
points.append((lat, lon))
def push_around(point_index, radius):
p1 = points[point_index]
random_list = list(range(n))
random_list.remove(point_index)
if point_index != 0: random_list.remove(0) # Point 0 is fixed
random.shuffle(random_list)
for i in random_list:
p2 = points[i]
d = haversine(p1, p2)
if n * step > radius - d > 0.001: # If p2 is inside p1 radius
# Push p2 to edge
points[i] = move_point_away(p2, p1, radius - d, geoid)
if radius - d > 20 * step: # Stop when moving too much ?
return 0
return 1
r = 0
ok = 1
while ok == 1:
r += step
# Increase radius
for i in range(n):
ok *= push_around(i, r)
return points
if __name__ == '__main__':
points = natural_tammes(5)
| 2,518 |
tools/infer_net_dataset.py
|
leehsiu/pyopenpose
| 0 |
2025662
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import cv2
from openpose.config import cfg
from openpose.engine.predictor import LiveDemo
from openpose.model.detector.build_model import build_model
from openpose.data import make_data_loader
import time
import matplotlib.pyplot as plt
import matplotlib.patches as plt_patches
import numpy as np
heat_cmap = plt.get_cmap('viridis')
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Webcam Demo")
parser.add_argument(
"--config-file",
default="../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.7,
help="Minimum score for the prediction to be shown",
)
parser.add_argument(
"--min-image-size",
type=int,
default=224,
help="Smallest size of the image to feed to the model. "
"Model was trained with 800, which gives best results",
)
parser.add_argument(
"--show-mask-heatmaps",
dest="show_mask_heatmaps",
help="Show a heatmap probability for the top masks-per-dim masks",
action="store_true",
)
parser.add_argument(
"--masks-per-dim",
type=int,
default=2,
help="Number of heatmaps per dimension to show",
)
parser.add_argument(
'--model-type',
dest='model_type',
type=str,
default='OpenPose',
help='model type, currently support OpenPose,DensePose,GinesOpenPose'
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
# load config from file and command-line arguments
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
# prepare object that handles inference plus adds predictions on top of image
# create the model
if args.model_type not in ['OpenPose','DensePose','GinesOpenPose']:
raise ValueError('Unknown model type')
model = build_model(cfg,args.model_type)
live_demo = LiveDemo(
cfg,
model,
confidence_threshold=args.confidence_threshold,
show_mask_heatmaps=args.show_mask_heatmaps,
masks_per_dim=args.masks_per_dim,
min_image_size=args.min_image_size,
)
data_loader = make_data_loader(
cfg,
is_train=False,
is_distributed=False,
start_iter=0,
)
kps_name = [
'background',
'nose',
'left_eye',
'right_eye',
'left_ear',
'right_ear',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hip',
'right_hip',
'left_knee',
'right_knee',
'left_ankle',
'right_ankle'
]
vis_ch = 0
max_ch = 17
term_signal = False
for iteration,(image_lists,targets,proposals,_) in enumerate(data_loader[0]):
prediction,features = live_demo.run_on_dataset_image(image_lists)
img0 = image_lists.tensors[0]
img0 = img0.numpy().transpose(1,2,0)
#img0[...] += [102.9801,115.9465,122.7117]
img0[:,:,0] += 102.9801
img0[:,:,1] += 115.9465
img0[:,:,2] += 122.7117
img0 = img0.astype(np.uint8)
overlay = img0.copy()
if prediction is not None:
overlay = live_demo.overlay_boxes(overlay,prediction)
overlay = live_demo.overlay_keypoints(overlay,prediction)
while True:
heatmap = features[0].cpu().numpy()[vis_ch]
#alpha = 0.7
overlay_heat = live_demo.overlay_heatmap(img0,heatmap)
font = cv2.FONT_HERSHEY_SIMPLEX
overlay_heat = cv2.putText(overlay_heat,'channel:{}'.format(kps_name[vis_ch]),(10,20),font,1,(0,255,0),3)
cv2.imshow('heatmap',overlay_heat)
cv2.imshow('img',overlay)
retkey=cv2.waitKey(-1)
if retkey==ord('q'):
term_signal = True
break
elif retkey==ord('n'):
break
elif retkey==ord('d'):
vis_ch += 1
elif retkey==ord('a'):
vis_ch -= 1
if vis_ch >= max_ch:
vis_ch = max_ch
elif vis_ch <0 :
vis_ch = 0
if term_signal:
break
cv2.destroyAllWindows()
# while True:
# start_time = time.time()
# #ret_val, img = cam.read()
# composite = live_demo.run_on_opencv_image(img)
# print(composite)
# print("Time: {:.2f} s / img".format(time.time() - start_time))
# cv2.imshow("COCO detections", img)
# if cv2.waitKey(1) == 27:
# break # esc to quit
# cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| 5,171 |
split_meet/apps/accounts/migrations/0001_initial.py
|
rishabh3354/split_meet
| 0 |
2025419
|
# Generated by Django 2.2.17 on 2020-12-12 10:19
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('uu', models.UUIDField(default=uuid.uuid4, unique=True)),
('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
('first_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='last name')),
('is_active', models.BooleanField(default=True, verbose_name='active')),
('mobile', models.CharField(blank=True, max_length=20, null=True)),
('is_email_verified', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False, verbose_name='superuser')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
),
]
| 1,796 |
tests/test_ids.py
|
mzabrams/fars-cleaner
| 1 |
2023042
|
import fars_cleaner.fars_utils as futil
import pandas as pd
from hypothesis import given
from hypothesis import strategies as st
from hypothesis.extra.pandas import column, data_frames
@given(year=st.integers(min_value=1975, max_value=2019),
vehicle_numbers=st.integers(min_value=0,max_value=99),
st_case=st.integers(min_value=10000, max_value=569999))
def test_vehicle_id(year, vehicle_numbers, st_case):
output = int("{0}{1}{2:03}".format(str(year)[-2:], int(st_case), int(vehicle_numbers)))
df_in = pd.DataFrame({'ST_CASE': [st_case], 'VEH_NO': [vehicle_numbers]})
dummy = futil.createVehID(df_in, year)['VEH_ID'][0]
assert dummy == output
@given(year=st.integers(min_value=1975, max_value=2019),
vehicle_numbers=st.integers(min_value=0,max_value=99),
person_numbers=st.integers(min_value=0,max_value=99),
st_case=st.integers(min_value=10000, max_value=569999))
def test_person_id(year, vehicle_numbers, person_numbers, st_case):
output = int("{0}{1}{2:03}{3:03}".format(str(year)[-2:], int(st_case), int(vehicle_numbers), int(person_numbers)))
df_in = pd.DataFrame({'ST_CASE': [st_case],
'VEH_NO': [vehicle_numbers],
'PER_NO': [person_numbers],
'YEAR': [year]})
dummy = futil.createPerID(df_in, year)['PER_ID'][0]
dummy2 = futil.createPerID(df_in, None)['PER_ID'][0]
assert dummy == output
assert dummy2 == output
@given(year=st.integers(min_value=1975, max_value=2019),
st_case=st.integers(min_value=10000, max_value=569999))
def test_case_id(year, st_case):
output = int("{0}{1}".format(str(year)[-2:], int(st_case)))
df_in = pd.DataFrame({'ST_CASE': [st_case],
'YEAR': [year]})
dummy = futil.createCaseID(df_in, year)['ID'][0]
assert dummy == output
| 1,865 |
main.py
|
jvech/DeepSort_Yolo
| 2 |
2023007
|
import tkinter as tk
from interface import App
if __name__=="__main__":
root = tk.Tk()
app = App(root)
root.mainloop() # enter the Tk event loop (assuming App does not start one itself)
| 113 |
setup.py
|
benoitc/gevent-zeromq
| 1 |
2025529
|
try:
from setuptools import Extension, setup
except ImportError:
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
cython_available = False
try:
from Cython.Distutils import build_ext
from Cython.Distutils.extension import Extension
cython_available = True
except ImportError:
print 'cython not available, proceeding with pure python implementation.'
def get_ext_modules():
import zmq
return [
Extension(
'gevent_zeromq._zmq',
['gevent_zeromq/_zmq.py'],
include_dirs = zmq.get_includes(),
),
]
if cython_available:
ext_modules = get_ext_modules()
else:
ext_modules = []
__version__ = (0, 0, 1)
setup(
name = 'gevent_zeromq',
version = '.'.join([str(x) for x in __version__]),
packages = ['gevent_zeromq'],
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules,
author = '<NAME>',
author_email = '<EMAIL>',
description = 'gevent compatibility layer for pyzmq',
install_requires = ['pyzmq>=2.1.0', 'gevent'],
)
| 1,108 |
ocflib/infra/github.py
|
ocf/alib
| 13 |
2024381
|
from github import Github
from github import InputGitTreeElement
class GithubCredentials():
"""Basic class to store Github credentials and verify input"""
def __init__(self, username=None, password=<PASSWORD>, token=None):
if not (username or password or token):
raise ValueError('No credentials supplied')
if (username and password and token):
raise ValueError('Can\'t pass in both username/password and token')
if (username and not password) or (password and not username):
raise ValueError('One of username/password supplied without the other')
self.username = username
self.password = password
self.token = token
class GitRepo():
"""
Extension of PyGithub with a couple of other helper methods.
"""
def __init__(self, repo_name, credentials=None):
"""Retrieves a Repository by its fully qualified name. If credentials are passed
they will be used."""
if not credentials:
self._github = Github().get_repo(repo_name)
elif credentials.token:
self._github = Github(credentials.token).get_repo(repo_name)
else:
self._github = Github(credentials.username, credentials.password).get_repo(repo_name)
@property
def github(self):
"""
Direct access to the underlying PyGithub object.
"""
return self._github
def get_file(self, filename):
"""Fetch and decode the file from the master branch.
Note that GitHub's API only supports files up to 1MB in size."""
return self._github.get_contents(filename).decoded_content.decode('utf-8')
def modify_and_branch(self, base_branch, new_branch_name, commit_message, filename, file_content):
"""Create a new branch from base_branch, makes changes to a file, and
commits it to the new branch."""
base_sha = self._github.get_git_ref('heads/{}'.format(base_branch)).object.sha
base_tree = self._github.get_git_tree(base_sha)
element = InputGitTreeElement(filename, '100644', 'blob', file_content)
tree = self._github.create_git_tree([element], base_tree)
parent = self._github.get_git_commit(base_sha)
commit = self._github.create_git_commit(commit_message, tree, [parent])
self._github.create_git_ref('refs/heads/{}'.format(new_branch_name), commit.sha)
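# Hedged usage sketch; 'ocf/ocflib' is only an illustrative repository name,
# and get_file performs a live, unauthenticated GitHub API call when run:
if __name__ == '__main__':
repo = GitRepo('ocf/ocflib')
print(repo.get_file('README.md')[:80])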
| 2,419 |
src/bl_urx_script.py
|
hyunjeong847/ROS-EYE
| 13 |
2025331
|
import math
def f_to_s(v):
return '{0:.5f}'.format(float(v))
def list_to_array(vals):
return '{}'.format(','.join([f_to_s(v) for v in vals]))
class URScript(object):
def __init__(self):
self.text = ''
# Starts inside of a function
self.indent_level = 0
def add_line(self, text):
#print('add_line')
tabs = ''.join('\t' for i in range(0, self.indent_level))
#self.text += '{}{}\n'.format(tabs, text.strip())
self.text += '{}{}'.format(tabs, text.strip())
def function(self, name, args=[]):
print('function')
#self.add_line('def {}({}):'.format(name, ', '.join(args)))
self.add_line('{}:'.format(name, ', '.join(args)))
#self.add_line('{}:'.format(name)
self.indent_level += 1
def end(self):
if self.indent_level == 0:
raise Exception('No structure to end')
self.indent_level -= 1
self.add_line('end')
# NOTE: shadowed by the set_tool_digital_out definition further below.
def set_tool_digital_out(self, index, state):
self.add_line('set_tool_digital_out({}, {})'.format(index, state))
def while_loop(self, condition):
self.add_line('while {}:'.format(condition))
self.indent_level += 1
def servoj(self, angles, t=0.008, lookahead_time=0.1, gain=300):
if not len(angles) == 6:
raise Exception('Incorrect number of joint angles (need 6)')
a = 0
v = 0
self.add_line('servoj({}, {}, {}, {}, {}, {})'.format(f_to_s(1),list_to_array(angles), *[f_to_s(v) for v in [a, v, t, lookahead_time, gain]]))
def movej(self, angles, t=0, radius = 0, gripper=0):
#t=0: default, use accel & velo
#t>0: ignore accel & velo, make motion in t(sec)
if not len(angles) == 6:
raise Exception('Incorrect number of joint angles (need 6)')
self.add_line('movej({}, {}, {}, {}, {})'.format(f_to_s(1), list_to_array(angles), f_to_s(t), f_to_s(radius), f_to_s(gripper)))
def speedj(self,angles):
self.add_line('speedj({}, {})'.format(f_to_s(1), list_to_array(angles)))
def setVelo(self, a=1.4, v=1.05):
null_7s = [0,0,0,0,0,0,0]
# a fourth placeholder is needed; the null array was silently dropped by format
self.add_line('setVelo({}, {}, {}, {})'.format(f_to_s(2), f_to_s(a), f_to_s(v), list_to_array(null_7s)))
def stopj(self, a=2):
null_7s = [0,0,0,0,0,0,0]
self.add_line('stopj({}, {}, {})'.format(f_to_s(3), f_to_s(a), list_to_array(null_7s)))
def emergency(self):
null_8s = [0,0,0,0,0,0,0,0]
self.add_line('({}, {})'.format(f_to_s(3), list_to_array(null_8s)))
def server_Alive(self, a=2):
null_8s = [0,0,0,0,0,0,0,0]
self.add_line('({}, {})'.format(f_to_s(1), list_to_array(null_8s)))
def set_digital_out(self, num=1, out=1):
null_7s = [0,0,0,0,0,0,0]
self.add_line('set_digital_out({}, {}, {}, {})'.format(f_to_s(0),f_to_s(num), f_to_s(out), list_to_array(null_7s)))
def set_digital_out_off(self, num=1, out=0):
null_7s = [0,0,0,0,0,0,0]
self.add_line('set_digital_out({}, {}, {}, {})'.format(f_to_s(3),f_to_s(num), f_to_s(out), list_to_array(null_7s)))
def set_digital_out_on(self, num=1, out=1):
null_7s = [0,0,0,0,0,0,0]
self.add_line('set_digital_out({}, {}, {}, {})'.format(f_to_s(4),f_to_s(num), f_to_s(out), list_to_array(null_7s)))
def set_tool_digital_out(self, num=0, out=0):
null_6s = [0,0,0,0,0,0]
self.add_line('set_tool_digital_out({}, {}, {}, {})'.format(f_to_s(5),f_to_s(num), f_to_s(out), list_to_array(null_6s)))
def set_tool_voltage(self, voltage=24):
null_7s = [0,0,0,0,0,0,0]
self.add_line('set_tool_digital_out({}, {}, {})'.format(f_to_s(6),f_to_s(voltage), list_to_array(null_7s)))
# def move_home(self):
# null_7s = [0, 0, 0, 0, 0, 0, 0]
# self.add_line('({}, {}, {})'.format(f_to_s(8),f_to_s(1),list_to_array(null_7s)))
# print("move_home clear")
def move_home(self):
# self.add_line('({}, {}, {})'.format(f_to_s(8),f_to_s(1),list_to_array(null_7s)))
angles = [-(math.pi/2), -(math.pi/2), 0, -(math.pi/2), 0, 0]
self.add_line('move_home({}, {}, {}, {}, {})'.format(f_to_s(1), list_to_array(angles), 0, 0, 0))
def end_signal(self, end_signal = 1):
null_7s = [0,0,0,0,0,0,0]
self.add_line('end_signal({}, {}, {})'.format(f_to_s(9),f_to_s(end_signal),list_to_array(null_7s)))
print("end_signal clear")
def TeachMode(self):
null_8s = [0, 0, 0, 0, 0, 0, 0, 0]
self.add_line('({}, {})'.format(f_to_s(10),list_to_array(null_8s)))
print("TeachMode")
def offTeachMode(self):
null_8s = [0, 0, 0, 0, 0, 0, 0, 0]
self.add_line('({}, {})'.format(f_to_s(11),list_to_array(null_8s)))
print("offTeachMode")
def finish_Work(self):
null_8s = [0, 0, 0, 0, 0, 0, 0, 0]
self.add_line('({}, {})'.format(f_to_s(12),list_to_array(null_8s)))
print("finishWork")
| 4,633 |
converter.py
|
JontySR/htm
| 0 |
2023915
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Speech API sample application using the REST API for batch
processing.
Takes the audio and writes the text to a text file with the same name as the audio file.
Example usage:
python transcribe.py "audio.wav"
"""
import io
import time
# [START speech_transcribe_sync_gcs]
def transcribe_gcs(filename):
"""Asynchronously transcribes the audio file specified by the gcs_uri."""
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
client = speech.SpeechClient()
gcs_uri = 'gs://audioforhtm/{}'.format(filename)
audio = types.RecognitionAudio(uri=gcs_uri)
config = types.RecognitionConfig(
language_code='en-US')
operation = client.long_running_recognize(config, audio)
start = time.time()
print('Waiting for operation to complete...')
response = operation.result(timeout=3200)
end = time.time()
print(end - start)
# Each result is for a consecutive portion of the audio. Iterate through
# them to get the transcripts for the entire audio file.
output = open(gcs_uri[17:]+'.txt', 'w')
for result in response.results:
# The first alternative is the most likely one for this portion.
output.write('{}.'.format(result.alternatives[0].transcript))
output.close()
# [END speech_transcribe_sync_gcs]
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'path', help='GCS path for audio file to be recognized')
args = parser.parse_args()
transcribe_gcs(args.path)
| 2,323 |
utils/button_event.py
|
shounen51/barrage4U
| 0 |
2025285
|
import json
import time
import sys
from datetime import datetime
import random
import os
import logging
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import PyQt5.sip
from configs import platform_list, default_setting
from utils.utils import save_ini
class btn_events():
def __init__(self, main_window):
self.main = main_window
def edit_channel(self):
logging.info('edit_channel edited')
bot_platform = platform_list[self.main.ui.combo_platform.currentIndex()]
bot_channel = self.main.ui.edit_channel.text()
self.main.modfy_setting(bot_platform, 'channel', bot_channel)
def btn_login(self):
logging.info('btn_login clicked')
self.main.set_status(1)
bot_platform = platform_list[self.main.ui.combo_platform.currentIndex()]
bot_server = self.main.ui.edit_server.text()
bot_channel = self.main.ui.edit_channel.text()
bot_args = {
'server':bot_server,
'channel':bot_channel
}
OK = self.main.bot.login(bot_platform, bot_args)
if OK:
self.main.modfy_setting('connect', 'platform', bot_platform)
self.main.modfy_setting(bot_platform, 'channel', bot_channel)
else:
self.main.set_status(0)
def btn_save(self):
logging.info('btn_save clicked')
try:
bot_platform = platform_list[self.main.ui.combo_platform.currentIndex()]
bot_server = self.main.ui.edit_server.text()
bot_channel = self.main.ui.edit_channel.text()
Bontop = self.main.ui.cb_on_top.isChecked()
Bcrosshair = self.main.ui.cb_avoid_crosshair.isChecked()
Bscrolling = self.main.ui.edit_scrolling.text()
Bname = self.main.ui.cb_show_name.isChecked()
text = self.main.ui.edit_size.text()
Bsize = abs(int(text))
text = self.main.ui.edit_time.text()
Btime = abs(int(text))
Balpha = self.main.ui.sli_alpha.value()
self.main.modfy_setting('connect', 'platform', bot_platform)
self.main.modfy_setting(bot_platform, 'channel', bot_channel)
self.main.modfy_setting('discord', 'server', bot_server)
self.main.modfy_setting('canvas', 'cover', Bontop)
self.main.modfy_setting('canvas', 'avoid_crosshair', Bcrosshair)
self.main.modfy_setting('canvas', 'scrolling_text', Bscrolling)
self.main.modfy_setting('barrage', 'name', Bname)
self.main.modfy_setting('barrage', 'size', Bsize)
self.main.modfy_setting('barrage', 'alive_time', Btime)
self.main.modfy_setting('barrage', 'alpha', Balpha)
except Exception as e:
logging.warning('save setting.ini failed.')
logging.error(e)
return
save_ini('./setting.ini', self.main.setting)
def btn_re_exec(self):
logging.info('btn_re_exec clicked')
os.startfile(sys.argv[0])
self.main.close()
def combo_platform(self):
logging.info('combo_platform selected')
index = self.main.ui.combo_platform.currentIndex()
self.main.ui.change_platform_combobox(index)
def cb_optional(self):
logging.info('cb_optional checked')
check = self.main.ui.cb_optional.isChecked()
if check:
self.main.resize(700, 600)
else:
self.main.resize(400, 600)
def cb_on_top(self):
logging.info('cb_on_top checked')
check = self.main.ui.cb_on_top.isChecked()
self.main.modfy_setting('canvas', 'cover', check)
if check:
self.main.canvas.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.BypassWindowManagerHint | QtCore.Qt.WindowStaysOnTopHint)
self.main.canvas.show()
self.main.setFocus()
else:
self.main.canvas.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.BypassWindowManagerHint)
self.main.canvas.show()
self.main.setFocus()
def cb_show_name(self):
logging.info('cb_show_name checked')
check = self.main.ui.cb_show_name.isChecked()
self.main.modfy_setting('barrage', 'name', check)
self.main.canvas.set_name_mode(check)
def cb_avoid_crosshair(self):
logging.info('cb_avoid_crosshair checked')
check = self.main.ui.cb_avoid_crosshair.isChecked()
self.main.modfy_setting('canvas', 'avoid_crosshair', check)
self.main.barrage_thread.avoid_crosshair(check)
def sli_alpha(self):
# logging.info('sli_alpha changed')
value = self.main.ui.sli_alpha.value()
self.main.modfy_setting('barrage', 'alpha', value)
self.main.canvas.set_alpha(value)
| 4,856 |
tools/mo/openvino/tools/mo/front/mxnet/arange_like_ext.py
|
ryanloney/openvino-1
| 1,127 |
2025534
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.graph.graph import Node
from openvino.tools.mo.ops.arange_like import ArangeLikeOp
class ArangeLikeExt(FrontExtractorOp):
op = '_contrib_arange_like'
enabled = True
@classmethod
def extract(cls, node: Node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
ArangeLikeOp.update_node_stat(node, {
'start': attrs.float('start', 0),
'repeat': attrs.int('repeat', 1),
'step': attrs.float('step', 1),
'axis': attrs.int('axis', None),
})
return cls.enabled
| 818 |
snake/config/__init__.py
|
TheoKlein/snake-core
| 9 |
2024385
|
"""This module exposes the initialised config object.
Attributes:
config_parser (:obj:`Config`): The configuration parser for snake.
scale_configs (dict): Convenient access to the scale_configs dictionary.
snake_config (dict): Convenient access to the snake_config dictionary.
"""
from snake.config import config
# pylint: disable=invalid-name
config_parser = config.Config()
scale_configs = config_parser.scale_configs
snake_config = config_parser.snake_config
| 477 |
Utils/py/BallDetection/PatchClassificator/patchReader.py
|
BerlinUnited/NaoTH
| 15 |
2025283
|
import sys
import getopt
from naoth.log import Reader as LogReader
from naoth.log import Parser
import numpy
from PIL import Image
def parse_arguments(argv):
input_file = ''
try:
opts, args = getopt.getopt(argv, "hi:", ["ifile="])
except getopt.GetoptError:
print('patchReader.py -i <input file>')
sys.exit(2)
if not opts:
print('python patchReader.py -i <logfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('patchReader.py -i <input file>')
sys.exit()
elif opt in ("-i", "--ifile"):
input_file = arg
return input_file
def image_from_proto(message):
# read each channel of yuv422 separately
yuv422 = numpy.frombuffer(message.data, dtype=numpy.uint8)
y = yuv422[0::2]
u = yuv422[1::4]
v = yuv422[3::4]
# convert from yuv422 to yuv888
yuv888 = numpy.zeros(message.height * message.width * 3, dtype=numpy.uint8)
yuv888[0::3] = y
yuv888[1::6] = u
yuv888[2::6] = v
yuv888[4::6] = u
yuv888[5::6] = v
yuv888 = yuv888.reshape(message.height, message.width, 3)
# wrap the YCbCr buffer in a PIL image
img = Image.frombytes('YCbCr', (message.width, message.height), yuv888.tobytes())
return img
def get_images(frame):
# collect top and bottom images together with their camera matrices
image_top = frame["ImageTop"]
image_bottom = frame["Image"]
cm_bottom = frame["CameraMatrix"]
cm_top = frame["CameraMatrixTop"]
return [frame.number, image_from_proto(image_bottom), image_from_proto(image_top), cm_bottom,
cm_top]
def get_patches(frame):
ball_candidates = frame["BallCandidates"]
ball_candidates_top = frame["BallCandidatesTop"]
# print len(ball_candidates.patches), len(ball_candidates_top.patches)
return [ball_candidates_top]
def read_all_patches_from_log(fileName, type=0):
# initialize the parser
my_parser = Parser()
# register the protobuf message name for the 'ImageTop'
my_parser.register("ImageTop", "Image")
my_parser.register("BallCandidatesTop", "BallCandidates")
my_parser.register("CameraMatrixTop", "CameraMatrix")
# get all the images from the logfile
# images = map(getPatches, LogReader(fileName, my_parser))
camera_index = []
patches = []
for frame in LogReader(fileName, my_parser):
ball_candidates = frame["BallCandidates"]
for p in ball_candidates.patches:
if p.type == type:
data = numpy.frombuffer(p.data, dtype=numpy.uint8)
patches.append(data)
camera_index.append([0])
ball_candidates_top = frame["BallCandidatesTop"]
for p in ball_candidates_top.patches:
if p.type == type:
data = numpy.frombuffer(p.data, dtype=numpy.uint8)
patches.append(data)
camera_index.append([1])
return patches, camera_index
if __name__ == "__main__":
fileName = parse_arguments(sys.argv[1:])
print(fileName)
patches, camera_index = read_all_patches_from_log(fileName)
print(len(patches))
| 3,128 |
repl.py
|
AyushBhargav/Brain-Friendly-Interpreter
| 1 |
2023133
|
import sys
import tape
def scan_char(cmd, t):
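# NOTE: bracket matching below is flat; nested [ ] loops are not handled.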
index = 0
while index < len(cmd):
ch = cmd[index]
if ch == '>':
t.move_pointer(right=True)
elif ch == '<':
t.move_pointer(left=True)
elif ch == '+':
t.change_value(diff=1)
elif ch == '-':
t.change_value(diff=-1)
elif ch == '.':
t.get_value()
elif ch == ',':
t.set_value()
elif ch == '[':
n_index = cmd.find(']', index+1)
if n_index == -1:
print("Loop unclosed.")
return
if t.peek_value() == "0":
# Skip loop
index = n_index
elif ch == ']':
p_index = cmd.find('[', 0, index-1)
if p_index == -1:
print("Loop unclosed.")
return
if t.peek_value() != "0":
# Jump back to the loop start
index = p_index
index += 1
def run():
t = tape.Tape()
print("REPL for BF interpreter. Enter 'exit' to close shell.")
while True:
cmd = input(">> ")
if cmd == 'exit':
print("Shell aborted.")
return
scan_char(cmd, t)
print() # Newline
| 1,275 |
pydenji/appcontext/__init__.py
|
alanfranz/pydenji
| 0 |
2025642
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (C) 2010 <NAME>
# compatibility hooks, to be removed.
#from .aware import *
#from .context import *
| 148 |
Value added course(ML)/linear_reg.py
|
Highcourtdurai/Value-adding-course-ML-
| 0 |
2024307
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
# Data preparation
x = 10*np.random.rand(100)
y = 3*x+np.random.rand(100)
print(x,y)
plt.scatter(x, y)
plt.show()
#choose class of model
model=LinearRegression(fit_intercept=True)
#arrange data in matrix
X=x.reshape(-1,1)
#fit model
model.fit(X,y)
print(model.coef_,model.intercept_)
# Data preparation for prediction
x_fit=np.linspace(-1,11)
X_fit=x_fit.reshape(-1,1)
#prediction
y_fit=model.predict(X_fit)
plt.scatter(x,y)
plt.plot(x_fit,y_fit)
plt.show()
| 594 |
src/sqlite_writer.py
|
atompkins/ufsg-item-scraper-py
| 0 |
2025343
|
import sqlite3
con = sqlite3.connect('data.sqlite', isolation_level=None)
def init_sql():
con.execute('''CREATE TABLE IF NOT EXISTS data (
id integer primary key
, name text null
, rarity text null
, Type text null
, Level integer null
, Attack integer null
, Defense integer null
, Armor integer null
, Damage integer null
, HP integer null
, XPGain integer null
, Stamina integer null
, StaminaGain integer null
, GoldGain integer null
, Banishment integer null
, BeastSlayer integer null
, Breaker integer null
, CriticalHit integer null
, Disarm integer null
, Dodge integer null
, Duelist integer null
, EliteHunter integer null
, FirstStrike integer null
, FuryCaster integer null
, GlorySeeker integer null
, GreenskinSlayer integer null
, Holy integer null
, Hypnotize integer null
, MasterBlacksmith integer null
, MasterCrafter integer null
, MasterInventor integer null
, MasterThief integer null
, Nullify integer null
, Oceanic integer null
, PiercingStrike integer null
, ProtectGold integer null
, Protection integer null
, ReinforcedArmor integer null
, Sustain integer null
, TemporalShift integer null
, Thievery integer null
, craftAttack integer null
, craftDefense integer null
, craftArmor integer null
, craftDamage integer null
, craftHP integer null
, craftXPGain integer null
, craftStamina integer null
, craftGoldGain integer null
, setName text null
, setAttack integer null
, setDefense integer null
, setArmor integer null
, setDamage integer null
, setHP integer null
, setXPGain integer null
, setStamina integer null
, setStaminaGain integer null
, setGoldGain integer null
, setBanishment integer null
, setBeastSlayer integer null
, setBreaker integer null
, setCriticalHit integer null
, setDisarm integer null
, setDodge integer null
, setDuelist integer null
, setEliteHunter integer null
, setFirstStrike integer null
, setFuryCaster integer null
, setGlorySeeker integer null
, setGreenskinSlayer integer null
, setHoly integer null
, setHypnotize integer null
, setMasterBlacksmith integer null
, setMasterCrafter integer null
, setMasterInventor integer null
, setMasterThief integer null
, setNullify integer null
, setOceanic integer null
, setPiercingStrike integer null
, setProtectGold integer null
, setProtection integer null
, setReinforcedArmor integer null
, setSoulless integer null
, setSustain integer null
, setTemporalShift integer null
, setThievery integer null
)''')
def close_sql():
con.close()
def sql_writer(items):
colNames = ', '.join([a for (a, b) in items])
placeholders = ', '.join(['?' for (a, b) in items])
values = tuple(b for (a, b) in items)
con.execute(f'REPLACE INTO data ({colNames}) values({placeholders})', values)
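# Hedged usage sketch; the column/value pairs are illustrative and must match
# the schema created by init_sql():
if __name__ == '__main__':
init_sql()
sql_writer([('id', 1), ('name', 'Example Sword'), ('rarity', 'common')])
close_sql()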
| 3,647 |
backend/server.py
|
mrzzy/Portfolio-I
| 1 |
2024128
|
#
# server.py
# Style Transfer Server
#
import os
import api
import uuid
import styleopt
from PIL import Image
from flask import Flask, request
from multiprocessing import Process, Queue, Manager
## Tasking
# Style transfer worker that runs style transfers task as defined by the
# payloads queued
class TransferWorker:
def __init__(self, queue=Queue(), verbose=True):
self.queue = queue
self.verbose = verbose
# Setup shared style transfer process log
manager = Manager()
self.log = manager.dict()
# Setup worker process
self.process = Process(target=self.run)
self.process.start()
# Setup directory to generated pastiches
if not os.path.exists("static/pastiche"): os.mkdir("static/pastiche")
# Enqueue a new style transfer task parameterised by the given style
# transfer request. Returns a uuid that uniquely identifies the task
def enqueue(self, request):
# Create task for request
task_id = str(uuid.uuid4())
task = {
"request": request,
"ID": task_id
}
# Queue task for style transfer
self.log[task_id] = 0.0
self.queue.put(task)
return task_id
# Run loop of worker
def run(self):
while True:
# Perform style transfer for style transfer requst
task = self.queue.get()
request = task["request"]
task_id = task["ID"]
# Unpack style transfer request
content_image = request.content_image
style_image = request.style_image
settings = request.settings
# Perform style transfer
# Callback to record status of style transfer in worker log
def callback_status(graph, feed, i_epoch):
n_epoch = graph.settings["n_epochs"]
self.log[task_id] = i_epoch / n_epoch
if self.verbose: print("[TransferWorker]: processing task: ", task_id)
try:
pastiche_image = styleopt.transfer_style(content_image, style_image,
settings=settings,
callbacks=[styleopt.callback_progress,
styleopt.callback_tensorboard,
callback_status])
except Exception as e:
# Style transfer failed for some reason
print("[TransferWorker]: FATAL: style transfer failed for task:",
task_id)
print(repr(e))
self.log[task_id] = -1.0 # Mark failure for task in log
continue # Abandon this job and move on to the next
# Save results of style transfer
if self.verbose: print("[TransferWorker]: completed payload: ", task_id)
pastiche_image.save("static/pastiche/{}.jpg".format(task_id))
# Check the status of the worker task specified by task_id
# Returns None if no task for the given task_id is found
# Returns -1.0 if style transfer task failed for some reason
def check_status(self, task_id):
if task_id not in self.log: return None
else: return self.log[task_id]
worker = TransferWorker()
# Server Routes
app = Flask(__name__, static_folder="static")
# Default route "/" displays server running message, used to check server if
# server is running properly
@app.route("/", methods=["GET"])
def route_test():
return app.send_static_file("test.html")
## REST API
# Rest API route "/api/style" triggers style transfer given POST style transfer
# request payload
@app.route("/api/style", methods=["POST"])
def route_api_style():
print("[REST]: /api/style")
# Read style transfer request from body
transfer_request = api.TransferRequest.parse(request.data)
# Queue request to perform style transfer on worker
task_id = worker.enqueue(transfer_request)
# Return response to requester
response = api.TransferResponse(task_id)
return response.serialise(), 200, {'ContentType':'application/json'}
# Rest API route "/api/status" retrieves the current status of style transfer
# for the given task_id.
@app.route("/api/status/<task_id>", methods=["GET"])
def route_api_status(task_id):
print("[REST]: /api/status")
# Query work current status
progress = worker.check_status(task_id)
if progress is None:
status_code = 404 # Task for the given ID not found
elif progress == -1.0:
status_code = 500 # Internal server error in style transfer
else:
status_code = 200
# Return status response to request
response = api.StatusResponse(progress)
return response.serialise(), status_code, {'ContentType':'application/json'}
# Rest API route "/api/pastiche" retrieves the pastiche
# for the given task_id.
@app.route("/api/pastiche/<task_id>", methods=["GET"])
def route_api_pastiche(task_id):
print("[REST]: /api/pastiche")
# Query work current status
progress = worker.check_status(task_id)
if progress is None:
status_code = 404 # Task for the given ID not found
return "", status_code
elif progress == -1.0:
status_code = 500 # Internal server error in style transfer
return "", status_code
elif progress >= 0.0 and progress < 1.0:
status_code = 202 # Style transfer generated pastiche not yet ready
return "", status_code
else:
status_code = 200
return app.send_static_file("pastiche/{}.jpg".format(task_id)), status_code
# Cross origin pain in the ass
@app.after_request
def handle_cors(response):
header = response.headers
header['Access-Control-Allow-Origin'] = '*'
return response
if __name__ == "__main__":
app.run(host='0.0.0.0', port=api.SERVER_PORT)
| 6,004 |
rochambeau.py
|
stevie170/python-scripts
| 0 |
2024231
|
# a Python script to play the game rochambeau (rock, paper, scissors)
# user vs computer
import random
# set base scores to 0
computer_score = 0
player_score = 0
# introduce the game
print("\n")
print("Let's play a game!")
print("Ever heard of Rochambeau?")
print("You might call it \"Rock, Paper, Scissors.\"")
print("Enter your choice, or type \"end\" to stop the game.")
print("\n")
choices = ['Rock', 'Paper', 'Scissors']
while True: # loop until the loop breaks
computer = random.choice(choices)
player = input("rock, paper, or scissors? ").capitalize() # capitalize the input (first letter only) so it's easy to use in conditional statements
# a series of conditionals to see who won and keep score
if player == computer:
print("Tie! You both chose", computer)
elif player == "Rock":
if computer == "Paper":
print("You lose! ", computer, "covers", player)
computer_score += 1 # add 1 to the computer's score
else:
print("You win! ", player, "smashes", computer)
player_score += 1 # add 1 to the player's score
elif player == "Paper":
if computer == "Scissors":
print("You lose! ", computer, "cut", player)
computer_score += 1 # add 1 to the computer's score
else:
print("You win! ", player, "covers", computer)
player_score += 1 # add 1 to the player's score
elif player == "Scissors":
if computer == "Rock":
print("You lose! ", computer, "smashes", player)
computer_score += 1 # add 1 to the computer's score
else:
print("You win! ", player, "cut", computer)
player_score += 1 # add 1 to the player's score
elif player == "End": # stop the game, print the score
print("Final Scores:")
print(f"CPU: {computer_score}")
print(f"Player: {player_score}")
break # end the loop (this break runs when the player inputs "End")
else: # stop the game, error
print("Invalid input. Hope you'll try again!")
break # end the loop (this break handles any input other than the three choices or "End")
| 2,109 |
setup.py
|
pshem/andriller
| 2 |
2025396
|
import os.path
from setuptools import setup
from andriller import __version__, __website__, __package_name__
req = os.path.join(os.path.dirname(__file__), 'requirements.txt')
with open(req, 'rt', encoding="utf-8") as f:
install_requires = [dep for dep in f.read().splitlines() if not dep.startswith('#')]
reme = os.path.join(os.path.dirname(__file__), 'README.md')
with open(reme, 'rt', encoding='utf-8') as f:
long_description = f.read()
setup(
name='andriller',
scripts=['andriller-gui.py'],
version=__version__,
description='Andriller CE | Android Forensic Tools',
author='<NAME>',
author_email='<EMAIL>',
url=__website__,
packages=[__package_name__],
license='MIT License',
keywords="andriller android forensic forensics adb dfir".split(),
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=install_requires,
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
zip_safe=True)
| 1,172 |
test/test_add_contact_to_group.py
|
karahcheev/python_training
| 0 |
2025244
|
import random
from fixture.orm import ORMFixture
from model.contact import Contact
from model.group import Group
orm = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
def test_add_contact_to_group(app, db):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="adding to group"))
if len(db.get_group_list()) == 0:
group = Group(name="name1", header="header1", footer="footer1")
app.group.create(group)
random_group = random.choice(db.get_group_list())
contact = random.choice(db.get_contact_list())
print("Contact ", contact.firstname, contact.lastname, "added to group ", random_group.name)
app.contact.add_contact_to_group_by_id(contact.id, random_group.name)
contact_in_group = orm.get_contacts_in_group(random_group)
assert contact in contact_in_group
| 864 |
sql_app/admin_crud.py
|
louis70109/testcontainer-fastapi-example
| 2 |
2025537
|
from sqlalchemy.orm import Session
from . import models, schemas
def get_games(db: Session, skip: int = 0, limit: int = 12):
# order_by must be applied before offset/limit in SQLAlchemy
return db.query(models.Game).order_by("score desc, level desc").offset(skip).limit(limit).all()
def get_styles(db: Session):
return db.query(models.Style).all()
def set_styles(db: Session, style: schemas.StyleCreate):
styles = get_styles(db=db)
for idx in range(len(styles)):
styles[idx].size = style.size
styles[idx].color = style.color
styles[idx].duration = style.duration
styles[idx].level = style.level
db.add(styles[idx])
try:
db.commit()
db.close()
except Exception:
raise Exception("create style error")
return style
| 823 |
lab_01/installer.py
|
solnishko-pvs/Infomation_Security_BMSTU
| 0 |
2025556
|
from os import path, remove, rename, chdir, system, replace
from subprocess import check_output
from hashlib import sha256
def get_key():
raw_uuid = check_output("wmic csproduct get UUID").decode()
uuid = raw_uuid.split("\n")[1][:-4]
raw_inum = check_output("wmic csproduct get identifyingnumber").decode()
inum = raw_inum.split("\n")[1][:-4]
code = uuid + inum
return str(sha256(code.encode('utf-8')).hexdigest())
def rewrite_file(filename, key):
if not path.isfile(filename):
return False
f = open(filename, 'r')
new_f = open("new.txt", 'w')
lines = f.readlines()
new_f.write("magic_number = " + '\'' + str(key) + '\'' + '\n')
for i in range(1, len(lines)):
new_f.write(lines[i])
f.close()
new_f.close()
remove(path.abspath(filename))
rename("new.txt", filename)
return True
def build_image():
try:
chdir("venv")
chdir("Scripts")
system(".\pyinstaller.exe -F ../../main.py")
replace("C:\\Users\\vlad2\\Desktop\\7th_part\\Infomation_Security\\lab_01\\venv\\Scripts\\dist\\main.exe",
"C:\\Users\\vlad2\\Desktop\\7th_part\\Infomation_Security\\lab_01\\main.exe")
except Exception as e:
print("Something went wrong: {}".format(e))
return
def run_installation(filename):
key = get_key()
if not rewrite_file(filename, key):
print(f"Error: file {filename} doesn't exist")
return False
build_image()
return True
if __name__ == '__main__':
run_installation("main.py")
| 1,536 |
src/datasets/simple_boxes/generate.py
|
LadaOndris/IBT
| 0 |
2024342
|
import os
import cv2
import numpy as np
from src.utils.paths import SIMPLE_DATASET_DIR
"""
It is a simplified dataset to verify models intended for hand detection.
Each generated image contains two circles.
Run this file to generate the simple dataset.
Edit the dataset_size to determine the number of images in the dataset.
File bboxes.txt is created during the generation that contains true
bounding box labels for each image.
"""
images_path = str(SIMPLE_DATASET_DIR.joinpath('images'))
os.makedirs(images_path, exist_ok=True)
image_size = (416, 416, 1)
dataset_size = 10
value = 150
radius_range = (10, 60)
# (dataset_size, 2, 1)
radiuses = np.random.randint(radius_range[0], radius_range[1], size=(dataset_size, 2, 1))
max_window_width = 416 - radiuses
centers = np.random.randint(0, 416, size=(dataset_size, 2, 2))
# move boxes which are partly out of the image
centers = np.where(centers < radiuses, centers + radiuses, centers)
centers = np.where(centers > 416 - radiuses, centers - radiuses, centers)
lines = []
for i, (center, radius) in enumerate(zip(centers, radiuses)):
topleft = center - radius
rightbottom = center + radius
img = np.zeros(image_size, dtype="uint8")
cv2.circle(img, tuple(center[0]), radius[0], value, thickness=-1, lineType=8, shift=0)
cv2.circle(img, tuple(center[1]), radius[1], value, thickness=-1, lineType=8, shift=0)
file_name = F"image_{i}.png"
cv2.imwrite(os.path.join(images_path, file_name), img)
lines.append(file_name)
lines[i] += F" {topleft[0][0]} {topleft[0][1]} {rightbottom[0][0]} {rightbottom[0][1]}"
lines[i] += F" {topleft[1][0]} {topleft[1][1]} {rightbottom[1][0]} {rightbottom[1][1]}"
lines[i] += '\n'
with open(SIMPLE_DATASET_DIR.joinpath('bboxes.txt'), 'w') as f:
f.writelines(lines)
| 1,802 |
ex093.py
|
honeyhugh/PythonCurso
| 0 |
2024722
|
jogador = dict()
jogador['Nome'] = input('Nome do jogador: ').strip().title()
jogador['Partidas'] = int(input(f'Quantidade de partidas que {jogador["Nome"]} jogou: '))
gols = list()
for p in range(1, jogador['Partidas'] + 1):
gols.append(int(input(f'Gols feitos na {p}ª partida: ')))
jogador['Gols'] = gols.copy()
jogador['Total de Gols'] = sum(gols)
print('-=' * 20)
print(jogador)
print('-=' * 20)
for k, v in jogador.items():
print(f'{k}: {v}')
print('-=' * 20)
print(f'O jogador {jogador["Nome"]} jogou {jogador["Partidas"]} partidas.')
for v in range(1, jogador['Partidas'] + 1):
print(f'=> Na {v}ª partida, fez {jogador["Gols"][v - 1]} gol(s)')
print(f'E o total de gols foi {jogador["Total de Gols"]}.')
| 723 |
ntlm_parser/decode.py
|
NoFishLikeIan/ntlm_parser
| 3 |
2025119
|
import sys
import base64
import struct
import string
import collections
from functools import partial
from .parsers import parse_request_type, parse_response_type, parse_challenge_type
process_fn = {
1: parse_request_type,
2: parse_challenge_type,
3: parse_response_type
}
possible_messages = ['request', 'challenge', 'response']
def decode(passed_auth=None):
parsed_data = {
'valid_sig': False,
'message': 'unfound'
}
auth = sys.stdin.read() if passed_auth is None else passed_auth
# Converting
try:
auth_b64 = base64.b64decode(auth)
except Exception as e:
raise Exception(f'Input is not a base64 string, failed with:\t{e}')
# Parsing signature
signature = auth_b64[:8]
if signature == b'NTLMSSP\x00':
print('Authentication signature parsed correctly')
else:
raise Exception(f'Found signature {signature} instead of NTLMSSP\x00')
parsed_data['valid_sig'] = True
# Parsing message
message_portion = auth_b64[8:12]
message_id = struct.unpack('<i', message_portion)[0]
    if 1 <= message_id <= 3:
        message_type = possible_messages[message_id - 1]
        parsed_data['message'] = message_type
        print(f'Message type:\t{message_type}')
if message_id in process_fn:
parsed_structure = process_fn[message_id](auth_b64)
parsed_data['structure'] = parsed_structure
else:
        raise Exception(f"Unknown message structure. Have a raw (hex-encoded) message:\n{auth_b64.hex()}")
return parsed_data
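# A hedged sketch of the layout decode() expects: an 8-byte signature
# followed by a little-endian int32 message id. Whether the zero-padded
# body below satisfies parse_request_type is an assumption:
#   sample = base64.b64encode(b'NTLMSSP\x00' + struct.pack('<i', 1) + b'\x00' * 24)
#   decode(sample.decode())  # -> {'valid_sig': True, 'message': 'request', ...}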
| 1,581 |
Chapter10/utils.py
|
PacktPublishing/Machine-Learning-with-TensorFlow-1.x
| 12 |
2025317
|
from prettytable import PrettyTable
def print_variables(variables):
table = PrettyTable(["Variable Name", "Shape"])
for var in variables:
table.add_row([var.name, var.get_shape()])
print(table)
print("")
def print_layers(layers):
table = PrettyTable(["Layer Name", "Shape"])
for var in layers.values():
table.add_row([var.name, var.get_shape()])
print(table)
print("")
def lines_from_file(filename, repeat=False):
    """Yield stripped lines from filename; loop back to the start at EOF when repeat is True."""
    with open(filename) as handle:
        while True:
            try:
                line = next(handle)
                yield line.strip()
            except StopIteration:
                if repeat:
                    handle.seek(0)
                else:
                    return  # PEP 479: re-raising StopIteration inside a generator raises RuntimeError
if __name__ == "__main__":
data_reader = lines_from_file("/home/ubuntu/datasets/ucf101/sample.txt", repeat=True)
for i in range(15):
print(next(data_reader))
| 936 |
sgkit_vcf/tests/utils.py
|
ravwojdyla/sgkit-vcf
| 0 |
2023882
|
from pathlib import Path
from sgkit.typing import PathType
def path_for_test(shared_datadir: Path, file: str, is_path: bool = True) -> PathType:
"""Return a test data path whose type is determined by `is_path`.
    If `is_path` is True, return a `Path`, otherwise return a `str`.
"""
path: PathType = shared_datadir / file
if not is_path:
path = str(path)
return path
| 399 |
hltv/api/base_request.py
|
kiobu/hltv-py
| 0 |
2024974
|
from typing import Any
import requests
import hltv.libs.logger
from hltv.api.parser import ResponseParser
from hltv.api.models import Consts
class Request:
def __init__(self):
self.url = Consts.HLTV_URL
def __call__(self):
hltv.libs.logger.request(f"GET: [{self.url}]")
parser = ResponseParser(requests.get(self.url, headers={'User-Agent': Consts.USER_AGENT}))
self.parsed_body: Any = parser()
return self.parsed_body
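# A minimal usage sketch (assuming ResponseParser returns the parsed body):
#   request = Request()
#   body = request()  # GETs Consts.HLTV_URL with the configured User-Agent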
| 470 |
desafios/exe045.py
|
RafaelGomides/VemPython
| 0 |
2025668
|
# Project: VemPython/exe045
# Author: rafael
# Date: 16/03/18 - 11:01
# Goal: TODO Create a program that makes the computer play JOKENPÔ (rock-paper-scissors) with the user
from time import sleep
from random import randint
opc = 1
uv = 0
cv = 0
# Intro
text = ['\033[1;3{}mJO\033[m'.format(randint(0, 7)),
        '\033[1;3{}mKEN\033[m'.format(randint(0, 7)),
        '\033[1;3{}mPÔ\033[m'.format(randint(0, 7))]
for i in range(0, 3):
    print(text[i], end="", flush=True)
    sleep(1)
escolhas = ['\033[1;30;4{}mROCK\033[m'.format(randint(1, 7)),
            '\033[1;30;4{}mPAPER\033[m'.format(randint(1, 7)),
            '\033[1;30;4{}mSCISSORS\033[m'.format(randint(1, 7))]
# Game start
while opc == 1:
    # Choices
    com = randint(0, 2)
    usr = int(input('\n\nMake your choice: \n'
                    '1 - {}\n2 - {}\n3 - {}\n'
                    'Option: '.format(escolhas[0], escolhas[1], escolhas[2])))-1
    # Result
    print('\n\nI chose {} and you chose {}!\n\n'.format(escolhas[com], escolhas[usr]))
    if com == usr:
        print('\033[7;33mDRAW\033[m')
    elif (com == 0 and usr == 2) or (com == 1 and usr == 0) or (com == 2 and usr == 1):
        print('\033[7;31mYOU LOST!\033[m')
        cv += 1
    else:
        print('\033[7;32mYOU WON\033[m')
        uv += 1
    # Score
    print('THE SCORE IS:\n\033[34mUSER\033[m \033[32m{}\033[m X \033[32m{}\033[m \033[36mCPU\033[m'.format(uv, cv))
    sleep(3)
    opc = int(input('\n\nDo you want to play again?\n'
                    '\n\033[4;32m1 - Yes'
                    '\n\033[4;31m2 - No\033[m'
                    '\nOption: '))
| 1,643 |
senseclust/methods/base.py
|
frankier/finn-sense-clust
| 0 |
2023056
|
import os
import sys
from dataclasses import dataclass
from expcomb.models import Exp, ExpGroup as ExpGroupBase
from senseclust.exceptions import NoSuchLemmaException
from senseclust.eval import eval, UnguessedException
from click.utils import LazyFile
from os.path import exists
@dataclass(frozen=True)
class ExpPathInfo:
corpus: str
guess: str
gold: str
def get_paths(self, iden, exp):
return self.corpus, self.guess, None, self.gold
class SenseClusExp(Exp):
returns_centers = False
def run(self, words_fn, guess_fn, exemplars=False):
add_exemplars = self.returns_centers and exemplars
with open(words_fn) as inf, LazyFile(guess_fn, "w") as outf:
try:
for line in inf:
lemma_name, pos = line.strip().rsplit(",", 1)
try:
if add_exemplars:
clus_obj, centers = self.clus_lemma(lemma_name, pos, True)
else:
clus_obj = self.clus_lemma(lemma_name, pos)
centers = []
except NoSuchLemmaException:
print(f"No such lemma: {lemma_name}", file=sys.stderr)
else:
for k, v in sorted(clus_obj.items()):
num = k + 1
for ss in v:
if add_exemplars:
exemplar = "1" if ss in centers else "0"
print(f"{lemma_name}.{num:02},{ss},{exemplar}", file=outf)
else:
print(f"{lemma_name}.{num:02},{ss}", file=outf)
except Exception:
# This is probably a partial guess: delete it to avoid getting
# incorrect results
outf.close()
if exists(guess_fn):
os.unlink(guess_fn)
raise
def calc_score(self, gold, guess_path):
try:
return eval(open(gold), open(guess_path), False)
except UnguessedException as exc:
exc.gold_fn = gold
exc.guess_fn = guess_path
raise
def clus_lemma(self, *args, **kwargs):
return self.clus_func(*args, **kwargs)
class ExpGroup(ExpGroupBase):
supports_wiktionary = False
supports_wordnet = False
group_attrs = ("supports_wiktionary", "supports_wordnet")
class WiktionaryOnlyExpGroup(ExpGroup):
supports_wiktionary = True
class WordnetOnlyExpGroup(ExpGroup):
supports_wordnet = True
class BothExpGroup(ExpGroup):
supports_wiktionary = True
supports_wordnet = True
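# A sketch (inferred from run() above, not from project docs) of the file
# formats involved:
#   words file : one "lemma,pos" pair per line, e.g.  bank,N
#   guess file : "lemma.NN,sense_id[,exemplar]" per clustered sense, e.g.
#                bank.01,sense_id,1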
| 2,756 |
main.py
|
matcool/pygdps
| 7 |
2025467
|
from flask import Flask, request
import json
import os
from importlib import import_module
from context import Context
from colorama import init, Fore
app = Flask(__name__)
@app.route('/')
def root():
return 'hi there'
@app.errorhandler(404)
def err(e):
print(f'{Fore.YELLOW}Unhandled request! {Fore.LIGHTWHITE_EX}{request.path} {Fore.LIGHTBLACK_EX}{json.dumps(request.values.to_dict())}{Fore.RESET}')
return '-1'
ctx = Context
ctx.app = app
init() # init colorama
for dirpath, _, files in os.walk('routes'):
    for file in files:
        if not file.endswith('.py'): continue
        # this is very hacky but whatever
        path = os.path.join(dirpath, file[:-3]).replace(os.sep, '.')
        print(f'{Fore.LIGHTBLACK_EX}Importing module: {Fore.RESET}{path}')
        import_module(path).setup(ctx)
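# A hedged sketch of the contract assumed above: every .py file under
# routes/ exposes a setup(ctx) hook that registers endpoints on ctx.app.
# (routes/ping.py below is hypothetical.)
#   def setup(ctx):
#       @ctx.app.route('/ping')
#       def ping():
#           return '1'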
| 813 |
itertable/parsers/xls.py
|
wq/itertable
| 16 |
2024729
|
import xlrd
import datetime
import math
from .base import TableParser
class WorkbookParser(TableParser):
workbook = None
worksheet = None
sheet_name = 0
start_row = None
column_count = None
no_pickle_parser = ['workbook', 'worksheet']
binary = True
date_format = 'yyyy-mm-dd'
time_format = 'hh:mm:ss'
datetime_format = 'yyyy-mm-dd hh:mm:ss'
def parse(self):
self.parse_workbook()
if self.sheet_name is None:
self.data = [{'name': name, 'data': self.get_sheet_by_name(name)}
for name in self.sheet_names]
return
sheet_name = self.sheet_name
if isinstance(self.sheet_name, int):
sheet_name = self.sheet_names[sheet_name]
self.parse_worksheet(sheet_name)
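        # No explicit header row: fall back to a heuristic that scans rows up
        # to max_header_row and keeps the earliest row with the most filled cells.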
if self.header_row is None:
if self.start_row is not None:
self.header_row = self.start_row - 1
else:
self.column_count = 0
def checkval(cell):
if cell.value is not None and cell.value != '':
return True
return False
search_rows = min(len(self.worksheet) - 1, self.max_header_row)
for row in range(search_rows, -1, -1):
count = len(list(filter(checkval, self.worksheet[row])))
if count >= self.column_count:
self.column_count = count
self.header_row = row
if self.start_row is None:
self.start_row = self.header_row + 1
if self.field_names is None:
rows = self.worksheet[self.header_row:self.start_row]
self.field_names = [
str(c.value) or 'c%s' % i for i, c in enumerate(rows[0])
]
for row in rows[1:]:
for i, c in enumerate(row):
self.field_names[i] += "\n" + str(c.value)
seen_fields = set()
for i, field in enumerate(self.field_names):
if field in seen_fields:
field += str(i)
self.field_names[i] = field
seen_fields.add(field)
self.data = list(map(self.parse_row, self.worksheet[self.start_row:]))
self.extra_data = {}
if self.header_row > 0:
for r in range(0, self.header_row):
for c, cell in enumerate(self.worksheet[r]):
val = self.get_value(cell)
if val is not None and val != '':
self.extra_data.setdefault(r, {})
self.extra_data[r][c] = val
def parse_workbook(self):
raise NotImplementedError
@property
def sheet_names(self):
raise NotImplementedError
def get_sheet_by_name(self, name):
raise NotImplementedError
def parse_worksheet(self, name):
raise NotImplementedError
def parse_row(self, row):
raise NotImplementedError
def get_value(self, cell):
raise NotImplementedError
def dump(self, file=None):
if file is None:
file = self.file
write, close = self.open_worksheet(file)
for i, field in enumerate(self.field_names):
write(0, i, field)
for r, row in enumerate(self.data):
for c, field in enumerate(self.field_names):
write(r + 1, c, row[field])
close()
class ExcelParser(WorkbookParser):
def parse_workbook(self):
self.workbook = xlrd.open_workbook(file_contents=self.file.read())
@property
def sheet_names(self):
return self.workbook.sheet_names()
def get_sheet_by_name(self, name):
return self.workbook.sheet_by_name(name)
def parse_worksheet(self, name):
worksheet = self.get_sheet_by_name(name)
self.worksheet = [worksheet.row(i) for i in range(worksheet.nrows)]
def parse_row(self, row):
return {name: self.get_value(row[i])
for i, name in enumerate(self.get_field_names())
if i < len(row)}
def get_value(self, cell):
if cell.ctype == xlrd.XL_CELL_DATE:
time, date = math.modf(cell.value)
tpl = xlrd.xldate_as_tuple(cell.value, self.workbook.datemode)
if date and time:
return datetime.datetime(*tpl)
elif date:
return datetime.date(*tpl[0:3])
else:
return datetime.time(*tpl[3:6])
return cell.value
def calc_width(self, val):
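        # Rough per-character width heuristic for sizing spreadsheet columns.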
val = str(val) if val is not None else ""
size = 0
for c in val:
if c in ".,;:'\"iIlt1":
size += 0.5
elif c in 'MW':
size += 1.3
elif c.isupper():
size += 1.2
elif c.islower():
size += 1
else:
size += 1.1
return size
def open_worksheet(self, file):
if getattr(self, 'filename', '').endswith('.xls'):
return self._open_xls_worksheet(file)
else:
return self._open_xlsx_worksheet(file)
def _open_xls_worksheet(self, file):
import xlwt
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('Sheet 1')
formats = {
datetime.date: xlwt.Style.easyxf(
num_format_str=self.date_format,
),
datetime.time: xlwt.Style.easyxf(
num_format_str=self.time_format,
),
datetime.datetime: xlwt.Style.easyxf(
num_format_str=self.datetime_format,
),
'header': xlwt.Style.easyxf(
"font: bold on; borders: bottom thick;"
),
}
widths = {}
def write(r, c, val):
widths.setdefault(c, 0)
widths[c] = max(widths[c], self.calc_width(val))
fmt = formats.get(type(val))
if not fmt and r == 0:
fmt = formats['header']
if fmt:
worksheet.write(r, c, val, fmt)
else:
worksheet.write(r, c, val)
def close():
for c, width in widths.items():
worksheet.col(c).set_width(int(width * 256))
workbook.save(file)
return write, close
def _open_xlsx_worksheet(self, file):
import xlsxwriter
workbook = xlsxwriter.Workbook(file)
worksheet = workbook.add_worksheet()
formats = {
datetime.date: workbook.add_format({
'num_format': self.date_format,
}),
datetime.time: workbook.add_format({
'num_format': self.time_format,
}),
datetime.datetime: workbook.add_format({
'num_format': self.datetime_format,
}),
'header': workbook.add_format({
'bold': True,
'bottom': 2,
}),
}
widths = {}
def write(r, c, val):
widths.setdefault(c, 0)
widths[c] = max(widths[c], self.calc_width(val))
fmt = formats.get(type(val))
if fmt:
worksheet.write_datetime(r, c, val, fmt)
elif r == 0:
worksheet.write(r, c, val, formats['header'])
else:
worksheet.write(r, c, val)
def close():
for c, width in widths.items():
worksheet.set_column(c, c, width)
workbook.close()
return write, close
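# A hedged usage sketch; the constructor and `file`/`field_names` wiring come
# from TableParser (not shown), so the exact call signature is assumed:
#   with open('report.xls', 'rb') as f:
#       parser = ExcelParser(file=f)
#       parser.parse()
#       print(parser.field_names, parser.data[:2])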
| 7,667 |
comics/requests.py
|
SkimaniKings/comicsy
| 0 |
2024573
|
import requests
from .models import Superhero
url = 'https://akabab.github.io/superhero-api/api//all.json'
def get_superhero():
"""
    Consume the superhero API over HTTP and return a list of Superhero instances.
"""
response_list = requests.get(url).json()
superheroes_list = []
for sups in response_list:
        sup = Superhero(sups.get("images"), sups.get("biography"), sups.get("powerstats"), sups.get("connections"))
superheroes_list.append(sup)
return superheroes_list
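# A minimal usage sketch (the Superhero constructor fields mirror the call above):
#   heroes = get_superhero()
#   print(len(heroes))  # one Superhero per entry in the API's all.json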
| 498 |
cursoemvideo/python/exercicio/013(calculandoAumento).py
|
mateusjustino/cursos
| 0 |
2025597
|
n = float(input('Enter the current salary: '))
nNovoSalario = (n / 100) * 115
print('The salary with a 15% raise will be R$ {:.2f}'.format(nNovoSalario))
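# Worked example: a salary of 1000.00 gives (1000 / 100) * 115 = 1150.00.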
| 148 |