| max_stars_repo_path (string, 4–182 chars) | max_stars_repo_name (string, 6–116 chars) | max_stars_count (int64, 0–191k) | id (string, 7 chars) | content (string, 100–10k chars) | size (int64, 100–10k) |
|---|---|---|---|---|---|
examples/security/sha1-hmac.py
|
checkaayush/eve
| 3,122 |
2026341
|
# -*- coding: utf-8 -*-
"""
Auth-SHA1/HMAC
~~~~~~~~~~~~~~
Securing an Eve-powered API with Basic Authentication (RFC2617).
This script assumes that user accounts are stored in a MongoDB collection
('accounts'), and that passwords are stored as SHA1/HMAC hashes. All API
resources/methods will be secured unless they are made explicitly public
(by fiddling with some settings you can open one or more resources and/or
methods to public access; see the docs).
Since we are using werkzeug we don't need any extra import (werkzeug being
one of Flask/Eve's prerequisites).
Check out Eve at https://github.com/pyeve/eve
This snippet by <NAME> can be used freely for anything you like.
Consider it public domain.
"""
from eve import Eve
from eve.auth import BasicAuth
from werkzeug.security import check_password_hash
from settings_security import SETTINGS
class Sha1Auth(BasicAuth):
def check_auth(self, username, password, allowed_roles, resource, method):
# use Eve's own db driver; no additional connections/resources are used
accounts = app.data.driver.db["accounts"]
account = accounts.find_one({"username": username})
return account and check_password_hash(account["password"], password)
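# For reference, a minimal sketch of how a matching account document could be
# seeded (hypothetical database/collection names; werkzeug's
# generate_password_hash is the counterpart of check_password_hash used above):
#
# from werkzeug.security import generate_password_hash
# from pymongo import MongoClient
#
# accounts = MongoClient().eve_db["accounts"]
# accounts.insert_one({
#     "username": "alice",
#     "password": generate_password_hash("s3cret"),
# })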
if __name__ == "__main__":
app = Eve(auth=Sha1Auth, settings=SETTINGS)
app.run()
| 1,370 |
tap_deputy/discover.py
|
DeputyApp/stitch-stream-deputy
| 4 |
2026996
|
from singer.catalog import Catalog, CatalogEntry, Schema
RESOURCES = {
'Address': 'addresses',
'Category': 'categories',
'Comment': 'comments',
'Company': 'companies',
'CompanyPeriod': 'company_periods',
'Contact': 'contacts',
'Country': 'countries',
'CustomAppData': 'custom_app_data',
'CustomField': 'custom_fields',
'CustomFieldData': 'custom_field_data',
'Employee': 'employees',
'EmployeeAgreement': 'employee_agreements',
'EmployeeAgreementHistory': 'employee_agreement_history',
'EmployeeAppraisal': 'employee_appraisal',
'EmployeeAvailability': 'employee_availability',
'EmployeeHistory': 'employee_history',
'EmployeePaycycle': 'employee_paycycles',
'EmployeePaycycleReturn': 'employee_paycycle_returns',
'EmployeeRole': 'employee_roles',
'EmployeeSalaryOpunitCosting': 'employee_salary_opunit_costing',
'EmployeeWorkplace': 'employee_workplaces',
'EmploymentCondition': 'employeement_conditions',
'EmploymentContract': 'employee_contracts',
'EmploymentContractLeaveRules': 'employee_contract_leave_rules',
'Event': 'events',
'Geo': 'geo',
'Journal': 'journal',
'Kiosk': 'kiosks',
'Leave': 'leaves',
'LeaveAccrual': 'leave_accruals',
'LeavePayLine': 'leave_pay_lines',
'LeaveRules': 'leave_rules',
'Memo': 'memos',
'OperationalUnit': 'operational_units',
'PayPeriod': 'pay_periods',
'PayRules': 'pay_rules',
'PublicHoliday': 'public_holidays',
'Roster': 'rosters',
'RosterOpen': 'roster_opens',
'RosterSwap': 'roster_swaps',
'SalesData': 'sales_data',
'Schedule': 'schedules',
'SmsLog': 'sms_logs',
'State': 'states',
'StressProfile': 'stress_profiles',
'SystemUsageBalance': 'system_usage_balances',
'SystemUsageTracking': 'system_usage_tracking',
'Task': 'tasks',
'TaskGroup': 'task_groups',
'TaskGroupSetup': 'task_group_setups',
'TaskOpunitConfig': 'task_opunit_configs',
'TaskSetup': 'task_setups',
'Team': 'teams',
'Timesheet': 'timesheets',
'TimesheetPayReturn': 'timesheet_pay_returns',
'TrainingModule': 'training_modules',
'TrainingRecord': 'training_records',
'Webhook': 'webhooks'
}
TYPE_MAP = {
'Integer': 'integer',
'Float': 'number',
'VarChar': 'string',
'Blob': 'string',
'Bit': 'boolean',
'Time': 'string'
}
def get_schema(client, resource_name):
data = client.get(
'/api/v1/resource/{}/INFO'.format(resource_name),
endpoint='resource_info')
properties = {}
metadata = [
{
'breadcrumb': [],
'metadata': {
'tap-deputy.resource': resource_name
}
}
]
for field_name, field_type in data['fields'].items():
if field_type in ['Date', 'DateTime']:
json_schema = {
'type': ['null', 'string'],
'format': 'date-time'
}
else:
json_schema = {
'type': ['null', TYPE_MAP[field_type]]
}
properties[field_name] = json_schema
metadata.append({
'breadcrumb': ['properties', field_name],
'metadata': {
'inclusion': 'automatic' if field_name == 'Id' else 'available'
}
})
schema = {
'type': 'object',
'additionalProperties': False,
'properties': properties
}
return schema, metadata
def discover(client):
catalog = Catalog([])
for resource_name in RESOURCES.keys():
schema_dict, metadata = get_schema(client, resource_name)
schema = Schema.from_dict(schema_dict)
stream_name = RESOURCES[resource_name]
catalog.streams.append(CatalogEntry(
stream=stream_name,
tap_stream_id=stream_name,
key_properties=['Id'],
schema=schema,
metadata=metadata
))
return catalog
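# Example (sketch): in discover mode a tap typically writes the catalog to
# stdout; with singer-python's Catalog that would be `discover(client).dump()`,
# where `client` is assumed to expose a compatible `get(path, endpoint=...)`.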
| 3,976 |
E2b_withdrawResult.py
|
AngeloXD13/Simple_ATM_Interface
| 0 |
2026186
|
from PyQt5 import uic
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QApplication, QLabel, QPushButton
from X_logoutWindow import LogoutWindowClass
class WithdrawResultWindowClass(QMainWindow):
def __init__(self, windowData, amount):
super(WithdrawResultWindowClass, self).__init__()
self.windowData = windowData
self.account = None
self.account = self.windowData.accountDATA
self.phoneNumber = self.account.phonenumber
self.amount = None
self.amount = amount
print("amount", amount)
uic.loadUi("ui/WithdrawResultWindow.ui", self)
self.yes_btn = self.findChild(QPushButton, "yes_btn")
self.no_btn = self.findChild(QPushButton, "no_btn")
self.resultTitle_lbl = self.findChild(QLabel, "resultTitle_lbl")
self.result_lbl = self.findChild(QLabel, "result_lbl")
self.result_lbl2 = self.findChild(QLabel, "result_lbl_2")
self.result_lbl3 = self.findChild(QLabel, "result_lbl_3")
self.show()
self.yes_btn.clicked.connect(lambda: menu(self))
self.no_btn.clicked.connect(lambda : logout(self))
getcheckAvailablebalance(self)
def getcheckAvailablebalance(self):
#get updated available balance
from utils.DatabaseManager import selectData
account = selectData(self.account.phonenumber, 2)
self.available = account.availablebalance
# if the amount exceeds the available balance the withdrawal fails;
# otherwise it succeeds and the database is updated
print("self.available: ", self.available)
print("self.amount: ", self.amount)
if int(self.amount) <= int(self.available):
successAndUpdateDatabase(self)
else:
failedAndCancel(self, "Insufficient Balance")
def successAndUpdateDatabase(self):
print("successAndUpdateDatabase")
newavailableBalance = int(self.available) - int(self.amount)
from utils.DatabaseManager import updateAvailBalance
result = updateAvailBalance(self.account.phonenumber, newavailableBalance)
if result == True:
self.result_lbl.setText("Previous Available Balance: "+ str("₱ {:,.2f}".format(int(self.available))))
self.result_lbl2.setText("Amount you Withdraw: " + str("₱ {:,.2f}".format(int(self.amount))))
self.result_lbl3.setText("New Available Balance: " + str("₱ {:,.2f}".format(int(newavailableBalance))))
self.account.availablebalance = newavailableBalance
else:
failedAndCancel(self, "Database Error")
def failedAndCancel(self, reason):
print("failedAndCancel")
self.resultTitle_lbl.setText("CANCELED")
if reason == "Database Error":
self.result_lbl.setText("Database Error")
self.result_lbl2.setText("Please Try Again Later...")
self.result_lbl3.setText("Transaction Cancelled")
else:
self.result_lbl.setText("Insufficient Balance")
self.result_lbl2.setText("Previous Available Balance: "+ str("₱ {:,.2f}".format(int(self.available))))
self.result_lbl3.setText("Transaction Cancelled")
def logout(self):
self.ui = LogoutWindowClass(self.windowData)
self.destroy()
def menu(self):
self.destroy()
self = self.windowData.previousWindow
self.show()
| 3,236 |
SourceCodeOnly/src/visualization/visualize.py
|
VamshiMalthummeda/NewsClassifierSourceCode
| 0 |
2024658
|
import os
import pickle
import logging
import time
import multiprocessing as mp
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from src.features import PickledCorpusReader as pcr
from src.models import TextNormalizer as tn
from src.features import CorpusLoader as cl
from sklearn.base import BaseEstimator, TransformerMixin
from yellowbrick.text.freqdist import FreqDistVisualizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB,GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC,SVC
from sklearn.model_selection import GridSearchCV
from yellowbrick.classifier import ClassificationReport,ConfusionMatrix
class ModelError(Exception):
def __init__(self, expression, message):
self.expression = expression
self.message = message
def create_pipeline(estimator):
steps = [
('normalize', tn.TextNormalizer()),
('vectorize', TfidfVectorizer()),
('classifier',estimator)
]
return Pipeline(steps)
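# Example (sketch): wiring one of the classifiers imported above into the
# pipeline; the fit call assumes the X/y training data prepared in
# generate_reports():
#
# model = create_pipeline(LogisticRegression())
# model.fit(X_train_data, y_train_data)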
def on_result(result):
return result
def on_error(error_msg):
print(error_msg)
def get_classifier(model_path):
classifier_f = open(model_path,"rb")
classifier = pickle.load(classifier_f)
classifier_f.close()
return classifier
def generate_report(estimator,name,reports_path,counter,*data):
categories = data[0]
X_train_data = data[1]
y_train_data = data[2]
X_test_data = data[3]
y_test_data = data[4]
visualizer = None
visualizer = ClassificationReport(estimator, classes=categories)
visualizer.fit(X_train_data, y_train_data)
visualizer.score(X_test_data, y_test_data)
visualizer.set_title(name + "- Classification Report")
class_rep_path = os.path.join(reports_path,name + "-" + "Classification Report" + ".png")
visualizer.poof(outpath=class_rep_path)
visualizer.finalize()
def generate_freq_dist(reports_path,use_custom_sw,*data):
X_train_data = data[0]
y_train_data = data[1]
text_normalizer = tn.TextNormalizer(use_custom_sw=use_custom_sw)
corpus_data = text_normalizer.fit_transform(X_train_data,y_train_data)
tfidf_vect = TfidfVectorizer(max_df = 0.5,min_df = 0.1,smooth_idf = True,norm='l2',ngram_range=(1,1),sublinear_tf=True)
docs = tfidf_vect.fit_transform(corpus_data,y_train_data)
features = tfidf_vect.get_feature_names()
visualizer = None
visualizer = FreqDistVisualizer(features=features)
visualizer.fit(docs)
freq_rep_path = ''
if use_custom_sw == 0:
visualizer.set_title("Frequency Distribution Before SW Removal")
freq_rep_path = os.path.join(reports_path,"FreqDist_Before_SW_Removal.png")
else:
visualizer.set_title("Frequency Distribution After SW Removal")
freq_rep_path = os.path.join(reports_path,"FreqDist_After_SW_Removal.png")
visualizer.poof(outpath=freq_rep_path)
visualizer.finalize()
def generate_matrix(estimator,name,reports_path,counter,*data):
categories = data[0]
X_train_data = data[1]
y_train_data = data[2]
X_test_data = data[3]
y_test_data = data[4]
visualizer = None
visualizer = ConfusionMatrix(estimator, classes=categories)
visualizer.fit(X_train_data, y_train_data)
visualizer.score(X_test_data, y_test_data)
visualizer.set_title(name + "- Confusion Matrix")
conf_mat_path = os.path.join(reports_path,name + "-" + "Confusion Matrix" + ".png")
visualizer.poof(outpath=conf_mat_path)
visualizer.finalize()
def generate_reports(project_dir):
try:
logger = logging.getLogger(__name__)
load_dotenv(find_dotenv())
DOC_PATTERN = os.getenv('doc_pkl_pattern')
CAT_PATTERN = os.getenv('cat_pattern')
PROCESS_DIR_NAME = 'processed'
process_path = os.path.join(project_dir,'data',PROCESS_DIR_NAME)
test = os.path.join(process_path,'test')
train = os.path.join(process_path,'train')
models_path = os.path.join(project_dir,'models')
rep_path = os.path.join(project_dir,'reports/figures')
test_corpus = pcr.PickledCorpusReader(test,DOC_PATTERN, cat_pattern=CAT_PATTERN)
X_test_data = [list([doc]) for doc in test_corpus.docs()]
y_test_data = [test_corpus.categories(fileids=[fileid])[0] for fileid in test_corpus.fileids()]
trained_corpus = pcr.PickledCorpusReader(train,DOC_PATTERN, cat_pattern=CAT_PATTERN)
X_train_data = [list([doc]) for doc in trained_corpus.docs()]
y_train_data = [trained_corpus.categories(fileids=[fileid])[0] for fileid in trained_corpus.fileids()]
category_data = trained_corpus.categories()
# Make sure the models directory exists
if not os.path.exists(models_path):
raise ModelError(models_path,"Models does not exist at the specified path")
# Make sure the reports directory exists
if not os.path.exists(rep_path):
os.makedirs(rep_path)
p = mp.Process(target=generate_freq_dist,args=(rep_path,0,X_train_data,y_train_data))
p.start()
p.join()
p = mp.Process(target=generate_freq_dist,args=(rep_path,1,X_train_data,y_train_data))
p.start()
p.join()
ctr=0
for name in os.listdir(models_path):
fpath = os.path.join(models_path,name)
classifier = get_classifier(fpath)
clf_name,_ = name.split(".")
p = mp.Process(target=generate_report,args=(classifier,clf_name,rep_path,ctr,category_data,X_train_data,y_train_data,X_test_data,y_test_data))
p.start()
p.join()
p = mp.Process(target=generate_matrix,args=(classifier,clf_name,rep_path,ctr,category_data,X_train_data,y_train_data,X_test_data,y_test_data))
p.start()
p.join()
ctr+=1
except Exception as e:
logger.info("Error: {}".format(e))
def main():
LOG_NAME = "process.log"
project_dir = str(Path(__file__).resolve().parents[2])
log_path = os.path.join(project_dir,LOG_NAME)
log_fmt = '%(processName)-10s %(module)s %(asctime)s %(message)s'
logging.basicConfig(filename=log_path,level=logging.INFO, format=log_fmt)
generate_reports(project_dir)
| 6,547 |
merge.py
|
kunyuan/FeynCalculator
| 1 |
2026886
|
import numpy as np
rs=1.0
Lambda=1.0
Beta=25
############## 3D ##################################
kF=(9.0*np.pi/4.0)**(1.0/3.0)/rs #3D
###### Bare Green's function #########################
# Bubble=0.08871 # 3D, Beta=0.5, rs=1
# Bubble=0.0971916 #3D, Beta=10, rs=1
# Bubble=0.0971613 #3D, T=0.04Ef, rs=1
# Bubble= 0.097226 # 3D, zero temperature, rs=1
###### Fock dressed Green's function ###################
Bubble, Density, dMu2=0.088883, 0.2387, -0.2699 #3D, Beta=0.1, rs=1, Lambda=1.0
############## 2D ##################################
###### Bare Green's function #########################
# kF=np.sqrt(2.0)/rs #2D
# Bubble=0.11635 #2D, Beta=0.5, rs=1
# Bubble=0.15916 #2D, Beta=10, rs=1
folder="./Beta{0}_rs{1}_lambda{2}_freq/".format(Beta, rs, Lambda)
RawData=np.loadtxt(folder+"output.dat")
Order={}
OrderList=[1,2,3]
for order in OrderList:
Order[order]=RawData[RawData[:,0]==order]
Norm=Order[1][:,1]/Bubble
for order in OrderList:
Order[order][:,1]/=Norm
for order in OrderList:
print "Order {0}: {1}+-{2}".format(order, Order[order][:,1].mean(), Order[order][:,1].std()/np.sqrt(len(Order[order][:,1])))
print Order[order]
| 1,182 |
cogs/Gacha.py
|
seanrowland101/Ayesha-2.0
| 0 |
2025808
|
import discord
from discord.commands.commands import Option, OptionChoice
from discord.ext import commands, pages
import asyncpg
import json
import random
from Utilities import Checks, Vars, PlayerObject, AcolyteObject, ItemObject
from Utilities.Finances import Transaction
class SummonDropdown(discord.ui.Select):
def __init__(self, results : list, author_id : int):
self.results = results
self.author_id = author_id
options = [discord.SelectOption(label=results[i][0], value=str(i))
for i in range(len(results))]
super().__init__(options = options)
async def callback(self, interaction: discord.Interaction):
if interaction.user.id != self.author_id:
return
await interaction.response.edit_message(
embed=self.results[int(self.values[0])][1])
class Gacha(commands.Cog):
"""Spend rubidics and gold for random items"""
def __init__(self, bot):
self.bot = bot
self.rarities = None
self.int_rar_to_str = {
1 : "Common",
2 : "Uncommon",
3 : "Rare",
4 : "Epic",
5 : "Legendary"
}
self.armor_costs = {
"Cloth" : 2500,
"Wood" : 5000,
"Silk" : 8000,
"Leather" : 20000,
"Gambeson" : 25000,
"Bronze" : 50000,
"Ceramic Plate" : 70000,
"Chainmail" : 75000,
"Iron" : 100000
} # Be sure to change the OptionChoices in shop if changing this
# Get a list of all acolytes sorted by rarity
with open(Vars.ACOLYTE_LIST_PATH) as f:
acolyte_list = json.load(f)
self.rarities = {i:[] for i in range(1,6)}
for acolyte in acolyte_list:
self.rarities[acolyte_list[acolyte]['Rarity']].append(acolyte)
# EVENTS
@commands.Cog.listener()
async def on_ready(self):
print("Gacha is ready.")
# INVISIBLE
async def roll_acolyte(self, conn : asyncpg.Connection,
player : PlayerObject.Player,
rarity : int) -> discord.Embed:
"""Creates a random acolyte of the specified rarity.
Returns a tuple containing an informational string (for Dropdown Menu)
and an embed listing the acolyte's information.
"""
acolyte_name = random.choice(self.rarities[rarity])
acolyte = await AcolyteObject.create_acolyte(
conn, player.disc_id, acolyte_name)
embed=discord.Embed(
title=(
f"{acolyte.acolyte_name} ({acolyte.gen_dict['Rarity']}⭐) has "
f"entered the tavern!"),
color=Vars.ABLUE)
embed.set_thumbnail(url=acolyte.gen_dict['Image'])
embed.add_field(name="Attack",
value=f"{acolyte.gen_dict['Attack']} + {acolyte.gen_dict['Scale']}")
embed.add_field(name="Crit", value = acolyte.gen_dict['Crit'])
embed.add_field(name="HP", value=acolyte.gen_dict['HP'])
embed.add_field(name="Effect",
value=(
f"{acolyte.gen_dict['Effect']}\n {acolyte.acolyte_name} uses `"
f"{acolyte.gen_dict['Mat']}` to level up."))
return (f"{rarity}⭐ Acolyte: {acolyte_name}", embed)
# COMMANDS
@commands.slash_command(guild_ids=[762118688567984151])
@commands.check(Checks.is_player)
async def summon(self, ctx,
pulls : Option(int,
description="Do up to 10 pull at once!",
required=False,
min_value=1,
max_value=10,
default=1)):
"""Spend 1 rubidics to get a random acolyte or weapon."""
async with self.bot.db.acquire() as conn:
player = await PlayerObject.get_player_by_id(conn, ctx.author.id)
if player.rubidics < pulls:
raise Checks.NotEnoughResources("rubidics",
pulls, player.rubidics)
# This essentially calculates the results (type and rarity)
r_types = random.choices(
population=["weapon", "acolyte"],
weights=[75, 25],
k=pulls)
r_rarities = random.choices(
population=range(1,6),
weights=[1, 60, 35, 3, 1],
k=pulls)
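# The rarity weights sum to 100, so they read directly as percentages:
# 1% common, 60% uncommon, 35% rare, 3% epic, 1% legendary.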
# Simulate the pulls by creating new objects
# embed_list = []
result_list = []
# In order to show summons in a dropdown menu instead of a paginator
# we need another way to create the labels for the dropdown choices
# necessitating the use of a list of tuples that contain both this
# descriptive name and the embed that will be shown.
# A dictionary may be clearer for future (TODO), otherwise note the
# placement of the string at index 0 and the embed at index 1
# result_list[SUMMON NUMBER][0 IF STR ELSE 1]
for i in range(pulls):
if player.pity_counter >= 79:
# Give 5 star acolyte
result_list.append(await self.roll_acolyte(conn, player, 5))
player.pity_counter = 0
continue
# Create a random new weapon or acolyte
# Write an embed for this and add it to the list
if r_types[i] == "acolyte":
result_list.append(await self.roll_acolyte(
conn, player, r_rarities[i]))
else:
weapon = await ItemObject.create_weapon(
conn=conn,
user_id=player.disc_id,
rarity=self.int_rar_to_str[r_rarities[i]])
embed=discord.Embed(
title=f"You received {weapon.name} ({weapon.rarity})",
color=Vars.ABLUE)
embed.add_field(name="Type", value=weapon.type)
embed.add_field(name="Attack", value=weapon.attack)
embed.add_field(name="Crit", value=weapon.crit)
result_list.append(
(f"{weapon.rarity} {weapon.type}: {weapon.name}",
embed))
if r_rarities[i] == 5:
player.pity_counter = 0
else:
player.pity_counter += 1 # Temp change, not stored in db
# Summons done, tell player their remaining balance in footer
for result in result_list:
result[1].set_footer(text=(
f"You have {player.rubidics-pulls} rubidics. You will "
f"receive a 5-star acolyte in {80-player.pity_counter} "
f"summons."))
# Update player's rubidics and pity counter
await player.give_rubidics(conn, pulls*-1)
await player.set_pity_counter(conn, player.pity_counter)
# Paginate embeds if pulls > 1 and print them
if len(result_list) > 1:
view = discord.ui.View()
view.add_item(SummonDropdown(result_list, player.disc_id))
await ctx.respond(embed=result_list[0][1], view=view)
else:
await ctx.respond(embed=result_list[0][1])
@commands.slash_command(guild_ids=[762118688567984151])
@commands.check(Checks.is_player)
async def shop(self, ctx,
armor : Option(str,
description="The type of armor you are buying",
choices=[OptionChoice(t) for t in Vars.ARMOR_DEFENSE]),
material : Option(str,
description="The material of the armor you want",
choices=[
OptionChoice("Cloth Armor (2,500 gold)", "Cloth"),
OptionChoice("Wooden Armor (5,000 gold)", "Wood"),
OptionChoice("Silk Armor (8,000 gold)", "Silk"),
OptionChoice("Leather Armor (20,000 gold)", "Leather"),
OptionChoice("Gambeson Armor (25,000 gold)", "Gambeson"),
OptionChoice("Bronze Armor (50,000 gold)", "Bronze"),
OptionChoice("Ceramic Plate Armor (70,000 gold)",
"Ceramic Plate"),
OptionChoice("Chainmail Armor (75,000 gold)", "Chainmail"),
OptionChoice("Iron Armor (100,000 gold)", "Iron")])):
"""Exchange your extra gold for some other stuff!"""
async with self.bot.db.acquire() as conn:
player = await PlayerObject.get_player_by_id(conn, ctx.author.id)
purchase = await Transaction.calc_cost(
conn, player, self.armor_costs[material])
if purchase.paying_price > player.gold:
raise Checks.NotEnoughGold(purchase.paying_price, player.gold)
item = await ItemObject.create_armor(conn, player.disc_id, armor, material)
print_tax = await purchase.log_transaction(conn, "purchase")
await ctx.respond((
f"You purchased `{item.id}`: {item.name}. Use the `/equip` "
f"command to equip it!\n"
f"This purchase cost `{purchase.subtotal}` gold. {print_tax}"))
def setup(bot):
bot.add_cog(Gacha(bot))
| 9,586 |
src/controls.py
|
nsde/intellicraft
| 1 |
2026011
|
import time
import pynput
import random
import pyautogui
mouse = pynput.mouse.Controller()
def hotbar():
return \
{
# 'name': slot,
'sword': 1,
}
# HUGE thanks to https://stackoverflow.com/a/58043888
def look(x: int, y: int, duration: float=1):
t = int(duration*60)  # total number of 1/60 s steps; range() needs an int
for i in range(t):
# triangular ramp: accelerate over the first half, decelerate over the second
if i < t/2:
h = i
else:
h = t - i
mouse.move(h*x, h*y)
time.sleep(1/60)
def random_look():
return random.randint(-1, 1)/20
def look_around():
look(random_look(), random_look(), duration=1)
def wait_attack(sword: bool=True):
t = 1.25
if sword:
t = 0.625
time.sleep(t)
return t
def press(left=True):
pyautogui.click(button=pyautogui.LEFT if left else pyautogui.RIGHT)
def slot(name=None, number=None):
if name:
number = hotbar()[name]
pyautogui.press(str(number))
def attack(critical=True):
if critical:
pyautogui.press('space')
time.sleep(0.6)
press()
| 1,028 |
experiments/lora/lora_client.py
|
skyforcetw/smart-watt-hour-meter
| 0 |
2026574
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 10 14:43:05 2019
@author: skyforce.shen
"""
import RPi.GPIO as GPIO  # import the RPi.GPIO library
import serial
import time
port = '/dev/serial0'
baud = 4800
pin_aux = 18
pin_md1 = 17
pin_md0 = 27
def setup():
global ser  # make the serial handle visible to loop() and the cleanup code
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin_aux, GPIO.IN)
GPIO.setup(pin_md1, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(pin_md0, GPIO.OUT, initial=GPIO.LOW)
# open the serial port
ser = serial.Serial(port, baud)
def loop():
while True:
# number of bytes waiting in the receive buffer
count = ser.inWaiting()
if count != 0:
# read the content and echo it back
recv = ser.read(count)
ser.write(recv)
# flush the receive buffer
ser.flushInput()
# a short software delay is needed here
time.sleep(0.1)
def main():
setup()
loop()
if __name__ == '__main__':
ser = None
try:
main()
except KeyboardInterrupt:
if ser is not None:
ser.close()
| 906 |
server/libs/webserver/executer.py
|
teemosauce/rpi-cube
| 195 |
2026627
|
from libs.webserver.blueprints.authentication_executer import AuthenticationExecuter
from libs.webserver.blueprints.device_executer import DeviceExecuter
from libs.webserver.blueprints.device_settings_executer import DeviceSettingsExecuter
from libs.webserver.blueprints.effect_executer import EffectExecuter
from libs.webserver.blueprints.effect_settings_executer import EffectSettingsExecuter
from libs.webserver.blueprints.general_executer import GeneralExecuter
from libs.webserver.blueprints.general_settings_executer import GeneralSettingsExecuter
from libs.webserver.blueprints.system_info_executer import SystemInfoExecuter
from libs.webserver.blueprints.microphone_settings_executer import MicrophoneSettingsExecuter
import logging
class Executer():
def __init__(self, config_lock, notification_queue_in, notification_queue_out, effects_queue, py_audio):
self.logger = logging.getLogger(__name__)
self.authentication_executer = AuthenticationExecuter(
config_lock, notification_queue_in, notification_queue_out, effects_queue, py_audio)
self.device_executer = DeviceExecuter(
config_lock, notification_queue_in, notification_queue_out, effects_queue, py_audio)
self.device_settings_executer = DeviceSettingsExecuter(
config_lock, notification_queue_in, notification_queue_out, effects_queue, py_audio)
self.effect_executer = EffectExecuter(
config_lock, notification_queue_in, notification_queue_out, effects_queue, py_audio)
self.effect_settings_executer = EffectSettingsExecuter(
config_lock, notification_queue_in, notification_queue_out, effects_queue, py_audio)
self.general_executer = GeneralExecuter(
config_lock, notification_queue_in, notification_queue_out, effects_queue, py_audio)
self.general_settings_executer = GeneralSettingsExecuter(
config_lock, notification_queue_in, notification_queue_out, effects_queue, py_audio)
self.system_info_executer = SystemInfoExecuter(
config_lock, notification_queue_in, notification_queue_out, effects_queue, py_audio)
self.microphone_settings_executer = MicrophoneSettingsExecuter(
config_lock, notification_queue_in, notification_queue_out, effects_queue, py_audio)
Executer.instance = self
| 2,357 |
Module_02/ex00/ft_map.py
|
CristinaFdezBornay/PythonPiscine
| 1 |
2026915
|
def function_map(function_to_apply, iterable):
"""
Map the function to all elements of the iterable.
"""
for i in iterable:
yield function_to_apply(i)
def ft_map(function_to_apply, iterable):
"""Map the function to all elements of the iterable.
Args:
function_to_apply: a function taking an iterable.
iterable: an iterable object (list, tuple, iterator).
Returns:
A generator applying function_to_apply to each element of iterable.
Raises:
TypeError: if function_to_apply is not callable or iterable is not
iterable.
"""
if not callable(function_to_apply):
raise TypeError("'{}' object is not callable".format(type(function_to_apply).__name__))
elif not hasattr(iterable, '__iter__'):
raise TypeError("'{}' object is not iterable".format(type(iterable).__name__))
else:
return function_map(function_to_apply, iterable)
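# Example usage (sketch):
# >>> list(ft_map(lambda x: x * 2, [1, 2, 3]))
# [2, 4, 6]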
| 831 |
pamqp/heartbeat.py
|
annuupadhyayPS/pamqp
| 38 |
2026713
|
# -*- encoding: utf-8 -*-
"""
AMQP Heartbeat Frame, used to create new Heartbeat frames for sending to a peer
"""
import struct
from pamqp import constants
class Heartbeat(object):
"""Heartbeat frame object mapping class. AMQP Heartbeat frames are mapped
on to this class for a common access structure to the attributes/data
values.
"""
name: str = 'Heartbeat'
value = struct.pack('>BHI', constants.FRAME_HEARTBEAT, 0, 0) + \
constants.FRAME_END_CHAR
@classmethod
def marshal(cls) -> bytes:
"""Return the binary frame content"""
return cls.value
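# For reference: with FRAME_HEARTBEAT == 8 and the AMQP 0-9-1 frame-end octet
# 0xCE, marshal() yields b'\x08\x00\x00\x00\x00\x00\x00\xce' (frame type,
# channel 0, zero payload size, frame-end marker).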
| 608 |
odoo-13.0/venv/lib/python3.8/site-packages/stdnum/by/unp.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
| 0 |
2026925
|
# unp.py - functions for handling Belarusian UNP numbers
# coding: utf-8
#
# Copyright (C) 2020 <NAME>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""УНП, UNP (Учетный номер плательщика, the Belarus VAT number).
The УНП (UNP) or Учетный номер плательщика (Uchetniy nomer platel'shika,
Payer account number) is issued to organisations and individuals for tax
purposes. The number consists of 9 digits (numeric for organisations,
alphanumeric for individuals) and contains a region identifier, a serial per
region and a check digit.
More information:
* https://be.wikipedia.org/wiki/Уліковы_нумар_плацельшчыка
* http://pravo.levonevsky.org/bazaby09/sbor37/text37892/index3.htm
>>> validate('200988541')
'200988541'
>>> validate('УНП MA1953684')
'MA1953684'
>>> validate('200988542')
Traceback (most recent call last):
...
InvalidChecksum: ...
"""
from stdnum.exceptions import *
from stdnum.util import clean, isdigits, to_unicode
# Mapping of Cyrillic letters to Latin letters
_cyrillic_to_latin = dict(zip(
u'АВЕКМНОРСТ',
u'ABEKMHOPCT',
))
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' ').upper().strip()
for prefix in ('УНП', u'УНП', 'UNP', u'UNP'):
if type(number) == type(prefix) and number.startswith(prefix):
number = number[len(prefix):]
# Replace Cyrillic letters with Latin letters
cleaned = ''.join(_cyrillic_to_latin.get(x, x) for x in to_unicode(number))
if type(cleaned) != type(number): # pragma: no cover (Python2 only)
cleaned = cleaned.encode('utf-8')
return cleaned
def calc_check_digit(number):
"""Calculate the check digit for the number."""
number = compact(number)
alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
weights = (29, 23, 19, 17, 13, 7, 5, 3)
if not isdigits(number):
number = number[0] + str('ABCEHKMOPT'.index(number[1])) + number[2:]
c = sum(w * alphabet.index(n) for w, n in zip(weights, number)) % 11
if c > 9:
raise InvalidChecksum()
return str(c)
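# Worked example: for '200988541' the weighted sum over the first eight digits
# is 29*2 + 23*0 + 19*0 + 17*9 + 13*8 + 7*8 + 5*5 + 3*4 = 408, and
# 408 % 11 = 1, which matches the final digit.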
def validate(number):
"""Check if the number is a valid number. This checks the length,
formatting and check digit."""
number = compact(number)
if len(number) != 9:
raise InvalidLength()
if not isdigits(number[2:]):
raise InvalidFormat()
if not isdigits(number[:2]) and not all(x in 'ABCEHKMOPT' for x in number[:2]):
raise InvalidFormat()
if number[0] not in '1234567ABCEHKM':
raise InvalidComponent()
if number[-1] != calc_check_digit(number):
raise InvalidChecksum()
return number
def is_valid(number):
"""Check if the number is a valid number."""
try:
return bool(validate(number))
except ValidationError:
return False
def check_nalog(number, timeout=30): # pragma: no cover (not part of normal test suite)
"""Retrieve registration information from the portal.nalog.gov.by web site.
This basically returns the JSON response from the web service as a dict.
Will return ``None`` if the number is invalid or unknown.
"""
# this function isn't automatically tested because it would require
# network access for the tests and unnecessarily load the web service
import requests
from pkg_resources import resource_filename
# Since the nalog.gov.by web site currently provides an incomplete
# certificate chain, we provide our own.
certificate = resource_filename(__name__, 'portal.nalog.gov.by.crt')
response = requests.get(
'https://www.portal.nalog.gov.by/grp/getData',
params={
'unp': compact(number),
'charset': 'UTF-8',
'type': 'json'},
timeout=timeout,
verify=certificate)
if response.ok:
return response.json()['ROW']
| 4,607 |
tests/contracts/root_chain/test_submit_block.py
|
DavidKnott/plasma-contracts
| 0 |
2027116
|
import pytest
from ethereum.tools.tester import TransactionFailed
def test_submit_block_valid_key_should_succeed(ethtester, testlang):
submitter = testlang.accounts[0]
assert testlang.root_chain.currentChildBlock() == 1000
blknum = testlang.submit_block([], submitter)
block_info = testlang.root_chain.blocks(1000)
assert block_info[0] == testlang.child_chain.get_block(blknum).root
assert block_info[1] == ethtester.chain.head_state.timestamp
assert testlang.root_chain.currentChildBlock() == 2000
def test_submit_block_invalid_key_should_fail(testlang):
submitter = testlang.accounts[1]
with pytest.raises(TransactionFailed):
testlang.submit_block([], submitter)
| 715 |
meg_runtime/ui/webpanel.py
|
MultimediaExtensibleGit/Runtime
| 0 |
2022633
|
from PyQt5 import QtCore, QtWebEngineWidgets
from meg_runtime.ui.basepanel import BasePanel
from meg_runtime.app import App
class WebPanel(BasePanel):
"""HTML web view panel for URL."""
def __init__(self, url, **kwargs):
"""WebPanel constructor."""
self._url = url
super().__init__(**kwargs)
def get_title(self):
"""Get the title of this panel."""
title = '' if self._widgets is None else self._widgets.title()
if not title:
title = 'Loading...' if self._url is None else self._url
return title
def get_status(self):
"""Get the status of this panel."""
return '' if self._url is None else self._url
def get_icon(self):
"""Get the icon image of this panel."""
return None if self._widgets is None else self._widgets.icon()
def on_load(self):
"""Load dynamic elements within the panel."""
self._widgets = QtWebEngineWidgets.QWebEngineView()
self._widgets.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self._widgets.load(QtCore.QUrl(self._url))
self._widgets.iconChanged.connect(self._update_title)
self._widgets.titleChanged.connect(self._update_title)
def _update_title(self, title):
App.get_window().set_title(self)
| 1,329 |
contrib/tools/templates/extensions/extension/extension.py
|
davidt/reviewboard
| 1 |
2025821
|
# {{extension_name}} Extension for Review Board.
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include
from reviewboard.extensions.base import Extension
class {{class_name}}(Extension):
metadata = {
'Name': '{{extension_name}}',
'Summary': 'Describe your extension here.',
}
{%- if is_configurable %}
is_configurable = True
{%- endif %}
def initialize(self):
# Your extension initialization is done here.
pass
| 530 |
venv/lib/python3.6/site-packages/ansible/__init__.py
|
usegalaxy-no/usegalaxy
| 1 |
2026612
|
# (c) 2012-2014, <NAME> <<EMAIL>>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# make vendored top-level modules accessible EARLY
import ansible._vendor
# patch Jinja2 >= 3.0 for backwards compatibility
try:
import sys as _sys
from jinja2.filters import pass_context as _passctx, pass_environment as _passenv, pass_eval_context as _passevalctx
_mod = _sys.modules['jinja2.filters']
_mod.contextfilter = _passctx
_mod.environmentfilter = _passenv
_mod.evalcontextfilter = _passevalctx
except ImportError:
_sys = None
# Note: Do not add any code to this file. The ansible module may be
# a namespace package when using Ansible-2.1+ Anything in this file may not be
# available if one of the other packages in the namespace is loaded first.
#
# This is for backwards compat. Code should be ported to get these from
# ansible.release instead of from here.
from ansible.release import __version__, __author__
| 1,666 |
AtCoder_Virtual_Contest/prd_xxx_20200805 copy/c.py
|
KATO-Hiro/AtCoder
| 2 |
2025897
|
# -*- coding: utf-8 -*-
def main():
n = int(input())
a = [int(input()) for _ in range(n)]
ans = 0
if a[0] > 0:
print(-1)
exit()
# Key insight:
# - Compare each pair of adjacent squares:
#   when right - left = 1, add 1 to the answer;
#   when the two values are equal, add that value to the answer.
# - If a[0] is nonzero, or adjacent squares differ by 2 or more, the
#   sequence is impossible.
# - A direct implementation of the above got 10 cases WA; handling the
#   impossible cases first keeps the remaining conditions simple.
# See:
# https://atcoder.jp/contests/agc024/submissions/15705631
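# Trace: a = [0, 1, 1, 2] gives 1 (rise) + 1 (equal pair adds its value)
# + 1 (rise) = 3.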
for pre, cur in zip(a, a[1:]):
if pre + 1 < cur:
print(-1)
exit()
if pre + 1 == cur:
ans += 1
else:
ans += cur
print(ans)
if __name__ == '__main__':
main()
| 733 |
A/ChoosingTeams.py
|
shukkkur/hello-world
| 11 |
2027096
|
__author__ = 'shukkkur'
'''
https://codeforces.com/problemset/problem/432/A
A. Choosing Teams
'''
n, k = map(int, input().split())
nums = list(map(int, input().split()))
count = 0
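# A student can join a team only if num + k <= 5, i.e. num <= 5 - k;
# every three eligible students form one more team.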
for num in nums:
if num <= 5 - k:
count += 1
print(count//3)
| 279 |
Tk_SOLUZION_Client.py
|
emowen4/InfoFlow
| 2 |
2026205
|
#!/usr/bin/python3
"""Tk_SOLUZION_Client.py
This file implements a simple "SOLUZION" client that
permits a user ("problem solver") to explore a search tree
for a suitably-formulated problem. The user only has to
input single-character commands to control the search.
Output is purely textual, and thus the complexity of a
graphical interface is avoided.
This client runs standalone -- no server connection.
It thus provides a bare-bones means of testing a problem
formulation.
Tk is the graphics and GUI Toolkit that ships with Python.
This client program uses Tk only for its graphics, setting up
a graphics window that is used for the display of each state
of the problem-solution process.
To take advantage of this, the problem formulation file should
check to see if the global USE_TK_GRAPHICS is True, and if so, it
should import a visualization file with a name similar to:
Missionaries_Array_VIS_FOR_TK.py.
One technical challenge in creating this client is that Tk graphics
requires that the main execution thread be devoted to Tk,
which means that a normal text-input loop cannot easily be
sync'd with Tk. The solution is to use a separate thread for
the text loop and have it make calls re-draw the Tk graphic.
Tk still runs the mainloop method in the main thread; it is there to
handle any GUI events (there are none in this program) and simply to
keep the graphics window shown.
If we don't call the mainloop method, the Tk graphic window
will not show up until the rest of the program completely
finishes, which is useless. So there is a separate thread
here for the user interaction loop.
Status: Started on Aug. 2.
Aug. 3. Basic array graphics is working. But now we
need the strings and advanced options.
Need example file Missionaries_Array_VIS_FOR_TK.py.
Need code to display a color array, with defaults if anything
is not provided.
Need code to display a corresponding string array.
consider options to include column headers, footers, and
row titles on left and right.
Add caption feature.
The file for these features: show_state_array.py
----
PURPOSE OF THIS MODULE:
This module supports what we can call "interactive state
space search". Whereas traditional search algorithms in the
context of artificial intelligence work completely automatically,
this module lets the user make the moves. It provides support
to the user in terms of computing new states, displaying that
portion of the state space that the user has embodied, and
providing controls to permit the user to adapt the presentation
to his or her needs. This type of tool could ultimately be a
powerful problem solving tool, useful in several different
modes of use: interactive processing of individual objects,
programming by demonstration (the path from the root to any
other node in the state space represents a way of processing
any object similar in structure to that of the root object.)
"""
# The following line is used in the Tk_SOLUZION_Client and the IDLE_Text_SOLUZION_Client.
problem_name = 'InfoFlow'
def client_mainloop():
print(TITLE)
print(PROBLEM.PROBLEM_NAME+"; "+PROBLEM.PROBLEM_VERSION)
global STEP, DEPTH, OPERATORS, CURRENT_STATE, STATE_STACK
CURRENT_STATE = PROBLEM.copy_state(PROBLEM.INITIAL_STATE)
STATE_STACK = [CURRENT_STATE]
STEP = 0
DEPTH = 0
PROBLEM.render_state(CURRENT_STATE)
while(True):
print("\nStep "+str(STEP)+", Depth "+str(DEPTH))
print("CURRENT_STATE = "+str(CURRENT_STATE))
if PROBLEM.goal_test(CURRENT_STATE):
print('''CONGRATULATIONS!
You have solved the problem by reaching a goal state.
Do you wish to continue exploring?
''')
answer = input("Y or N? >> ")
if answer=="Y" or answer=="y": print("OK, continue")
else: return
applicability_vector = get_applicability_vector(CURRENT_STATE)
#print("applicability_vector = "+str(applicability_vector))
for i in range(len(OPERATORS)):
if applicability_vector[i]:
print(str(i)+": "+OPERATORS[i].name)
command = input("Enter command: 0, 1, 2, etc. for operator; B-back; H-help; Q-quit. >> ")
if command=="B" or command=="b":
if len(STATE_STACK)>1:
STATE_STACK.pop()
DEPTH -= 1
STEP += 1
else:
print("You're already back at the initial state.")
continue
CURRENT_STATE = STATE_STACK[-1]
PROBLEM.render_state(CURRENT_STATE)
continue
if command=="H" or command=="h": show_instructions(); continue
if command=="Q" or command=="q": break
if command=="": continue
try:
i = int(command)
except:
print("Unknown command or bad operator number.")
continue
print("Operator "+str(i)+" selected.")
if i<0 or i>= len(OPERATORS):
print("There is no operator with number "+str(i))
continue
if applicability_vector[i]:
CURRENT_STATE = OPERATORS[i].apply(CURRENT_STATE)
STATE_STACK.append(CURRENT_STATE)
PROBLEM.render_state(CURRENT_STATE)
DEPTH += 1
STEP += 1
continue
else:
print("Operator "+str(i)+" is not applicable to the current state.")
continue
#print("Operator "+command+" not yet supported.")
def get_applicability_vector(s):
#print("OPERATORS: "+str(OPERATORS))
return [op.is_applicable(s) for op in OPERATORS]
def exit_client():
print("Terminating Text_SOLUZION_Client session.")
log("Exiting")
exit()
def show_instructions():
print('''\nINSTRUCTIONS:\n
The current state of your problem session represents where you
are in the problem-solving process. You can try to progress
forward by applying an operator to change the state.
To do this, type the number of an applicable operator.
The program shows you a list of what operators are
applicable in the current state.
You can also go backwards (undoing a previous step)
by typing 'B'.
If you reach a goal state, you have solved the problem,
and the computer will usually tell you that, but it depends
on what kind of problem you are solving.''')
def apply_one_op():
"""Populate a popup menu with the names of currently applicable
operators, and let the user choose which one to apply."""
currently_applicable_ops = applicable_ops(CURRENT_STATE)
#print "Applicable operators: ",\
# map(lambda o: o.name, currently_applicable_ops)
print("Now need to apply the op")
def applicable_ops(s):
"""Returns the subset of OPERATORS whose preconditions are
satisfied by the state s."""
return [o for o in OPERATORS if o.is_applicable(s)]
import sys, importlib.util
# Get the PROBLEM name from the command-line arguments
if len(sys.argv)<2:
""" The following few lines go with the LINUX version of the text client.
print('''
Usage:
./IDLE_Text_SOLUZION_Client <PROBLEM NAME>
For example:
./IDLE_Text_SOLUZION_Client Missionaries
''')
exit(1)
"""
sys.argv = ['Tk_SOLUZION_Client.py', problem_name] # IDLE and Tk version only.
# Sets up sys.argv as if it were coming in on a Linux command line.
problem_name = sys.argv[1]
print("problem_name = "+problem_name)
try:
spec = importlib.util.spec_from_file_location(problem_name, problem_name+".py")
PROBLEM = importlib.util.module_from_spec(spec)
spec.loader.exec_module(PROBLEM)
except Exception as e:
print(e)
raise e
exit(1)
try:
spec = importlib.util.spec_from_file_location(problem_name+'_Array_VIS_FOR_TK',
problem_name+'_Array_VIS_FOR_TK.py')
VIS = importlib.util.module_from_spec(spec)
spec.loader.exec_module(VIS)
print("Using TK vis routine")
PROBLEM.render_state = VIS.render_state
VIS.initialize_vis()
except Exception as e:
print(e)
raise e
exit(1)
OPERATORS=PROBLEM.OPERATORS
STATE_STACK = []
TITLE="Tk_SOLUZION_Client (Version 0-1)"
import threading
class Client(threading.Thread):
def __init__(self, tk_root):
self.root = tk_root
threading.Thread.__init__(self)
self.start()
def run(self):
client_mainloop()
self.root.quit()
exit(0)
#self.root.update()
# The following is only executed if this module is being run as the main
# program, rather than imported from another one.
if __name__ == '__main__':
import show_state_array
client = Client(show_state_array.STATE_WINDOW)
show_state_array.STATE_WINDOW.mainloop()
print("The session is finished.")
| 8,496 |
src/camera_calibration.py
|
pmh47/textured-mesh-gen
| 30 |
2026301
|
import numpy as np
import tensorflow as tf
import dirt.projection
def get_vanishing_point(first_line, second_line):
# Based on apartment-sketching; may not be optimal here!
# Derived using Mathematica's Eliminate & Solve on the vector equations
((a1x, a1y), (a2x, a2y)) = first_line
((b1x, b1y), (b2x, b2y)) = second_line
epsilon = 1.e-9
if a1y == a2y and b1y == b2y:
return None
if a1x == a2x:
if b1x == b2x:
return None
# The elimination we use below isn't valid in this case
beta = (a1x - b1x) / (b2x - b1x)
alpha = (beta * (b2y - b1y) + b1y - a1y) / (a2y - a1y)
else:
beta = (-a2y * b1x + a1y * (-a2x + b1x) + a1x * (a2y - b1y) + a2x * b1y) / ((a1y - a2y) * (b1x - b2x) - (a1x - a2x) * (b1y - b2y) + epsilon)
alpha = (beta * (b2x - b1x) + b1x - a1x) / (a2x - a1x + epsilon)
return np.float32([a1x + alpha * (a2x - a1x), a1y + alpha * (a2y - a1y)])
def get_nearest_point(segment, point):
# Based on apartment-sketching!
# Returns the nearest point on the infinite line through segment[0] & segment[1], to point
start, end = segment
normaliser = np.linalg.norm(end - start)
direction = (end - start) / normaliser
alpha = np.dot(point - start, direction) / normaliser
return start + alpha * (end - start)
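# Sanity check (sketch): the nearest point on the line through (0, 0) and
# (2, 0) to the point (1, 1) is (1, 0):
# get_nearest_point((np.float32([0, 0]), np.float32([2, 0])), np.float32([1, 1]))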
def get_camera_matrices_from_vanishing_points(first_vp, second_vp, image_size):
# See https://annals-csis.org/proceedings/2012/pliks/110.pdf and http://ksimek.github.io/2012/08/14/decompose/ [1]
# first_vp & second_vp are batched x,y coordinates of pairs of parallel lines, in pixel space with the top-left corner as the origin
# image_size is indexed by x/y
# This is a 'traditional' calibration, not following OpenGL conventions, except that the camera is assumed to look along
# the *negative* z-axis, i.e. it's a right-handed coordinate-system
# The +ve world-x-axis will point from the camera-centre towards V1; the -ve world z-axis will point towards V2
# The resulting matrices are assumed to *left*-multiply vectors, i.e. are indexed as out, in
# Translate the vanishing points to be relative to the image centre, with y increasing as we move up
V1 = (first_vp - image_size / 2.) * [1., -1.]
V2 = (second_vp - image_size / 2.) * [1., -1.]
# Calculate the focal length
Vi = get_nearest_point([V1, V2], (0, 0)) # in centred image space, the nearest point to the image-centre (0, 0) that lies on the line between the two vanishing points
Oi_Vi_sq = Vi[0] * Vi[0] + Vi[1] * Vi[1]
Oc_Vi_sq = np.linalg.norm(Vi - V1) * np.linalg.norm(V2 - Vi)
if Oc_Vi_sq < Oi_Vi_sq:
raise ValueError
f = np.sqrt(Oc_Vi_sq - Oi_Vi_sq) # focal length measured in pixels
# print 'estimated fov = {:.1f}deg'.format(2 * np.arctan(0.5 * image_size[0] / f) * 180 / np.pi)
K = np.diag([f, f, 1.])
# Calculate the world --> camera rotation matrix, which is made of the direction vectors Xc, Yc & Zc in camera space of the world axes
Oc_V1 = np.concatenate([V1, [-f]]) # negation of f is because camera looks along negative-z
Oc_V2 = np.concatenate([V2, [-f]])
Xc = Oc_V1 / np.linalg.norm(Oc_V1) # thus, physical lines pointing at V1 are parallel with the world-x-axis...
Zc = Oc_V2 / np.linalg.norm(Oc_V2) # ...and physical lines pointing at V2 are parallel with the world-z-axis
if Zc[0] < 0:
# Make sure the +ve z-axis (perpendicular to road) points right-ish in image space -- which it won't
# do naturally if V2 is to the left of the image
Zc = -Zc
Yc = np.cross(Zc, Xc)
assert Yc[1] > 0 # i.e. require that 'up is up'
R = np.stack([Xc, Yc, Zc], axis=1)
return K, R
def convert_camera_to_gl(K, R, image_size, near, far):
# This assumes K, R are given wrt a left-handed camera (Hartley-Zisserman / OpenCV / etc.), i.e. the camera looks along the positive-z axis
# See http://ksimek.github.io/2012/08/14/decompose/ [1] and http://ksimek.github.io/2013/06/03/calibrated_cameras_in_opengl/
# It also assumes they left-multiply vectors, but the output matrices are transposed so they right-multiply, as in our other code
# If image_size is None, then the x and y coordinates are left in pixel units instead of ndc
perspective_matrix = np.asarray([
[K[0, 0], K[0, 1], -K[0, 2], 0],
[K[1, 0], K[1, 1], -K[1, 2], 0],
[0, 0, near + far, near * far],
[0, 0, -K[2, 2], 0]
]) # negation of K (of which only K[2, 2] is non-zero) is required because OpenGL clip coordinates have the camera looking along positive-z, in contrast to view coordinates
ndc_matrix = np.asarray([
[2. / image_size[0] if image_size is not None else 1., 0, 0, 0],
[0, 2. / image_size[1] if image_size is not None else 1., 0, 0],
[0, 0, -2. / (far - near), -(far + near) / (far - near)],
[0, 0, 0, 1]
])
projection_matrix = np.dot(ndc_matrix, perspective_matrix)
view_matrix = np.eye(4)
view_matrix[:3, :3] = R
if np.linalg.det(view_matrix) < 0: # step four of [1] sec. 4
print('warning: view_matrix has negative determinant')
view_matrix = -view_matrix
return np.transpose(projection_matrix), np.transpose(view_matrix) # transpose as we assume matrices right-multiply vectors everywhere else
def unproject_onto_ground(pixel_locations, clip_to_world_matrix, image_size):
# This unprojects the given image-space locations onto the world-space y = 0 plane
# pixel_locations is indexed by *, x/y
# clip_to_world_matrix is indexed by x/y/z/w (in), x/y/z/w (out)
# image_size is indexed by x/y
# result is indexed by *, x/y/z
pixel_ray_starts_world, pixel_ray_deltas_world = dirt.projection.unproject_pixels_to_rays(pixel_locations, clip_to_world_matrix, image_size)
lambda_ground = -pixel_ray_starts_world[..., 1:2] / pixel_ray_deltas_world[..., 1:2] # indexed by *, singleton
return pixel_ray_starts_world + lambda_ground * pixel_ray_deltas_world
| 6,067 |
linked_lists/linked_list_cycle.py
|
elenaborisova/A2SV-interview-prep
| 0 |
2025306
|
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
def __repr__(self):
return str(self.val)
# Time: O(n); Space: O(1)
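# Floyd's tortoise-and-hare: the fast pointer gains one node per step on the
# slow pointer, so if a cycle exists the gap closes and the two must meet.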
def has_cycle(head):
slow = fast = head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
if slow == fast:
return True
return False
# Test cases:
head = ListNode(3)
node = ListNode(2)
head.next = node
head.next.next = ListNode(0)
head.next.next.next = ListNode(-4)
head.next.next.next.next = node  # close the cycle: -4 points back to the node holding 2
print(has_cycle(head))
| 572 |
tests/test_downloader.py
|
masterjoseph914/mapview
| 59 |
2026709
|
from unittest import mock
from kivy.clock import Clock
from kivy_garden.mapview.constants import CACHE_DIR
from kivy_garden.mapview.downloader import Downloader
from tests.utils import patch_requests_get
class TestDownloader:
def teardown_method(self):
Downloader._instance = None
def test_instance(self):
"""Makes sure instance is a singleton."""
assert Downloader._instance is None
downloader = Downloader.instance()
assert downloader == Downloader._instance
assert type(downloader) == Downloader
assert downloader.cache_dir == CACHE_DIR
Downloader._instance = None
new_cache_dir = "new_cache_dir"
downloader = Downloader.instance(new_cache_dir)
assert downloader.cache_dir == new_cache_dir
def test_download(self):
"""Checks download() callback."""
callback = mock.Mock()
url = "https://ifconfig.me/"
downloader = Downloader.instance()
assert len(downloader._futures) == 0
with patch_requests_get() as m_get:
downloader.download(url, callback)
assert m_get.call_args_list == [mock.call(url)]
assert callback.call_args_list == []
assert len(downloader._futures) == 1
Clock.tick()
assert callback.call_args_list == [mock.call(url, mock.ANY)]
assert len(downloader._futures) == 0
def test_download_status_error(self):
"""
Error status code should be checked.
Callback function will not be invoked on error.
"""
callback = mock.Mock()
url = "https://httpstat.us/404"
status_code = 404
downloader = Downloader.instance()
assert len(downloader._futures) == 0
with patch_requests_get(status_code=status_code) as m_get:
downloader.download(url, callback)
assert m_get.call_args_list == [mock.call(url)]
assert len(downloader._futures) == 1
assert callback.call_args_list == []
while len(downloader._futures) > 0:
Clock.tick()
assert callback.call_args_list == []
assert len(downloader._futures) == 0
| 2,166 |
core/post_processing.py
|
MD2Korg/CerebralCortex-Reporting
| 0 |
2027085
|
# Copyright (c) 2017, MD2K Center of Excellence
# - <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pandas as pd
from glob import glob
import os, shutil
import argparse
from cerebralcortex.core.config_manager.config import Configuration
def post_process(config: dict):
"""
This will merge all stream quality data to one csv file per participant
"""
csv_files_path = config["output"]["folder_path"]+"/"+config["reports"]["data_yield_per_day"]+"/"
all_files = glob(csv_files_path+"*.csv")
usernames = []
dfs = []
files = []
ext = ".csv"
motionsense_left_led = "_motionsense_left_led"+ext
motionsense_right_led = "_motionsense_right_led"+ext
motionsense_left_accel = "_motionsense_left_accel"+ext
motionsense_right_accel = "_motionsense_right_accel"+ext
autosense_ble = "_autosense_ble_accel"+ext
autosense_respiration = "_autosense_ble_respiration"+ext
for file_name in all_files:
usernames.append(file_name.split("/")[-1].split("_")[0] + "_" + file_name.split("/")[-1].split("_")[1])
usernames = list(set(usernames))
merged_file_path = csv_files_path+"/merged/"
if not os.path.exists(merged_file_path):
os.mkdir(merged_file_path)
else:
shutil.rmtree(merged_file_path)
os.mkdir(merged_file_path)
for username in usernames:
files = []
files.append(csv_files_path+username+motionsense_left_led)
files.append(csv_files_path+username+motionsense_right_led)
files.append(csv_files_path+username+motionsense_left_accel)
files.append(csv_files_path+username+motionsense_right_accel)
files.append(csv_files_path+username+autosense_ble)
files.append(csv_files_path+username+autosense_respiration)
dfs = []
for f in files:
if os.path.exists(f):
dfs.append(pd.read_csv(f))
merged = pd.concat(dfs, axis=1)
merged.to_csv(merged_file_path+username+".csv", sep=",")
if __name__ == '__main__':
# create and load CerebralCortex object and configs
parser = argparse.ArgumentParser(description='CerebralCortex Kafka Message Handler.')
parser.add_argument("-cr", "--cr_reporting_config_filepath", help="mDebugger configuration file path", required=True)
args = vars(parser.parse_args())
# load data reporting configs
cr_config_file = args["cr_reporting_config_filepath"]
cr_config = Configuration(cr_config_file).config
post_process(cr_config)
| 3,790 |
tracker/tests/test_views.py
|
AB-informatica-service/swat4stats.com
| 14 |
2026919
|
from __future__ import unicode_literals
import datetime
import random
from mock import patch
from django import test
from tracker import models, utils, const
class PopularProfileTestCase(test.TestCase):
unpopular_sets = (
{},
{'name': 'Serge', 'country': 'eu'},
{'team': 0},
{'name': 'Serge', 'team': 0},
)
def test_unpopular_profile_raises_404(self):
for field_set in self.unpopular_sets:
response = self.client.get('/profile/%d' % models.Profile.create(**field_set).pk)
self.assertEqual(response.status_code, 404)
| 595 |
src/prefect/environments/storage/gitlab.py
|
vnsn/prefect
| 1 |
2025994
|
from prefect.storage import GitLab as _GitLab
from prefect.environments.storage.base import _DeprecatedStorageMixin
class GitLab(_GitLab, _DeprecatedStorageMixin):
pass
| 175 |
hackerrank/euler008/euler008.py
|
jcpince/algorithms
| 0 |
2026866
|
#!/bin/python3
import sys
def prodk(digits):
prod = 1
for d in digits:
prod *= int(d)
    # print("%s => %d" % (digits, prod))  # debug output; breaks judge output
return prod
t = int(input().strip())
for a0 in range(t):
n,k = input().strip().split(' ')
n,k = [int(n),int(k)]
num = input().strip()
#print("Looking for maxprod of %d digits in %s" % (k, num))
maxprod = prodk(num[0:k])
origin = 1
end = k+1
prod = maxprod
while(end <= n):
if '0' == num[end-1]:
origin += k
end += k
prod = 0
continue
if prod == 0:
prod = prodk(num[origin:end])
maxprod = max(prod, maxprod)
origin += 1
end += 1
continue
        prod //= int(num[origin-1])  # integer division keeps prod exact
        prod *= int(num[end-1])
        # print("%s => %d" % (num[origin:end], prod))  # debug output
maxprod = max(prod, maxprod)
origin += 1
end += 1
print(int(maxprod))
| 963 |
lyrics/config.py
|
gauthampkrishnan/Lyrics_generator
| 38 |
2025576
|
"""Configuration and parameters."""
BATCH_SIZE = 256
MAX_EPOCHS = 100
MAX_NUM_WORDS = 20000
SONGDATA_FILE = "./data/songdata.csv"
NUM_LINES_TO_INCLUDE = 4
MAX_REPEATS = 2
SAVE_FREQUENCY = 10
EARLY_STOPPING_PATIENCE = 5
# The default embedding dimension matches the glove filename
EMBEDDING_DIM = 50
EMBEDDING_FILE = "./data/glove.6B.50d.txt"
# Sample rock artists (this was based on a random top 20 I found online)
# Artists are confirmed to exist in the dataset
ARTISTS = [
"The Beatles",
"<NAME>",
"<NAME>",
"Queen",
"Who", # The Who
"<NAME>",
"Doors", # The Doors
"Nirvana",
"Eagles",
"Aerosmith",
"Creedence Clearwater Revival",
"Guns N' Roses",
"Black Sabbath",
"U2",
"<NAME>",
"Beach Boys",
"<NAME>",
"<NAME>",
"<NAME>",
"Red Hot Chili Peppers",
]
| 840 |
cfp_common_v1.py
|
strib/scipher
| 135 |
2026815
|
# This file is for GRAMMAR_VERSION == 1 (cf. version() below). Future versions
# may have to wrap it in a class to access it when decoding older grammars.
import cfp_common
import nltk
import sys
class CfpCommonV1(cfp_common.CfpCommon):
# top-level section -> (list weight, list nonterminal for that section)
list_weights = {nltk.Nonterminal("CFP_TOPIC_SECTION"):
(1,nltk.Nonterminal("CFP_TOPIC_LIST")),
nltk.Nonterminal("LOC_SECTION"):
(.5,nltk.Nonterminal("LOC_LIST")),
nltk.Nonterminal("ORGS_SECTION"):
(1,nltk.Nonterminal("ORGS_LIST")),
nltk.Nonterminal("STEER_SECTION"):
(1,nltk.Nonterminal("STEER_LIST")),
nltk.Nonterminal("KEYNOTE_SECTION"):
(7,nltk.Nonterminal("KEYNOTE_LIST")),
nltk.Nonterminal("PC_SECTION"):
(5,nltk.Nonterminal("PC_LIST"))}
recursive_terms = [nltk.Nonterminal("CFP_TOPIC_LIST"),
nltk.Nonterminal("PROF_LIST_PAREN"),
nltk.Nonterminal("PROF_LIST_COMMA"),
nltk.Nonterminal("PROF_LIST_DASH"),
nltk.Nonterminal("LOC_LIST"),
nltk.Nonterminal("KEYNOTE_LIST_DASH")]
newline_terms = {nltk.Nonterminal("CFP_GREETING"):1,
nltk.Nonterminal("CFP_TOPIC_HEADER"):1,
nltk.Nonterminal("CFP_TOPIC_LIST_ITEM"):1,
nltk.Nonterminal("PROF_LIST_PAREN_ITEM"):1,
nltk.Nonterminal("PROF_LIST_COMMA_ITEM"):1,
nltk.Nonterminal("PROF_LIST_DASH_ITEM"):1,
nltk.Nonterminal("KEYNOTE_ITEM_DASH"):1,
nltk.Nonterminal("ORGS_HEADER"):1,
nltk.Nonterminal("PC_HEADER"):1,
nltk.Nonterminal("STEER_HEADER"):1,
nltk.Nonterminal("KEYNOTE_HEADER"):1,
nltk.Nonterminal("LOC_HEADER"):1,
nltk.Nonterminal("LOC_PLACE_ITEM"):1,
nltk.Nonterminal("LOC_UNIV_ITEM"):1,
nltk.Nonterminal("DATE_HEADER"):1,
nltk.Nonterminal("SUBSTITUTE_DATE_NL"):1,
nltk.Nonterminal("DATE_TYPE_1_NL"):1,
nltk.Nonterminal("DATE_TYPE_2_NL"):1,
nltk.Nonterminal("DATE_TYPE_3_NL"):1,
nltk.Nonterminal("DATE_TYPE_4_NL"):1,
nltk.Nonterminal("CFP_INTRO_SECTION"):1,
nltk.Nonterminal("CFP_SCOPE_SECTION"):1,
nltk.Nonterminal("CFP_SUBMIT_SECTION"):1,
nltk.Nonterminal("SPACE_NEWLINE"):1}
last_or_not_terms = {nltk.Nonterminal("SUBMIT_CLOSING"):False}
@staticmethod
def version():
return 1
def chars_to_remove_a_space_before(self):
return '.,:;\?\)\!'
def chars_to_remove_a_space_after(self):
return '\('
def list_recursive_terms(self):
return CfpCommonV1.recursive_terms
def append_newlines(self):
return CfpCommonV1.newline_terms
def choose_last_or_nots(self):
return CfpCommonV1.last_or_not_terms
def calc_list_bits(self, msg_len, body_prod):
# we only care about lists that are actually used in the body
used_lists = {w[1]: w[0] for l,w in self.list_weights.iteritems()
if l in body_prod.rhs()}
total_weight = sum(used_lists.values())
# we'll get most of our entropy from lists, but we should make
# sure that the bits are spread out among the lists as much as
# possible. So given a set of lists, each with weight w (total
# weight of W), and a number of bits remaining = B, make sure
# B*w/W bits are used up in this list. Multiply by some fraction
# since other parts of the message will use some bits too.
fraction_in_lists = 0.85
list_bits = {}
for l,w in used_lists.iteritems():
list_bits[l] = int(msg_len*fraction_in_lists*w/total_weight)
return list_bits
def header_cfg_filename(self):
return "cfp_header.cfg"
def body_cfg_filename(self):
return "cfp_body.cfg"
cfp_common.CfpCommon.register_common(CfpCommonV1)
| 4,448 |
genshin/models/genshin/chronicle/abyss.py
|
thesadru/genshin.py
| 63 |
2026939
|
import datetime
import typing
import pydantic
from genshin.models.genshin import character
from genshin.models.model import Aliased, APIModel
__all__ = [
"AbyssCharacter",
"AbyssRankCharacter",
"Battle",
"Chamber",
"CharacterRanks",
"Floor",
"SpiralAbyss",
"SpiralAbyssPair",
]
class AbyssRankCharacter(character.BaseCharacter):
"""Character with a value of a rank."""
id: int = Aliased("avatar_id")
icon: str = Aliased("avatar_icon")
value: int
class AbyssCharacter(character.BaseCharacter):
"""Character with just a level."""
level: int
# flake8: noqa: E222
class CharacterRanks(APIModel):
"""Collection of rankings achieved during spiral abyss runs."""
# fmt: off
most_played: typing.Sequence[AbyssRankCharacter] = Aliased("reveal_rank", default=[], mi18n="bbs/go_fight_count")
most_kills: typing.Sequence[AbyssRankCharacter] = Aliased("defeat_rank", default=[], mi18n="bbs/max_rout_count")
strongest_strike: typing.Sequence[AbyssRankCharacter] = Aliased("damage_rank", default=[], mi18n="bbs/powerful_attack")
most_damage_taken: typing.Sequence[AbyssRankCharacter] = Aliased("take_damage_rank", default=[], mi18n="bbs/receive_max_damage")
most_bursts_used: typing.Sequence[AbyssRankCharacter] = Aliased("energy_skill_rank", default=[], mi18n="bbs/element_break_count")
most_skills_used: typing.Sequence[AbyssRankCharacter] = Aliased("normal_skill_rank", default=[], mi18n="bbs/element_skill_use_count")
# fmt: on
def as_dict(self, lang: str = "en-us") -> typing.Mapping[str, typing.Any]:
"""Helper function which turns fields into properly named ones"""
return {self._get_mi18n(field, lang): getattr(self, field.name) for field in self.__fields__.values()}
class Battle(APIModel):
"""Battle in the spiral abyss."""
half: int = Aliased("index")
timestamp: datetime.date
characters: typing.Sequence[AbyssCharacter] = Aliased("avatars")
class Chamber(APIModel):
"""Chamber of the spiral abyss."""
chamber: int = Aliased("index")
stars: int = Aliased("star")
max_stars: typing.Literal[3] = Aliased("max_star")
battles: typing.Sequence[Battle]
class Floor(APIModel):
"""Floor of the spiral abyss."""
floor: int = Aliased("index")
# icon: str - unused
# settle_time: int - appsample might be using this?
unlocked: typing.Literal[True] = Aliased("is_unlock")
stars: int = Aliased("star")
max_stars: typing.Literal[9] = Aliased("max_star") # maybe one day
chambers: typing.Sequence[Chamber] = Aliased("levels")
class SpiralAbyss(APIModel):
"""Information about Spiral Abyss runs during a specific season."""
unlocked: bool = Aliased("is_unlock")
season: int = Aliased("schedule_id")
start_time: datetime.datetime
end_time: datetime.datetime
total_battles: int = Aliased("total_battle_times")
total_wins: str = Aliased("total_win_times")
max_floor: str
total_stars: int = Aliased("total_star")
ranks: CharacterRanks
floors: typing.Sequence[Floor]
@pydantic.root_validator(pre=True)
    def __nest_ranks(cls, values: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]:
"""By default ranks are for some reason on the same level as the rest of the abyss."""
values.setdefault("ranks", {}).update(values)
return values
class SpiralAbyssPair(APIModel):
"""Pair of both current and previous spiral abyss.
This may not be a namedtuple due to how pydantic handles them.
"""
current: SpiralAbyss
previous: SpiralAbyss
| 3,655 |
dumpframes.py
|
socram8888/pvdscripts
| 0 |
2026979
|
#!/usr/bin/env python3
from PIL import Image
import sys
import os
FRAME_WIDTH = 216
FRAME_HEIGHT = 160
FRAME_SIZE = (FRAME_WIDTH * FRAME_HEIGHT) // 2
COLOR_MATRIX = [
[
(1, 0, 0),
(0, 1, 0),
(0, 0, 1),
],
[
(0, 1, 0),
(0, 0, 1),
(1, 0, 0),
]
]
assert(len(sys.argv) == 3)
video = open(sys.argv[1], 'rb')
if not os.path.exists(sys.argv[2]):
os.makedirs(sys.argv[2])
image = Image.new('RGB', (FRAME_WIDTH, FRAME_HEIGHT))
framenum = 0
while True:
rawframe = video.read(FRAME_SIZE)
if len(rawframe) != FRAME_SIZE:
break
for y in range(FRAME_HEIGHT):
for x in range(FRAME_WIDTH):
nibblepos = y * FRAME_WIDTH + x
value = rawframe[nibblepos >> 1]
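            # two 4-bit pixels are packed per byte: odd nibble positions use
            # the high nibble, even positions the low nibble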
if nibblepos & 1:
value >>= 4
else:
value &= 0xF
value = round(value * 255 / 15)
matrix = COLOR_MATRIX[y % 2][x % 3]
rgb = (matrix[0] * value, matrix[1] * value, matrix[2] * value)
image.putpixel((x, y), rgb)
image.save(os.path.join(sys.argv[2], '%05d.bmp' % framenum))
framenum += 1
| 992 |
orcamentos/core/views.py
|
rg3915/orcamentos
| 94 |
2025554
|
from django.shortcuts import render, redirect
from django.views.generic import TemplateView
from django.db.models import IntegerField, Count, Case, When
from orcamentos.proposal.models import Proposal
from orcamentos.crm.forms import EmployeeForm
from .mixins import DashboardMixin
def home(request):
if request.user.is_authenticated:
return redirect('core:dashboard')
return render(request, 'index.html')
def welcome(request):
return render(request, 'welcome.html')
def subscription(request):
if request.method == 'POST':
form = EmployeeForm(request.POST)
if form.is_valid():
e = form.save(commit=False)
e.slug = e.username
e.is_staff = True
e.set_password(form.cleaned_data['password'])
e.save()
return redirect('core:welcome')
else:
form = EmployeeForm()
return render(request, 'subscription.html', {'form': form})
class Dashboard(DashboardMixin, TemplateView):
template_name = 'dashboard.html'
def get_context_data(self, **kwargs):
p = Proposal.objects.aggregate(
proposals=Count('pk'),
proposal_elab=Count(
Case(When(status='elab', then=1), output_field=IntegerField())),
proposal_pending=Count(
Case(When(status='p', then=1), output_field=IntegerField())),
proposal_concluded=Count(
Case(When(status='co', then=1), output_field=IntegerField())),
proposal_approved=Count(
Case(When(status='a', then=1), output_field=IntegerField())),
proposal_canceled=Count(
Case(When(status='c', then=1), output_field=IntegerField())),
)
context = super(Dashboard, self).get_context_data(**kwargs)
context['proposals'] = p
context['proposal_list'] = self.proposal_list()
context['proposal_elab'] = self.proposal_elab()
context['entrys'] = self.entry_list()
context['contract_total_per_month'] = self.contract_total_per_month()
context['contract_total_per_year'] = self.contract_total_per_year()
return context
def status(request):
return render(request, 'status.html')
| 2,243 |
Basic_servers/server_using_wsgiref.py
|
Rockfish/PythonCourse
| 0 |
2025591
|
from wsgiref.simple_server import make_server
"""
A simple WSGI application.
It echoes the WSGI environment dictionary back to the client, one
"key: value" line per entry.
"""
def simple_app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/plain; charset=utf-8')]
start_response(status, headers)
ret = [("%s: %s\n" % (key, value)).encode("utf-8")
for key, value in environ.items()]
return ret
def run():
print("Serving at http://localhost:8000/ ...")
httpd = make_server('', 8000, simple_app)
httpd.serve_forever()
run()
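# To try it from another shell while the server is running (illustrative):
#   curl http://localhost:8000/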
| 595 |
knx2mqtt/setup.py
|
FireFrei/knx2mqtt
| 1 |
2025635
|
from setuptools import setup
setup(name='knx2mqtt',
version='0.2',
description='KNX 2 MQTT bridge',
url='https://github.com/gbeine/knx2mqtt',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['knx2mqtt'],
      # pip ignores 'requires'; use install_requires ('logging' is stdlib)
      install_requires=[
          'paho-mqtt',
          'pyyaml',
          'xknx',
      ],
zip_safe=False)
| 399 |
micro_record_play.py
|
lemariva/uPyM5Echo
| 8 |
2025266
|
# Copyright 2020 <NAME> - <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from machine import I2S
from machine import Pin
from config import *
import time
bck_pin = Pin(device_config['bck']) # Bit clock output
ws_pin = Pin(device_config['ws']) # Word clock output
sdout_pin = Pin(device_config['sdout']) # Serial data output
sdin_pin = Pin(device_config['sdin'])     # Serial data input
samples = bytearray(36000)
audio_in = I2S(I2S.NUM0, # create I2S peripheral to read audio
ws=ws_pin, sdin=sdin_pin, #
standard=I2S.PHILIPS, mode=I2S.MASTER_PDW, #
dataformat=I2S.B16, #
channelformat=I2S.ONLY_LEFT,
samplerate=8000,
dmacount=8,dmalen=512)
num_bytes_read = audio_in.readinto(samples)  # readinto() returns a byte count
time.sleep_ms(2000)
audio_out = I2S(I2S.NUM1, # create I2S peripheral to write audio
bck=bck_pin, ws=ws_pin, sdout=sdout_pin, # sample data to an Adafruit I2S Amplifier
standard=I2S.PHILIPS, mode=I2S.MASTER_TX, # breakout board,
dataformat=I2S.B16, # based on NS4168 device
channelformat=I2S.ONLY_LEFT,
samplerate=8000,
dmacount=8,dmalen=512)
num_bytes_written = audio_out.write(samples)
| 1,928 |
tests/test_export.py
|
balos1/easy-gpg-to-paper
| 46 |
2027090
|
"""
Test exporting functionality of easy-gpg-to-paper.
"""
import argparse
import os.path
import glob
import pytest
from gpg2paper import gpg2paper
KEY_NAME = "easy-gpg-to-paper-testenv"
KEY_ID = "98436C7A"
OUTPUT_ROOT = "tests/out"
OUTPUT_FILE = "testsout"
FULL_PATH = os.path.join(OUTPUT_ROOT, OUTPUT_FILE)
@pytest.mark.usefixtures("load_gpg_keys", "cleanup_export_output")
class TestExport:
@pytest.mark.parametrize("args", [
(argparse.Namespace(base64=False, command="export", key_id=KEY_NAME, num_files=4,
out_filename=FULL_PATH, png=True, size=512)),
(argparse.Namespace(base64=True, command="export", key_id=KEY_NAME, num_files=4,
out_filename=FULL_PATH, png=False, size=512))
])
def test_do_export_to_path(self, args):
gpg2paper.do_export(args)
if args.png:
outfiles = glob.glob("%s*.png" % args.out_filename)
elif args.base64:
outfiles = glob.glob("%s*.txt" % args.out_filename)
else:
assert False
assert len(outfiles) == args.num_files
@pytest.mark.parametrize("args", [
(argparse.Namespace(base64=False, command="export", key_id=KEY_NAME, num_files=4,
out_filename=OUTPUT_FILE, png=True, size=512)),
(argparse.Namespace(base64=True, command="export", key_id=KEY_NAME, num_files=4,
out_filename=OUTPUT_FILE, png=False, size=512)),
])
def test_do_export_to_cwd(self, args):
gpg2paper.do_export(args)
if args.png:
outfiles = glob.glob("%s*.png" % args.out_filename)
elif args.base64:
outfiles = glob.glob("%s*.txt" % args.out_filename)
else:
assert False
assert len(outfiles) == args.num_files
@pytest.mark.parametrize("args", [
(argparse.Namespace(base64=False, command="export", key_id=KEY_ID, num_files=4,
out_filename=FULL_PATH, png=True, size=512)),
(argparse.Namespace(base64=True, command="export", key_id=KEY_ID, num_files=4,
out_filename=FULL_PATH, png=False, size=512))
])
def test_do_export_by_key_id(self, args):
gpg2paper.do_export(args)
if args.png:
outfiles = glob.glob("%s*.png" % args.out_filename)
elif args.base64:
outfiles = glob.glob("%s*.txt" % args.out_filename)
else:
assert False
assert len(outfiles) == args.num_files
@pytest.mark.parametrize("args", [
(argparse.Namespace(base64=False, command="export", key_id=KEY_NAME, num_files=4,
out_filename=FULL_PATH, png=True, size=512)),
(argparse.Namespace(base64=True, command="export", key_id=KEY_NAME, num_files=4,
out_filename=FULL_PATH, png=False, size=512))
])
def test_do_export_by_key_name(self, args):
gpg2paper.do_export(args)
if args.png:
outfiles = glob.glob("%s*.png" % args.out_filename)
elif args.base64:
outfiles = glob.glob("%s*.txt" % args.out_filename)
else:
assert False
assert len(outfiles) == args.num_files
@pytest.mark.parametrize("args", [
(argparse.Namespace(base64=False, command="export", key_id=KEY_NAME, num_files=5,
out_filename=FULL_PATH, png=True, size=512)),
(argparse.Namespace(base64=True, command="export", key_id=KEY_NAME, num_files=5,
out_filename=FULL_PATH, png=False, size=512)),
(argparse.Namespace(base64=False, command="export", key_id=KEY_NAME, num_files=11,
out_filename=FULL_PATH, png=True, size=512)),
(argparse.Namespace(base64=True, command="export", key_id=KEY_NAME, num_files=11,
out_filename=FULL_PATH, png=False, size=512))
])
def test_do_export_odd_num_files(self, args):
gpg2paper.do_export(args)
if args.png:
outfiles = glob.glob("%s*.png" % args.out_filename)
elif args.base64:
outfiles = glob.glob("%s*.txt" % args.out_filename)
else:
assert False
assert len(outfiles) == args.num_files
@pytest.mark.parametrize("args", [
(argparse.Namespace(base64=False, command="export", key_id=KEY_NAME, num_files=4,
out_filename=FULL_PATH, png=True, size=512)),
(argparse.Namespace(base64=True, command="export", key_id=KEY_NAME, num_files=4,
out_filename=FULL_PATH, png=False, size=512)),
(argparse.Namespace(base64=False, command="export", key_id=KEY_NAME, num_files=8,
out_filename=FULL_PATH, png=True, size=512)),
(argparse.Namespace(base64=True, command="export", key_id=KEY_NAME, num_files=8,
out_filename=FULL_PATH, png=False, size=512))
])
def test_do_export_even_num_files(self, args):
gpg2paper.do_export(args)
if args.png:
outfiles = glob.glob("%s*.png" % args.out_filename)
elif args.base64:
outfiles = glob.glob("%s*.txt" % args.out_filename)
else:
assert False
assert len(outfiles) == args.num_files
| 5,356 |
grr/gui/api_plugins/stats.py
|
theGreenJedi/grr
| 0 |
2025695
|
#!/usr/bin/env python
"""API handlers for stats."""
from grr.gui import api_call_handler_base
from grr.gui import api_value_renderers
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import timeseries
from grr.lib import utils
from grr.lib.aff4_objects import stats_store as stats_store_lib
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import api_pb2
CATEGORY = "Other"
class ApiListStatsStoreMetricsMetadataArgs(rdf_structs.RDFProtoStruct):
protobuf = api_pb2.ApiListStatsStoreMetricsMetadataArgs
class ApiListStatsStoreMetricsMetadataHandler(
api_call_handler_base.ApiCallHandler):
"""Renders available metrics descriptors for a given system component."""
category = CATEGORY
args_type = ApiListStatsStoreMetricsMetadataArgs
def Render(self, args, token=None):
stats_store = aff4.FACTORY.Create(None,
aff4_type=stats_store_lib.StatsStore,
mode="w",
token=token)
process_ids = [pid for pid in stats_store.ListUsedProcessIds()
if pid.startswith(args.component.name.lower())]
if not process_ids:
return {}
else:
metadata = stats_store.ReadMetadata(process_id=process_ids[0])
return api_value_renderers.RenderValue(metadata)
class ApiGetStatsStoreMetricArgs(rdf_structs.RDFProtoStruct):
protobuf = api_pb2.ApiGetStatsStoreMetricArgs
class ApiGetStatsStoreMetricHandler(api_call_handler_base.ApiCallHandler):
"""Renders historical data for a given metric."""
category = CATEGORY
args_type = ApiGetStatsStoreMetricArgs
def Render(self, args, token):
stats_store = aff4.FACTORY.Create(
stats_store_lib.StatsStore.DATA_STORE_ROOT,
aff4_type=stats_store_lib.StatsStore,
mode="rw",
token=token)
process_ids = stats_store.ListUsedProcessIds()
filtered_ids = [pid for pid in process_ids
if pid.startswith(args.component.name.lower())]
start_time = args.start
end_time = args.end
if not end_time:
end_time = rdfvalue.RDFDatetime().Now()
if not start_time:
start_time = end_time - rdfvalue.Duration("1h")
# Run for a little extra time at the start. This improves the quality of the
# first data points of counter metrics which don't appear in every interval.
base_start_time = start_time
# pylint: disable=g-no-augmented-assignment
start_time = start_time - rdfvalue.Duration("10m")
# pylint: enable=g-no-augmented-assignment
if end_time <= start_time:
raise ValueError("End time can't be less than start time.")
result = dict(start=base_start_time.AsMicroSecondsFromEpoch(),
end=end_time.AsMicroSecondsFromEpoch(),
metric_name=args.metric_name,
timeseries=[])
data = stats_store.MultiReadStats(
process_ids=filtered_ids,
metric_name=utils.SmartStr(args.metric_name),
timestamp=(start_time, end_time))
if not data:
return result
    pid = list(data.keys())[0]  # list() so this also works under Python 3
metadata = stats_store.ReadMetadata(process_id=pid)
metric_metadata = metadata[args.metric_name]
query = stats_store_lib.StatsStoreDataQuery(data)
query.In(args.component.name.lower() + ".*").In(args.metric_name)
if metric_metadata.fields_defs:
query.InAll()
requested_duration = end_time - start_time
if requested_duration >= rdfvalue.Duration("1d"):
sampling_duration = rdfvalue.Duration("5m")
elif requested_duration >= rdfvalue.Duration("6h"):
sampling_duration = rdfvalue.Duration("1m")
else:
sampling_duration = rdfvalue.Duration("30s")
if metric_metadata.metric_type == metric_metadata.MetricType.COUNTER:
query.TakeValue().MakeIncreasing().Normalize(
sampling_duration,
start_time,
end_time,
mode=timeseries.NORMALIZE_MODE_COUNTER)
elif metric_metadata.metric_type == metric_metadata.MetricType.EVENT:
if args.distribution_handling_mode == "DH_SUM":
query.TakeDistributionSum()
elif args.distribution_handling_mode == "DH_COUNT":
query.TakeDistributionCount()
else:
raise ValueError("Unexpected request.distribution_handling_mode "
"value: %s." % args.distribution_handling_mode)
query.MakeIncreasing()
query.Normalize(sampling_duration,
start_time,
end_time,
mode=timeseries.NORMALIZE_MODE_COUNTER)
elif metric_metadata.metric_type == metric_metadata.MetricType.GAUGE:
query.TakeValue().Normalize(sampling_duration, start_time, end_time)
else:
raise RuntimeError("Unsupported metric type.")
if args.aggregation_mode == "AGG_SUM":
query.AggregateViaSum()
elif args.aggregation_mode == "AGG_MEAN":
query.AggregateViaMean()
elif args.aggregation_mode == "AGG_NONE":
pass
else:
      raise ValueError("Unexpected request.aggregation_mode value: %s." %
                       args.aggregation_mode)
if (args.rate and
metric_metadata.metric_type != metric_metadata.MetricType.GAUGE):
query.Rate()
query.InTimeRange(base_start_time, end_time)
ts = []
for value, timestamp in query.ts.data:
if value is not None:
ts.append((timestamp / 1e3, value))
result["timeseries"] = ts
return result
| 5,477 |
app/src/error.py
|
hagifoo/gae-pomodoro
| 0 |
2025505
|
"""
This module provides package-specific error classes.
"""
import webapp2
class BaseHTTPException(webapp2.HTTPException):
code = None
def __init__(self, message=''):
self._message = message
@property
def message(self):
return self._message
class BadRequestException(BaseHTTPException):
code = 400
class UnauthorizedException(BaseHTTPException):
code = 401
class ForbiddenException(BaseHTTPException):
code = 403
class NotFoundException(BaseHTTPException):
    code = 404
class TaskUnrecoverableException(Exception):
def __init__(self, exception):
self._exception = exception
@property
def cause(self):
return self._exception
| 713 |
app_tk.py
|
bluejenga/xmastree
| 0 |
2026243
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
import tkinter as tk
import copy
from PIL import Image, ImageTk
from tree import Tree
TREE_HEIGHT = 9
DISP_RATIO = 0.75
TREE_WIDTH = TREE_HEIGHT * 2 - 3
CANVAS_WIDTH = round(1700 * DISP_RATIO)
CANVAS_HEIGHT = round(1100 * DISP_RATIO)
CELL_LENGTH = round(900 / TREE_HEIGHT * DISP_RATIO)
TREE_OFFSET_X = round((CANVAS_WIDTH - CELL_LENGTH * TREE_WIDTH) / 2)
TREE_OFFSET_Y = round(150 * DISP_RATIO)
STAR_OFFSET_X = round(775 * DISP_RATIO)
STAR_OFFSET_Y = round(20 * DISP_RATIO)
STAR_WIDTH = round(149 * DISP_RATIO)
STAR_HEIGHT = round(143 * DISP_RATIO)
BG_SIZE = (CANVAS_WIDTH, CANVAS_HEIGHT)
IMG_BG = Image.open('image/bg.png').resize(BG_SIZE, Image.BILINEAR)
STAR_SIZE = (STAR_WIDTH, STAR_HEIGHT)
IMG_STAR_OFF = Image.open('image/star_off.png').resize(STAR_SIZE, Image.BILINEAR)
IMG_STAR_ON= Image.open('image/star_on.png').resize(STAR_SIZE, Image.BILINEAR)
CELL_SIZE = (CELL_LENGTH, CELL_LENGTH)
IMG_ON_P = Image.open('image/cell_on_+.png').resize(CELL_SIZE, Image.BILINEAR)
IMG_ON_I = Image.open('image/cell_on_I.png').resize(CELL_SIZE, Image.BILINEAR)
IMG_ON_L = Image.open('image/cell_on_L.png').resize(CELL_SIZE, Image.BILINEAR)
IMG_ON_T = Image.open('image/cell_on_T.png').resize(CELL_SIZE, Image.BILINEAR)
IMG_ON_O = Image.open('image/cell_on_o.png').resize(CELL_SIZE, Image.BILINEAR)
IMG_OFF_P = Image.open('image/cell_off_+.png').resize(CELL_SIZE, Image.BILINEAR)
IMG_OFF_I = Image.open('image/cell_off_I.png').resize(CELL_SIZE, Image.BILINEAR)
IMG_OFF_L = Image.open('image/cell_off_L.png').resize(CELL_SIZE, Image.BILINEAR)
IMG_OFF_T = Image.open('image/cell_off_T.png').resize(CELL_SIZE, Image.BILINEAR)
IMG_OFF_O = Image.open('image/cell_off_o.png').resize(CELL_SIZE, Image.BILINEAR)
class CellImageInfo:
    """Image information held for each cell"""
def __init__(self, off_img, on_img, angle):
self.off_img = off_img
self.on_img = on_img
self.angle = angle
self.id = None
self.img = None
self.photo_img = None
self.light = None
IMG_DICT = {
'S' :CellImageInfo(IMG_OFF_O, IMG_ON_O, 0),
'E' :CellImageInfo(IMG_OFF_O, IMG_ON_O, 90),
'N' :CellImageInfo(IMG_OFF_O, IMG_ON_O, 180),
'W' :CellImageInfo(IMG_OFF_O, IMG_ON_O, 270),
'NE' :CellImageInfo(IMG_OFF_L, IMG_ON_L, 0),
'NW' :CellImageInfo(IMG_OFF_L, IMG_ON_L, 90),
'SW' :CellImageInfo(IMG_OFF_L, IMG_ON_L, 180),
'ES' :CellImageInfo(IMG_OFF_L, IMG_ON_L, 270),
'NS' :CellImageInfo(IMG_OFF_I, IMG_ON_I, 0),
'EW' :CellImageInfo(IMG_OFF_I, IMG_ON_I, 90),
'ESW' :CellImageInfo(IMG_OFF_T, IMG_ON_T, 0),
'NES' :CellImageInfo(IMG_OFF_T, IMG_ON_T, 90),
'NEW' :CellImageInfo(IMG_OFF_T, IMG_ON_T, 180),
'NSW' :CellImageInfo(IMG_OFF_T, IMG_ON_T, 270),
'NESW':CellImageInfo(IMG_OFF_P, IMG_ON_P, 0),
}
class Application(tk.Frame):
def __init__(self, master, tree):
super().__init__(master)
self.master = master
self.tree = tree
self.playing = False
self.counter_callback_id = None
self.img_info = [[None for y in range(self.tree.height)]
for x in range(self.tree.width)]
self.create_canvas()
self.create_controls()
self.pack()
def create_canvas(self):
w, h = IMG_BG.size
self.canvas = tk.Canvas(self, width=w, height=h)
self.bg_img = ImageTk.PhotoImage(IMG_BG)
self.canvas.create_image(0, 0, image=self.bg_img, anchor=tk.NW)
        # show the completed tree when the app starts
        self.tree.build()
        self.tree.lightup()
        for cell in self.tree.get_cell_list():
info = self.get_img_info_for_cell(cell)
x = cell.x * CELL_LENGTH + TREE_OFFSET_X
y = cell.y * CELL_LENGTH + TREE_OFFSET_Y
info.id = self.canvas.create_image(x, y,
image=info.photo_img,
anchor=tk.NW)
self.img_info[cell.x][cell.y] = info
self.star_img = ImageTk.PhotoImage(IMG_STAR_ON)
self.star_id = self.canvas.create_image(STAR_OFFSET_X,
STAR_OFFSET_Y,
image=self.star_img,
anchor=tk.NW)
self.canvas.bind('<ButtonRelease-1>', self.on_click_canvas)
self.canvas.pack()
def create_controls(self):
frame = tk.Frame(self, bg='#e5f8cf', padx=5)
start = tk.Button(frame, text='Start', command=self.start_new_game,
fg='#345834', font=('', 22, 'bold'))
start.pack(side=tk.LEFT, padx=5, pady=10)
self.counter_text = tk.StringVar()
self.counter_text.set('00:00')
self.counter_label = tk.Label(frame, textvariable=self.counter_text,
bg='#e5f8cf', fg='#345834',
font=('', 22, 'bold'))
self.counter_label.pack(side=tk.LEFT, padx=5, pady=10)
self.canvas.create_window(20, 20, window=frame, anchor=tk.NW)
def get_img_info_for_cell(self, cell):
info = copy.copy(IMG_DICT[str(cell)])
info.img = info.on_img if cell.light else info.off_img
info.light = cell.light
info.photo_img = ImageTk.PhotoImage(info.img.rotate(info.angle))
return info
def on_click_canvas(self, event):
if not self.playing:
return
x = (event.x - TREE_OFFSET_X) // CELL_LENGTH
y = (event.y - TREE_OFFSET_Y) // CELL_LENGTH
if self.tree.is_valid_coord(x, y):
self.rotate_cell(x, y)
def rotate_cell(self, x, y):
info = self.img_info[x][y]
info.angle -= 15
info.photo_img = ImageTk.PhotoImage(info.img.rotate(info.angle))
self.canvas.itemconfigure(info.id, image = info.photo_img)
if info.angle % 90 == 0:
self.tree.rotate(x, y)
self.tree.lightup()
self.update_tree()
            if self.tree.is_complete():
self.update_star()
self.playing = False
self.counter_label.configure(fg='#ff0000')
else:
self.after(15, self.rotate_cell, x, y)
def update_tree(self):
        for cell in self.tree.get_cell_list():
info = self.img_info[cell.x][cell.y]
if info.light != cell.light:
info.img = info.on_img if cell.light else info.off_img
info.light = cell.light
info.photo_img = ImageTk.PhotoImage(info.img.rotate(info.angle))
self.canvas.itemconfigure(info.id, image = info.photo_img)
self.img_info[cell.x][cell.y] = info
def update_star(self):
        if self.tree.is_complete():
self.star_img = ImageTk.PhotoImage(IMG_STAR_ON)
else:
self.star_img = ImageTk.PhotoImage(IMG_STAR_OFF)
self.canvas.itemconfigure(self.star_id, image=self.star_img)
def start_new_game(self):
self.playing = True
self.tree.build()
self.tree.shuffle()
self.tree.lightup()
        for cell in self.tree.get_cell_list():
            info = self.get_img_info_for_cell(cell)
            info.id = self.img_info[cell.x][cell.y].id  # keep the canvas item id from the previous game
self.canvas.itemconfigure(info.id, image=info.photo_img)
self.img_info[cell.x][cell.y] = info
self.update_star()
if self.counter_callback_id:
self.after_cancel(self.counter_callback_id)
self.counter = 0
self.counter_text.set('00:00')
self.counter_label.configure(fg='#345834')
self.counter_callback_id = self.after(1000, self.update_counter)
def update_counter(self):
if not self.tree.is_complete():
self.counter += 1
t = f'{self.counter//60:02d}:{self.counter%60:02d}'
self.counter_text.set(t)
self.counter_callback_id = self.after(1000, self.update_counter)
tree = Tree(TREE_HEIGHT)
root = tk.Tk()
app = Application(root, tree)
app.mainloop()
| 8,176 |
scipy/_build_utils/compiler_helper.py
|
cielavenir/scipy
| 0 |
2026811
|
"""
Helpers for detection of compiler features
"""
try:
from tempfile import TemporaryDirectory
except ImportError:
from backports.tempfile import TemporaryDirectory
import os
def try_compile(compiler, code=None, flags=[], ext=None):
"""Returns True if the compiler is able to compile the given code"""
from distutils.errors import CompileError
from numpy.distutils.fcompiler import FCompiler
if code is None:
if isinstance(compiler, FCompiler):
code = " program main\n return\n end"
else:
code = 'int main (int argc, char **argv) { return 0; }'
ext = ext or compiler.src_extensions[0]
with TemporaryDirectory() as temp_dir:
fname = os.path.join(temp_dir, 'main'+ext)
with open(fname, 'w') as f:
f.write(code)
try:
compiler.compile([fname], output_dir=temp_dir, extra_postargs=flags)
except CompileError:
return False
return True
def has_flag(compiler, flag, ext=None):
"""Returns True if the compiler supports the given flag"""
return try_compile(compiler, flags=[flag], ext=ext)
def get_cxx_std_flag(compiler):
"""Detects compiler flag for c++14, c++11, or None if not detected"""
# GNU C compiler documentation uses single dash:
# https://gcc.gnu.org/onlinedocs/gcc/Standards.html
# but silently understands two dashes, like --std=c++11 too.
# Other GCC compatible compilers, like Intel C Compiler on Linux do not.
gnu_flags = ['-std=c++14', '-std=c++11']
flags_by_cc = {
'msvc': ['/std:c++14', None],
'intelw': ['/Qstd=c++14', '/Qstd=c++11'],
'intelem': ['-std=c++14', '-std=c++11']
}
flags = flags_by_cc.get(compiler.compiler_type, gnu_flags)
for flag in flags:
if flag is None:
return None
if has_flag(compiler, flag):
return flag
from numpy.distutils import log
log.warn('Could not detect c++ standard flag')
return None
def try_add_flag(args, compiler, flag, ext=None):
"""Appends flag to the list of arguments if supported by the compiler"""
if try_compile(compiler, flags=args+[flag], ext=ext):
args.append(flag)
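# Illustrative use inside a build step (assumes `cc` is an already-configured
# distutils/numpy.distutils compiler object; the flag names are examples):
#   std_flag = get_cxx_std_flag(cc)
#   extra_args = [std_flag] if std_flag is not None else []
#   try_add_flag(extra_args, cc, '-fvisibility=hidden')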
| 2,240 |
stactools_aster/stactools/aster/cog.py
|
jonas-eberle/stactools
| 0 |
2024991
|
from collections import defaultdict
import logging
import os
import re
from tempfile import TemporaryDirectory
from typing import Any, List, Tuple, Dict
import rasterio as rio
from shapely.geometry import shape
from stactools.core.projection import reproject_geom
from stactools.core.utils.subprocess import call
from stactools.core.utils.convert import cogify
from stactools.aster.utils import AsterSceneId
from stactools.aster.xml_metadata import XmlMetadata
logger = logging.getLogger(__name__)
def get_cog_filename(item_id, sensor):
return f'{item_id}-{sensor}.tif'
def export_band(subdataset, bounds, crs, output_path):
# ulx uly lrx lry
ullr_args = [str(x) for x in [bounds[0], bounds[3], bounds[2], bounds[1]]]
cmd = ['gdal_translate', '-of', 'GTiff', '-a_ullr']
cmd += ullr_args
cmd += ['-a_srs', crs, subdataset, output_path]
call(cmd)
def merge_bands(input_paths, output_path):
call(['gdal_merge.py', '-separate', '-o', output_path] + input_paths)
def set_band_names(href: str, band_names: List[str]) -> None:
with rio.open(href) as ds:
profile = ds.profile
with rio.open(href, 'r+', **profile) as ds:
ds.descriptions = band_names
def _create_cog_for_sensor(sensor: str, file_prefix: str, tmp_dir: str,
output_dir: str, bounds: List[float], crs: str,
subdataset_info: List[Tuple[Any, int]]) -> str:
sensor_cog_href = os.path.join(output_dir,
get_cog_filename(file_prefix, sensor))
sensor_dir = os.path.join(tmp_dir, sensor)
os.makedirs(sensor_dir)
band_paths = []
band_names = []
for subdataset, band_order in subdataset_info:
band_path = os.path.join(sensor_dir, '{}.tif'.format(band_order))
export_band(subdataset, bounds, crs, band_path)
band_paths.append(band_path)
band_names.append(f"ImageData{band_order} {sensor}_Swath")
merged_path = os.path.join(sensor_dir, 'merged.tif')
merge_bands(band_paths, merged_path)
set_band_names(merged_path, band_names)
cogify(merged_path, sensor_cog_href, extra_args=["-co", "predictor=2"])
return sensor_cog_href
def create_cogs(hdf_path: str, xml_metadata: XmlMetadata,
output_path: str) -> Dict[str, str]:
"""Create COGs from an HDF asset and an XmlMetadata
Args:
hdf_path: Path to the ASTER L1T 003 HDF EOS data
xml_metadata: The XmlMetadata representing this ASTER scene.
output_path: The directory to which the cogs will be written.
"""
logger.info(f'Creating COGs and writing to {output_path}...')
file_name = os.path.basename(hdf_path)
aster_id = AsterSceneId.from_path(file_name)
with rio.open(hdf_path) as ds:
subdatasets = ds.subdatasets
# Gather the subdatasets by sensor, sorted by band number
sensor_to_subdatasets = defaultdict(list)
for subdataset in subdatasets:
m = re.search(r':?([\w]+)_Swath:ImageData([\d]+)', subdataset)
if m is None:
raise ValueError(
'Unexpected subdataset {} - is this a non-standard ASTER L1T 003 HDF-EOS file?'
.format(subdataset))
sensor = m.group(1)
band_order = m.group(2)
sensor_to_subdatasets[sensor].append((subdataset, band_order))
    # Sort each sensor's subdatasets numerically by band number
    # (a string sort would put '10' before '2'; group(2) is digits-only)
    for k in sensor_to_subdatasets:
        sensor_to_subdatasets[k] = sorted(sensor_to_subdatasets[k],
                                          key=lambda x: int(x[1]))
geom, _ = xml_metadata.geometries
crs = 'epsg:{}'.format(xml_metadata.epsg)
reprojected_geom = reproject_geom('epsg:4326', crs, geom)
bounds = list(shape(reprojected_geom).bounds)
result = {}
with TemporaryDirectory() as tmp_dir:
for sensor, subdataset_info in sensor_to_subdatasets.items():
result[sensor] = _create_cog_for_sensor(
sensor,
aster_id.file_prefix,
tmp_dir=tmp_dir,
output_dir=output_path,
bounds=bounds,
crs=crs,
subdataset_info=subdataset_info)
return result
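# Illustrative call (paths and scene name are hypothetical):
#   cog_hrefs = create_cogs('AST_L1T_scene.hdf', xml_metadata, '/tmp/cogs')
#   # -> {'VNIR': '/tmp/cogs/<file_prefix>-VNIR.tif', ...}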
| 4,174 |
tl_env/logic/automaton.py
|
mhtb32/tl-env
| 1 |
2026509
|
from pathlib import Path
from typing import Optional, Union, Iterable, Tuple, Dict
import networkx as nx
State = Union[int, str]
TypedState = Tuple[State, Dict]
Transition = Tuple[State, State, str]
class Automaton(nx.DiGraph):
"""A simple automaton.
LTL formulas are translated to an Automaton instance.
"""
def __init__(self):
super().__init__()
self.input_alphabet = {}
# For now, we only allow one initial and one final state
self.initial_state = None
self.final_state = None
self.cur_state = None
def add_state(self, state: State, type_: Optional[str] = None) -> None:
"""Adds new state to automaton
:param state: an integer or string denoting the state
:param type_: ('init', 'final'), to mark initial or final states
"""
if type_:
if type_ == 'init':
self.add_node(state, type=type_)
self.cur_state = self.initial_state = state
elif type_ == 'final':
self.add_node(state, type=type_)
self.final_state = state
else:
raise ValueError("node type just can be 'init' or 'final'")
else:
self.add_node(state)
def add_state_from(self, states: Iterable[Union[State, TypedState]]) -> None:
"""Adds states from a container.
example
-------
You can add all states at once like::
A = Automaton()
A.add_state_from([1, (2, {'type': 'final'})])
:param states: an iterable container of states or typed states
"""
for state in states:
if isinstance(state, (int, str)):
self.add_state(state)
elif isinstance(state, tuple):
try:
self.add_state(state[0], type_=state[1]['type'])
except KeyError:
print("Dictionary key must be 'type'\n")
raise
# making an alias for nodes property
states = nx.DiGraph.nodes
def add_transition_from(self, transitions: Iterable[Transition]) -> None:
"""Takes transitions as an iterable container of transition tuples
:param transitions: an iterable container of transition tuples like ('q1', 'q2', 'x')
"""
for a, b, x in transitions:
# Graphviz interprets label keyword as edge label
self.add_edge(a, b, label=x)
# add transition label to alphabet
self.input_alphabet[x] = False
def step(self, input_: Dict) -> None:
"""Determines next state of automaton.
Determines next state of automaton based on input alphabet status(boolean).
An example of input alphabet dictionary::
input_ = {
'g1': True,
'g2': False
}
:param input_: a dictionary containing input alphabet and their status(true, false)
"""
if self.cur_state is None:
raise TransitionError("Current state is unknown. You should provide at least one state with type 'init'.")
self.input_alphabet = input_
active_alphabet = []
for alphabet, status in self.input_alphabet.items():
if status:
active_alphabet.append(alphabet)
if len(active_alphabet) > 1:
raise TransitionError("Currently more than one active alphabet is not supported")
if not active_alphabet:
# return if there is no active alphabet
return
        for s in self.successors(self.cur_state):
            if self.get_edge_data(self.cur_state, s)['label'] == active_alphabet[0]:
                self.cur_state = s
                break  # stop scanning; cur_state has already moved
def in_final(self) -> bool:
"""Returns true if current state is a final state
:return: a boolean indicating whether automaton is in final state or not.
"""
return self.cur_state == self.final_state
def reset(self) -> None:
self.cur_state = self.initial_state
def draw(self) -> None:
"""Draws automaton using graphviz"""
agraph = nx.nx_agraph.to_agraph(self)
agraph.node_attr.update(shape='circle')
# change shape of the final state
final_node = agraph.get_node(self.final_state)
final_node.attr.update(shape='doublecircle')
agraph.layout(prog='dot')
# create /out in cwd, pass if exists
(Path.cwd() / 'out').mkdir(exist_ok=True)
agraph.draw(str(Path.cwd() / 'out' / 'automaton.pdf'))
class TransitionError(Exception):
    """Raised when a transition is not possible from the current state of the automaton"""
    pass
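# Minimal usage sketch (illustrative, not part of the module): a two-state
# automaton driven into its final state by a single input symbol.
if __name__ == '__main__':
    A = Automaton()
    A.add_state_from([(1, {'type': 'init'}), (2, {'type': 'final'})])
    A.add_transition_from([(1, 2, 'g1')])
    A.step({'g1': True})
    assert A.in_final()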
| 4,697 |
src/data/json_serializer.py
|
fabianWrede/tournament-manager
| 3 |
2027091
|
import json
import sys
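# Tournament, Team, Round and Game look unused but must stay imported so
# decode_data_json below can resolve them by name via getattr.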
from data.model import BaseData, Tournament, Team, Round, Game
class DataJSONEncoder(json.JSONEncoder):
# pylint: disable=E0202
def default(self, obj):
if isinstance(obj, BaseData):
return obj.encode_json()
return json.JSONEncoder.default(self, obj)
def decode_data_json(dct):
if '_type' in dct:
cls = getattr(sys.modules[__name__], dct.get('_type'))
return cls.decode_json(dct)
return dct
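# Illustrative round trip (assumes the model classes implement encode_json /
# decode_json as used above):
#   text = json.dumps(tournament, cls=DataJSONEncoder)
#   tournament = json.loads(text, object_hook=decode_data_json)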
| 478 |
seapy/progressbar.py
|
hadfieldnz/seapy
| 2 |
2026949
|
#!/usr/bin/env python
"""
ASCII Progress Bar that work in IPython notebooks.
Code take from examples shown at:
<http://nbviewer.ipython.org/github/ipython/ipython/blob/3607712653c66d63e0d7f13f073bde8c0f209ba8/docs/examples/notebooks/Animations_and_Progress.ipynb>
Modified to include a timer and added an iterator that displays a
progressbar as it iterates.
Examples
--------
>>> for i in progressbar.progress(range(10)):
>>> frobnicate(i)
"""
import sys
import time
try:
from IPython.display import clear_output
have_ipython = True
except ImportError:
have_ipython = False
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '='
self.width = 40
self.__update_amount(0)
self.start = time.process_time()
if have_ipython:
self.animate = self.animate_ipython
else:
self.animate = self.animate_noipython
def animate_ipython(self, iter):
print('\r', self, end='', file=sys.stderr)
sys.stderr.flush()
        self.update_iteration(iter + 1)
    # referenced from __init__ when IPython is unavailable; the same
    # carriage-return redraw works in a plain terminal, so reuse it
    animate_noipython = animate_ipython
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
t = time.process_time()
delta = t - self.start
togo = (delta / elapsed_iter) * (self.iterations - elapsed_iter)
self.prog_bar += ' [%d of %d, %.1f secs elapsed/%.1f secs ETA]' % \
(elapsed_iter, self.iterations, delta, togo)
if elapsed_iter > self.iterations:
print("\r [ COMPLETED %d ITERATIONS IN %.1f SECS ] %s" %
(self.iterations, delta, " " * 60))
print("\r [ COMPLETED %d ITERATIONS IN %.1f SECS ] %s" %
(self.iterations, delta, " " * 60), file=sys.stderr)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * \
(all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
class progress:
"""
Draw a progess bar while going through the given iterator.
Notes
-----
If the iterator does not support the len() method, you should
supply the maxlen parameter.
Examples
--------
>>> for i in progress(range(10)):
>>> frobnicate(i)
or
>>> for n,i in progressbar(enumerate(range(10)),10)
>>> frobnicate(n, i)
"""
def __init__(self, iterable, maxlen=100):
try:
self.size = len(iterable)
except TypeError:
self.size = maxlen
self.pbar = ProgressBar(self.size + 1)
self.iterator = iter(iterable)
self.count = 0
def __getitem__(self, index):
self.count += 1
self.pbar.animate(self.count)
return next(self.iterator)
| 3,253 |
hinkskalle_api/util/parse_timedelta.py
|
csf-ngs/hinkskalle-api
| 0 |
2025356
|
# deltat.py — Parse a time duration
#
# License unknown, based on work by virhilo and Peter on StackOverflow
# (see https://stackoverflow.com/a/51916936/2445204).
# Modified by <NAME>.
# According to Peter, "any license is fine". Marcel concurs.
#
import re
from datetime import timedelta
regex = re.compile(r'^((?P<weeks>[\.\d]+?)w)? *'
r'((?P<days>[\.\d]+?)d)? *'
r'((?P<hours>[\.\d]+?)h)? *'
r'((?P<minutes>[\.\d]+?)m)? *'
r'((?P<seconds>[\.\d]+?)s?)?$')
def parse_time(time_str: str) -> timedelta:
"""
Parse a time string e.g. '2h 13m' or '1.5d' into a timedelta object.
Based on Peter's answer at https://stackoverflow.com/a/51916936/2445204
and virhilo's answer at https://stackoverflow.com/a/4628148/851699
:param time_str: A string identifying a duration, e.g. '2h13.5m'
:return datetime.timedelta: A datetime.timedelta object
"""
parts = regex.match(time_str)
assert parts is not None, """Could not parse any time information from '{}'.
Examples of valid strings: '8h', '2d 8h 5m 2s', '2m4.3s'""".format(time_str)
time_params = {name: float(param)
for name, param in parts.groupdict().items() if param}
return timedelta(**time_params)
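# A minimal sanity check (illustrative, not part of the library API):
if __name__ == '__main__':
    for s in ('8h', '2d 8h 5m 2s', '2m4.3s'):
        print(s, '->', parse_time(s))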
| 1,290 |
workflow/NumberNode.py
|
YOCKOW/PythonGitHubActionsWorkflowRepresentation
| 0 |
2025953
|
from .string import Lines
from .Node import Node
from .util import yaml_from_number
from numbers import Real, Integral
class NumberNode(Node):
def __init__(self, info: Real):
assert isinstance(info, Real)
self.__number = info
def yaml(self) -> Lines:
return yaml_from_number(self.__number)
class IntegerNode(NumberNode):
def __init__(self, info: Integral):
assert isinstance(info, Integral)
super().__init__(info)
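# Illustrative (assumes yaml_from_number renders plain numeric scalars):
#   NumberNode(3.14).yaml()  -> YAML lines for 3.14
#   IntegerNode(42).yaml()   -> YAML lines for 42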
| 444 |
BuildSimHubAPI/measures/equipment_epd_percent.py
|
ruijis/buildsimhub_python_api
| 19 |
2026800
|
from .model_action import ModelAction
class EquipmentEPDPercent(ModelAction):
def __init__(self):
ModelAction.__init__(self, 'epd_percent')
self._measure_name = 'EPDPercent'
self._lower_limit = 0
self._measure_help = '''
measure name: EPDPercent
Unit: Not required
Minimum: 0.1
Maximum: NA
Type: numeric
This measure will update the power density by percentage in the
ElectricEquipment, GasEquipment, HotWaterEquipment, SteamEquipment,
OtherEquipment.
The percentage is the remaining percentage - e.g. if user input is 0.8,
It means the equipment power density will be 80% of its original level.
'''
| 728 |
setup.py
|
silky/iepy
| 1 |
2026606
|
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from pip.req import parse_requirements
from os import path
HERE = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(HERE, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join(HERE, 'iepy', 'version.txt'), encoding='utf-8') as f:
iepy_version = f.read().strip()
requirements_path = path.join(HERE, "docs", "setup", "requirements-base.txt")
base_reqs = [str(x.req) for x in parse_requirements(requirements_path)]
dev_requirements_path = path.join(HERE, "docs", "setup", "requirements-development.txt")
dev_reqs = [str(x.req) for x in parse_requirements(dev_requirements_path)]
setup(
name='iepy',
version=iepy_version,
zip_safe=False,
description='Information Extraction framework in Python',
long_description=long_description,
url='https://github.com/machinalis/iepy',
# Author details
    author=(
        "<NAME>, <NAME>, <NAME>, "
        "<NAME>, <NAME>"
    ),  # one concatenated string (the stray trailing comma made it a tuple)
# Choose your license
license='BSD',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Information Technology',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='information extraction relation detection',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['docs', 'tests*', 'scripts']),
include_package_data=True,
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=base_reqs,
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
    extras_require={'dev': dev_reqs},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'iepy=iepy.instantiation.command_line:execute_from_command_line',
],
},
)
| 3,261 |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/pavelib/paver_tests/test_stylelint.py
|
osoco/better-ways-of-thinking-about-software
| 3 |
2026593
|
"""
Tests for Paver's Stylelint tasks.
"""
from unittest.mock import MagicMock, patch
import pytest
import ddt
from paver.easy import call_task
from .utils import PaverTestCase
@ddt.ddt
class TestPaverStylelint(PaverTestCase):
"""
Tests for Paver's Stylelint tasks.
"""
@ddt.data(
[0, False],
[99, False],
[100, True],
)
@ddt.unpack
def test_run_stylelint(self, violations_limit, should_pass):
"""
Verify that the quality task fails with Stylelint violations.
"""
_mock_stylelint_violations = MagicMock(return_value=100)
with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations):
if should_pass:
call_task('pavelib.quality.run_stylelint', options={"limit": violations_limit})
else:
with pytest.raises(SystemExit):
call_task('pavelib.quality.run_stylelint', options={"limit": violations_limit})
| 993 |
Windows/setup.py
|
jeena/Bungloo
| 1 |
2025086
|
#!/usr/bin/env python2
import os
from distutils.core import setup
import py2exe
files = []
for dirname, dirnames, filenames in os.walk('WebKit'):
for filename in filenames:
files += [(dirname, [os.path.join(dirname, filename)])]
for dirname, dirnames, filenames in os.walk('images'):
for filename in filenames:
files += [(dirname, [os.path.join(dirname, filename)])]
imageformats = []
for dirname, dirnames, filenames in os.walk('C:\\Python27\\Lib\\site-packages\\PyQt4\\plugins\\imageformats'):
for filename in filenames:
imageformats += [os.path.join(dirname, filename)]
files += [('imageformats', imageformats)]
setup(
name = "Bungloo",
version = "2.0.0",
author = "<NAME>",
author_email = "<EMAIL>",
url = "http://jabs.nu/bungloo",
license = "BSD license",
data_files = files,
windows = [{
'script': "Bungloo.py",
'icon_resources': [(1, 'images/Icon.ico')],
}],
options = {
"py2exe": {
"includes": ["sip", "ssl", "PyQt4.QtCore", "PyQt4.QtGui", "PyQt4.QtNetwork"],
}
}
)
| 1,104 |
src/pytraits/core/base/__init__.py
|
Hc10b/py3traits
| 25 |
2026432
|
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
'''
Copyright 2014-2015 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from pytraits.support import Factory
from .inspectors import TraitSourceInspector, TraitTargetInspector
class TraitFactory(Factory):
""" Factory for core trait objects. """
# TODO: Don't leave this permanent
TraitFactory(override_duplicates=True)
TraitFactory.register(TraitSourceInspector, TraitTargetInspector)
__all__ = ["TraitFactory"]
| 985 |
kbsync.py
|
Nicolas-Reyland/Arachnoid
| 3 |
2026967
|
# Keyboard Synchronization
import arachnoid as ara
#import win32api, win32con
from pynput import keyboard
import os, sys
print('PID: {}'.format(ara.get_pid()))
IP_ADDR = '192.168.1.94'#'43.33'
PORT_NUMBER = 5555
#KB_LAYOUT = win32api.GetKeyboardLayout()
'''
def get_key_strokes(from_=0, to=250):
return list(filter(lambda key: win32api.GetAsyncKeyState(key), range(from_, to)))
def simple_key_press(key):
win32api.keybd_event(key, 0, 0, 0)
ara.time.sleep(.05)
win32api.keybd_event(key, 0, win32con.KEYEVENTF_KEYUP, 0)
def hold_key_press(keys):
# press keys in order (e.g. control + shift + a)
for key in keys:
win32api.keybd_event(key, 0, 0, 0)
ara.time.sleep(.05)
# release keys in reverse order
for key in keys[::-1]:
win32api.keybd_event(key, 0, win32con.KEYEVENTF_KEYUP, 0)
ara.time.sleep(.1)
def change_keyboard_layout(layout):
win32api.LoadKeyboardLayout(layout, 1)
'''
def on_press(key):
global server
if type(key) == keyboard.Key:
key_code = key.value.vk
else:
assert type(key) == keyboard.KeyCode
key_code = key.vk
server.tasks.append(b'<KEY-PRESS %i>' % key_code)
#print('pressed {}'.format(key_code))
def on_release(key):
global server
if key == keyboard.Key.esc:
return False
#key_code = key.vk
#server.tasks.append(b'<KEY-RELEASE %i>' % key_code)
if not server.alive:
return False
def simple_key_press(key_code):
key = keyboard.KeyCode(key_code)
controller.press(key)
def sync_keyboard_client(client):
open('OK Flag.txt', 'w').write('1')
special_values = vars(keyboard.Key)
while client.alive and 'OK Flag.txt' in os.listdir():
if client.tasks:
msg = client.tasks[0]
client.tasks.pop(0)
else:
msg = client.bempty_flag
client.client.sendall(b'<>')
response = client.client.recv(client.max_buffer_size)
if response != client.bempty_flag:
response = response.decode('utf8')
tag = response[1:response.index(' ')]
if tag == 'KEY-PRESS':
key_code = int(response[10:-1])
simple_key_press(key_code)
#print('pressing key {}'.format(key_code))
# elif tag == 'KEY-COMBO': ...
else:
                print('Unknown tag: {}'.format(tag))
def client_side():
client = ara.Spider(ip=IP_ADDR, port=PORT_NUMBER, verbose=1)
client.connect()
thread = ara.Thread(target=client.loop2)
thread.start()
thread2 = ara.Thread(target=sync_keyboard_client, args=(client,))
thread2.start()
input('stop')
thread.join()
thread2.join()
os.system('taskkill /f /pid {}'.format(ara.get_pid()))
def server_side():
global server
server = ara.Web(host=IP_ADDR, port=PORT_NUMBER, verbose=1)
server.tasks = []
server.init()
def read_keyboard_info(server):
if server.tasks:
return server.tasks.pop(0)
else:
            return None
'''
def send_keyboard_info(server): # send mouse info to server
global last_pos
MAX_KEY_CHUNK_SIZE = 30 # one key is defined by a group of n key, up to MAX_KEY_CHUNK_SIZE times the same key
while True:
key_strokes = get_key_strokes()
if key_strokes:
chunk_size = 0
last_key_stroke = key_strokes[0]
server.tasks.append(b'<KEY-PRESS %i>' % last_key_stroke)
print('counted {} times {} key'.format(key_strokes.count(last_key_stroke), last_key_stroke))
for key in key_strokes:
if key == last_key_stroke:
chunk_size += 1
else:
server.tasks.append(b'<KEY-PRESS %i>' % key)
last_key_stroke = key
chunk_size = 0
if chunk_size >= MAX_KEY_CHUNK_SIZE: # >= because if the next one is not last_key_stroke, the ky won't repeat. So, the key repeats only if chunk_size > MAX_KEY_CHUNK_SIZE (next iteration, if key == last_key_stroke)
chunk_size = 0
ara.time.sleep(.01)
'''
def send_keyboard_info():
with keyboard.Listener(
on_press=on_press,
on_release=on_release) as listener:
listener.join()
thread = ara.Thread(target=lambda : server.start(read_f=lambda : read_keyboard_info(server)))
thread2 = ara.Thread(target=send_keyboard_info)
thread.start()
thread2.start()
input('stop')
os.system('taskkill /f /pid {}'.format(ara.get_pid()))
if __name__ == '__main__':
if sys.argv[-1] == 'server':
server_side()
elif sys.argv[-1] == 'client':
controller = keyboard.Controller()
client_side()
else:
        raise ValueError('Unknown value for keyboard sync role...')
| 4,427 |
hypha/apply/funds/migrations/0081_add_screening_statuses_field.py
|
maxpearl/hypha
| 20 |
2026515
|
# Generated by Django 2.2.16 on 2020-11-09 05:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('funds', '0080_add_yes_and_default_fields_to_screening_status'),
]
operations = [
migrations.AddField(
model_name='applicationsubmission',
name='screening_statuses',
field=models.ManyToManyField(blank=True, related_name='submissions', to='funds.ScreeningStatus'),
),
]
| 497 |
machina/apps/forum_tracking/urls.py
|
BrendaH/django-machina
| 572 |
2026263
|
"""
Forum tracking URLs
===================
This module defines URL patterns associated with the django-machina's ``forum_tracking``
application.
"""
from django.urls import path
from machina.core.loading import get_class
from machina.core.urls import URLPatternsFactory
class ForumTrackingURLPatternsFactory(URLPatternsFactory):
    """ Generates the URL patterns of the ``forum_tracking`` application. """
app_namespace = 'forum_tracking'
mark_forums_read_view = get_class('forum_tracking.views', 'MarkForumsReadView')
mark_topics_read_view = get_class('forum_tracking.views', 'MarkTopicsReadView')
unread_topics_view = get_class('forum_tracking.views', 'UnreadTopicsView')
def get_urlpatterns(self):
""" Returns the URL patterns managed by the considered factory / application. """
return [
path(
'mark/forums/',
self.mark_forums_read_view.as_view(),
name='mark_all_forums_read',
),
path(
'mark/forums/<int:pk>/',
self.mark_forums_read_view.as_view(),
name='mark_subforums_read',
),
path(
'mark/forum/<int:pk>/topics/',
self.mark_topics_read_view.as_view(),
name='mark_topics_read',
),
path(
'unread-topics/',
self.unread_topics_view.as_view(),
name='unread_topics',
),
]
urlpatterns_factory = ForumTrackingURLPatternsFactory()
| 1,597 |
winguhub/contacts/models.py
|
movicha/winguhub
| 0 |
2026790
|
# encoding: utf-8
from django import forms
from django.db import models
from django.forms import ModelForm
from django.utils.translation import ugettext as _
from settings import CONTACT_EMAIL_LENGTH
class ContactManager(models.Manager):
def get_contacts_by_user(self, user_email):
"""Get a user's contacts.
"""
return super(ContactManager, self).filter(user_email=user_email)
def get_contact_by_user(self, user_email, contact_email):
"""Return a certern contact of ``user_email``.
"""
try:
c = super(ContactManager, self).get(user_email=user_email,
contact_email=contact_email)
except Contact.DoesNotExist:
c = None
return c
def get_registered_contacts_by_user(self, user_email):
"""Get a user's registered contacts.
"""
from winguhub.views import is_registered_user
return [ c for c in super(ContactManager, self).filter(
user_email=user_email) if is_registered_user(c.contact_email) ]
class Contact(models.Model):
"""Record user's contacts."""
user_email = models.CharField(max_length=CONTACT_EMAIL_LENGTH, db_index=True)
contact_email = models.CharField(max_length=CONTACT_EMAIL_LENGTH)
contact_name = models.CharField(max_length=255, blank=True, null=True, \
default='')
note = models.CharField(max_length=255, blank=True, null=True, default='')
objects = ContactManager()
def __unicode__(self):
return self.contact_email
# class Meta:
# unique_together = ("user_email", "contact_email")
class ContactAddForm(ModelForm):
class Meta:
model = Contact
def clean(self):
if not 'contact_email' in self.cleaned_data:
raise forms.ValidationError(_('Email is required.'))
user_email = self.cleaned_data['user_email']
contact_email = self.cleaned_data['contact_email']
if user_email == contact_email:
raise forms.ValidationError(_("You can't add yourself."))
elif Contact.objects.filter(user_email=user_email,
contact_email=contact_email).count() > 0:
raise forms.ValidationError(_("It is already your contact."))
else:
return self.cleaned_data
class ContactEditForm(ModelForm):
class Meta:
model = Contact
def __init__(self, *args, **kwargs):
super(ContactEditForm, self).__init__(*args, **kwargs)
self.fields['contact_email'].widget.attrs['readonly'] = True
def clean(self):
# This is used to override unique index check
return self.cleaned_data
| 2,768 |
Codigos-python/Ex6-RaizQuadrada.py
|
franciscocleiton/Meus-codigos-de-exercicios
| 1 |
2026508
|
n1 = int(input("Digite um número: "))
d = n1*2
t = n1*3
r = pow(n1, 1/2)
print("Número: {}".format(n1))
print("Seu dobro: {}".format(d))
print("Seu triplo: {}".format(t))
print("Raiz Quadrada: {}".format(r))
| 208 |
web/ask_server.py
|
pieroit/python-base
| 0 |
2026934
|
import requests
payload = {
'text': 'Hello this movie sucks'
}
r = requests.post('http://localhost:5000', json=payload)
print(r.content)
| 142 |
Diagnostics/main.py
|
heyrict/exam
| 2 |
2023863
|
#!/usr/bin/env python3
import sys
from exam import *
sys.path.insert(0, '..')
class QuestFormTextLoaderDiagnostics(QuestFormTextLoader):
def load(self):
qf = self.get_cached_qf()
        if qf is not None: return qf
filelist = sorted([
i for i in os.listdir()
            if re.search(r'\.md$', i) or re.search(r'\.txt', i)
])
for i in range(len(filelist)):
print(i, filelist[i])
no = InteractiveAnswer(
'Which one to choose?',
verify=range(len(filelist)),
serializer=lambda x: [int(i) for i in split_wrd(x, list(' ,,、'))]
).get()
        if type(no) == int:
            no = [no]
qf = QuestForm()
for i in no:
with open(filelist[i]) as f:
qf.extend(self._load(f.read(), filename=filelist[i]))
return qf
def _load(self, queststr, filename=None):
questform = QuestForm()
for quest in re.findall(self.questpattern, queststr):
qitem = re.findall(self.qpattern, quest)
selitem = re.findall(self.selpattern,
quest) if self.selpattern else None
taitem = re.findall(self.tapattern,
quest) if self.tapattern else None
argitem = [(patnam, re.findall(self.argpattern[patnam], quest)) \
for patnam in self.argpattern] if self.argpattern else {}
argitem['chapter'] = filename
questform = questform.append(
Quest(q=qitem, sel=selitem, ta=taitem, args=argitem))
return questform
class BeginDiagnosticsQuestForm(BeginQuestForm):
def check_ans(self, ans, quest, **kwargs):
if not quest.ta:
self.qf[kwargs['qid']].ta = ans
return 'getans'
print('True Answer:', ''.join(quest.ta))
if ans == 'pass':
print(colorit('Roger!', 'magenta'))
return 'pass'
elif ans == 'drop':
print(colorit('Roger!', 'magenta'))
return True
elif set(list(split_wrd(ans.upper(), list(', ,、'), ''))) == set(
list(''.join(quest.ta).upper())):
print(colorit('Same!', 'green'))
return True
else:
print(colorit('NotTheSame!', 'lightred'))
return False
def raise_q(self, quest, **kwargs):
if quest.ta is None:
print(colorit("No True Answer", "red"))
super().raise_q(quest, **kwargs)
def raise_ta(self, quest, **kwargs):
return
def main():
qf = QuestFormTextLoaderDiagnostics(
questpattern=r'\[[ABCDE]\]+\d+\.[\s\S]+?\n(?=\n)',
qpattern=r'(?=\d+\.).*',
selpattern=r'[ABCDE]\.[\s\S]+?(?=\n)',
tapattern=r'(?<=\[)[ABCDE]+(?=\])').load()
BeginDiagnosticsQuestForm(
qf,
input_manner=InteractiveAnswer("Your Answer:"),
arrange="qst",
storage='l|w').start()
if __name__ == '__main__':
main()
| 3,143 |
frigg/urls.py
|
gitter-badger/frigg
| 0 |
2026447
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', 'frigg.builds.views.overview'),
url(r'^build/(?P<build_id>\d+)/$', 'frigg.builds.views.build', name="view_build"),
url(r'^deploy/(?P<build_id>\d+)/$', 'frigg.builds.views.deploy_master_branch',
name="deploy_master_branch"),
url(r'^github-webhook/', 'frigg.builds.views.github_webhook'),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'},
name='login'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login', name='logout'),
)
| 731 |
backend/tests/stats/test_views.py
|
flaviogf/match_games
| 0 |
2026280
|
class TestStats:
def test_should_return_status_200(self, client):
response = client.get('/api/v1/stats')
assert 200 == response.status_code
def test_should_return_games_count(self, client):
response = client.get('/api/v1/stats')
games = response.json['data']['games']
assert isinstance(games, int)
def test_should_return_stores_count(self, client):
response = client.get('/api/v1/stats')
stores = response.json['data']['stores']
assert isinstance(stores, int)
def test_should_return_users_count(self, client):
response = client.get('/api/v1/stats')
users = response.json['data']['users']
assert isinstance(users, int)
| 732 |
base/log.py
|
thu-spmi/semi-EBM
| 2 |
2024842
|
import numpy as np
def to_str(v, fmt='{:.3f}'):
if isinstance(v, (int, np.int32, np.int64)):
return str(v)
elif isinstance(v, (float, np.float32, np.float64)):
if np.abs(v) < 1e3 and np.abs(v) > 1e-3:
return fmt.format(v)
else:
return '{:.3e}'.format(v)
elif isinstance(v, list) or isinstance(v, tuple):
return '[' + ','.join([fmt.format(i) for i in v]) + ']'
elif isinstance(v, np.ndarray):
if v.ndim == 0:
return fmt.format(float(v))
else:
return '[' + ','.join([fmt.format(i) for i in v.flatten()]) + ']'
else:
return str(v)
def print_line(info, end=' ', skip_none=True):
for name, v in info.items():
if skip_none and v is None:
continue
print(name + '=' + to_str(v), end=end, flush=True)
def write_array(fp, a, name='array', form='{:<10.3f}'):
"""
write to file a 1-d array. The results is:
name= 1, 2, 3, 4, 5, 6
Returns:
"""
a = np.reshape(a, [-1])
fp.write(name + '=' + ' '.join([form.format(i) for i in a]) + '\n')
fp.flush()
def write_seq(fp, a):
fp.write(' '.join(str(x) for x in a) + '\n')
fp.flush()
def write_seq_to_file(seq_list, fname):
with open(fname, 'wt') as f:
for a in seq_list:
write_seq(f, a)
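# Usage sketch (illustrative values, not part of the original module):
if __name__ == '__main__':
    print_line({'epoch': 3, 'loss': 0.123456, 'lr': None})  # -> epoch=3 loss=0.123
    print()
    print(to_str(np.array([1.0, 2.5])))  # -> [1.000,2.500]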
| 1,405 |
src/aws_encryption_sdk/internal/arn.py
|
farleyb-amazon/aws-encryption-sdk-python
| 95 |
2026563
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Utility class for processing Amazon Resource Names (ARNs)"""
from aws_encryption_sdk.exceptions import MalformedArnError
class Arn(object):
"""Arn to identify AWS resources. See https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
for details.
:param str partition: The AWS partition of the resource, e.g. 'aws'
:param str service: The service of the resource, e.g. 'kms'
:param str region: The region to which the resource belongs, e.g. 'us-east-1'
:param str account_id: The account containing the resource, e.g. '123456789012'
:param str resource_type: The type of the resource, e.g. 'key'
:param resource_id: The id for the resource, e.g. 'aaaaaaaa-1111-2222-3333-bbbbbbbbbbbb'
"""
def __init__(self, partition, service, region, account_id, resource_type, resource_id):
"""Initializes an ARN with all required fields."""
self.partition = partition
self.service = service
self.region = region
self.account_id = account_id
self.resource_type = resource_type
self.resource_id = resource_id
def to_string(self):
"""Returns the string format of the ARN."""
return ":".join(
[
"arn",
self.partition,
self.service,
self.region,
self.account_id,
"/".join([self.resource_type, self.resource_id]),
]
)
def indicates_multi_region_key(self):
"""Returns True if this ARN indicates a multi-region key, otherwise False"""
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.8
# //# If resource type is "alias", this is an AWS KMS alias ARN and MUST
# //# return false.
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.8
# //# If resource type is "key" and resource ID does not start with "mrk-",
# //# this is a (single-region) AWS KMS key ARN and MUST return false.
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.8
# //# If resource type is "key" and resource ID starts with
# //# "mrk-", this is a AWS KMS multi-Region key ARN and MUST return true.
return self.resource_type == "key" and self.resource_id.startswith("mrk-")
def is_valid_mrk_arn_str(arn_str):
"""Determines whether a string can be interpreted as
a valid MRK ARN
:param str arn_str: The string to parse.
:returns: a bool representing whether this key ARN indicates an MRK
:rtype: bool
:raises MalformedArnError: if the string fails to parse as an ARN
"""
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.8
# //# This function MUST take a single AWS KMS ARN
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.8
# //# If the input is an invalid AWS KMS ARN this function MUST error.
arn = arn_from_str(arn_str)
return arn.indicates_multi_region_key()
def is_valid_mrk_identifier(id_str):
"""Determines whether a string can be interpreted as
a valid MRK identifier; either an MRK arn or a raw resource ID for an MRK.
:param str id_str: The string to parse.
:returns: a bool representing whether this key identifier indicates an MRK
:rtype: bool
:raises MalformedArnError: if the string starts with "arn:" but fails to parse as an ARN
"""
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.9
# //# This function MUST take a single AWS KMS identifier
if id_str.startswith("arn:"):
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.9
# //# If the input starts with "arn:", this MUST return the output of
# //# identifying an an AWS KMS multi-Region ARN (aws-kms-key-
# //# arn.md#identifying-an-an-aws-kms-multi-region-arn) called with this
# //# input.
return is_valid_mrk_arn_str(id_str)
elif id_str.startswith("alias/"):
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.9
# //# If the input starts with "alias/", this an AWS KMS alias and not a
# //# multi-Region key id and MUST return false.
return False
elif id_str.startswith("mrk-"):
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.9
# //# If the input starts with "mrk-", this is a multi-Region key id and
# //# MUST return true.
return True
else:
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.9
# //# If
# //# the input does not start with any of the above, this is a multi-
# //# Region key id and MUST return false.
return False
def arn_from_str(arn_str): # noqa: C901
"""Parses an input string as an ARN.
:param str arn_str: The string to parse.
:returns: An ARN object representing the input string.
:rtype: aws_encryption_sdk.internal.arn.Arn
:raises MalformedArnError: if the string cannot be parsed as an ARN.
"""
elements = arn_str.split(":", 5)
try:
if elements[0] != "arn":
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.5
# //# MUST start with string "arn"
raise MalformedArnError("Missing 'arn' string")
partition = elements[1]
service = elements[2]
region = elements[3]
account = elements[4]
if not partition:
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.5
# //# The partition MUST be a non-empty
raise MalformedArnError("Missing partition")
if not account:
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.5
# //# The account MUST be a non-empty string
raise MalformedArnError("Missing account")
if not region:
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.5
# //# The region MUST be a non-empty string
raise MalformedArnError("Missing region")
if service != "kms":
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.5
# //# The service MUST be the string "kms"
raise MalformedArnError("Unknown service")
resource = elements[5]
if not resource:
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.5
# //# The resource section MUST be non-empty and MUST be split by a
# //# single "/" any additional "/" are included in the resource id
raise MalformedArnError("Missing resource")
resource_elements = resource.split("/", 1)
resource_type = resource_elements[0]
resource_id = resource_elements[1]
if resource_type not in ("alias", "key"):
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.5
# //# The resource type MUST be either "alias" or "key"
raise MalformedArnError("Unknown resource type")
if not resource_id:
# //= compliance/framework/aws-kms/aws-kms-key-arn.txt#2.5
# //# The resource id MUST be a non-empty string
raise MalformedArnError("Missing resource id")
return Arn(partition, service, region, account, resource_type, resource_id)
except (IndexError, MalformedArnError) as exc:
raise MalformedArnError("Resource {} could not be parsed as an ARN: {}".format(arn_str, exc.args[0]))
| 7,949 |
pollsapi/polls/urls.py
|
olegush/polls_api_test
| 0 |
2026970
|
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register(r'questions', views.PollViewSet, basename='polls')
router.register(r'votes', views.VoteViewSet, basename='votes')
urlpatterns = router.urls
| 245 |
Python3/Python3_Lesson07/src/animal_farm.py
|
ceeblet/OST_PythonCertificationTrack
| 0 |
2027001
|
class Animal(object):
def __init__(self, name):
self.name = name
def sound(self):
raise NotImplementedError("Animals need a sound method")
def has_wings(self):
return False
class Pig(Animal):
def sound(self):
return "oink!"
class Dog(Animal):
def sound(self):
return "woof!"
class Chicken(Animal):
def sound(self):
return "bok bok!"
def has_wings(self):
return True
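# Usage sketch (illustrative names): every subclass answers the shared Animal
# interface, so callers can treat them uniformly.
if __name__ == '__main__':
    for animal in (Pig('Napoleon'), Dog('Jessie'), Chicken('Clucker')):
        print(animal.name, animal.sound(), animal.has_wings())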
| 470 |
helloWorld/helloWorldApp/migrations/0017_suggestion_title.py
|
jcheon/reddit_clone
| 4 |
2025423
|
# Generated by Django 2.2.5 on 2019-12-03 18:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('helloWorldApp', '0016_auto_20191126_0619'),
]
operations = [
migrations.AddField(
model_name='suggestion',
name='title',
field=models.CharField(blank=True, max_length=240),
),
]
| 406 |
project_opensource/coco_csv_voc_format/txt2coco.py
|
yunshangyue71/mycodes
| 0 |
2026903
|
# -*- coding: utf-8 -*-
'''
@time: 2019/01/11 11:28
spytensor
'''
import os
import json
import numpy as np
import pandas as pd
import glob
import cv2
import os
import shutil
from IPython import embed
from sklearn.model_selection import train_test_split
np.random.seed(41)
# 0 is background
# classname_to_id = {"person": 1,
# "1":1, "2":2,"3":3,"4":4,"5":5,"6":6,"7":7,"8":8,"9":9}
class Txt2CoCo:
    # image_dir: directory holding the images
    # total_annos: annotations for every image (everything except the name)
    # classNameAndClassId: {"className0": int classId, "background": "0", ...}
def __init__(self,image_dir,total_annos, classNameAndClassId):
        self.images = []  # info for every image: resolution, etc.
        self.annotations = []  # every label: bbox, label, seg, etc.
        self.categories = []  # every category
        self.img_id = 0  # id of each image in the coco json
        self.ann_id = 0  # id of each annotation in the coco json; one image may have several
self.image_dir = image_dir
self.total_annos = total_annos
self.classNameAndClassId = classNameAndClassId
    # Build COCO from the txt files
    # imgNames: names of all the images
def to_coco(self, imgNames):
self._init_categories()
for imgName in imgNames:
self.images.append(self._image(imgName))
imgAnnos = self.total_annos[imgName]
for imgAnno in imgAnnos:
# print(imgAnno)
bboxi = []
for cor in imgAnno[:4]:
bboxi.append(int(cor))
label = int(imgAnno[4])
if str(label) not in self.classNameAndClassId:
continue
annotation = self._annotation(bboxi,label)
annotation["score"] = imgAnno[5]
self.annotations.append(annotation)
self.ann_id += 1
self.img_id += 1
        # the annotation dict for the entire dataset
instance = {}
instance['info'] = {"year":"","version":"",
"description":"","contributor":"",
"url":"","data_created":""} #这个一般用不到
instance['license'] = {"id":"","name":"", "url":""}#这个一般也用不到
instance['images'] = self.images
instance['annotations'] = self.annotations
instance['categories'] = self.categories
return instance
    # Build the categories
def _init_categories(self):
for k, v in self.classNameAndClassId.items():
category = {}
category['id'] = v
category['name'] = k
self.categories.append(category)
print(self.categories)
    # Build the COCO image field
def _image(self, imgName):
image = {}
img = cv2.imread(self.image_dir + imgName)
#img = cv2.imread(path)
image['height'] = img.shape[0]
image['width'] = img.shape[1]
image['id'] = self.img_id
image['file_name'] = imgName
return image
    # Build the COCO annotation field
def _annotation(self, imgAnno,label):
# label = shape[-1]
points = imgAnno[:4]
annotation = {}
annotation['id'] = self.ann_id
annotation['image_id'] = self.img_id
label = str(label)# TODO qdw
annotation['category_id'] = int(self.classNameAndClassId[label])
annotation['segmentation'] = self._get_seg(points)
annotation['bbox'] = self._get_box(points)
annotation['iscrowd'] = 0
annotation['area'] = self._get_area(points)
return annotation
    # COCO bbox format: [x1, y1, w, h]
def _get_box(self, points):
min_x = points[0]
min_y = points[1]
max_x = points[2]
max_y = points[3]
return [min_x, min_y, max_x - min_x, max_y - min_y]
    # Compute the area
def _get_area(self, points):
min_x = points[0]
min_y = points[1]
max_x = points[2]
max_y = points[3]
return (max_x - min_x+1) * (max_y - min_y+1)
# segmentation
def _get_seg(self, points):
a = []
b = points[4:]
a.append(b)
return a
def save_coco_json(self, instance, save_path):
        json.dump(instance, open(save_path, 'w'), ensure_ascii=False, indent=2)  # indent=2 for prettier output
if __name__ == '__main__':
"""需要提前设置的参数"""
#txt 格式位置
# xmin, ymin, w, h, cls
# xmin, ymin, w, h, cls
#coco格式 xmin,ymin,w, h
#img copy or not
imgCopyFlag = False
    # directory holding all the images
image_dir = "/media/q/data/datasets/VOC/VOC2007_test/JPEGImages/"
    # txt annotation directory
txtDir = "/media/q/data/datasets/VOC/VOC2007_test/format_me/Main/person/result/"
    # where to store the converted COCO format
saved_coco_path = "/media/q/data/datasets/VOC/VOC2007_test/format_me/Main/person/"
    # ratio used to split the whole dataset into train and test sets
testSize = 0.0
    # whether to shuffle the whole dataset
shuffle = False
    # classNameAndClassId: {"className0": int classId, "background": "0", ...}
    # "className0" is the class name as it appears in the csv file
classNameAndClassId = {"0" : 0, "1":1, "2":2,"3":3,
"4" : 4, "5":5,"6":6,
"7" : 7,"8":8,"9":9}
"""END"""
    # Consolidate the csv-format annotation files
    # {imgName: [[anno0], [anno1]]}
total_annotations = {}
txtnames = os.listdir(txtDir)
for i in range(len(txtnames)):
info = np.loadtxt(txtDir + txtnames[i])
info = info.reshape((-1, 6))
imgname = txtnames[i].split('.')[0]+".jpg"
info[:, 2:4] +=info[:,:2]
total_annotations[imgname] = info
    # all the image names
imgNamesTotal = list(total_annotations.keys())
    # split the image names into train and validation sets
imgNamesTrain, imgNamesVal = train_test_split(imgNamesTotal, test_size=testSize, shuffle = shuffle)
print("train_n:", len(imgNamesTrain), 'val_n:', len(imgNamesVal))
    # create the required directories
if not os.path.exists('%scoco/annotations/'%saved_coco_path):
os.makedirs('%scoco/annotations/'%saved_coco_path)
if not os.path.exists('%scoco/images/train2017/'%saved_coco_path):
os.makedirs('%scoco/images/train2017/'%saved_coco_path)
if not os.path.exists('%scoco/images/val2017/'%saved_coco_path):
os.makedirs('%scoco/images/val2017/'%saved_coco_path)
#copy images
if imgCopyFlag:
for file in imgNamesTrain:
shutil.copy(image_dir+file,"%scoco/images/train2017/"%saved_coco_path)
for file in imgNamesVal:
shutil.copy(image_dir+file,"%scoco/images/val2017/"%saved_coco_path)
    # convert the training set to COCO json format
l2c_train = Txt2CoCo(image_dir=image_dir,total_annos=total_annotations, classNameAndClassId=classNameAndClassId)
train_instance = l2c_train.to_coco(imgNamesTrain)
    l2c_train.save_coco_json(train_instance, '%scoco/annotations/instances_train2017.json'%saved_coco_path)
    # convert the validation set to COCO json format
l2c_val = Txt2CoCo(image_dir=image_dir,total_annos=total_annotations, classNameAndClassId=classNameAndClassId)
val_instance = l2c_val.to_coco(imgNamesVal)
l2c_val.save_coco_json(val_instance, '%scoco/annotations/instances_val2017.json'%saved_coco_path)
| 6,902 |
UnityPy/classes/Texture.py
|
hydrargyrum/UnityPy
| 0 |
2025640
|
from .NamedObject import NamedObject
class Texture(NamedObject):
def __init__(self, reader):
super().__init__(reader=reader)
if self.version[0] > 2017 or (self.version[0] == 2017 and self.version[1] >= 3): # 2017.3 and up
self.forced_fallback_format = reader.read_int()
self.downscale_fallback = reader.read_boolean()
reader.align_stream()
| 357 |
runway_model.py
|
dvschultz/runwayml-lucid-resnet50
| 2 |
2026856
|
# Import the Runway SDK. Please install it first with
# `pip install runway-python`.
import runway
from runway.data_types import category, image, number, boolean
import numpy as np
import tensorflow as tf
from PIL import Image
# from example_model import ExampleModel
import lucid.modelzoo.vision_models as models
from lucid.misc.io import show
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform
setup_options = {
"network": category(choices=["ResnetV2_50_slim"], default="ResnetV2_50_slim")
}
@runway.setup(options=setup_options)
def setup(opts):
msg = '[SETUP] Ran with options: network = {}'
print(msg.format(opts['network']))
model = models.ResnetV2_50_slim()
model.load_graphdef()
return model
input_options = {
'layer': category(choices=["block1/unit_1/bottleneck_v2/preact/Relu (max:64)",
"block1/unit_1/bottleneck_v2/add (max:256)",
"block1/unit_2/bottleneck_v2/add (max:256)",
"block1/unit_3/bottleneck_v2/add (max:256)",
"block2/unit_1/bottleneck_v2/preact/Relu (max:64)",
"block2/unit_1/bottleneck_v2/add (max:512)",
"block2/unit_2/bottleneck_v2/add (max:512)",
"block2/unit_3/bottleneck_v2/add (max:512)",
"block2/unit_4/bottleneck_v2/add (max:512)",
"block3/unit_1/bottleneck_v2/preact/Relu (max:512)",
"block3/unit_1/bottleneck_v2/add (max:1024)",
"block3/unit_2/bottleneck_v2/add (max:1024)",
"block3/unit_3/bottleneck_v2/add (max:1024)",
"block3/unit_4/bottleneck_v2/add (max:1024)",
"block3/unit_5/bottleneck_v2/add (max:1024)",
"block3/unit_6/bottleneck_v2/add (max:1024)",
"block4/unit_1/bottleneck_v2/preact/Relu (max:1024)",
"block4/unit_1/bottleneck_v2/add (max:2048)",
"block4/unit_2/bottleneck_v2/add (max:2048)",
"block4/unit_3/bottleneck_v2/add (max:2048)",
"postnorm/Relu (max:2048)"
], default="block3/unit_1/bottleneck_v2/preact/Relu (max:512)", description='choose layer of network to visualize'),
'neuron': number(default=0, min=0, max=2047, step=1, description='Neuron ID'),
'size': number(default=128, min=128, max=1024, step=128, description='Image Size'),
'transforms': boolean(default=False, description='Vary size of visualization'),
'transform_min': number(default=0.3, min=0.0, max=1.0, step=.1, description='Minimum scaling amount'),
'transform_max': number(default=0.5, min=0.0, max=1.0, step=.1, description='Maximum scaling amount')
}
@runway.command(name='generate',
inputs=input_options,
outputs={ 'image': image() },
description='Use Lucid to visualize the layers and neurons of a specific ML network.')
def generate(model, args):
print('[GENERATE] Ran with layer {} and neuron {}'.format(args['layer'], args['neuron']))
layer_id = args['layer'].split(' ')[0]
layer_neuron = 'resnet_v2_50/{}:{}'.format(layer_id, args['neuron'])
print(layer_neuron)
s = int(args['size'])
min_scale = args['transform_min']
max_scale = args['transform_max']
scale_offset = (max_scale - min_scale) * 10
# https://github.com/tensorflow/lucid/issues/148
with tf.Graph().as_default() as graph, tf.Session() as sess:
t_img = param.image(s)
crop_W = int(s/2)
t_offset = tf.random.uniform((2,), 0, s - crop_W, dtype="int32")
t_img_crop = t_img[:, t_offset[0]:t_offset[0]+crop_W, t_offset[1]:t_offset[1]+crop_W]
if(args['transforms']):
transforms=[transform.jitter(2),
transform.random_scale([min_scale + n/10. for n in range(20)]),
transform.random_rotate(range(-10,11)),
transform.jitter(2)]
T = render.make_vis_T(model, layer_neuron, t_img_crop, transforms=transforms)
else:
T = render.make_vis_T(model, layer_neuron, t_img_crop)
tf.initialize_all_variables().run()
for i in range(1024):
T("vis_op").run()
img = t_img.eval()[0]
# https://github.com/tensorflow/lucid/issues/108
# img = render.render_vis(model, layer_neuron)[-1][0]
img = Image.fromarray(np.uint8(img*255))
return {
'image': img
}
if __name__ == '__main__':
# run the model server using the default network interface and ports,
# displayed here for convenience
runway.run(host='0.0.0.0', port=8000)
| 4,506 |
python/drned_xmnr/op/filtering/filtering.py
|
micnovak/drned-xmnr
| 7 |
2026782
|
import sys
from contextlib import closing
from .cort import filter_sink
from .events import EventGenerator, InitialPrepareEvent
from .states import TransitionEventContext, LogStateMachine, TransitionTestState, \
run_event_machine, ExploreState, WalkState
def transition_output_filter(level, sink, context=None):
machine = LogStateMachine(level, TransitionTestState(), context)
return run_event_machine(machine, sink)
def explore_output_filter(level, sink, context=None):
machine = LogStateMachine(level, ExploreState(), context)
return run_event_machine(machine, sink)
def walk_output_filter(level, sink, context=None):
machine = LogStateMachine(level, WalkState(), context)
handler = run_event_machine(machine, sink)
handler.send(InitialPrepareEvent())
return handler
def run_test_filter(outfilter, filename, level='drned-overview', out=sys.stdout):
'''
Testing and experimenting utility. Can be used as
filtering.run_test_filter(filtering.transition_output_filter, "data.txt")
'''
sink = filter_sink(out.write)
ctx = TransitionEventContext()
lines = outfilter(level, sink, ctx)
evts = EventGenerator(lines)
with closing(ctx):
with closing(evts):
with open(filename) as data:
for line in data:
ln = line.strip()
if ln:
evts.send(ln)
return ctx
| 1,435 |
scripts/python/bus_line_geojson.py
|
grvl/grvl.github.io
| 0 |
2027161
|
"""bus_line_scorer.py
Scores each district based on bus lines.
"""
import json
import geojson
from math import log
from shapely.geometry import Point, LineString, asShape
from sp_districts import get_districts, is_line_in_district
_INPUT_FILE = 'data/bus_lines_accessibility.json'
_OUTPUT_FILE = 'data/bus_lines_geo.json'
def get_bus_lines():
"""Returns an object with raw bus lines data.
"""
with open(_INPUT_FILE, 'r') as f:
bus_lines_json = json.load(f)
for bus_line in bus_lines_json:
# Transforms coordinates to GeoJson standard.
bus_line['shape'] = LineString(map(lambda pair: (pair['lng'], pair['lat']),
bus_line['shape']))
zonas_json = json.loads("""
{"type": "FeatureCollection", "features": []}
""")
feature = []
for i in range(0, len (bus_lines_json)):
feature.append( geojson.Feature(geometry = bus_lines_json[i]['shape'], properties = {'route_id':bus_lines_json[i]['route_id'],'accessibility_score':bus_lines_json[i]['accessibility_score']}))
zonas_json['features'] = feature
with open(_OUTPUT_FILE, 'w', newline="") as f:
f.write(json.dumps(zonas_json))
if __name__ == '__main__':
get_bus_lines()
| 1,249 |
pgxnclient/tests/test_semver.py
|
intgr/pgxnclient
| 1 |
2026978
|
from pgxnclient.tests import unittest
from pgxnclient import SemVer
class SemVerTestCase(unittest.TestCase):
def test_ok(self):
for s in [
'1.2.2',
'0.2.2',
'1.2.2',
'0.0.0',
'0.1.999',
'9999.9999999.823823',
'1.0.0beta1',
'1.0.0beta2',
'1.0.0',
'20110204.0.0', ]:
self.assertEqual(SemVer(s), s)
def test_bad(self):
def ar(s):
try: SemVer(s)
except ValueError: pass
else: self.fail("ValueError not raised: '%s'" % s)
for s in [
'1.2',
'1.2.02',
'1.2.2-',
'1.2.3b#5',
'03.3.3',
'v1.2.2',
'1.3b',
'1.4b.0',
'1v',
'1v.2.2v',
'1.2.4b.5', ]:
ar(s)
def test_eq(self):
for s1, s2 in [
('1.2.2', '1.2.2'),
('1.2.23', '1.2.23'),
('0.0.0', '0.0.0'),
('999.888.7777', '999.888.7777'),
('0.1.2beta3', '0.1.2beta3'),
('1.0.0rc-1', '1.0.0RC-1'), ]:
self.assertEqual(SemVer(s1), SemVer(s2))
self.assertEqual(hash(SemVer(s1)), hash(SemVer(s2)))
self.assert_(SemVer(s1) <= SemVer(s2),
"%s <= %s failed" % (s1, s2))
self.assert_(SemVer(s1) >= SemVer(s2),
"%s >= %s failed" % (s1, s2))
def test_ne(self):
for s1, s2 in [
('1.2.2', '1.2.3'),
('0.0.1', '1.0.0'),
('1.0.1', '1.1.0'),
('1.1.1', '1.1.0'),
('1.2.3b', '1.2.3'),
('1.2.3', '1.2.3b'),
('1.2.3a', '1.2.3b'),
('1.2.3aaaaaaa1', '1.2.3aaaaaaa2'), ]:
self.assertNotEqual(SemVer(s1), SemVer(s2))
self.assertNotEqual(hash(SemVer(s1)), hash(SemVer(s2)))
def test_dis(self):
for s1, s2 in [
('2.2.2', '1.1.1'),
('2.2.2', '2.1.1'),
('2.2.2', '2.2.1'),
('2.2.2b', '2.2.1'),
('2.2.2', '2.2.2b'),
('2.2.2c', '2.2.2b'),
('2.2.2rc-2', '2.2.2RC-1'),
('0.9.10', '0.9.9'), ]:
self.assert_(SemVer(s1) >= SemVer(s2),
"%s >= %s failed" % (s1, s2))
self.assert_(SemVer(s1) > SemVer(s2),
"%s > %s failed" % (s1, s2))
self.assert_(SemVer(s2) <= SemVer(s1),
"%s <= %s failed" % (s2, s1))
self.assert_(SemVer(s2) < SemVer(s1),
"%s < %s failed" % (s2, s1))
def test_clean(self):
for s1, s2 in [
('1.2.2', '1.2.2'),
('01.2.2', '1.2.2'),
('1.02.2', '1.2.2'),
('1.2.02', '1.2.2'),
('1.2.02b', '1.2.2b'),
('1.2.02beta-3 ', '1.2.2beta-3'),
('1.02.02rc1', '1.2.2rc1'),
('1.0', '1.0.0'),
('1', '1.0.0'),
('.0.02', '0.0.2'),
('1..02', '1.0.2'),
('1..', '1.0.0'),
('1.1', '1.1.0'),
('1.2.b1', '1.2.0b1'),
('9.0beta4', '9.0.0beta4'), # PostgreSQL format.
('9b', '9.0.0b'),
('rc1', '0.0.0rc1'),
('', '0.0.0'),
('..2', '0.0.2'),
('1.2.3 a', '1.2.3a'),
('..2 b', '0.0.2b'),
(' 012.2.2', '12.2.2'),
('20110204', '20110204.0.0'), ]:
self.assertEqual(SemVer.clean(s1), SemVer(s2))
def test_cant_clean(self):
def ar(s):
try: SemVer.clean(s)
except ValueError: pass
else: self.fail("ValueError not raised: '%s'" % s)
for s in [
'1.2.0 beta 4',
'1.2.2-',
'1.2.3b#5',
'v1.2.2',
'1.4b.0',
'1v.2.2v',
'1.2.4b.5',
'1.2.3.4',
'1.2.3 4',
'1.2000000000000000.3.4',]:
ar(s)
if __name__ == '__main__':
unittest.main()
| 4,240 |
InvenTree/part/migrations/0058_remove_partsellpricebreak_cost.py
|
ArakniD/InvenTree
| 656 |
2027144
|
# Generated by Django 3.0.7 on 2020-11-10 11:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('part', '0057_remove_partsellpricebreak_currency'),
]
operations = [
migrations.RemoveField(
model_name='partsellpricebreak',
name='cost',
),
]
| 351 |
eda5/veljavnostdokumentov/forms.py
|
vasjapavlovic/eda5
| 0 |
2024652
|
from functools import partial
from django import forms
from django.utils import timezone
from .models import VeljavnostDokumenta
from eda5.narocila.models import Narocilo
from eda5.planiranje.models import PlaniranoOpravilo
DateInput = partial(forms.DateInput, {'class': 'datepicker'})
TimeInput = partial(forms.TimeInput, {'class': 'timepicker'})
class VeljavnostDokumentaCreateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(VeljavnostDokumentaCreateForm, self).__init__(*args, **kwargs)
self.fields['planirano_opravilo'].queryset = PlaniranoOpravilo.objects.filter(is_active=True)
self.fields['narocilo'].queryset = Narocilo.objects.filter(datum_veljavnosti__gte=timezone.now()).order_by('-id')
class Meta:
model = VeljavnostDokumenta
fields = (
'stavba',
'narocilo',
'planirano_opravilo',
# vrsta_stroska --> eda5:racunovodstvo:vrsta_stroska --> izbiraForm
'velja_od',
'velja_do',
)
widgets = {
'velja_od': DateInput(),
'velja_do': DateInput(),
}
class VeljavnostDokumentaUpdateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(VeljavnostDokumentaUpdateForm, self).__init__(*args, **kwargs)
self.fields['planirano_opravilo'].queryset = PlaniranoOpravilo.objects.filter(is_active=True)
self.fields['narocilo'].queryset = Narocilo.objects.filter(datum_veljavnosti__gte=timezone.now()).order_by('-id')
class Meta:
model = VeljavnostDokumenta
fields = (
'is_active',
'stavba',
'narocilo',
'vrsta_stroska',
'planirano_opravilo',
'velja_od',
'velja_do',
)
widgets = {
'velja_od': DateInput(),
'velja_do': DateInput(),
}
| 1,917 |
tcga_encoder/models/vae/batcher_dna_out.py
|
tedmeeds/tcga_encoder
| 2 |
2026434
|
from tcga_encoder.models.vae.batcher_ABC import *
class TCGABatcher( TCGABatcherABC ):
pass
| 128 |
tests/renderers/embed/conftest.py
|
timvink/pheasant
| 24 |
2026427
|
import pytest
import os
from pheasant.renderers.embed.embed import Embed
from pheasant.core.parser import Parser
from pheasant.renderers.jupyter.jupyter import Jupyter
from pheasant.renderers.number.number import Header
@pytest.fixture()
def jupyter():
jupyter = Jupyter()
directory = os.path.normpath(os.path.join(__file__, "../../jupyter/templates"))
jupyter.set_template("fenced_code", directory)
return jupyter
@pytest.fixture()
def header():
header = Header()
return header
@pytest.fixture()
def embed():
embed = Embed()
return embed
@pytest.fixture()
def parser(jupyter, header, embed):
parser = Parser()
jupyter.parser = parser
header.parser = parser
embed.parser = parser
return parser
| 756 |
app/search/hot_eval/common_check.py
|
okriuchykhin/anfisa
| 0 |
2026916
|
def evalRec(env, rec):
"""Candidates (Clinical)"""
known = len(rec.Presence_in_Databases & {"ClinVar", "HGMD"}) > 0
if ("Quality-PASS" not in rec.Rules):
return False
if (known):
return True
if (rec.Severity < env.severity):
return False
return rec.gnomAD_AF < env.af
| 321 |
tests/acceptance/30_custom_promise_types/append_promises.py
|
olehermanse/masterfiles
| 44 |
2026935
|
#!/usr/bin/python3
#
# Sample custom promise type, uses cfengine.py library located in same dir.
#
# Use it in the policy like this:
# promise agent append
# {
# interpreter => "/usr/bin/python3";
# path => "$(sys.inputdir)/append_promises.py";
# }
# bundle agent main
# {
# append:
# "/path/to/target/file"
# string => "string to append";
# }
from cfengine import PromiseModule, ValidationError, Result
class AppendPromiseTypeModule(PromiseModule):
def __init__(self):
super().__init__("append_promise_module", "0.0.1")
def validate_promise(self, promiser, attributes):
if type(promiser) != str:
raise ValidationError("Promiser must be of type string")
if not "string" in attributes:
raise ValidationError("Missing attribute 'string'")
if type(attributes["string"]) != str:
raise ValidationError("Attribute 'string' must be of type string")
def evaluate_promise(self, promiser, attributes):
assert "string" in attributes
try:
with open(promiser, "a+") as f:
f.seek(0)
if (attributes["string"] not in f.read()):
f.write(attributes["string"])
self.log_verbose("Promise '%s' repaired" % promiser)
return Result.REPAIRED
else:
self.log_verbose("Promise '%s' kept" % promiser)
return Result.KEPT
except Exception as e:
self.log_error(e)
self.log_error("Promise '%s' not kept" % promiser)
return Result.NOT_KEPT
if __name__ == "__main__":
AppendPromiseTypeModule().start()
| 1,701 |
pgAdmin/browser/server_groups/servers/databases/schemas/fts_dictionaries/tests/utils.py
|
WeilerWebServices/PostgreSQL
| 0 |
2026490
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from __future__ import print_function
import os
import sys
import traceback
from regression.python_test_utils.test_utils import get_db_connection
file_name = os.path.basename(__file__)
def create_fts_dictionary(server, db_name, schema_name, fts_dict_name):
"""This function will add the fts_dictionary under test schema. """
try:
connection = get_db_connection(db_name,
server['username'],
server['db_password'],
server['host'],
server['port'],
server['sslmode'])
pg_cursor = connection.cursor()
query = "CREATE TEXT SEARCH DICTIONARY %s.%s (TEMPLATE = simple)" % (
schema_name, fts_dict_name)
pg_cursor.execute(query)
connection.commit()
# Get 'oid' from newly created dictionary
pg_cursor.execute("select oid from pg_catalog.pg_ts_dict where "
"dictname = '%s' order by oid ASC limit 1"
% fts_dict_name)
oid = pg_cursor.fetchone()
fts_dict_id = ''
if oid:
fts_dict_id = oid[0]
connection.close()
return fts_dict_id
except Exception:
traceback.print_exc(file=sys.stderr)
def verify_fts_dict(server, db_name, fts_dict_name):
"""
This function will verify current FTS dictionary.
:param server: server details
:type server: dict
:param db_name: database name
:type db_name: str
:param fts_dict_name: FTS dictionary name to be added
:type fts_dict_name: str
:return fts_dict: FTS dictionary detail
:rtype: tuple
"""
try:
connection = get_db_connection(db_name,
server['username'],
server['db_password'],
server['host'],
server['port'],
server['sslmode'])
pg_cursor = connection.cursor()
pg_cursor.execute(
"select oid from pg_catalog.pg_ts_dict where "
"dictname = '%s' order by oid ASC limit 1"
% fts_dict_name)
fts_dict = pg_cursor.fetchone()
connection.close()
return fts_dict
except Exception:
traceback.print_exc(file=sys.stderr)
def delete_fts_dictionaries(server, db_name, schema_name, fts_dict_name):
"""
This function delete FTS dictionaries.
:param server: server details
:type server: dict
:param db_name: database name
:type db_name: str
:param fts_dict_name: FTS dict name to be added
:type fts_dict_name: str
:param schema_name: schema name
:type schema_name: str
:return: None
"""
connection = get_db_connection(db_name,
server['username'],
server['db_password'],
server['host'],
server['port'],
server['sslmode'])
pg_cursor = connection.cursor()
pg_cursor.execute("DROP TEXT SEARCH DICTIONARY %s.%s" % (schema_name,
fts_dict_name))
connection.commit()
connection.close()
| 3,734 |
adv/kleimann.py
|
XenoXilus/dl
| 0 |
2026302
|
from core.advbase import *
def module():
return Kleimann
class Kleimann(Adv):
a3 = ('s',0.35)
conf = {}
conf['slots.a'] = [
'Candy_Couriers',
'Flash_of_Genius',
'Moonlight_Party',
'The_Plaguebringer',
'Dueling_Dancers'
]
conf['acl'] = """
`dragon(c3-s-end), x=5
`s3, not buff(s3)
`s1
`s2
`s4
`fs, self.madness_status<5 and self.madness=5
"""
conf['coabs'] = ['Ieyasu','Gala_Alex','Delphi']
conf['share'] = ['Curran']
def d_coabs(self):
if self.duration <= 60:
self.conf['coabs'] = ['Ieyasu','Gala_Alex','Bow']
def a1_madness_autocharge(self, t):
for s in self.skills:
if s.charged < s.sp:
sp = self.madness_status * 100
s.charge(sp)
log('sp', s.name+'_autocharge', int(sp))
self.set_hp(self.hp-1)
@property
def madness(self):
return self.fs_alt.uses
def prerun(self):
self.madness_status = 0
self.madness_timer = Timer(self.a1_madness_autocharge, 2.9, 1)
self.fs_alt = FSAltBuff('a1_madness', 'madness', uses=0)
def fs_madness_proc(self, e):
if self.madness_status < 5:
self.madness_status += 1
if self.madness_status == 1:
self.madness_timer.on()
def s2_proc(self, e):
if not self.fs_alt.get():
self.fs_alt.on()
if self.fs_alt.uses < 5:
self.fs_alt.uses += 1
if __name__ == '__main__':
    import sys
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
| 1,651 |
packnet/UDP.py
|
jschulenklopper/packnet
| 0 |
2026285
|
"""
PACKNET - c0mplh4cks
UDP
.---.--------------.
| 7 | Application |
|---|--------------|
| 6 | Presentation |
|---|--------------|
| 5 | Session |
#===#==============#
# 4 # Transport #
#===#==============#
| 3 | Network |
|---|--------------|
| 2 | Data Link |
|---|--------------|
| 1 | Physical |
'---'--------------'
"""
# === Importing Dependencies === #
from struct import pack, unpack
from .standards import encode, decode, checksum
# === UDP Header === #
class Header:
def __init__(self, packet=b""):
self.packet = packet
self.src = ["", 0, ""]
self.dst = ["", 0, ""]
self.length = 0
self.checksum = 0
self.data = b""
def build(self):
packet = []
self.length = 8 + len(self.data)
packet.insert(0, pack( ">H", self.src[1] )) # Source PORT
packet.insert(1, pack( ">H", self.dst[1] )) # Target PORT
packet.insert(2, pack( ">H", self.length )) # Total length
packet.insert(4, self.data ) # Data
packet.insert(3, checksum( [ # Checksum
*packet,
encode.ip( self.src[0] ),
encode.ip( self.dst[0] ),
pack( ">H", 17 ),
pack( ">H", self.length )
] ))
self.packet = b"".join(packet)
return self.packet
def read(self):
packet = self.packet
i = 0
i, self.src[1] = i+2, unpack( ">H", packet[i:i+2] )[0] # Source PORT
i, self.dst[1] = i+2, unpack( ">H", packet[i:i+2] )[0] # Target PORT
i, length = i+2, unpack( ">H", packet[i:i+2] )[0] # Total length
i, self.checksum = i+2, unpack( ">H", packet[i:i+2] )[0] # Checksum
i, self.data = i+len( packet[i:] ), packet[i:] # Data
self.length = i
return i
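# Usage sketch (illustrative addresses; relies on the package's encode and
# checksum helpers imported above, so call it from within packnet):
def _demo():
    header = Header()
    header.src = ["10.0.0.2", 5000, ""]
    header.dst = ["10.0.0.1", 53, ""]
    header.data = b"hello"
    packet = header.build()
    parsed = Header(packet)
    parsed.read()
    assert parsed.src[1] == 5000 and parsed.dst[1] == 53
    assert parsed.data == b"hello"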
| 2,003 |
modules/powershell/powersccm.py
|
decidedlygray/ptf
| 4,391 |
2026235
|
#!/usr/bin/env python
#####################################
# Installation module for PowerShell SCCM
#####################################
# AUTHOR OF MODULE NAME
AUTHOR="<NAME> (ReL1K)"
# DESCRIPTION OF THE MODULE
DESCRIPTION="PowerSCCM - PowerShell module to interact with SCCM deployments"
# INSTALL TYPE GIT, SVN, FILE DOWNLOAD
# OPTIONS = GIT, SVN, FILE
INSTALL_TYPE="GIT"
# LOCATION OF THE FILE OR GIT/SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/PowerShellMafia/PowerSCCM"
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="powersccm"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN="git"
# DEPENDS FOR FEDORA INSTALLS
FEDORA="git"
# COMMANDS TO RUN AFTER
AFTER_COMMANDS=""
# THIS WILL CREATE AN AUTOMATIC LAUNCHER FOR THE TOOL
LAUNCHER=""
| 758 |
src/mtenv/tests/examples/bandit_test.py
|
NagisaZj/ac-teach
| 56 |
2026421
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import pytest
from examples.bandit import BanditEnv # noqa: E402
from tests.utils.utils import validate_single_task_env
def get_valid_n_arms() -> List[int]:
return [1, 10, 100]
def get_invalid_n_arms() -> List[int]:
return [-1, 0]
@pytest.mark.parametrize("n_arms", get_valid_n_arms())
def test_n_arm_bandit_with_valid_input(n_arms):
env = BanditEnv(n_arms=n_arms)
env.seed(seed=5)
validate_single_task_env(env)
@pytest.mark.parametrize("n_arms", get_invalid_n_arms())
def test_n_arm_bandit_with_invalid_input(n_arms):
with pytest.raises(Exception):
env = BanditEnv(n_arms=n_arms)
env.seed(seed=5)
validate_single_task_env(env)
| 784 |
node.py
|
YannickSF/blob
| 1 |
2026662
|
from core.settings import SETTINGS
from p2pnetwork.node import Node
from core.blockchain import Blobchain, Block, Txion
class BlobNode(Node):
def __init__(self, host, port, callback=None, max_connections=0):
super(BlobNode, self).__init__(host, port, None, callback, max_connections)
self._blockchain = Blobchain()
print("Node {} - port {}: Started".format(self.id, port))
def balance(self, wallet_address):
""" return amount of coin on a given address """
return self._blockchain.balance(wallet_address)
def blocks(self, *args):
if len(args) > 0:
return self._blockchain.block(args[0])
else:
return self._blockchain.__repr__()
def exchanges(self, b_type, exp, to, value):
payload = {'b_type': b_type}
ex_callback = None
if b_type == 'txion':
new_txion = self._blockchain.exchanges(exp, to, value)
payload['item'] = new_txion.__repr__()
ex_callback = new_txion
self.send_to_nodes(payload)
return ex_callback
def forge(self, address):
for action in self._blockchain.forge(address):
if type(action) is Block:
payload = {'b_type': 'block', 'item': action.__repr__()}
self.send_to_nodes(data=payload)
elif type(action) is Txion:
payload = {'b_type': 'txion', 'item': action.__repr__()}
self.send_to_nodes(data=payload)
elif type(action) is int:
return action
# all the methods below are called when things happen in the test_network.
# implement your test_network node behavior to create the required functionality.
def outbound_node_connected(self, node):
# this connect to other
print("outbound_node_connected (" + self.id + "): " + node.id)
def inbound_node_connected(self, node):
# other connect to this
payload = self.blocks()
payload['synchronisation'] = 'synchronisation'
self.send_to_node(node, payload)
print("inbound_node_connected: (" + self.id + "): " + node.id)
def outbound_node_disconnected(self, node):
print("outbound_node_disconnected: (" + self.id + "): " + node.id)
def inbound_node_disconnected(self, node):
print("inbound_node_disconnected: (" + self.id + "): " + node.id)
def node_message(self, node, data):
print("node_message (" + self.id + ") from " + node.id + ": " + str(data))
if 'synchronisation' in data.keys():
resolve, synchron_chain = self._blockchain.synchronise(data['synchronisation'],
blockchain=data['blockchain'])
if resolve:
self.send_to_node(node, {'synchronisation': 'resolve', 'blockchain': synchron_chain})
elif 'b_type' in data.keys():
self._blockchain.peers_exchanges(data['b_type'], data['item'])
else:
self._blockchain.peers_exchanges(None, data)
def node_request_to_stop(self):
print("node is requested to stop (" + self.id + "): ")
if __name__ == '__main__':
n = BlobNode(SETTINGS.HOST, SETTINGS.PORT)
n.start()
stop = False
while not stop:
outbound = input('Press \'0\' to stop. \n')
if int(outbound) == 0:
stop = True
if stop:
n.stop()
| 3,439 |
utils/__init__.py
|
dd-iuonac/object-detector-in-carla
| 1 |
2026758
|
import colorsys
import math
import random
import numpy as np
def rand_color(seed):
"""Return random color based on a seed"""
random.seed(seed)
col = colorsys.hls_to_rgb(random.random(), random.uniform(.2, .8), 1.0)
return int(col[0] * 255), int(col[1] * 255), int(col[2] * 255)
def vector3d_to_array(vec3d):
return np.array([vec3d.x, vec3d.y, vec3d.z])
def degrees_to_radians(degrees):
return degrees * math.pi / 180
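# Usage sketch: seeding makes the color reproducible per input.
if __name__ == '__main__':
    assert rand_color(7) == rand_color(7)
    assert abs(degrees_to_radians(180) - math.pi) < 1e-12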
| 460 |
authapp/urls.py
|
ZeroXKiritsu/geekshop
| 0 |
2026806
|
from django.urls import re_path
from authapp import views as authapp
app_name = "authapp"
urlpatterns = [
re_path(r"^login/$", authapp.login, name="login"),
re_path(r"^logout/$", authapp.logout, name="logout"),
re_path(r"^register/$", authapp.register, name="register"),
re_path(r"^edit/$", authapp.edit, name="edit"),
re_path(r"^verify/(?P<email>.+)/(?P<activation_key>\w+)/$", authapp.verify, name="verify"),
]
| 434 |
exercises/pt/test_03_16_02.py
|
tuanducdesign/spacy-course
| 0 |
2027183
|
def test():
assert (
'with nlp.select_pipes(disable=["lemmatizer"])' in __solution__
or 'with nlp.select_pipes(disable=["lemmatizer")' in __solution__
), "Você está usando nlp.select_pipes com os componentes corretos?"
__msg__.good(
"Perfeito! Agora que você já praticou as dicas e truques para melhorar "
"a performance dos seus projetos, estamos prontos para o próximo capítulo "
"onde vamos treinar modelos de redes neurais da biblioteca spaCy."
)
| 508 |
vcli/packages/vtablefmt.py
|
Shasthojoy/vcli
| 82 |
2026536
|
from .tabulate import TableFormat, Line, DataRow
# "Vertica-bar" seperated values
vsv_unaligned = TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', ''),
datarow=DataRow('', '|', ''),
padding=0, with_header_hide=None)
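# Usage sketch (assumes the bundled tabulate accepts a TableFormat instance,
# as upstream tabulate does; output shape is illustrative):
# from .tabulate import tabulate
# tabulate([[1, 'a'], [2, 'b']], headers=['id', 'name'], tablefmt=vsv_unaligned)
# -> rows joined by '|' with no padding, e.g. "id|name"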
| 452 |
patronage/util.py
|
phildini/django-patronage
| 6 |
2026335
|
import logging
import requests
logger = logging.getLogger(__file__)
def get_creator_tiers(patreonuser):
from .models import Tier
# TODO: pull tiers on account connect
logger.info("Getting creator tiers")
tiers = []
patreon_response = requests.get(
"https://www.patreon.com/api/oauth2/v2/campaigns",
params={
"include": "tiers,creator",
"fields[tier]": "title,amount_cents",
"fields[user]": "full_name",
},
headers={"Authorization": "Bearer {}".format(patreonuser.token)},
)
patreon_json = patreon_response.json()
data = patreon_json.get("data")
if patreon_json.get("included") and data:
campaign_id = patreon_json.get("data",[{}])[0].get("id")
creator_id = patreon_json.get("data")[0]["relationships"]["creator"][
"data"
]["id"]
includes = parse_includes(patreon_json["included"])
tiers = []
for tier in includes.get("tier", []):
tier, created = Tier.objects.get_or_create(
campaign_id=campaign_id,
tier_id=tier,
)
if created:
tier.tier_title = includes["tier"][tier].get("attributes", {}).get("title")
tier.tier_amount_cents = includes["tier"][tier].get("attributes", {}).get("amount_cents")
tier.campaign_title = includes["user"][creator_id]["attributes"]["full_name"]
tier.creators.add(patreonuser.account.user)
tier.save()
tiers.append(tier)
tiers = Tier.objects.filter(
campaign_id=campaign_id,
creators=patreonuser.account.user,
).order_by(
"tier_amount_cents"
)
return tiers
def parse_includes(include_dict):
includes = {}
for include in include_dict:
include_dict = {
"attributes": include["attributes"],
"relationships": include.get("relationships", {}),
}
id = include["id"]
if include["type"] not in includes:
includes[include["type"]] = {id: include_dict}
else:
includes[include["type"]][id] = include_dict
return includes
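# Sketch of the grouping this performs (illustrative payload, not a real
# Patreon response):
if __name__ == "__main__":
    sample = [{"type": "tier", "id": "1", "attributes": {"title": "Gold"}}]
    assert parse_includes(sample) == {
        "tier": {"1": {"attributes": {"title": "Gold"}, "relationships": {}}}
    }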
| 2,225 |
BooleanComp.py
|
Ramakm/python_tutor_programs
| 0 |
2026330
|
age = input("ENter your age")
if age == 50:
print("Welcome to Fifty")
# IN Keyword
name = {"Ram", "HAri","jadu"}
my_name = input("ENter My name")
if my_name in name:
print(f"myname is {my_name} too same")
# print(my_name in name)
| 254 |
ztf_viewer/importer.py
|
snad-space/ztf-viewer
| 2 |
2025659
|
from time import sleep
import numpy as np
def import_matplotlib():
"""Matplotlib default parameters"""
import matplotlib
matplotlib.use('pgf')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['font.size'] = 14
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['pgf.rcfonts'] = True
matplotlib.rcParams['pgf.preamble'] = r'''
\usepackage{hyperref}
\hypersetup{colorlinks=true, urlcolor=black}
'''
def import_astropy():
"""Dirty hack to overcome problem of simultaneous cache folder creation for astropy"""
while True:
try:
from astroquery.cds import cds
from astroquery.simbad import Simbad
from astroquery.vizier import Vizier
break
except FileExistsError:
sleep(np.random.uniform(0.05, 0.2))
import_matplotlib()
import_astropy()
| 945 |
correct_python_programs/bitcount.py
|
PatrickShaw/QuixBugs
| 22 |
2026905
|
def bitcount(n):
count = 0
while n:
n &= n - 1
count += 1
return count
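# Example: n &= n - 1 clears the lowest set bit, so the loop runs once per set bit.
if __name__ == '__main__':
    assert bitcount(0b101101) == 4
    assert bitcount(0) == 0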
| 100 |
alembic/env.py
|
andreasots/lrrbot
| 1 |
2027189
|
from __future__ import with_statement
from alembic import context
import sqlalchemy
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=context.config.get_section_option("lrrbot", "postgres", 'postgres:///lrrbot'),
target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = sqlalchemy.create_engine(context.config.get_section_option("lrrbot", "postgres", 'postgres:///lrrbot'))
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| 1,345 |
tc.py
|
facebookresearch/py2bpf
| 171 |
2026963
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pyroute2
import py2bpf.prog as prog
from py2bpf.socket_filter import SkBuffContext
def clear_ingress_filter(dev):
with pyroute2.IPRoute() as ipr:
idx = ipr.link_lookup(ifname=dev)[0]
ipr.tc('del', 'ingress', idx, 'ffff:')
class IngressFilter:
def __init__(self, fn):
self.prog = prog.create_prog(
prog.ProgType.SCHED_CLS, SkBuffContext, fn)
def close(self):
self.prog.close()
def install(self, dev):
with pyroute2.IPRoute() as ipr:
idx = ipr.link_lookup(ifname=dev)[0]
ipr.tc('add', 'ingress', idx, 'ffff:')
ipr.tc('add-filter', 'bpf', idx, ':1', fd=self.prog.fd,
name='drop_face', parent='ffff:', action='drop', classid=1)
    def remove(self, dev):
        clear_ingress_filter(dev)
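# Usage sketch (needs root and a real interface; 'eth0' and the filter
# function are illustrative):
# flt = IngressFilter(my_filter_fn)
# flt.install('eth0')
# ...
# flt.remove('eth0')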
| 1,040 |
Max2SAT_graphs/counts_histograms_satisfiable.py
|
puyamirkarimi/quantum-walks
| 4 |
2026894
|
import matplotlib.pyplot as plt
import numpy as np
def counts_data(n):
return np.loadtxt("./../Max2SAT/adam_counts_" + str(n) + ".txt")[:10000]
def zero_to_nan(array):
"""Replace every 0 with 'nan' and return a copy."""
return [float('nan') if x==0 else x for x in array]
def get_satisfiable_list(n):
data = np.genfromtxt('./../Max2SAT/m2s_satisfiable.csv', delimiter=',', skip_header=1, dtype=str)
satisfiable_data = data[:, 1]
m = n - 5
return satisfiable_data[m*10000:(m+1)*10000]
if __name__ == '__main__':
plt.rc('text', usetex=True)
plt.rc('font', size=14)
n = 8
counts_list = []
num_bins = 60
counts = counts_data(n)
satisfiable_list = get_satisfiable_list(n)
satisfiable_counts = []
unsatisfiable_counts = []
for i in range(len(counts)):
if satisfiable_list[i] == '1':
satisfiable_counts.append(counts[i])
elif satisfiable_list[i] == '0':
unsatisfiable_counts.append(counts[i])
counts_list.append(np.array(satisfiable_counts))
counts_list.append(np.array(unsatisfiable_counts))
min_runtime = np.min(counts_list[1])
max_runtime = np.max(counts_list[1])
x = np.linspace(min_runtime, max_runtime, num=num_bins)
plt.figure()
# for i_adam, n in enumerate(n_list):
# plt.scatter(x, y_adam[i_adam], label="n="+str(n), marker='+')
# plt.errorbar(x, runtimes_average, runtimes_standard_error)
# plt.xlim([0, 100])
# plt.ylim([0.6, 1e4])
plt.hist(counts_list[0], x, color='red', align='mid', rwidth=0.5, density=True, label='satisfiable')
plt.hist(counts_list[1], x, color='green', align='left', rwidth=0.5, density=True, label='unsatisfiable')
# plt.yscale('log')
plt.xlabel("Number of calls (bnb)")
plt.ylabel("Normalised probability")
plt.legend()
plt.tick_params(direction='in', top=True, right=True, which='both')
# ax2.set_ylabel(r"$\overline{T}_{inst}$~/~$s$")
# plt.tight_layout()
# plt.savefig('probability_histogram.png', dpi=200)
plt.show()
| 2,074 |
1_Python/Desafios/104_Inputint.py
|
guilhermebaos/Curso-em-Video-Python
| 0 |
2026148
|
def inputint(s='Enter an integer: '):
while True:
n2 = input(s)
try:
n2 = int(n2)
return n2
except ValueError:
            print('ERROR! Enter an integer!\n')
n = inputint()
print(f'You entered the number {n}!')
| 278 |
botbot/apps/plugins/utils.py
|
metabrainz/brainzbot-core
| 5 |
2024659
|
import datetime
from functools import wraps
from django.template import Template, Context
from django.template.defaultfilters import urlize
from django.utils.timezone import utc
import markdown
def plugin_docs_as_html(plugin, channel):
tmpl = Template(plugin.user_docs)
ctxt = Context({
'nick': channel.chatbot.nick,
'channel': channel,
'SITE': 'http://chatlogs.metabrainz.org',
})
return markdown.markdown(urlize(tmpl.render(ctxt)))
def convert_nano_timestamp(nano_timestamp):
"""
Takes a time string created by the bot (in Go using nanoseconds)
and makes it a Python datetime using microseconds
"""
# convert nanoseconds to microseconds
# http://stackoverflow.com/a/10612166/116042
rfc3339, nano_part = nano_timestamp.split('.')
micro = nano_part[:-1] # strip trailing "Z"
if len(nano_part) > 6: # trim to max size Python allows
micro = micro[:6]
rfc3339micro = ''.join([rfc3339, '.', micro, 'Z'])
micro_timestamp = datetime.datetime.strptime(
rfc3339micro, '%Y-%m-%dT%H:%M:%S.%fZ').replace(tzinfo=utc)
return micro_timestamp
def log_on_error(Log, method):
@wraps(method)
def wrap(*args, **kwargs):
try:
return method(*args, **kwargs)
except Exception:
Log.error("Plugin failed [%s]", method.__name__, exc_info=True)
return wrap
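A quick illustrative check with a made-up timestamp: Go's RFC3339Nano format carries up to nine fractional digits plus a trailing 'Z', which convert_nano_timestamp trims down to Python's six-digit microsecond precision:
ts = convert_nano_timestamp('2013-01-01T12:00:00.123456789Z')
assert ts.microsecond == 123456  # nanoseconds are truncated, not rounded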
| 1,393 |
examples/tutorials/nni_experiment.py
|
dutxubo/nni
| 1 |
2026648
|
"""
Start and Manage a New Experiment
=================================
"""
# %%
# Configure Search Space
# ----------------------
search_space = {
"C": {"_type": "quniform", "_value": [0.1, 1, 0.1]},
"kernel": {"_type": "choice", "_value": ["linear", "rbf", "poly", "sigmoid"]},
"degree": {"_type": "choice", "_value": [1, 2, 3, 4]},
"gamma": {"_type": "quniform", "_value": [0.01, 0.1, 0.01]},
"coef0": {"_type": "quniform", "_value": [0.01, 0.1, 0.01]}
}
# %%
# Configure Experiment
# --------------------
from nni.experiment import Experiment
experiment = Experiment('local')
experiment.config.experiment_name = 'Example'
experiment.config.trial_concurrency = 2
experiment.config.max_trial_number = 10
experiment.config.search_space = search_space
experiment.config.trial_command = 'python scripts/trial_sklearn.py'
experiment.config.trial_code_directory = './'
experiment.config.tuner.name = 'TPE'
experiment.config.tuner.class_args['optimize_mode'] = 'maximize'
experiment.config.training_service.use_active_gpu = True
# %%
# Start Experiment
# ----------------
experiment.start(8080)
# %%
# Experiment View & Control
# -------------------------
#
# View the status of experiment.
experiment.get_status()
# %%
# Wait until at least one trial finishes.
import time
for _ in range(10):
stats = experiment.get_job_statistics()
if any(stat['trialJobStatus'] == 'SUCCEEDED' for stat in stats):
break
time.sleep(10)
# %%
# Export the experiment data.
experiment.export_data()
# %%
# Get metric of jobs
experiment.get_job_metrics()
# %%
# Stop Experiment
# ---------------
experiment.stop()
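If blocking until completion is acceptable, the Experiment API also offers a run() helper (availability in a given nni version is an assumption) that replaces the explicit start(), the polling loop, and stop() above:
success = experiment.run(8080)  # start, block until finished, then clean up
print('finished OK' if success else 'finished with failures')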
| 1,641 |
openstacktenantcleaner/common.py
|
wtsi-hgi/openstack-tenant-cleanup
| 0 |
2026553
|
import os
from openstacktenantcleaner.models import OpenstackItem
def create_human_identifier(item: OpenstackItem, include_type: bool=False) -> str:
"""
Creates a common, human readable message to allow the given item to be identified.
:param item: the item
:param include_type: whether to include the item's type in the message
:return: the created message
"""
type_message = f"of type \"{type(item).__name__}\" " if include_type else ""
return f"{type_message}with id \"{item.identifier}\" and name \"{item.name}\""
def get_absolute_path_relative_to(path: str, relative_to: str) -> str:
"""
Gets the path as relative to the given other path.
:param path: path suffix
:param relative_to: path prefix
:return: the absolute path
"""
if os.path.isabs(path):
raise ValueError("The given path is not relative")
return os.path.join(os.path.dirname(relative_to), path)
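An illustrative call with hypothetical paths, showing resolution against the directory of relative_to:
get_absolute_path_relative_to('config.yml', '/etc/cleaner/main.yml')
# -> '/etc/cleaner/config.yml'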
| 940 |
orchestrator/api_pb2.py
|
difince/kinney
| 27 |
2025485
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: orchestrator/api.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='orchestrator/api.proto',
package='kinney',
syntax='proto3',
serialized_options=b'Z*github.com/CamusEnergy/kinney/orchestrator',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x16orchestrator/api.proto\x12\x06kinney\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xc1\x01\n\x0e\x43hargerSession\x12\r\n\x05point\x18\x01 \x01(\t\x12\x0f\n\x07vehicle\x18\x02 \x01(\t\x12\r\n\x05watts\x18\x03 \x01(\x01\x12,\n\x08measured\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x05start\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\'\n\x03\x65nd\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"[\n\x0e\x43hargerCommand\x12\r\n\x05point\x18\x01 \x01(\t\x12\r\n\x05limit\x18\x02 \x01(\x01\x12+\n\x08lifetime\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration2O\n\x0cOrchestrator\x12?\n\x07\x43harger\x12\x16.kinney.ChargerSession\x1a\x16.kinney.ChargerCommand\"\x00(\x01\x30\x01\x42,Z*github.com/CamusEnergy/kinney/orchestratorb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_CHARGERSESSION = _descriptor.Descriptor(
name='ChargerSession',
full_name='kinney.ChargerSession',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='point', full_name='kinney.ChargerSession.point', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='vehicle', full_name='kinney.ChargerSession.vehicle', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='watts', full_name='kinney.ChargerSession.watts', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='measured', full_name='kinney.ChargerSession.measured', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start', full_name='kinney.ChargerSession.start', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end', full_name='kinney.ChargerSession.end', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=293,
)
_CHARGERCOMMAND = _descriptor.Descriptor(
name='ChargerCommand',
full_name='kinney.ChargerCommand',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='point', full_name='kinney.ChargerCommand.point', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit', full_name='kinney.ChargerCommand.limit', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='lifetime', full_name='kinney.ChargerCommand.lifetime', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=295,
serialized_end=386,
)
_CHARGERSESSION.fields_by_name['measured'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_CHARGERSESSION.fields_by_name['start'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_CHARGERSESSION.fields_by_name['end'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_CHARGERCOMMAND.fields_by_name['lifetime'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
DESCRIPTOR.message_types_by_name['ChargerSession'] = _CHARGERSESSION
DESCRIPTOR.message_types_by_name['ChargerCommand'] = _CHARGERCOMMAND
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ChargerSession = _reflection.GeneratedProtocolMessageType('ChargerSession', (_message.Message,), {
'DESCRIPTOR' : _CHARGERSESSION,
'__module__' : 'orchestrator.api_pb2'
# @@protoc_insertion_point(class_scope:kinney.ChargerSession)
})
_sym_db.RegisterMessage(ChargerSession)
ChargerCommand = _reflection.GeneratedProtocolMessageType('ChargerCommand', (_message.Message,), {
'DESCRIPTOR' : _CHARGERCOMMAND,
'__module__' : 'orchestrator.api_pb2'
# @@protoc_insertion_point(class_scope:kinney.ChargerCommand)
})
_sym_db.RegisterMessage(ChargerCommand)
DESCRIPTOR._options = None
_ORCHESTRATOR = _descriptor.ServiceDescriptor(
name='Orchestrator',
full_name='kinney.Orchestrator',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=388,
serialized_end=467,
methods=[
_descriptor.MethodDescriptor(
name='Charger',
full_name='kinney.Orchestrator.Charger',
index=0,
containing_service=None,
input_type=_CHARGERSESSION,
output_type=_CHARGERCOMMAND,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_ORCHESTRATOR)
DESCRIPTOR.services_by_name['Orchestrator'] = _ORCHESTRATOR
# @@protoc_insertion_point(module_scope)
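A minimal round-trip sketch for the generated messages (field values are hypothetical; GetCurrentTime() is the standard protobuf helper for Timestamp fields):
session = ChargerSession(point='station-1', vehicle='car-42', watts=7200.0)
session.measured.GetCurrentTime()  # stamp the 'measured' field with now
payload = session.SerializeToString()
assert ChargerSession.FromString(payload).point == 'station-1'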
| 8,280 |
scripts/parse_syms.py
|
fengjixuchui/popcorn
| 0 |
2026383
|
#!/usr/bin/env python3
import struct
import sys
def parse_elf(filename):
    # Assumed minimal behaviour: validate the ELF identification bytes; the
    # actual symbol parsing was never written in the upstream file.
    with open(filename, 'rb') as elf:
        magic = elf.read(4)
        if magic != b'\x7fELF':
            raise ValueError('%s is not an ELF file' % filename)
        ei_class, ei_data = struct.unpack('BB', elf.read(2))
        print(filename, '32-bit' if ei_class == 1 else '64-bit',
              'little-endian' if ei_data == 1 else 'big-endian')
if __name__ == "__main__":
    for arg in sys.argv[1:]:
        parse_elf(arg)
| 200 |
ocmovies_api/movies/migrations/0004_auto_20201214_0856.py
|
Danycm1/P6_Just_stream_it
| 0 |
2026966
|
# Generated by Django 3.1.4 on 2020-12-14 08:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('movies', '0003_auto_20201214_0852'),
]
operations = [
migrations.AlterModelOptions(
name='company',
options={'ordering': ['name'], 'verbose_name_plural': 'companies'},
),
migrations.AlterModelOptions(
name='contributor',
options={'ordering': ['name'], 'verbose_name_plural': 'contributors'},
),
migrations.AlterModelOptions(
name='country',
options={'ordering': ['name'], 'verbose_name_plural': 'countries'},
),
migrations.AlterModelOptions(
name='genre',
options={'ordering': ['name'], 'verbose_name_plural': 'genres'},
),
migrations.AlterModelOptions(
name='language',
options={'ordering': ['name'], 'verbose_name_plural': 'languages'},
),
migrations.AlterModelOptions(
name='movie',
options={'ordering': ['id'], 'verbose_name_plural': 'movies'},
),
migrations.AlterModelOptions(
name='movieactor',
options={'ordering': ['movie__id', 'position'], 'verbose_name_plural': 'movieactors'},
),
migrations.AlterModelOptions(
name='moviedirector',
options={'ordering': ['movie__id', 'position'], 'verbose_name_plural': 'moviedirectors'},
),
migrations.AlterModelOptions(
name='moviewriter',
options={'ordering': ['movie__id', 'position'], 'verbose_name_plural': 'moviewriters'},
),
migrations.AlterModelOptions(
name='rating',
options={'ordering': ['name'], 'verbose_name_plural': 'ratings'},
),
]
| 1,861 |