max_stars_repo_path (stringlengths 4-182) | max_stars_repo_name (stringlengths 6-116) | max_stars_count (int64 0-191k) | id (stringlengths 7) | content (stringlengths 100-10k) | size (int64 100-10k)
---|---|---|---|---|---|
tests/conftest.py | heston/firebase-data | 0 | 2025225 |
import os.path
import logging
import sys
# Add the module path
module_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'..'
))
sys.path.insert(0, module_path)
logging.basicConfig(
format='[%(asctime)s] %(levelname)s %(name)s: %(message)s',
level=logging.DEBUG
)
def pytest_configure(config):
config.addinivalue_line(
"markers", "slow: marks tests as slow (deselect with '-m \"not slow\""
)
| 441 |
segmentation/plot3d.py | Neuro-Vision/NeuroVision | 1 | 2024574 |
import plotly
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import nibabel as nib
import os
import albumentations as A
import numpy as np
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
#for plotting 3d and saving to html
import io
from base64 import b64encode
class ImageReader:
def __init__(
self, root:str, img_size:int=256,
normalize:bool=False, single_class:bool=False
) -> None:
pad_size = 256 if img_size > 256 else 224
self.resize = A.Compose(
[
A.PadIfNeeded(min_height=pad_size, min_width=pad_size, value=0),
A.Resize(img_size, img_size)
]
)
self.normalize=normalize
self.single_class=single_class
self.root=root
def read_file(self) -> dict:
scan_type = 'flair'
original = 'segmentation/static/upload/flair.nii'
raw_image = nib.load(original).get_fdata()
# raw_mask = nib.load(original.replace(scan_type, 'seg (1)')).get_fdata()
raw_mask = nib.load('segmentation/static/upload/predicted.nii').get_fdata()
print(raw_mask)
processed_frames, processed_masks = [], []
for frame_idx in range(raw_image.shape[2]):
frame = raw_image[:, :, frame_idx]
mask = raw_mask[:, :, frame_idx]
resized = self.resize(image=frame, mask=mask)
processed_frames.append(resized['image'])
processed_masks.append(
1*(resized['mask'] > 0) if self.single_class else resized['mask']
)
scan_data = np.stack(processed_frames, 0)
if self.normalize:
if scan_data.max() > 0:
scan_data = scan_data/scan_data.max()
scan_data = scan_data.astype(np.float32)
return {
'scan': scan_data,
'segmentation': np.stack(processed_masks, 0),
'orig_shape': raw_image.shape
}
def load_patient_scan(self, idx:int, scan_type:str='flair') -> dict:
patient_id = str(idx).zfill(5)
# scan_filename = f'{self.root}/BraTS2021_{patient_id}/BraTS2021_{patient_id}_{scan_type}.nii.gz'
dummy_file = "flair.nii"
return self.read_file()
import plotly.graph_objects as go
import numpy as np
def generate_3d_scatter(
x:np.array, y:np.array, z:np.array, colors:np.array,
size:int=3, opacity:float=0.2, scale:str='Teal',
hover:str='skip', name:str='MRI'
) -> go.Scatter3d:
return go.Scatter3d(
x=x, y=y, z=z,
mode='markers', hoverinfo=hover,
marker = dict(
size=size, opacity=opacity,
color=colors, colorscale=scale
),
name=name
)
class ImageViewer3d():
def __init__(
self, reader:ImageReader,
mri_downsample:int=10, mri_colorscale:str='Ice'
) -> None:
self.reader = reader
self.mri_downsample = mri_downsample
self.mri_colorscale = mri_colorscale
def load_clean_mri(self, image:np.array, orig_dim:int) -> dict:
shape_offset = image.shape[1]/orig_dim
z, x, y = (image > 0).nonzero()
# only (1/mri_downsample) is sampled for the resulting image
x, y, z = x[::self.mri_downsample], y[::self.mri_downsample], z[::self.mri_downsample]
colors = image[z, x, y]
return dict(x=x/shape_offset, y=y/shape_offset, z=z, colors=colors)
def load_tumor_segmentation(self, image:np.array, orig_dim:int) -> dict:
tumors = {}
shape_offset = image.shape[1]/orig_dim
# 1/1, 1/3 and 1/5 pixels for tumor tissue classes 1(core), 2(invaded) and 4(enhancing)
sampling = {
1: 1, 2: 3, 4: 5
}
for class_idx in sampling:
z, x, y = (image == class_idx).nonzero()
x, y, z = x[::sampling[class_idx]], y[::sampling[class_idx]], z[::sampling[class_idx]]
tumors[class_idx] = dict(
x=x/shape_offset, y=y/shape_offset, z=z,
colors=class_idx/4
)
return tumors
def collect_patient_data(self, scan:dict) -> tuple:
clean_mri = self.load_clean_mri(scan['scan'], scan['orig_shape'][0])
tumors = self.load_tumor_segmentation(scan['segmentation'], scan['orig_shape'][0])
markers_created = clean_mri['x'].shape[0] + sum(tumors[class_idx]['x'].shape[0] for class_idx in tumors)
return [
generate_3d_scatter(
**clean_mri, scale=self.mri_colorscale, opacity=0.4,
hover='skip', name='Brain MRI'
),
generate_3d_scatter(
**tumors[1], opacity=0.8,
hover='all', name='Necrotic tumor core'
),
generate_3d_scatter(
**tumors[2], opacity=0.4,
hover='all', name='Peritumoral invaded tissue'
),
generate_3d_scatter(
**tumors[4], opacity=0.4,
hover='all', name='GD-enhancing tumor'
),
], markers_created
def get_3d_scan(self, patient_idx:int, scan_type:str='flair') -> go.Figure:
scan = self.reader.load_patient_scan(patient_idx, scan_type)
data, num_markers = self.collect_patient_data(scan)
fig = go.Figure(data=data)
fig.update_layout(
title=f"[Patient id:{patient_idx}] brain MRI scan ({num_markers} points)",
legend_title="Pixel class (click to enable/disable)",
font=dict(
family="Courier New, monospace",
size=14,
),
margin=dict(
l=0, r=0, b=0, t=30
),
legend=dict(itemsizing='constant')
)
fig.write_html("segmentation/3D Animation.html")
return fig
# if __name__ == "__main__":
# reader = ImageReader('./data', img_size=128, normalize=True, single_class=False)
# viewer = ImageViewer3d(reader, mri_downsample=20)
# fig = viewer.get_3d_scan(0, 't1')
# plotly.offline.iplot(fig)
| 6,757 |
tests/test.py | Gordon003/Scratch-To-Python | 4 | 2024876 |
from scratch_py import manager
import os
# Start Pygame
game = manager.GameManager(800, 800, os.getcwd())
game.change_title("Fish Catcher Game")
print(os.getcwd())
# background
#game.change_background_image("background1.png")
| 227 |
recipes/incremental/__init__.py | preserveddarnell/lbry-android | 1 | 2024713 |
from pythonforandroid.toolchain import PythonRecipe, shprint
import sh
class IncrementalRecipe(PythonRecipe):
version = '17.5.0'
url = 'https://pypi.python.org/packages/8f/26/02c4016aa95f45479eea37c90c34f8fab6775732ae62587a874b619ca097/incremental-{version}.tar.gz'
depends = [('python2', 'python3crystax'), 'setuptools']
call_hostpython_via_targetpython = False
install_in_hostpython = True
recipe = IncrementalRecipe()
| 447 |
tf5.py | chula-eic/PLC2018-Junk-Harvestor | 0 | 2024600 |
import logging
import cv2
import numpy as np
import json
def read_video(cap):
ret, frame = cap.read()
h, w, c = frame.shape
left_raw = frame[:, :int(w/2), :]
right_raw = frame[:, int(w/2):, :]
return (left_raw, right_raw)
#capture via webcam 0
logging.basicConfig(level=logging.DEBUG)
logging.debug("START DEBUGGING")
try:
cap = cv2.VideoCapture(1)
logging.debug("SUCCESFULLY ACTIVATE WEBCAM")
except:
logging.error("ERROR CANNOT ACTIVATE WEBCAM")
exit(0)
logging.debug("WIDTH = %s", str(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
logging.debug("HEIGHT = %s", str(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
fps = cap.get(cv2.CAP_PROP_FPS)
delta_time = 100/fps
logging.debug("START VIDEO CAPTURE WITH %d MILLISECONDS INTERVAL", int(delta_time))
logging.debug("START START CAPTURING")
wait = -1
while cap.isOpened() and wait == -1:
left_raw, right_raw = read_video(cap)
cv2.imshow('left', left_raw)
cv2.imshow('right', right_raw)
wait = cv2.waitKey(int(delta_time))
logging.debug("END CAPTURING")
| 1,046 |
src/apps/blog/templatetags/blog_filters.py | yrrodriguezb/djangp_apps | 0 | 2025344 |
from math import ceil
from django import template
register = template.Library()
@register.filter
def partition(value: int, length: int = 3):
return ceil(value / length)
| 175 |
context/uid_generator.py | Sunchasing/python-common | 5 | 2025301 |
import random
from typing import Iterable
from context.context import GlobalContextTable
from types_extensions import void, const
class UIDGenerator:
_DEFAULT_CHARSET: str = "abcdefghijklmnopqrstuvwxyz1234567890"
# The key in the context table holding the set of registered UIDs
CONTEXT_KEY: const(str) = "active_global_uids"
def __init__(self, length: int = 8, charset: str | Iterable[str] = None) -> void:
self.length: int = length
self.charset: str | Iterable[str] = charset or self._DEFAULT_CHARSET
def new(self) -> str:
"""
Generate a new UID and store it in the global context table
:return: The new UID
"""
with GlobalContextTable() as ctx:
active_uids = ctx.get(self.CONTEXT_KEY)
uid_set: set[str] = active_uids.get_current_data() if active_uids else set()
unique = False
while not unique:
rv_ = ""
for _ in range(self.length):
rv_ += self.charset[random.randint(0, len(self.charset) - 1)]
if rv_ not in uid_set:
unique = True
uid_set.add(rv_)
ctx.upsert(self.CONTEXT_KEY, uid_set, preserve_old_data=False)
return rv_
def deregister(self, uid: str) -> void:
"""
Deregister a UID, if such exists
"""
with GlobalContextTable() as ctx:
active_uids = ctx.get(self.CONTEXT_KEY)
if active_uids:
uid_set: set[str] = active_uids.get_current_data()
try:
uid_set.remove(uid)
ctx.upsert(self.CONTEXT_KEY, uid_set, preserve_old_data=False)
except KeyError:
pass
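# Illustrative usage (not part of the original module), assuming a working
# GlobalContextTable backend is configured:
#   gen = UIDGenerator(length=12)
#   uid = gen.new()        # 12 characters drawn from the default charset
#   gen.deregister(uid)    # frees the UID for reuse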
| 1,788 |
L1Trigger/Skimmer/test/testL1Filter_cfg.py | ckamtsikis/cmssw | 852 | 2025403 |
import FWCore.ParameterSet.Config as cms
process = cms.Process("L1SKIM")
# initialize MessageLogger
process.load("FWCore.MessageService.MessageLogger_cfi")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring("rfio:/castor/cern.ch/user/e/emiglior/Alignment/SkimmedData/TestAlCaRecoWithGT_50911_10k.root")
)
process.load("L1Trigger.Skimmer.l1Filter_cfi")
process.filterPath = cms.Path(process.l1Filter)
| 434 |
Lib/htmpy/__init__.py | kylefmohr/Pyto | 0 | 2024822 |
"""
HTML + Python
HtmPy lets you run Python code in a ``<script>`` tag on an HTML page when it's shown on a :class:`~htmpy.WebView`.
This module is also a bridge between Python and JavaScript so the code has access to the ``window`` object. However, the code can be very slow if too many JavaScript functions are called.
This can be easily solved by doing the logic in Python and then calling a JavaScript function previously declared on a ``script`` tag to modify the DOM.
To take advantage of HtmPy, just create an HTML file and the editor will let you show it.
Place your Python code in a ``script`` tag like this:
.. highlight:: html
.. code-block:: html
<script type="text/python">
...
</script>
To access the window object:
.. highlight:: python
.. code-block:: python
from htmpy import window
Then you can just call any function or get any attribute of the window object to modify the DOM.
If you put JavaScript code in an earlier ``script`` tag, you can access anything it declared in the global scope through the ``window`` object.
You can do the opposite, store a Python function or variable in the ``window`` object and it will be accessible from JavaScript.
HtmPy will bridge Python functions so they can be called from JavaScript, so you could use ``addEventListener`` for example. However, the functions will run asynchronously and will not return any value.
You can use :class:`~htmpy.WebView` to show HTML + Python pages on your custom UI.
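As an illustration of the Python-to-JavaScript bridge described above, here is a minimal sketch (the ``show_time`` function, the ``clock-button`` element id and the ``setLabel`` helper are made up for this example; ``setLabel`` is assumed to be a plain JavaScript function declared in an earlier ``<script>`` tag):
.. highlight:: python
.. code-block:: python
    from datetime import datetime
    from htmpy import window
    def show_time(event):
        # call a JavaScript helper declared earlier to modify the DOM
        window.setLabel(str(datetime.now()))
    # store the Python function in ``window`` and register it as a listener
    window.show_time = show_time
    window.document.getElementById("clock-button").addEventListener("click", show_time)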
"""
from . import jsobject as _jsobject
from . import webview as _webview
from ._window import window
from pyto import __Class__
class _FunctionHolder:
def __init__(self):
self._dict = {}
self._web_views = {}
def call(self, id, arguments):
args = []
function = self[id]
web_view = self._web_views[id]
for i in range(function.__code__.co_argcount):
try:
args.append(_jsobject.JSObject(arguments[i], web_view))
except IndexError:
break
exec("function(*tuple(args))", web_view._globals, locals())
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
self._dict[key] = value
def __getattribute__(self, name):
try:
return super().__getattribute__(name)
except AttributeError:
return self[name]
_function_holder = _FunctionHolder()
_jsobject._function_holder = _function_holder
_webview._function_holder = _function_holder
WebView = _webview.WebView
JSObject = _jsobject.JSObject
| 2,632 |
bootstrapProject/bootstrapApp/views.py | cs-fullstack-2019-spring/django-bootstrap-grid-cw-bettyjware11 | 0 | 2023408 |
from django.shortcuts import render
# Create your views here.
def index(request):
return render(request, "bootstrapApp/index.html")
def next(request):
return render(request, "bootstrapApp/next.html")
def previous(request):
return render(request, "bootstrapApp/previous.html")
def Page2(request):
return render(request, "bootstrapApp/Page2.html")
def Page3(request):
return render(request, "bootstrapApp/Page3.html")
def changeBackgroundColor(request):
return
| 493 |
Magna/ICS/Magna.py | parkji30/SuperBIT-Compression-Optimization | 0 | 2024662 |
## Load Packages
import os, shutil
from Compression import Compression
from Model import LossModel
from Array import ArrayND
from astropy.io import fits
import sys
import time
main_path = "/Users/a16472/Desktop/Balco 2/Magna/"
orig_image_path = "/Users/a16472/Desktop/Balco 2/Magna/Original/"
comp_image_path = "/Users/a16472/Desktop/Balco 2/Magna/Compressed/"
compression_algorithm = 'HCOMPRESS_1'
loss_model = LossModel(main_path+'default.txt')
try:
while True:
time.sleep(1)
print("Magna is Running...")
os.chdir(orig_image_path)
file_names = os.listdir(orig_image_path)
print(file_names)
for name in file_names:
compressor = Compression(comp_dir_path = comp_image_path)
compressed_file = compressor.compress(file = name, algorithm = compression_algorithm)
temp_array = ArrayND(original_image = name, compressed_image_path = comp_image_path + compressed_file)
loss_model.update_array_list(temp_array)
loss_model.write_info(temp_array)
except Exception as err:
exception_type = type(err).__name__
print(err)
print("Magna is now Exiting.")
print(loss_model.image_arrays)
| 1,206 |
accelerator/migrations/0019_add_deferred_user_role.py | masschallenge/django-accelerator | 6 | 2025346 |
# Generated by Django 2.2.10 on 2020-04-09 21:24
from django.db import migrations
def add_deferred_user_role(apps, schema_editor):
DEFERRED_MENTOR = 'Deferred Mentor'
UserRole = apps.get_model('accelerator', 'UserRole')
Program = apps.get_model('accelerator', 'Program')
ProgramRole = apps.get_model('accelerator', 'ProgramRole')
if UserRole.objects.filter(name=DEFERRED_MENTOR).exists():
user_role = UserRole.objects.filter(name=DEFERRED_MENTOR)[0]
else:
user_role = UserRole.objects.create(name=DEFERRED_MENTOR,
sort_order=17)
for program in Program.objects.all():
if not ProgramRole.objects.filter(user_role=user_role,
program=program).exists():
name = "{} {} ({}-{})".format(
(program.end_date.year if program.end_date else ""),
DEFERRED_MENTOR,
program.program_family.url_slug.upper(),
program.pk)
ProgramRole.objects.get_or_create(
program=program,
user_role=user_role,
name=name)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0018_make_location_nonrequired'),
]
operations = [
migrations.RunPython(add_deferred_user_role,
migrations.RunPython.noop)
]
| 1,428 |
main.py | gaugustos/API_EspPy | 1 | 2024382 |
#!/usr/bin/python3
import mariadb
import sys
import os
# Login information
user1 = os.environ.get('DB_Automation_User')
password1 = os.environ.get('DB_Automation_PASSWD')
# Connect to MariaDB Platform
try:
conn = mariadb.connect(
user = user1,
password = <PASSWORD>,
host="192.168.15.2",
port=3306,
database="automationDB"
)
except mariadb.Error as ex:
print(f"An error occurred while connecting to MariaDB: {ex}")
sys.exit(1)
cur = conn.cursor()
def ConfirmRequestData(data1, data2):
print(data1)
print(data2)
#return true
def ReadDB(Data):
DataInputvalues = list()
for i in Data.values():
DataInputvalues.append(i)
DBOutput ={}
DBOutputKey = list()
DBOutputKey = ["ID_Output", "Value_Sensor_1", "Value_Sensor_2", "Value_Sensor_3", "Last_Update"]
query = f"SELECT ID_Output, Value_Sensor_1, Value_Sensor_2, Value_Sensor_3, Last_Update FROM AutomationProject WHERE ID_User= '{DataInputvalues[0]}' and ID_Board= '{DataInputvalues[1]}' and ID_HASH= '{DataInputvalues[2]}';"
cur.execute(query)
for (DBOutputKey) in cur:
DBOutput = DBOutputKey
return DBOutput
def DBwrite(DataInput):
# Get only the JSON keys and transform them into a list
JSON_Keys = DataInput.keys()
DataInputKeys = list()
for i in DataInput.keys():
DataInputKeys.append(i)
#get all title columns
cur.execute("select * from AutomationProject")
ColumsNames = [i[0]  # take element 0 (the column name) from each entry in cur.description
for i in cur.description
]
DB_Data = {}
for index in range (len(DataInputKeys)):
if DataInputKeys[index] == ColumsNames[index + 1]:
DB_Data.update([( DataInputKeys[index] ,'"' + DataInput[DataInputKeys[index]] + '"' )])
# else: # DataInputKeys[index] != ColumsNames[index + 1]:
query = f"INSERT INTO AutomationProject ({list(DB_Data.keys())}) VALUES ({(list(DB_Data.values()))})";
# Clean the string to match the MySQL standard
query1 =(query.replace("'" ,""))
query2 = (query1.replace("[" ,""))
query3 = (query2.replace("]" ,""))
cur.execute(query3)
conn.commit()
def DBcreate(DataInput):
DataInputValue = list()
for i in DataInput.values():
DataInputValue.append(i)
DataInputKeys = list()
for x in DataInput.keys():
DataInputKeys.append(x)
query1 = list()
for i in range(1 ,len(DataInput)):
query1.append(f"{DataInputKeys[i]} {DataInputValue[i]}")
CleanText = CleanDB_String(query1)
query = f"CREATE TABLE {DataInputValue[0]} ({CleanText})"
#query = "CREATE TABLE dsadadffhhkjjk"
print(query)
cur.execute(query)
conn.commit()
def CleanDB_String(TEXT):
string = " ".join(TEXT)
return string
| 2,945 |
NethermanStats.py | TheHalfling/LegendaryLivesCharacterGenerator | 0 | 2022958 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 21 16:19:46 2018
@author: Sherry
Done
"""
def CorsairStats():
#Get base stats
Agility = 6 + d6()
Alertness = 2 + d6()
Charm = 7 + d6()
Cunning = 9 + d6()
Dexterity = 10 + d6()
Fate = 13 + d6()
Intelligence = 4 + d6()
Knowledge = 12 + d6()
Mechanical = 3 + d6()
Nature = 11 + d6()
Stamina = 5 + d6()
Strength = 8 + d6()
#get speciality list started
Specialties = ['Arcane Lore', 'Caves', 'Intuition', 'Tame']
#determine Age, Night Vision, Racial Benefits
Age = Intelligence + Knowledge - 2
Night_Vision = "Yes"
Racial_Ability = "Speak with Animals"
Uses_Per_Day = 4
#Get physical stats
Height = Strength + d6()
if Height <= 11:
Height = "Very Short"
elif Height <= 13:
Height = "Short"
elif Height <= 16:
Height = "Average"
elif Height <= 18:
Height = "Tall"
elif Height <= 20:
Height = "Very Tall"
Weight = Stamina + d6()
if Weight <= 8:
Weight = "Very Thin"
elif Weight <= 10:
Weight = "Thin"
elif Weight <= 13:
Weight = "Average"
elif Weight <= 15:
Weight = "Heavy"
elif Weight <= 17:
Weight = "Very Heavy"
#get family background
Background = Fate + d6()
if Background == 15:
Background = "Gatherer"
Bronze = 10
Free = 8
new_specs = ["Direction", "Forage"]
for ea in new_specs:
if ea not in Specialties:
Specialties.append(ea)
else:
Free += 1
elif Background == 16:
Background = "Farmer"
Bronze = 10
Free = 8
new_specs = ["Plants", "Forage"]
for ea in new_specs:
if ea not in Specialties:
Specialties.append(ea)
else:
Free += 1
elif Background == 17:
Background = "Weaver"
Bronze = 10
Free = 8
new_specs = ["Bargain", "Quickness"]
for ea in new_specs:
if ea not in Specialties:
Specialties.append(ea)
else:
Free += 1
elif Background == 18:
Background = "Potter Maker"
Bronze = 110
Free = 7
new_specs = ["Build", "Business"]
for ea in new_specs:
if ea not in Specialties:
Specialties.append(ea)
else:
Free += 1
elif Background == 19:
Background = "Fisher"
Bronze = 110
Free = 7
new_specs = ["Swim", "Forage"]
for ea in new_specs:
if ea not in Specialties:
Specialties.append(ea)
else:
Free += 1
elif Background == 20:
Background = "Hunter"
Bronze = 110
Free = 7
new_specs = ["Forage", "Bow"]
for ea in new_specs:
if ea not in Specialties:
Specialties.append(ea)
else:
Free += 1
elif Background == 21:
Background = "Warrior"
Bronze = 210
Free = 6
new_specs = ["Pole Arm", "Bow"]
for ea in new_specs:
if ea not in Specialties:
Specialties.append(ea)
else:
Free += 1
elif Background == 22:
Background = "Canoe Maker"
Bronze = 210
Free = 6
new_specs = ["Build", "Boating"]
for ea in new_specs:
if ea not in Specialties:
Specialties.append(ea)
else:
Free += 1
elif Background == 23:
Background = "Toolmaker"
Bronze = 210
Free = 6
new_specs = ["Build", "Repair"]
for ea in new_specs:
if ea not in Specialties:
Specialties.append(ea)
else:
Free += 1
elif Background == 24:
Background = "Shaman"
Bronze = 310
Free = 5
new_specs = ["Will", "Heal"]
for ea in new_specs:
if ea not in Specialties:
Specialties.append(ea)
else:
Free += 1
elif Background == 25:
Background = "Chieftan"
Bronze = 310
Free = 5
new_specs = ["Legends", "Shapeshift"]
for ea in new_specs:
if ea not in Specialties:
Specialties.append(ea)
else:
Free += 1
| 4,753 |
apscheduler/triggers/date.py | fredthomsen/apscheduler | 3 | 2025300 |
from datetime import datetime, tzinfo
from typing import Optional, Union
from ..abc import Trigger
from ..marshalling import marshal_date, unmarshal_date
from ..validators import as_aware_datetime, as_timezone, require_state_version
class DateTrigger(Trigger):
"""
Triggers once on the given date/time.
:param run_time: the date/time to run the job at
:param timezone: time zone to use to convert ``run_time`` into a timezone aware datetime, if it
isn't already
"""
__slots__ = 'run_time', '_completed'
def __init__(self, run_time: datetime, timezone: Union[tzinfo, str] = 'local'):
timezone = as_timezone(timezone)
self.run_time = as_aware_datetime(run_time, timezone)
self._completed = False
def next(self) -> Optional[datetime]:
if not self._completed:
self._completed = True
return self.run_time
else:
return None
def __getstate__(self):
return {
'version': 1,
'run_time': marshal_date(self.run_time),
'completed': self._completed
}
def __setstate__(self, state):
require_state_version(self, state, 1)
self.run_time = unmarshal_date(state['run_time'])
self._completed = state['completed']
def __repr__(self):
return f"{self.__class__.__name__}('{self.run_time}')"
| 1,390 |
pybar/testing/tools/create_fixture.py | laborleben/pyBAR | 10 | 2025470 |
''' Script to reduce data files to create unit test fixtures.
'''
import tables as tb
def create_fixture(file_in, file_out, n_readouts, nodes):
with tb.open_file(file_in, 'r') as in_file:
with tb.open_file(file_out, 'w') as out_file:
in_file.copy_node('/configuration', out_file.root, recursive=True)
start, stop = None, None
if 'meta_data' in nodes:
node = in_file.get_node('/meta_data')
meta_data = node[:n_readouts]
try:
start, stop = meta_data['index_start'][0], meta_data['index_stop'][-1]
except IndexError:
start, stop = meta_data['hit_start'][0], meta_data['hit_stop'][-1]
t = out_file.create_table(out_file.root, name=node.name, description=node.description, filters=node.filters)
t.append(meta_data)
for n in nodes:
if n == 'meta_data':
continue
node = in_file.get_node('/' + n)
data = node[start:stop]
if type(node) == tb.earray.EArray:
earray = out_file.create_earray(out_file.root, name=node.name, atom=tb.UIntAtom(), shape=(0,), title=node.title, filters=node.filters)
earray.append(data)
if __name__ == '__main__':
create_fixture(file_in=r'H:\Testbeam_07032016_LFCMOS\original_data\LFCMOS_1_14Neq\lfcmos_3_efficiency\117_lfcmos_3_ext_trigger_scan.h5',
file_out='small.h5',
n_readouts=100,
nodes=['raw_data', 'meta_data'])
| 1,622 |
blaze/sql.py | chdoig/blaze | 1 | 2024643 |
from __future__ import absolute_import, division, print_function
from .compute.sql import select
from .data.sql import SQL, dispatch, first
from .expr import Expr, TableExpr, Projection, Column, UnaryOp
from .expr.scalar.core import Scalar
from .compatibility import basestring
import sqlalchemy as sa
__all__ = ['compute_one', 'SQL']
@dispatch((Column, Projection, Expr, UnaryOp), SQL)
def compute_one(t, ddesc, **kwargs):
return compute_one(t, ddesc.table, **kwargs)
@dispatch(Expr, sa.sql.ClauseElement, dict)
def post_compute(expr, query, d):
""" Execute SQLAlchemy query against SQLAlchemy engines
If the result of compute is a SQLAlchemy query then it is likely that the
data elements are themselves SQL objects which contain SQLAlchemy engines.
We find these engines and, if they are all the same, run the query against
these engines and return the result.
"""
if not all(isinstance(val, SQL) for val in d.values()):
return query
engines = set([dd.engine for dd in d.values()])
if len(set(map(str, engines))) != 1:
raise NotImplementedError("Expected single SQLAlchemy engine")
engine = first(engines)
with engine.connect() as conn: # Perform query
result = conn.execute(select(query)).fetchall()
if isinstance(expr, Scalar):
return result[0][0]
if isinstance(expr, TableExpr) and expr.iscolumn:
return [x[0] for x in result]
return result
@dispatch(SQL)
def drop(s):
s.table.drop(s.engine)
@dispatch(SQL, basestring)
def create_index(s, column, name=None, unique=False):
if name is None:
raise ValueError('SQL indexes must have a name')
sa.Index(name, getattr(s.table.c, column), unique=unique).create(s.engine)
@dispatch(SQL, list)
def create_index(s, columns, name=None, unique=False):
if name is None:
raise ValueError('SQL indexes must have a name')
args = name,
args += tuple(getattr(s.table.c, column) for column in columns)
sa.Index(*args, unique=unique).create(s.engine)
| 2,052 |
spambayes/chi2.py | mpwillson/spambayes3 | 1 | 2025478 |
import math as _math
import random
def chi2Q(x2, v, exp=_math.exp, min=min):
"""Return prob(chisq >= x2, with v degrees of freedom).
v must be even.
"""
assert v & 1 == 0
# XXX If x2 is very large, exp(-m) will underflow to 0.
m = x2 / 2.0
sum = term = exp(-m)
for i in range(1, v//2):
term *= m / i
sum += term
# With small x2 and large v, accumulated roundoff error, plus error in
# the platform exp(), can cause this to spill a few ULP above 1.0. For
# example, chi2Q(100, 300) on my box has sum == 1.0 + 2.0**-52 at this
# point. Returning a value even a teensy bit over 1.0 is no good.
return min(sum, 1.0)
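# Worked example (illustrative, not part of the original module): for v == 2 the
# loop body never runs, so chi2Q(x2, 2) reduces to exp(-x2 / 2); e.g.
# chi2Q(4.60517, 2) is approximately 0.1.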
def normZ(z, sqrt2pi=_math.sqrt(2.0*_math.pi), exp=_math.exp):
"Return value of the unit Gaussian at z."
return exp(-z*z/2.0) / sqrt2pi
def normP(z):
"""Return area under the unit Gaussian from -inf to z.
This is the probability that a zscore is <= z.
"""
# This is very accurate in a fixed-point sense. For negative z of
# large magnitude (<= -8.3), it returns 0.0, essentially because
# P(-z) is, to machine precision, indistinguishable from 1.0 then.
# sum <- area from 0 to abs(z).
a = abs(float(z))
if a >= 8.3:
sum = 0.5
else:
sum2 = term = a * normZ(a)
z2 = a*a
sum = 0.0
i = 1.0
while sum != sum2:
sum = sum2
i += 2.0
term *= z2 / i
sum2 += term
if z >= 0:
result = 0.5 + sum
else:
result = 0.5 - sum
return result
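# Illustrative check (not part of the original module): normP(0) returns 0.5, and
# normP(1.96) is approximately 0.975, the familiar 97.5th percentile of the
# standard normal distribution.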
def normIQ(p, sqrt=_math.sqrt, ln=_math.log):
"""Return z such that the area under the unit Gaussian from z to +inf is p.
Must have 0.0 <= p <= 1.0.
"""
assert 0.0 <= p <= 1.0
# This is a low-accuracy rational approximation from Abramowitz & Stegun.
# The absolute error is bounded by 3e-3.
flipped = False
if p > 0.5:
flipped = True
p = 1.0 - p
if p == 0.0:
z = 8.3
else:
t = sqrt(-2.0 * ln(p))
z = t - (2.30753 + .27061*t) / (1. + .99229*t + .04481*t**2)
if flipped:
z = -z
return z
def normIP(p):
"""Return z such that the area under the unit Gaussian from -inf to z is p.
Must have 0.0 <= p <= 1.0.
"""
z = normIQ(1.0 - p)
# One Newton step should double the # of good digits.
return z + (p - normP(z)) / normZ(z)
def main():
from spambayes.Histogram import Hist
import sys
class WrappedRandom:
# There's no way W-H is equidistributed in 50 dimensions, so use
# Marsaglia-wrapping to shuffle it more.
def __init__(self, baserandom=random.random, tabsize=513):
self.baserandom = baserandom
self.n = tabsize
self.tab = [baserandom() for _i in range(tabsize)]
self.next = baserandom()
def random(self):
result = self.next
i = int(result * self.n)
self.next = self.tab[i]
self.tab[i] = self.baserandom()
return result
random = WrappedRandom().random
#from uni import uni as random
#print random
def judge(ps, ln=_math.log, ln2=_math.log(2), frexp=_math.frexp):
H = S = 1.0
Hexp = Sexp = 0
for p in ps:
S *= 1.0 - p
H *= p
if S < 1e-200:
S, e = frexp(S)
Sexp += e
if H < 1e-200:
H, e = frexp(H)
Hexp += e
S = ln(S) + Sexp * ln2
H = ln(H) + Hexp * ln2
n = len(ps)
S = 1.0 - chi2Q(-2.0 * S, 2*n)
H = 1.0 - chi2Q(-2.0 * H, 2*n)
return S, H, (S-H + 1.0) / 2.0
warp = 0
bias = 0.99
if len(sys.argv) > 1:
warp = int(sys.argv[1])
if len(sys.argv) > 2:
bias = float(sys.argv[2])
h = Hist(20, lo=0.0, hi=1.0)
s = Hist(20, lo=0.0, hi=1.0)
score = Hist(20, lo=0.0, hi=1.0)
for _i in range(5000):
ps = [random() for _j in range(50)]
s1, h1, score1 = judge(ps + [bias] * warp)
s.add(s1)
h.add(h1)
score.add(score1)
print("Result for random vectors of 50 probs, +", warp, "forced to", bias)
# Should be uniformly distributed on all-random data.
print()
print('H', end=' ')
h.display()
# Should be uniformly distributed on all-random data.
print()
print('S', end=' ')
s.display()
# Distribution doesn't really matter.
print()
print('(S-H+1)/2', end=' ')
score.display()
def showscore(ps, ln=_math.log, ln2=_math.log(2), frexp=_math.frexp):
H = S = 1.0
Hexp = Sexp = 0
for p in ps:
S *= 1.0 - p
H *= p
if S < 1e-200:
S, e = frexp(S)
Sexp += e
if H < 1e-200:
H, e = frexp(H)
Hexp += e
S = ln(S) + Sexp * ln2
H = ln(H) + Hexp * ln2
n = len(ps)
probS = chi2Q(-2*S, 2*n)
probH = chi2Q(-2*H, 2*n)
print("P(chisq >= %10g | v=%3d) = %10g" % (-2*S, 2*n, probS))
print("P(chisq >= %10g | v=%3d) = %10g" % (-2*H, 2*n, probH))
S = 1.0 - probS
H = 1.0 - probH
score = (S-H + 1.0) / 2.0
print("spam prob", S)
print(" ham prob", H)
print("(S-H+1)/2", score)
if __name__ == '__main__':
main()
| 5,336 |
test/test_dblp.py | tbabej/betterbib | 0 | 2022891 |
# -*- coding: utf-8 -*-
#
import pybtex
import pybtex.database
import betterbib
def test_article0():
source = betterbib.Dblp()
test_entry = pybtex.database.Entry(
'article',
fields={
'title': 'Framework Deflated Krylov Augmented'
},
persons={'author': [
pybtex.database.Person('Liesen'),
pybtex.database.Person('Gaul'),
pybtex.database.Person('Nabben'),
]}
)
bt = source.find_unique(test_entry)
reference = pybtex.database.Entry(
'article',
fields={
'doi': u'10.1137/110820713',
'title': u'A Framework for Deflated and Augmented ' +
'Krylov Subspace Methods.',
'url': u'https://doi.org/10.1137/110820713',
'journal': u'SIAM J. Matrix Analysis Applications',
'number': u'2',
'volume': u'34',
'source': u'DBLP',
'year': 2013,
'pages': u'495-518'
},
persons=pybtex.database.OrderedCaseInsensitiveDict({
'author': [
pybtex.database.Person(u'Gaul, Andr\xe9'),
pybtex.database.Person(u'Gutknecht, <NAME>.'),
pybtex.database.Person(u'Liesen, J\xf6rg'),
pybtex.database.Person(u'Nabben, Reinhard')
]
}))
assert betterbib.pybtex_to_bibtex_string(bt, 'key', sort=True) \
== betterbib.pybtex_to_bibtex_string(reference, 'key', sort=True)
return
if __name__ == '__main__':
test_article0()
| 1,597 |
__init__.py | manuelnaranjo/scons-qmake-config | 0 | 2025492 |
# -*- coding: utf-8 -*-
# A SCons tool to simplify pkg-config usage on SCons
#
# Copyright (c) 2015 <NAME> < <EMAIL> >
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import subprocess
from functools import partial
def exists(env):
# we suppose the tool is always available
return True
def QmakeSupported(context):
text = 'Checking for ${QMAKE_BIN}... '
instruction = '${QMAKE_BIN} --version'
context.Message(context.env.subst(text))
ret = context.TryAction(instruction)[0]
context.Result(ret == 1)
return ret == 1
TEMPLATE = '''
CONFIG -= qt
CONFIG += release
CONFIG += %(name)s
%(extra)s
'''
def prepareQmake(name, extra=None):
pro = tempfile.NamedTemporaryFile(suffix='.pro')
out = tempfile.NamedTemporaryFile('rw+')
pro.write(TEMPLATE % {'name': name, 'extra': extra or ''})
pro.flush()
line = '${QMAKE_BIN} -o %s -nodepend %s' % (out.name, pro.name)
return pro, out ,line
def QmakeCheck(context, name, extra=None):
context.Message('Checking for %s... ' % name)
pro, out, line = prepareQmake(name, extra)
ret = context.TryAction(line)[0]
pro.close()
out.close()
context.Result(ret == 1)
return ret
def RunQmakeExtractFlags(env, name, flags = None):
pro, out, line = prepareQmake(name)
line = env.subst(line)
ret = subprocess.check_call(line, shell=True)
makefile = out.read()
pro.close()
out.close()
if flags is None:
flags = ['DEFINES', 'INCPATH', 'LIBS']
out = {}
values = ''
for line in makefile.split('\n'):
if '=' not in line:
continue
label, value = line.split('=')
label = label.strip()
if label in flags:
value = value.replace('$(SUBLIBS)', '').replace('-I.', '')
values = values + ' ' + value
out.update(env.ParseFlags(values))
for key, val in list(out.iteritems()):
if len(out[key]) == 0:
out.pop(key)
return out
def QmakeGetLibs(env, name, modifyenv = True):
out = RunQmakeExtractFlags(env, name, ['LIBS'])
if modifyenv:
env.AppendUnique(**out)
return out
def QmakeGetCflags(env, name, modifyenv = True):
out = RunQmakeExtractFlags(env, name, ['DEFINES', 'INCPATH'])
if modifyenv:
env.AppendUnique(**out)
return out
def QmakeGetAllFlags(env, name, modifyenv = True):
out = RunQmakeExtractFlags(env, name)
if modifyenv:
env.AppendUnique(**out)
return out
def generate(env):
from SCons import SConf
SConfBase = SConf.SConfBase
if not env.has_key('QMAKE_BIN'):
env['QMAKE_BIN'] = 'qmake'
class QmakeSConfBase(SConfBase):
def __init__(self, env, custom_tests = {}, *a, **kw):
qmake_tests = {
'QmakeSupported': QmakeSupported,
'QmakeCheck': QmakeCheck
}
qmake_tests.update(custom_tests)
SConfBase.__init__(self, env, qmake_tests, *a, **kw)
setattr(SConf, 'SConfBase', QmakeSConfBase)
env.AddMethod(QmakeGetLibs)
env.AddMethod(QmakeGetCflags)
env.AddMethod(QmakeGetAllFlags)
| 3,646 |
tester/test_model/test_replyid.py | bukun/TorCMS | 243 | 2024148 |
# -*- coding:utf-8 -*-
from torcms.model.abc_model import MHelper
from torcms.model.core_tab import TabReplyid
from torcms.model.replyid_model import MReplyid
class TestMReplyid():
def setup(self):
print('the setup method runs before each test case in this class')
self.uid = ''
self.reply0 = 'notok'
self.reply1 = 'no<PASSWORD>'
def add_message(self, **kwargs):
MReplyid.create_replyid(self.reply0, self.reply1)
aa = MReplyid.get_by_rid(self.reply0)
for i in aa:
if i.reply1 == self.reply1:
self.uid = i.uid
return i
def test_get_by_uid(self):
aa = self.add_message()
b = MReplyid.get_by_uid(aa.uid)
assert b.reply0 == self.reply0
assert b.reply1 == self.reply1
self.tearDown()
def test_create_replyid(self):
aa = self.add_message()
assert aa.reply1 == self.reply1
self.tearDown()
def test_get_by_rid(self):
self.add_message()
aa = MReplyid.get_by_rid(self.reply0)
for i in aa:
if i.reply1 == self.reply1:
assert i.uid == self.uid
self.tearDown()
def tearDown(self):
print("function teardown")
MHelper.delete(TabReplyid, self.uid)
self.uid = ''
| 1,289 |
Dados/reqsigeo.py | rogergamageo/tese | 0 | 2025320 |
# Function to download data from SIGEO-Niteroi Open Data (Dados Abertos)
# from a list of layer names and the API URL,
# saving to the dados/sigeo folder
import wget
import pandas as pd
# url = "https://opendata.arcgis.com/datasets/c5ba3f423a084e559cef63c07447cbb3_30.geojson"
pasta = "/home/rogeriogama/Área de Trabalho/Projetos/tese/Dados/SIGEO/"
# nomea = "logradouro"
# Request function for the SIGEO API
def baixar_sigeo(url, pasta, nomea):
wget.download(url, pasta + nomea + ".geojson")
# Download the data from a list of API endpoints
# To do: add a check so data that is already downloaded is not fetched again, downloading only new data
# if __name__ == "__main__":
df = pd.read_csv(
"/home/rogeriogama/Área de Trabalho/Projetos/tese/Dados/sigeo_dado.csv", sep=';')
# print(df)
for x in range(len(df)):
baixar_sigeo(df['url'][x], pasta, df['nomea'][x])
| 892 |
test_trafficAssignment.py | salomonw/tnet | 0 | 2025137 |
import sys
import joint.tnet as tnet
import os
import trafficAssignment.assign as ta
netFile = "networks/NYC_full_net.txt"
gFile = "networks/NYC_full_trips.txt"
#netFile = "networks/EMA_net.txt"
#gFile = "networks/EMA_trips.txt"
#netFile = "networks/Braess1_net.txt"
#gFile = "networks/Braess1_trips.txt"
fcoeffs_truth = [1,0,0,0,0.15,0]
tNet = tnet.tNet(netFile=netFile, gFile=gFile, fcoeffs=fcoeffs_truth)
G = ta.assignment(G=tNet.G, g=tNet.g, fcoeffs=tNet.fcoeffs, flow=False, method='FW', accuracy=0.0001, max_iter=20)
print('-----------------')
tNet.G = G
tNet.fcoeffs = [1,0,0,0,0.20,0]
G = ta.assignment(G=tNet.G, g=tNet.g, fcoeffs=tNet.fcoeffs, flow=True, method='FW', accuracy=0.0001, max_iter=20)
print([G[i][j]['flow'] for i,j in G.edges()])
| 760 |
tests/shake/test_classy_author.py | vlcinsky/pytest_xfiles | 3 | 2023721 |
"""Test user defined fixture `classy_author` (see `conftest.py`) derived from
`package_yaml`. """
def test_custom_fixture(classy_author):
print(classy_author.full_name)
| 175 |
dashboard/apps/reports/tests/test_payroll_upload.py | ministryofjustice/moj-product-dashboard | 6 | 2025234 |
# -*- coding: utf-8 -*-
from datetime import date
from decimal import Decimal
from os.path import dirname, abspath, join
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.utils.datastructures import MultiValueDict
from model_mommy import mommy
import pytest
from dashboard.apps.dashboard.models import Person
from ..forms import PayrollUploadForm
@pytest.mark.django_db
def test_valid_upload_form():
p1 = mommy.make(Person, name='X Surname1', is_contractor=False)
p2 = mommy.make(Person, name='B Surname2', is_contractor=False)
p3 = mommy.make(Person, name='C Surname3', is_contractor=False)
mommy.make(Person, name='C Surname3', is_contractor=True)
fb = open(abspath(join(dirname(__file__), 'data/payroll_test.xls')), 'rb')
form = PayrollUploadForm(
data={'date_range': '%s:%s' % (date(2016, 1, 1), date(2016, 1, 31))},
files=MultiValueDict(
{'payroll_file': [SimpleUploadedFile(fb.name, fb.read())]}
),
)
assert form.is_valid() is True
assert form.cleaned_data['date_range'] == (date(2016, 1, 1), date(2016, 1, 31))
assert form.cleaned_data['payroll_file'] == [{'person': p1,
'rate': Decimal('0.05'),
'staff_number': 123470,
'start': date(2016, 1, 1),
'end': date(2016, 1, 31),
'additional': {'ASLC': Decimal('1'),
'ERNIC': Decimal('1'),
'FTE': Decimal('1'),
'Misc.Allow.': Decimal('1'),
'Write Offs': Decimal('-1')}},
{'person': p2,
'rate': Decimal('0.05'),
'staff_number': 123504,
'start': date(2016, 1, 1),
'end': date(2016, 1, 31),
'additional': {'ASLC': Decimal('1'),
'ERNIC': Decimal('1'),
'FTE': Decimal('1'),
'Misc.Allow.': Decimal('1')}},
{'person': p3,
'rate': Decimal('0.05'),
'staff_number': 123507,
'start': date(2016, 1, 1),
'end': date(2016, 1, 31),
'additional': {'ASLC': Decimal('1'),
'ERNIC': Decimal('1'),
'FTE': Decimal('1'),
'Misc.Allow.': Decimal('1')}}]
assert form.errors == {}
assert form.month == '2016-01'
assert form.save() is None
assert form.save() is None
@pytest.mark.django_db
def test_invalid_upload_form():
mommy.make(Person, name='AD Surname1', is_contractor=False)
mommy.make(Person, name='AB Surname1', is_contractor=False)
mommy.make(Person, name='B Surname2', is_contractor=False)
mommy.make(Person, name='X Surname3', is_contractor=False)
mommy.make(Person, name='P Surname3', is_contractor=False)
fb = open(abspath(join(dirname(__file__), 'data/payroll_test.xls')), 'rb')
form = PayrollUploadForm(
data={'date_range': '%s:%s' % (date(2016, 1, 1), date(2016, 1, 31))},
files=MultiValueDict(
{'payroll_file': [SimpleUploadedFile(fb.name, fb.read())]}
),
)
assert form.is_valid() is False
assert form.errors == {
'payroll_file': ['ERROR ROW 2: Multiple Civil Servants found with '
'Surname "SURNAME1"',
'ERROR ROW 7: Civil Servant not found with Surname '
'"SURNAME3" and initials "CN"']
}
class PayrollUploadTestCase(TestCase):
fixtures = ['auth_group_permissions.yaml', 'test_users']
def test_view_post_with_error(self):
fb = open(abspath(join(dirname(__file__), 'data/payroll_test.xls')), 'rb')
self.client.login(username='test_finance', password='<PASSWORD>')
self.client.get("/admin/reports/report/upload/")
response = self.client.post(
'/admin/reports/report/upload/',
{'date_range': '%s:%s' % (date(2016, 1, 1), date(2016, 1, 31)), 'payroll_file': fb})
self.assertEqual(response.status_code, 200)
self.assertTrue('ERROR' in str(response.content))
def test_view_post_no_error(self):
fb = open(abspath(join(dirname(__file__), 'data/payroll_test.xls')), 'rb')
self.client.login(username='test_finance', password='<PASSWORD>')
self.client.get("/admin/reports/report/upload/")
mommy.make(Person, name='X Surname1', is_contractor=False)
mommy.make(Person, name='B Surname2', is_contractor=False)
mommy.make(Person, name='C Surname3', is_contractor=False)
mommy.make(Person, name='C Surname3', is_contractor=True)
response = self.client.post(
'/admin/reports/report/upload/',
{'date_range': '%s:%s' % (date(2016, 1, 1), date(2016, 1, 31)), 'payroll_file': fb})
self.assertEqual(response.status_code, 200)
self.assertTrue('ERROR' not in str(response.content))
def test_cant_access_upload(self):
fb = open(abspath(join(dirname(__file__), 'data/payroll_test.xls')), 'rb')
self.client.login(username='admin', password='<PASSWORD>')
response = self.client.post(
'/admin/reports/report/upload/',
{'date_range': '%s:%s' % (date(2016, 1, 1), date(2016, 1, 31)), 'payroll_file': fb})
self.assertEqual(response.status_code, 403)
| 6,435 |
src/custom/receivers.py | LePetitTim/visu-back | 0 | 2023754 |
import logging
import sys
from io import BytesIO
from django.core.management import call_command
from django.dispatch import receiver
from django_geosource.signals import refresh_data_done
logger = logging.getLogger(__name__)
@receiver(refresh_data_done)
def refresh_es(sender, **kwargs):
try:
logger.info('Starting elasticsearch indexing')
sys.stdout.encoding = None
sys.stdout.buffer = BytesIO()
call_command('etl_features_to_es', '-layer', kwargs['layer']) #noqa
logger.info('Elasticsearch indexing success')
except Exception:
logger.error('An error occurred during indexing', exc_info=True)
logger.info('End of elasticsearch indexing')
| 703 |
normalise-wav/find_normal_power.py | CostasAK/cactbot-user | 1 | 2025420 |
import os
import re
import math
import numpy as np
import pyloudnorm as pyln
from scipy.io import wavfile
path = os.getenv('APPDATA') + '\\anoyetta\\ACT\\tts cache\\'
files = [f for f in os.listdir(path) if re.search(r'^SAPI5\..*\.wav$', f)]
mean_power = list()
mean_loudness = list()
for f in files:
max_var = 0
fs, data = wavfile.read(path + f)
if np.max(data) > np.max(-1*data):
data = data.astype('float32')/32767.0
else:
data = data.astype('float32')/32768.0
meter = pyln.Meter(fs) # create BS.1770 meter
loudness = meter.integrated_loudness(data) # measure loudness
mean_loudness.append(loudness)
print('Loudness: ', loudness)
mean_loudness = np.mean(mean_loudness)
print('meanL: ', mean_loudness)
normal_power = {
"fs": fs,
"loudness": mean_loudness
}
np.save('normal_power', normal_power, allow_pickle=True)
| 930 |
funcstructs/utils/subsequences.py | caleblevy/endofunction-structures | 5 | 2023470 |
"""<NAME>, 2015."""
import functools
import operator
def runs(seq, comparison):
""" Given an iterable seq and a comparison function, returns a generator of
the subsequences of seq such that comparison(seq[I],seq[I+1]) holds for
0<=I<=len(seq)-1.
For example, if comparison is >=, then this returns nondecreasing
subsequences, while comparison of > returns increasing. Equivalent to
sympy's runs() method."""
seq = iter(seq)
term_prev = next(seq)
subseq = [term_prev]
for term in seq:
if comparison(term, term_prev):
subseq.append(term)
else:
yield subseq
subseq = [term]
term_prev = term
if subseq:
yield subseq
def runner(comparison):
"""Generator function factory for monotone subsequences."""
return functools.partial(runs, comparison=comparison)
increasing = runner(operator.gt)
nondecreasing = runner(operator.ge)
decreasing = runner(operator.lt)
nonincreasing = runner(operator.le)
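# Illustrative usage (not part of the original module): for seq = [3, 4, 2, 2, 5],
# list(increasing(seq)) yields [[3, 4], [2], [2, 5]], while
# list(nondecreasing(seq)) yields [[3, 4], [2, 2, 5]].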
def startswith(seq, start):
""" Given a sequence seq and boolean function of a single input cond,
returns a generator of subsequences such that a new subsequence begins if
and only if cond is true for the first element in the subsequence. If cond
is never true, returns the original sequence. """
subseq = []
for el in seq:
if el == start:
if subseq:
yield subseq
subseq = [el]
else:
subseq.append(el)
if subseq:
yield subseq
def endswith(seq, end):
""" Return a generator returning subsequences of seq each ending with an
element satisfying the boolean lambda function cond. """
subseq = []
for el in seq:
subseq.append(el)
if el == end:
yield subseq
subseq = []
if subseq:
yield subseq
| 1,879 |
CLE/Module_DeploymentMonitoring/src/config.py | CherBoon/Cloudtopus | 3 | 2023739 |
# AWS configuration values
AWS_USER = '<can input personal info here>'
AWS_ACCESS_KEY_ID = '<can input personal info here>'
AWS_SECRET_ACCESS_KEY = '<can input personal info here>'
REGION_NAME = 'ap-southeast-1'
OUTPUT_FILE = 'json'
# SERVER configuration values
SSH_KEYS_FOLDER = '/home/ec2-user/.ssh/authorized_keys'
| 320 |
extra_foam/special_suite/tests/test_vectorview.py | zhujun98/EXtra-foam | 0 | 2024454 |
from collections import Counter
import pytest
import numpy as np
from extra_foam.pipeline.tests import _TestDataMixin
from extra_foam.special_suite import logger, mkQApp
from extra_foam.pipeline.exceptions import ProcessingError
from extra_foam.special_suite.vector_view import (
VectorViewProcessor, VectorViewWindow, VectorPlot, VectorCorrelationPlot,
InTrainVectorCorrelationPlot
)
from . import _SpecialSuiteWindowTestBase, _SpecialSuiteProcessorTestBase
app = mkQApp()
logger.setLevel('INFO')
class TestCamViewWindow(_SpecialSuiteWindowTestBase):
@classmethod
def setUpClass(cls):
cls._win = VectorViewWindow('SCS')
@classmethod
def tearDownClass(cls):
# explicitly close the MainGUI to avoid error in GuiLogger
cls._win.close()
@staticmethod
def data4visualization():
"""Override."""
return {
"vector1": np.arange(10),
"vector2": np.arange(10) + 5,
"vector1_full": np.arange(100),
"vector2_full": np.arange(100) + 5,
}
def testWindow(self):
win = self._win
self.assertEqual(3, len(win._plot_widgets_st))
counter = Counter()
for key in win._plot_widgets_st:
counter[key.__class__] += 1
self.assertEqual(1, counter[VectorPlot])
self.assertEqual(1, counter[InTrainVectorCorrelationPlot])
self.assertEqual(1, counter[VectorCorrelationPlot])
self._check_update_plots()
def testCtrl(self):
win = self._win
ctrl_widget = win._ctrl_widget_st
proc = win._worker_st
# test default values
self.assertEqual('XGM intensity', proc._vector1)
self.assertEqual('', proc._vector2)
# test set new values
widget = ctrl_widget.vector1_cb
widget.setCurrentText("ROI FOM")
self.assertEqual("ROI FOM", proc._vector1)
widget = ctrl_widget.vector2_cb
widget.setCurrentText("Digitizer pulse integral")
self.assertEqual("Digitizer pulse integral", proc._vector2)
class TestVectorViewProcessor(_TestDataMixin, _SpecialSuiteProcessorTestBase):
@pytest.fixture(autouse=True)
def setUp(self):
self._proc = VectorViewProcessor(object(), object())
self._proc._vector1 = "XGM intensity"
def testProcessing(self):
data, processed = self.simple_data(1001, (4, 2, 2))
proc = self._proc
with pytest.raises(ProcessingError, match="XGM intensity is not available"):
proc.process(data)
processed.pulse.xgm.intensity = np.random.rand(4)
ret = proc.process(data)
self._check_processed_data_structure(ret)
self._proc._vector2 = "ROI FOM"
processed.pulse.roi.fom = np.random.rand(5)
with pytest.raises(ProcessingError, match="Vectors have different lengths"):
proc.process(data)
processed.pulse.roi.fom = np.random.rand(4)
proc.process(data)
self._proc._vector2 = "Digitizer pulse integral"
processed.pulse.digitizer.ch_normalizer = 'B'
processed.pulse.digitizer['B'].pulse_integral = np.random.rand(4)
proc.process(data)
def _check_processed_data_structure(self, ret):
"""Override."""
data_gt = TestCamViewWindow.data4visualization().keys()
assert set(ret.keys()) == set(data_gt)
| 3,384 |
pava/implementation/natives/java/lang/ProcessEnvironment.py | laffra/pava | 4 | 2024774 |
def add_native_methods(clazz):
def environmentBlock____(a0):
raise NotImplementedError()
clazz.environmentBlock____ = staticmethod(environmentBlock____)
| 171 |
samples/cfgs/data/dss18_cls_task_incr.py | openvinotoolkit/model_preparation_algorithm | 0 | 2024734 |
_base_ = [
'./pipelines/hflip_resize.py'
]
__dataset_type = 'LwfTaskIncDataset'
__train_pipeline = {{_base_.train_pipeline}}
__test_pipeline = {{_base_.test_pipeline}}
data = dict(
samples_per_gpu=32,
workers_per_gpu=2,
pipeline_options=dict(
Resize=dict(
size=(256, 128)
),
Normalize=dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375]
)
),
train=dict(
type=__dataset_type,
data_prefix='data/dss18/DSS18_person',
data_file='data/dss18/csvs/dss18.data.csv',
ann_file='data/dss18/csvs/dss18.anno.train.csv',
pipeline=__train_pipeline,
tasks=dict(
Age=['Other', 'Senior', 'Kids', 'Unknown'],
)
),
val=dict(
type=__dataset_type,
data_prefix='data/dss18/DSS18_person',
data_file='data/dss18/csvs/dss18.data.csv',
ann_file='data/dss18/csvs/dss18.anno.val.csv',
pipeline=__test_pipeline,
tasks=dict(
Age=['Other', 'Senior', 'Kids', 'Unknown'],
Gender=['Male', 'Female', 'Unknown'],
Backpack=['Yes', 'No'],
Longhair=['Yes', 'No', 'Unknown']
)
),
test=dict(
# replace `data/val` with `data/test` for standard test
type=__dataset_type,
data_prefix='data/dss18/DSS18_person',
data_file='data/dss18/csvs/dss18.data.csv',
ann_file='data/dss18/csvs/dss18.anno.test.csv',
pipeline=__test_pipeline,
tasks=dict(
Age=['Other', 'Senior', 'Kids', 'Unknown'],
Gender=['Male', 'Female', 'Unknown'],
Backpack=['Yes', 'No'],
Longhair=['Yes', 'No', 'Unknown']
)
)
)
| 1,754 |
Python/Python For Absolute Beginner/76 Else & Finally In Except.py
|
omkarsutar1255/Python-Data
| 0 |
2022684
|
f1 = open("reading file 1")
try:
f2 = open("reading 2") # file not exist
except EOFError as e:
print("EOF ala ahe")
except IOError as e:
print("IO ala ahe")
else:
print("this will run only if except is not running")
finally:  # whether the try block or an except block runs, finally always runs
print("run this anyway")
f1.close()
# f2.close()
print("Important stuff")
| 416 |
python2.7libs/hammer_tools/merge_objects.py | anvdev/Hammer-Tools | 19 | 2024465 |
from __future__ import print_function
try:
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
Signal = pyqtSignal
except ImportError:
from PySide2.QtWidgets import *
from PySide2.QtGui import *
from PySide2.QtCore import *
import hou
def mergeObjects(nodes, name=None, stash=False, add_groups=False, add_name_attrib=False, attrib_name='name', delete_original=False):
if not nodes:
return
with hou.undos.group('Merge Objects'):
geo = nodes[0].parent().createNode('geo', name)
merge = geo.createNode('merge', name)
merge.setDisplayFlag(True)
for node in nodes:
new_node = geo.createNode('object_merge')
object_merge = new_node
object_merge.parm('objpath1').set(node.path())
object_merge.parm('xformtype').set(1)
if add_groups:
new_node = new_node.createOutputNode('groupcreate')
new_node.parm('groupname').set(node.name())
if add_name_attrib:
new_node = new_node.createOutputNode('name')
new_node.parm('attribname').set(attrib_name or 'name')
new_node.parm('name1').set(node.name())
if stash:
new_node = new_node.createOutputNode('stash')
new_node.setName(node.name(), True)
new_node.parm('stashinput').pressButton()
input_nodes = new_node.inputs()
while input_nodes:
input_node = input_nodes[0]
input_nodes = input_node.inputs()
input_node.destroy()
if delete_original:
node.destroy()
else:
object_merge.setName(node.name(), True)
merge.setNextInput(new_node)
geo.layoutChildren()
class MergeObjectsOptions(QDialog):
def __init__(self, parent=None):
super(MergeObjectsOptions, self).__init__(parent, Qt.Window)
self.setWindowTitle('Merge Options')
self.setStyleSheet(hou.qt.styleSheet())
main_layout = QVBoxLayout(self)
main_layout.setContentsMargins(4, 4, 4, 4)
main_layout.setSpacing(2)
self.name_field = QLineEdit()
self.name_field.setPlaceholderText('New name')
main_layout.addWidget(self.name_field)
self.stash_toggle = QCheckBox('Stash Geometry')
main_layout.addWidget(self.stash_toggle)
self.delete_original_toggle = QCheckBox('Delete Sources')
self.delete_original_toggle.setChecked(True)
self.delete_original_toggle.setEnabled(False)
self.stash_toggle.stateChanged.connect(self.delete_original_toggle.setEnabled)
main_layout.addWidget(self.delete_original_toggle)
self.group_toggle = QCheckBox('Add Groups')
main_layout.addWidget(self.group_toggle)
self.name_attrib_toggle = QCheckBox('Add Name Attribute')
main_layout.addWidget(self.name_attrib_toggle)
self.name_attrib_name_field = QLineEdit('name')
self.name_attrib_name_field.setPlaceholderText('Attribute name')
self.name_attrib_name_field.setEnabled(False)
self.name_attrib_toggle.stateChanged.connect(self.name_attrib_name_field.setEnabled)
main_layout.addWidget(self.name_attrib_name_field)
button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal)
button_box.accepted.connect(self.accept)
button_box.rejected.connect(self.reject)
main_layout.addWidget(button_box)
@classmethod
def getOptions(cls):
dialog = cls(hou.qt.mainWindow())
enable = dialog.exec_()
return {'enable': enable,
'name': dialog.name_field.text().strip() if dialog.name_field.text().strip() else None,
'stash': dialog.stash_toggle.isChecked(),
'delete_original': dialog.delete_original_toggle.isChecked(),
'add_groups': dialog.group_toggle.isChecked(),
'add_name_attrib': dialog.name_attrib_toggle.isChecked(),
'attrib_name': dialog.name_attrib_name_field.text()} if enable else {'enable': enable}
def mergeSelectedObjects():
selected_nodes = hou.selectedNodes()
if selected_nodes and selected_nodes[0].type().category() == hou.objNodeTypeCategory():
options = MergeObjectsOptions.getOptions()
if options.pop('enable'):
mergeObjects(selected_nodes, **options)
| 4,544 |
application/commonApp/migrations/0001_initial.py | Marcelotsvaz/vaz-projects | 0 | 2025101 |
# Generated by Django 4.0.1 on 2022-03-02 01:00
import commonApp.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='UserImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('identifier', models.SlugField(max_length=100, verbose_name='identifier')),
('alt', models.CharField(blank=True, max_length=250, verbose_name='description')),
('attribution', models.CharField(blank=True, max_length=250, verbose_name='attribution')),
('notes', models.CharField(blank=True, max_length=250, verbose_name='notes')),
('image_original', models.ImageField(upload_to=commonApp.models.getUploadFolder('{}-original', method='identifier'), verbose_name='image')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
],
options={
'verbose_name': 'user image',
'verbose_name_plural': 'user images',
},
),
migrations.AddConstraint(
model_name='userimage',
constraint=models.UniqueConstraint(fields=('identifier', 'content_type', 'object_id'), name='uniqueForObject'),
),
]
| 1,629 |
d2go/model_zoo/model_zoo.py | sstsai-adl/d2go | 687 | 2025338 |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from typing import Optional
import pkg_resources
import torch
from d2go.runner import create_runner
from d2go.utils.launch_environment import MODEL_ZOO_STORAGE_PREFIX
from detectron2.checkpoint import DetectionCheckpointer
class _ModelZooUrls(object):
"""
Mapping from names to officially released D2Go pre-trained models.
"""
CONFIG_PATH_TO_URL_SUFFIX = {
"faster_rcnn_fbnetv3a_C4.yaml": "268421013/model_final.pth",
"faster_rcnn_fbnetv3a_dsmask_C4.yaml": "268412271/model_0499999.pth",
"faster_rcnn_fbnetv3g_fpn.yaml": "250356938/model_0374999.pth",
"mask_rcnn_fbnetv3a_C4.yaml": "268421013/model_final.pth",
"mask_rcnn_fbnetv3a_dsmask_C4.yaml": "268412271/model_0499999.pth",
"mask_rcnn_fbnetv3g_fpn.yaml": "287445123/model_0409999.pth",
"keypoint_rcnn_fbnetv3a_dsmask_C4.yaml": "250430934/model_0389999.pth",
}
def get_checkpoint_url(config_path):
"""
Returns the URL to the model trained using the given config
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
Returns:
str: a URL to the model
"""
name = config_path.replace(".yaml", "")
if config_path in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX:
suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[config_path]
return MODEL_ZOO_STORAGE_PREFIX + suffix
raise RuntimeError("{} not available in Model Zoo!".format(name))
def get_config_file(config_path):
"""
Returns path to a builtin config file.
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
Returns:
str: the real path to the config file.
"""
cfg_file = pkg_resources.resource_filename(
"d2go", os.path.join("configs", config_path)
)
if not os.path.exists(cfg_file):
raise RuntimeError("{} not available in Model Zoo!".format(config_path))
return cfg_file
def get_config(
config_path, trained: bool = False, runner="d2go.runner.GeneralizedRCNNRunner"
):
"""
Returns a config object for a model in model zoo.
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights.
If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used
instead; this will typically (though not always) initialize a subset of weights using
an ImageNet pre-trained model, while randomly initializing the other weights.
Returns:
CfgNode: a config object
"""
cfg_file = get_config_file(config_path)
runner = create_runner(runner)
cfg = runner.get_default_cfg()
cfg.merge_from_file(cfg_file)
if trained:
cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path)
return cfg
def get(
config_path,
trained: bool = False,
device: Optional[str] = None,
runner="d2go.runner.GeneralizedRCNNRunner",
):
"""
Get a model specified by relative path under Detectron2's official ``configs/`` directory.
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
trained (bool): see :func:`get_config`.
device (str or None): overwrite the device in config, if given.
Returns:
nn.Module: a d2go model. Will be in training mode.
Example:
::
from d2go import model_zoo
model = model_zoo.get("faster_rcnn_fbnetv3a_C4.yaml", trained=True)
"""
cfg = get_config(config_path, trained)
if device is not None:
cfg.MODEL.DEVICE = device
elif not torch.cuda.is_available():
cfg.MODEL.DEVICE = "cpu"
runner = create_runner(runner)
model = runner.build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
return model
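# Hypothetical usage sketch; the config name comes from _ModelZooUrls above and
# "cpu" is only an example device override.
if __name__ == "__main__":
    cfg = get_config("faster_rcnn_fbnetv3a_C4.yaml", trained=True)
    print(cfg.MODEL.WEIGHTS)  # URL of the released checkpoint
    model = get("faster_rcnn_fbnetv3a_C4.yaml", trained=True, device="cpu")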
| 4,216 |
src/DataProvider.py
|
kat3chrome/ConversionChartTrainer
| 0 |
2024746
|
from random import choice
from string import digits, ascii_lowercase, ascii_uppercase, ascii_letters, printable
from src.Level import Level, LevelType
from src.Value import Value, ValueType
class DataProvider():
def __init__(self, initial, final, level):
self.initial = Value(initial)
self.final = Value(final)
self.level = Level(level)
def __get_character(self):
if self.level.type == LevelType.DIGITS:
return choice(digits)
elif self.level.type == LevelType.LOWERCASE:
return choice(ascii_lowercase)
elif self.level.type == LevelType.UPPERCASE:
return choice(ascii_uppercase)
elif self.level.type == LevelType.LETTERS:
return choice(ascii_letters)
elif self.level.type == LevelType.PRINTABLE:
return choice(printable)
def __get_character_tuple(self):
def integer_to_base(integer, base):
if not 1 <= base <= 36:
return chr(integer)
letters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
result = ""
while integer:
result += letters[(integer % base) % len(letters)]
integer //= base
return result[::-1] or "0"
def get_character_by_type(character, result_type):
if result_type.type == ValueType.Number:
character = integer_to_base(ord(character), int(result_type.name))
return character
character = self.__get_character()
character_initial = get_character_by_type(character, self.initial)
character_final = get_character_by_type(character, self.final)
return character, character_initial, character_final
def get_data(self, size):
data = [self.__get_character_tuple() for i in range(int(size))]
return {
'ASCII': [i[0] for i in data],
'initial': [i[1] for i in data],
'final': [i[2] for i in data]
}
| 2,009 |
congress_tempest_tests/tests/scenario/congress_datasources/test_murano.py
|
mail2nsrajesh/congress
| 0 |
2025362
|
# Copyright (c) 2015 Hewlett-Packard. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import string
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from congress_tempest_tests.tests.scenario import manager_congress
CONF = config.CONF
class TestMuranoDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestMuranoDriver, cls).skip_checks()
if not getattr(CONF.service_available, 'murano', False):
msg = ("%s skipped as murano is not available" %
cls.__class__.__name__)
raise cls.skipException(msg)
if not (CONF.network.project_networks_reachable
or CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
def setUp(self):
super(TestMuranoDriver, self).setUp()
self.congress_client = (
self.admin_manager.congress_client)
@decorators.attr(type='smoke')
@test.services('compute')
def test_murano_predeployment(self):
def _delete_policy_rules(policy_name):
result = self.congress_client.list_policy_rules(
policy_name)['results']
for rule in result:
self.congress_client.delete_policy_rule(
policy_name,
rule['id'])
def _create_random_policy():
            policy_name = "murano_%s" % ''.join(random.choice(string.ascii_lowercase)
for x in range(10))
body = {"name": policy_name}
resp = self.congress_client.create_policy(body)
self.addCleanup(_delete_policy_rules, resp['name'])
return resp['name']
def _create_datasource():
body = {"config": {"username": CONF.auth.admin_username,
"tenant_name": CONF.auth.admin_project_name,
"password": CONF.auth.admin_password,
"auth_url": CONF.identity.uri},
"driver": "murano",
"name": "murano"}
try:
datasource = self.congress_client.create_datasource(body)['id']
self.addCleanup(self.congress_client.delete_datasource,
datasource)
except exceptions.Conflict:
pass
def _create_rule(policy_name, rule):
self.congress_client.create_policy_rule(policy_name, rule)
def _simulate_policy(policy_name, query):
resp = self.congress_client.execute_policy_action(
policy_name,
"simulate",
False,
False,
query)
return resp['result']
rule1 = {
"rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
"flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
"equal(flavor, \"m1.medium\")"
}
rule2 = {
"rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
"flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
"equal(flavor, \"m1.small\")"
}
rule3 = {
"rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
"flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
"equal(flavor, \"m1.tiny\")"
}
rule4 = {
"rule": "murano_pending_envs(env_id) :- "
"murano:objects(env_id, tenant_id, \"io.murano.Environment\"),"
"murano:states(env_id, env_state),"
"equal(env_state, \"pending\")"
}
rule5 = {
"rule": "murano_instances(env_id, instance_id) :- "
"murano:objects(env_id, tenant_id, \"io.murano.Environment\"),"
"murano:objects(service_id, env_id, service_type),"
"murano:parent_types(service_id, \"io.murano.Object\"),"
"murano:parent_types(service_id, \"io.murano.Application\"),"
"murano:parent_types(service_id, service_type),"
"murano:objects(instance_id, service_id, instance_type),"
"murano:parent_types(instance_id,"
"\"io.murano.resources.Instance\"),"
"murano:parent_types(instance_id, \"io.murano.Object\"),"
"murano:parent_types(instance_id, instance_type)"
}
rule6 = {
"rule": "murano_instance_flavors(instance_id, flavor) :- "
"murano:properties(instance_id, \"flavor\", flavor)"
}
rule7 = {
"rule": "predeploy_error(env_id) :- "
"murano_pending_envs(env_id),"
"murano_instances(env_id, instance_id),"
"murano_instance_flavors(instance_id, flavor),"
"not allowed_flavors(flavor)"
}
sim_query1 = {
"query": "predeploy_error(env_id)",
"action_policy": "action",
"sequence": "murano:objects+(\"env_uuid\", \"tenant_uuid\","
"\"io.murano.Environment\") murano:states+(\"env_uuid\", "
"\"pending\") murano:objects+(\"service_uuid\", \"env_uuid\", "
"\"service_type\") murano:parent_types+(\"service_uuid\", "
"\"io.murano.Object\") murano:parent_types+(\"service_uuid\", "
"\"io.murano.Application\") murano:parent_types+(\"service_uuid\","
"\"service_type\") murano:objects+(\"instance_uuid\", "
"\"service_uuid\", \"service_type\") murano:objects+(\""
"instance_uuid\", \"service_uuid\", \"instance_type\") "
"murano:parent_types+(\"instance_uuid\", "
"\"io.murano.resources.Instance\") murano:parent_types+(\""
"instance_uuid\", \"io.murano.Object\") murano:parent_types+(\""
"instance_uuid\", \"instance_type\") murano:properties+(\""
"instance_uuid\", \"flavor\", \"m1.small\")"
}
sim_query2 = {
"query": "predeploy_error(env_id)",
"action_policy": "action",
"sequence": "murano:objects+(\"env_uuid\", \"tenant_uuid\","
"\"io.murano.Environment\") murano:states+(\"env_uuid\", "
"\"pending\") murano:objects+(\"service_uuid\", \"env_uuid\", "
"\"service_type\") murano:parent_types+(\"service_uuid\", "
"\"io.murano.Object\") murano:parent_types+(\"service_uuid\", "
"\"io.murano.Application\") murano:parent_types+(\"service_uuid\","
"\"service_type\") murano:objects+(\"instance_uuid\", "
"\"service_uuid\", \"service_type\") murano:objects+(\""
"instance_uuid\", \"service_uuid\", \"instance_type\") "
"murano:parent_types+(\"instance_uuid\", "
"\"io.murano.resources.Instance\") murano:parent_types+(\""
"instance_uuid\", \"io.murano.Object\") murano:parent_types+(\""
"instance_uuid\", \"instance_type\") murano:properties+(\""
"instance_uuid\", \"flavor\", \"m1.large\")"
}
_create_datasource()
policy_name = _create_random_policy()
_create_rule(policy_name, rule1)
_create_rule(policy_name, rule2)
_create_rule(policy_name, rule3)
_create_rule(policy_name, rule4)
_create_rule(policy_name, rule5)
_create_rule(policy_name, rule6)
_create_rule(policy_name, rule7)
result = _simulate_policy(policy_name, sim_query1)
self.assertEmpty(result)
result = _simulate_policy(policy_name, sim_query2)
self.assertEqual('predeploy_error("env_uuid")', result[0])
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('murano'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')
| 8,821 |
models/tensorflow/nnf_tf_freezer/convert_graph_fp16.py
|
lynex/nnfusion
| 639 |
2025414
|
import tensorflow as tf
from tensorflow.core.framework import types_pb2, graph_pb2, attr_value_pb2
from tensorflow.tools.graph_transforms import TransformGraph
from google.protobuf import text_format
import numpy as np
def convert_graph_to_fp16(source_graph_def, target_type='fp16', input_name=None, output_names=None, keep_fp32_node_name=[]):
if target_type == 'fp16':
dtype = types_pb2.DT_HALF
elif target_type == 'fp64':
dtype = types_pb2.DT_DOUBLE
else:
dtype = types_pb2.DT_FLOAT
target_graph_def = graph_pb2.GraphDef()
target_graph_def.versions.CopyFrom(source_graph_def.versions)
for node in source_graph_def.node:
# replicate node
new_node = target_graph_def.node.add()
new_node.op = node.op
new_node.name = node.name
new_node.input.extend(node.input)
attrs = list(node.attr.keys())
# replace dtype in node attr with target dtype
for attr in attrs:
# keep special node in fp32
new_node.attr[attr].CopyFrom(node.attr[attr])
if node.name in keep_fp32_node_name:
continue
if node.attr[attr].type == types_pb2.DT_FLOAT:
# modify node dtype
new_node.attr[attr].type = dtype
if attr == "value":
tensor = node.attr[attr].tensor
if tensor.dtype == types_pb2.DT_FLOAT:
# if float_val exists
if tensor.float_val:
float_val = tf.make_ndarray(node.attr[attr].tensor)
new_node.attr[attr].tensor.CopyFrom(tf.make_tensor_proto(float_val, dtype=dtype))
continue
# if tensor content exists
if tensor.tensor_content:
tensor_shape = [x.size for x in tensor.tensor_shape.dim]
tensor_weights = tf.make_ndarray(tensor)
# reshape tensor
tensor_weights = np.reshape(tensor_weights, tensor_shape)
new_node.attr[attr].tensor.CopyFrom(tf.make_tensor_proto(tensor_weights, dtype=dtype))
continue
# transform graph
if output_names:
if not input_name:
input_name = []
transforms = ["strip_unused_nodes"]
target_graph_def = TransformGraph(target_graph_def, input_name, output_names, transforms)
# write graph_def to model
print("Converting done ...")
return target_graph_def
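# Hypothetical usage sketch; the .pb paths and the 'input'/'output' node names
# below are placeholders, not part of the original script.
if __name__ == '__main__':
    with open('frozen_model_fp32.pb', 'rb') as f:
        source_graph = graph_pb2.GraphDef()
        source_graph.ParseFromString(f.read())
    fp16_graph = convert_graph_to_fp16(source_graph, target_type='fp16',
                                       input_name=['input'], output_names=['output'],
                                       keep_fp32_node_name=[])
    with open('frozen_model_fp16.pb', 'wb') as f:
        f.write(fp16_graph.SerializeToString())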
| 2,558 |
Practice/Practice1.py
|
sethmh82/SethDevelopment
| 0 |
2024516
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 2 11:37:54 2021
@author: SethHarden
"""
"""
1. We need to link each number with its original index.
[1, 1]
[2, 2]
[2, 3]
[3, 4]
[4, 5]
[5, 6]
Structure a node that represents the value and original index.
If we use a tuple we can more easily setup a function.
Node: (value, location)
We'll have:
Original Array = [1, 2, 2, 3, 4, 5]
Queue = [(1,1), (2,2), (2,3)]
Loop x times
pop the array values
1, 2, 2, 3, 4
------------------------
We know the returned array will ONLY have X values.
We know these values ONLY represent the "original" array index #
We can setup a node that stores:
Value
Original Index, if we have 2 values
"""
| 716 |
tests/test_pytorch_synthesizer.py
|
shlomihod/smartnoise-sdk-synth
| 56 |
2024835
|
import subprocess
import os
import pytest
import string
import pandas as pd
from snsynth.preprocessors import GeneralTransformer
from snsynth.pytorch import PytorchDPSynthesizer
from snsynth.pytorch.nn import DPGAN, DPCTGAN, PATECTGAN
git_root_dir = subprocess.check_output("git rev-parse --show-toplevel".split(" ")).decode("utf-8").strip()
meta_path = os.path.join(git_root_dir, os.path.join("datasets", "PUMS_pid.yaml"))
csv_path = os.path.join(git_root_dir, os.path.join("datasets", "PUMS_pid.csv"))
df = pd.read_csv(csv_path)
df_non_continuous = df[['sex','educ','race','married']]
nf = df.to_numpy()
nf_non_continuous = df_non_continuous.to_numpy()
@pytest.mark.torch
class TestPytorchDPSynthesizer_DPGAN:
def setup(self):
self.dpgan = PytorchDPSynthesizer(1.0, DPGAN(), GeneralTransformer())
def test_fit(self):
self.dpgan.fit(df_non_continuous, categorical_columns=['sex','educ','race','married'])
assert self.dpgan.gan.generator
def test_sample(self):
self.dpgan.fit(df_non_continuous, categorical_columns=['sex','educ','race','married'])
sample_size = len(df_non_continuous)
synth_data = self.dpgan.sample(sample_size)
assert synth_data.shape == df_non_continuous.shape
def test_fit_continuous(self):
dpgan = DPGAN(epsilon=1.0)
df_continuous = df[['age','educ','income']]
dpgan.train(df_continuous)
synth_data = dpgan.generate(len(df_continuous))
assert synth_data.shape == df_continuous.shape
class TestPytorchDPSynthesizer_DPCTGAN:
def setup(self):
self.dpctgan = PytorchDPSynthesizer(1.0, DPCTGAN(), None)
def test_fit(self):
df_non_continuous = df[['sex','educ','race','married']]
self.dpctgan.fit(df_non_continuous, categorical_columns=['sex','educ','race','married'])
assert self.dpctgan.gan._generator
def test_sample(self):
self.dpctgan.fit(df_non_continuous, categorical_columns=['sex','educ','race','married'])
sample_size = len(df)
synth_data = self.dpctgan.sample(sample_size)
assert synth_data.shape == df_non_continuous.shape
def test_fit_numpy(self):
dpctgan = DPCTGAN(epsilon=1.0)
dpctgan.train(nf_non_continuous, categorical_columns=[0, 1, 2, 3])
class TestPytorchDPSynthesizer_PATECTGAN:
def setup(self):
self.patectgan = PytorchDPSynthesizer(1.0, PATECTGAN(), None)
def test_fit(self):
self.patectgan.fit(df_non_continuous, categorical_columns=['sex','educ','race','married'])
assert self.patectgan.gan._generator
def test_sample(self):
self.patectgan.fit(df_non_continuous, categorical_columns=['sex','educ','race','married'])
sample_size = len(df)
synth_data = self.patectgan.sample(sample_size)
assert synth_data.shape == df_non_continuous.shape
class TestPytorchDPSynthesizer_PATECTDRAGAN:
def setup(self):
self.patectgan = PytorchDPSynthesizer(1.0, PATECTGAN(regularization='dragan'), None)
def test_fit(self):
self.patectgan.fit(df_non_continuous, categorical_columns=['sex','educ','race','married'])
assert self.patectgan.gan._generator
def test_sample(self):
self.patectgan.fit(df_non_continuous, categorical_columns=['sex','educ','race','married'])
sample_size = len(df)
synth_data = self.patectgan.sample(sample_size)
assert synth_data.shape == df_non_continuous.shape
| 3,476 |
PithonLearn/result12.py
|
CarlosEduardo2021/Python_Estrutura
| 1 |
2023296
|
nome = str(input('What is your name? '))
if nome == 'jhon':
    print('Correct name!')
elif nome == 'Pedro' or nome == 'Maria' or nome == 'Paulo':
    print('Try again!')
elif nome in 'Ana <NAME>':
    print('Registered names')
else:
    print('Incorrect name!')
print('Have a good day, {}!'.format(nome))
| 314 |
loss.py
|
mr4msm/ai_edge_seg
| 0 |
2025163
|
# -*- coding: utf-8 -*-
import numpy as np
from chainer import Chain
from chainer import functions as F
from chainer.backends import cuda
class Loss(Chain):
def __init__(self, model):
super(Loss, self).__init__()
self.class_weights = self.xp.asarray(
[0.625, 1.25, 2.5, 0.3125, 0.3125])
with self.init_scope():
self.model = model
def forward(self, x, t, ignore_label=-1):
y = self.model(x)
y = y.transpose((0, 2, 3, 1))
y = y.reshape((-1, y.shape[-1]))
y_exp = self.xp.exp(y.array)
y_softmax = y_exp / y_exp.sum(axis=1, keepdims=True)
y = F.log_softmax(y)
t = t.ravel()
t_valid = (t != ignore_label)
t *= t_valid
focal_weights = self.class_weights[t] * \
(1 - y_softmax[np.arange(t.size), t])
loss = y[np.arange(t.size), t] * focal_weights
loss *= t_valid
return -F.sum(loss) / t_valid.sum()
def to_cpu(self):
super(Loss, self).to_cpu()
self.class_weights = cuda.to_cpu(self.class_weights)
def to_gpu(self, device=None):
super(Loss, self).to_gpu(device=device)
self.class_weights = cuda.to_gpu(self.class_weights, device=device)
| 1,255 |
sentry_auth_thalia/__init__.py
|
thaliawww/sentry-thalia
| 0 |
2023017
|
from __future__ import absolute_import
from sentry.auth import register
from .provider import ThaliaAuthProvider
register('thalia', ThaliaAuthProvider)
| 155 |
src/pyrsslocal/xmlhelper/xml_exceptions.py
|
sdpython/pyrsslocal
| 2 |
2024956
|
"""
@file
@brief Exceptions raised by files in this folder.
"""
class XmlException(Exception):
"""
    Raised when something is wrong with the parsing.
"""
pass
| 176 |
brotab/mediator/remote_api.py
|
craigevil/brotab
| 3 |
2022622
|
from typing import List
from urllib.parse import quote_plus
from brotab.mediator.log import mediator_logger
from brotab.mediator.transport import Transport
class BrowserRemoteAPI:
"""
Communicates with a browser using stdin/stdout. This mediator is supposed
to be run by the browser after a request from the helper extension.
"""
def __init__(self, transport: Transport):
self._transport: Transport = transport
def list_tabs(self):
command = {'name': 'list_tabs'}
self._transport.send(command)
return self._transport.recv()
def query_tabs(self, query_info: str):
mediator_logger.info('query info: %s', query_info)
command = {'name': 'query_tabs', 'query_info': query_info}
self._transport.send(command)
return self._transport.recv()
def move_tabs(self, move_triplets: str):
"""
:param move_triplets: Comma-separated list of:
<tabID> <windowID> <newIndex>
"""
mediator_logger.info('move_tabs, move_triplets: %s', move_triplets)
triplets = [list(map(int, triplet.split(' ')))
for triplet in move_triplets.split(',')]
mediator_logger.info('moving tab ids: %s', triplets)
command = {'name': 'move_tabs', 'move_triplets': triplets}
self._transport.send(command)
return self._transport.recv()
def open_urls(self, urls: List[str], window_id=None):
"""
Open specified list of URLs in a window, specified by window_id.
If window_id is None, currently active window is used.
"""
mediator_logger.info('open urls: %s', urls)
command = {'name': 'open_urls', 'urls': urls}
if window_id is not None:
command['window_id'] = window_id
self._transport.send(command)
return self._transport.recv()
def close_tabs(self, tab_ids: str):
"""
:param tab_ids: Comma-separated list of tab IDs to close.
"""
int_tab_ids = [int(id_) for id_ in tab_ids.split(',')]
mediator_logger.info('closing tab ids: %s', int_tab_ids)
command = {'name': 'close_tabs', 'tab_ids': int_tab_ids}
self._transport.send(command)
return self._transport.recv()
def new_tab(self, query: str):
url = "https://www.google.com/search?q=%s" % quote_plus(query)
mediator_logger.info('opening url: %s', url)
command = {'name': 'new_tab', 'url': url}
self._transport.send(command)
return self._transport.recv()
def activate_tab(self, tab_id: int, focused: bool):
mediator_logger.info('activating tab id: %s', tab_id)
command = {'name': 'activate_tab', 'tab_id': tab_id, 'focused': focused}
self._transport.send(command)
def get_active_tabs(self) -> str:
mediator_logger.info('getting active tabs')
command = {'name': 'get_active_tabs'}
self._transport.send(command)
return self._transport.recv()
def get_words(self, tab_id: str, match_regex: str, join_with: str):
mediator_logger.info('getting tab words: %s', tab_id)
command = {
'name': 'get_words',
'tab_id': tab_id,
'match_regex': match_regex,
'join_with': join_with,
}
self._transport.send(command)
return self._transport.recv()
def get_text(self, delimiter_regex: str, replace_with: str):
mediator_logger.info('getting text, delimiter_regex=%s, replace_with=%s',
delimiter_regex, replace_with)
command = {
'name': 'get_text',
'delimiter_regex': delimiter_regex,
'replace_with': replace_with,
}
self._transport.send(command)
return self._transport.recv()
def get_html(self, delimiter_regex: str, replace_with: str):
mediator_logger.info('getting html, delimiter_regex=%s, replace_with=%s',
delimiter_regex, replace_with)
command = {
'name': 'get_html',
'delimiter_regex': delimiter_regex,
'replace_with': replace_with,
}
self._transport.send(command)
return self._transport.recv()
def get_browser(self):
mediator_logger.info('getting browser name')
command = {'name': 'get_browser'}
self._transport.send(command)
return self._transport.recv()
def default_remote_api(transport: Transport) -> BrowserRemoteAPI:
return BrowserRemoteAPI(transport)
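# A minimal sketch (not part of brotab) showing how BrowserRemoteAPI could be
# exercised with a fake transport; the real Transport is assumed to expose the
# send()/recv() pair used above, and the canned tab line is made up.
class _FakeTransport:
    def __init__(self, canned_response):
        self.sent = []
        self._response = canned_response
    def send(self, command):
        self.sent.append(command)
    def recv(self):
        return self._response
def _demo():
    api = default_remote_api(_FakeTransport(['1.1\ttab title\thttps://example.com']))
    tabs = api.list_tabs()  # sends {'name': 'list_tabs'} over the transport
    assert tabs == ['1.1\ttab title\thttps://example.com']
    return tabs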
| 4,572 |
data-handling-scripts/write_train_val.py
|
bolero2/DeepLearning-dc
| 2 |
2025033
|
import os
import random
import glob
path = "C:\\Users\\bolero\\Downloads\\polyps_test_kvasir300_relat_ccwh_integ\\"
text_path = "C:\\Users\\bolero\\Downloads\\polyps_test_kvasir300_relat_ccwh_integ\\"
ext = 'jpg'
val_rate = 1.0
os.chdir(path)
print("os -> Change directory : ", path)
file_list = list()
for file in glob.glob(f'*.{ext}'):
file_list.append(file)
print("File number :", len(file_list))
random.shuffle(file_list)
sentence_train = list()
sentence_val = list()
for i in range(0, len(file_list)):
if i < len(file_list) * (1 - val_rate):
sentence_train.append(file_list[i][:-4] + '\n')
else:
sentence_val.append(file_list[i][:-4] + '\n')
f1 = open(text_path + "train.txt", 'w')
f2 = open(text_path + "val.txt", 'w')
for sent in sentence_train:
# new_sent = '/content/drive/My Drive/DeepLearning/Dataset/Detection/no_polyps_test_kvasir300_relat_ccwh_integ' + sent[:-1] + ".jpg\n"
new_sent = f'/home/clt_dc/dataset/detection/detectoRS_ct_deeplesion/{sent[:-1]}.{ext}\n'
# new_sent = sent
f1.write(new_sent)
for sent in sentence_val:
# new_sent = '/content/drive/My Drive/DeepLearning/Dataset/Detection/no_polyps_test_kvasir300_relat_ccwh_integ' + sent[:-1] + ".jpg\n"
new_sent = f'/home/clt_dc/dataset/detection/detectoRS_ct_deeplesion/{sent[:-1]}.{ext}\n'
# new_sent = sent
f2.write(new_sent)
f1.close()
f2.close()
| 1,438 |
corehq/apps/cleanup/management/commands/local_commcare_export.py
|
kkrampa/commcare-hq
| 1 |
2024819
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import sys
import os
from collections import namedtuple
import dateutil
from django.core.management.base import BaseCommand, CommandError
from requests import ConnectionError
from tastypie.bundle import Bundle
from corehq.apps.api.es import ElasticAPIQuerySet, CaseES, es_search_by_params, XFormES
from corehq.apps.api.models import ESCase, ESXFormInstance
from corehq.apps.api.resources.v0_4 import CommCareCaseResource, XFormInstanceResource
from corehq.apps.api.serializers import CommCareCaseSerializer, XFormInstanceSerializer
from corehq.elastic import ESError
class MockApi(namedtuple('MockApi', 'query_set resource serializer')):
def serialize(self, obj):
return json.loads(self.serializer.serialize(self.resource.full_dehydrate(Bundle(obj=obj))))
def _get_case_mock(project, params):
# this is mostly copy/paste/modified from CommCareCaseResource
es_query = es_search_by_params(params, project)
query_set = ElasticAPIQuerySet(
payload=es_query,
model=ESCase,
es_client=CaseES(project),
).order_by('server_modified_on')
return MockApi(
query_set, CommCareCaseResource(), CommCareCaseSerializer()
)
def _get_form_mock(project, params):
# this is mostly copy/paste/modified from XFormInstanceResource
include_archived = 'include_archived' in params
es_query = es_search_by_params(params, project, ['include_archived'])
if include_archived:
es_query['filter']['and'].append({'or': [
{'term': {'doc_type': 'xforminstance'}},
{'term': {'doc_type': 'xformarchived'}},
]})
else:
es_query['filter']['and'].append({'term': {'doc_type': 'xforminstance'}})
query_set = ElasticAPIQuerySet(
payload=es_query,
model=ESXFormInstance,
es_client=XFormES(project),
).order_by('received_on')
return MockApi(
query_set, XFormInstanceResource(), XFormInstanceSerializer()
)
def _get_mock_api(resource, project, params):
if resource == 'case':
return _get_case_mock(project, params)
elif resource == 'form':
return _get_form_mock(project, params)
else:
raise ValueError("Unknown/unsupported resource type '{}'".format(resource))
def local_on_backoff(details):
from commcare_export.commcare_hq_client import on_backoff
on_backoff(details)
def local_on_giveup(details):
from commcare_export.commcare_hq_client import on_giveup
on_giveup(details)
class LocalCommCareHqClient(object):
"""
Like CommCareHqClient but for a local environment
"""
def __init__(self, url, project, limit, checkpoint_manager=None):
self.url = url
self.project = project
self.limit = limit
self._checkpoint_manager = checkpoint_manager
def get(self, es_query_set, start, params=None):
import backoff
@backoff.on_exception(
backoff.expo, (ESError, ConnectionError),
max_time=300, on_backoff=local_on_backoff, on_giveup=local_on_giveup,
)
def _inner(es_query_set, start, params):
from commcare_export.cli import logger
logger.info("Fetching batch: {}-{}".format(start, start + self.limit))
return list(es_query_set[start:start + self.limit])
return _inner(es_query_set, start, params)
def iterate(self, resource, paginator, params=None):
"""
        Iterates over the local results the way the remote API would have, had it been passed in.
"""
from commcare_export.cli import logger
# resource is either 'form' or 'case'
# params are api params
# (e.g. {'limit': 1000, u'type': u'pregnant_mother', 'order_by': 'server_date_modified'})
params = dict(params or {})
mock_api = _get_mock_api(resource, self.project, params)
def iterate_resource(resource=resource, params=params):
more_to_fetch = True
last_batch_ids = set()
count = 0
total_count = mock_api.query_set.count()
while more_to_fetch:
batch = self.get(mock_api.query_set, count, params)
batch_list = [mock_api.serialize(obj) for obj in batch]
logger.info('Received {}-{} of {}'.format(count, count + self.limit, total_count))
if not batch_list:
more_to_fetch = False
else:
for obj in batch_list:
if obj['id'] not in last_batch_ids:
yield obj
if count < total_count:
last_batch_ids = {obj['id'] for obj in batch_list}
count += self.limit
else:
more_to_fetch = False
self.checkpoint(paginator, batch_list)
from commcare_export.repeatable_iterator import RepeatableIterator
return RepeatableIterator(iterate_resource)
def checkpoint(self, paginator, batch):
from commcare_export.commcare_minilinq import DatePaginator
if self._checkpoint_manager and isinstance(paginator, DatePaginator):
since_date = paginator.get_since_date({"objects": batch})
self._checkpoint_manager.set_batch_checkpoint(checkpoint_time=since_date)
class Command(BaseCommand):
help = "For running commcare-export commands against a local environment. " \
"This is mostly a once-off for the ICDS data team."
def add_arguments(self, parser):
parser.add_argument('--project')
parser.add_argument('--query')
parser.add_argument('--output-format')
parser.add_argument('--output')
parser.add_argument('--limit', type=int, default=200)
def handle(self, project, query, output_format, output, limit, **options):
# note: this is heavily copy/paste/modified from commcare_export.cli
commcare_hq = 'local_commcare_export'
try:
# local development only
sys.path.append(os.path.join(os.getcwd(), 'lib', 'commcare-export'))
import commcare_export # noqa
except ImportError:
raise CommandError(
'This command requires commcare-export to be installed! '
'Please run: pip install commcare-export. You may also need to run: '
'pip install openpyxl==2.6.0b1 '
'afterwards to run CommCare due to version incompatibilities.'
)
from commcare_export import misc
from commcare_export.checkpoint import CheckpointManager
from commcare_export.cli import _get_writer, _get_query_from_file
from commcare_export.commcare_minilinq import CommCareHqEnv
from commcare_export.env import BuiltInEnv, JsonPathEnv, EmitterEnv
print('commcare-export is installed.')
writer = _get_writer(output_format, output, strict_types=False)
query_obj = _get_query_from_file(
query,
None, # missing_value
writer.supports_multi_table_write,
writer.max_column_length,
writer.required_columns
)
checkpoint_manager = None
if writer.support_checkpoints:
md5 = misc.digest_file(query)
checkpoint_manager = CheckpointManager(
output,
query,
md5,
project,
commcare_hq,
)
since = checkpoint_manager.get_time_of_last_checkpoint()
else:
since = None
commcarehq_base_url = commcare_hq
api_client = LocalCommCareHqClient(
url=commcarehq_base_url,
project=project,
limit=limit,
checkpoint_manager=checkpoint_manager
)
if since is not None:
since = dateutil.parser.parse(since)
env = (
BuiltInEnv({'commcarehq_base_url': commcarehq_base_url})
| CommCareHqEnv(api_client, since=since)
| JsonPathEnv({})
| EmitterEnv(writer)
)
with env:
try:
lazy_result = query_obj.eval(env)
if lazy_result is not None:
# evaluate lazy results
for r in lazy_result:
list(r) if r else r
except KeyboardInterrupt:
print('\nExport aborted')
return
if checkpoint_manager:
checkpoint_manager.set_final_checkpoint()
| 8,703 |
python/191106-fizzbuzz.py
|
Suellaiy/TIL
| 0 |
2022986
|
print([ "fizzbuzz" if i % 15 == 0 else "fizz" if i % 3 == 0 else "buzz" if i%5==0 else i for i in range(1,100+1)])
| 116 |
HackerRank_InterviewPrepKit/Arrays/New_Year_Chaos.py
|
nosy0411/problem_solving
| 0 |
2025134
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the minimumBribes function below.
def minimumBribes(q):
bribe=0
i=len(q)-2
while(1):
if (q[i]-(i+1))>=3:
print("Too chaotic")
break
else:
if i==len(q)-2:
if q[i+1]<q[i]:
bribe+=1
temp=q[i+1]
q[i+1]=q[i]
q[i]=temp
else:
if q[i+2]<q[i]:
bribe+=2
temp=q[i]
q[i]=q[i+1]
q[i+1]=q[i+2]
q[i+2]=temp
elif q[i+1]<q[i]:
bribe+=1
temp=q[i+1]
q[i+1]=q[i]
q[i]=temp
i-=1
if i==-1:
print(bribe)
break
if __name__ == '__main__':
t = int(input())
for t_itr in range(t):
n = int(input())
q = list(map(int, input().rstrip().split()))
minimumBribes(q)
| 1,099 |
pw_build_info/py/pw_build_info/build_id.py
|
Tiggerlaboratoriet/pigweed
| 1 |
2022661
|
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Library that parses an ELF file for a GNU build-id."""
import argparse
import logging
from pathlib import Path
import sys
from typing import BinaryIO, Optional
import elftools # type: ignore
from elftools.elf import elffile, notes, sections # type: ignore
_LOG = logging.getLogger('build_id_parser')
_PW_BUILD_ID_SYM_NAME = 'gnu_build_id_begin'
class GnuBuildIdError(Exception):
"""An exception raised when a GNU build ID is malformed."""
def read_build_id_from_section(elf_file: BinaryIO) -> Optional[bytes]:
"""Reads a build ID from a .note.gnu.build-id section."""
parsed_elf_file = elffile.ELFFile(elf_file)
build_id_section = parsed_elf_file.get_section_by_name(
'.note.gnu.build-id')
if build_id_section is None:
return None
section_notes = list(n for n in notes.iter_notes(
parsed_elf_file, build_id_section['sh_offset'],
build_id_section['sh_size']))
if len(section_notes) != 1:
raise GnuBuildIdError('GNU build ID section contains multiple notes')
build_id_note = section_notes[0]
if build_id_note['n_name'] != 'GNU':
raise GnuBuildIdError('GNU build ID note name invalid')
if build_id_note['n_type'] != 'NT_GNU_BUILD_ID':
raise GnuBuildIdError('GNU build ID note type invalid')
return bytes.fromhex(build_id_note['n_desc'])
def _addr_is_in_segment(addr: int, segment) -> bool:
"""Checks if the provided address resides within the provided segment."""
# Address references uninitialized memory. Can't read.
if addr >= segment['p_vaddr'] + segment['p_filesz']:
raise GnuBuildIdError('GNU build ID is runtime-initialized')
return addr in range(segment['p_vaddr'], segment['p_memsz'])
def _read_build_id_from_offset(elf, offset: int) -> bytes:
"""Attempts to read a GNU build ID from an offset in an elf file."""
note = elftools.common.utils.struct_parse(elf.structs.Elf_Nhdr,
elf.stream,
stream_pos=offset)
elf.stream.seek(offset + elf.structs.Elf_Nhdr.sizeof())
name = elf.stream.read(note['n_namesz'])
if name != b'GNU\0':
raise GnuBuildIdError('GNU build ID note name invalid')
return elf.stream.read(note['n_descsz'])
def read_build_id_from_symbol(elf_file: BinaryIO) -> Optional[bytes]:
"""Reads a GNU build ID using gnu_build_id_begin to locate the data."""
parsed_elf_file = elffile.ELFFile(elf_file)
matching_syms = None
for section in parsed_elf_file.iter_sections():
if not isinstance(section, sections.SymbolTableSection):
continue
matching_syms = section.get_symbol_by_name(_PW_BUILD_ID_SYM_NAME)
if matching_syms is not None:
break
if matching_syms is None:
return None
if len(matching_syms) != 1:
raise GnuBuildIdError('Multiple GNU build ID start symbols defined')
gnu_build_id_sym = matching_syms[0]
section_number = gnu_build_id_sym['st_shndx']
if section_number == 'SHN_UNDEF':
raise GnuBuildIdError('GNU build ID start symbol undefined')
matching_section = parsed_elf_file.get_section(section_number)
build_id_start_addr = gnu_build_id_sym['st_value']
for segment in parsed_elf_file.iter_segments():
if segment.section_in_segment(matching_section):
offset = build_id_start_addr - segment['p_vaddr'] + segment[
'p_offset']
return _read_build_id_from_offset(parsed_elf_file, offset)
return None
def read_build_id(elf_file: BinaryIO) -> Optional[bytes]:
"""Reads a GNU build ID from an ELF binary."""
# Prefer to read the build ID from a dedicated section.
maybe_build_id = read_build_id_from_section(elf_file)
if maybe_build_id is not None:
return maybe_build_id
# If there's no dedicated section, try and use symbol information to find
# the build info.
return read_build_id_from_symbol(elf_file)
def find_matching_elf(uuid: bytes, search_dir: Path) -> Optional[Path]:
"""Recursively searches a directory for an ELF file with a matching UUID."""
elf_file_paths = search_dir.glob('**/*.elf')
for elf_file in elf_file_paths:
try:
candidate_id = read_build_id(open(elf_file, 'rb'))
except GnuBuildIdError:
continue
if candidate_id is None:
continue
if candidate_id == uuid:
return elf_file
return None
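# Hypothetical usage sketch; 'firmware.elf' and the 'out' search directory are
# placeholder paths, not part of this library.
def _example_lookup() -> None:
    with open('firmware.elf', 'rb') as elf:
        uuid = read_build_id(elf)
    if uuid is not None:
        matching = find_matching_elf(uuid, Path('out'))
        _LOG.info('%s -> %s', uuid.hex(), matching)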
def _main(elf_file: BinaryIO) -> int:
logging.basicConfig(format='%(message)s', level=logging.INFO)
build_id = read_build_id(elf_file)
if build_id is None:
_LOG.error('Error: No GNU build ID found.')
return 1
_LOG.info(build_id.hex())
return 0
def _parse_args():
"""Parses command-line arguments."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('elf_file',
type=argparse.FileType('rb'),
help='The .elf to parse build info from')
return parser.parse_args()
if __name__ == '__main__':
sys.exit(_main(**vars(_parse_args())))
| 5,763 |
scripts/DNN.py
|
jayroxis/Cophy-PGNN
| 10 |
2025509
|
import torch
from collections import OrderedDict
# Multi-layer Perceptron
class DNN(torch.nn.Module):
def __init__(
self,
input_size,
hidden_size,
output_size,
depth,
act=torch.nn.Tanh,
softmax=False
):
super(DNN, self).__init__()
layers = [('input', torch.nn.Linear(input_size, hidden_size))]
layers.append(('input_activation', act()))
for i in range(depth):
layers.append(
('hidden_%d' % i, torch.nn.Linear(hidden_size, hidden_size))
)
layers.append(('activation_%d' % i, act()))
layers.append(('output', torch.nn.Linear(hidden_size, output_size)))
if softmax == True:
layers.append(('softmax', torch.nn.Softmax()))
layerDict = OrderedDict(layers)
self.layers = torch.nn.Sequential(layerDict)
def forward(self, x):
out = self.layers(x)
return out
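# Minimal usage sketch; the layer sizes and the batch shape are arbitrary examples.
if __name__ == '__main__':
    model = DNN(input_size=2, hidden_size=32, output_size=1, depth=3)
    x = torch.rand(8, 2)
    y = model(x)
    print(y.shape)  # torch.Size([8, 1])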
| 973 |
pennstate-arcpy/L4_RhinoPathsCsv.py
|
tofritz/ExampleWork
| 0 |
2023579
|
# Define a function that checks whether a rhino is already in the dictionary.
# If not, it creates an array as the value for the rhino-name key. In either
# case, a point object built from the given lat and lon arguments is added to
# that rhino's array.
def updateDict(rhino, dictionary, lat, lon):
    if rhino not in dictionary:
        dictionary[rhino] = arcpy.Array()
    vertex = arcpy.Point(lat, lon)
    dictionary[rhino].add(vertex)
# Import necessary modules
import arcpy
import csv
arcpy.env.overwriteOutput = True
# Attempt to open the CSV file with rhino path points
try:
fileHandle = open(r'C:\TmpWrkDirGIS\GEOG485\Lesson4\RhinoObservations.csv', 'r')
except:
print('Input CSV file could not be opened')
# Create necessary variables for updating the feature class
csvReader = csv.reader(fileHandle)
header = next(csvReader)
latIndex = header.index('X')
lonIndex = header.index('Y')
rhinoIndex = header.index('Rhino')
# Create a dictionary to contain rhino arrays
rhinoDict = {}
# Attempt to create the polyline feature class and add a NAME field
try:
spatialRef = arcpy.SpatialReference('WGS 1984')
rhinoFC = arcpy.CreateFeatureclass_management(r'C:\TmpWrkDirGIS\GEOG485\Lesson4', 'rhinopaths.shp', 'POLYLINE', '', '', '', spatialRef)
arcpy.AddField_management(rhinoFC, 'NAME', 'TEXT', '', '', 20)
except:
print('Error in creating polyline feature class')
# Attempt to update the dictionary by iterating through the rows in the CSV
try:
for row in csvReader:
updateDict(row[rhinoIndex], rhinoDict, row[latIndex], row[lonIndex])
# Once the dictionary is updated, insert the arrays into the polyline feature class
# using the key for the NAME field and the value as a polyline object
try:
with arcpy.da.InsertCursor(rhinoFC, ('SHAPE@', 'NAME')) as cursor:
for rhino in rhinoDict:
polyline = arcpy.Polyline(rhinoDict[rhino], spatialRef)
cursor.insertRow((polyline, rhino))
del cursor
except:
print('Error inserting tracking points into feature class')
except:
print('Error creating rhino point arrays')
| 2,295 |
specviz/third_party/glue/tests/test_utils.py
|
ibusko/specviz
| 0 |
2024976
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.wcs import WCS
from astropy.tests.helper import assert_quantity_allclose
pytest.importorskip("glue") # noqa
from glue.core import Data
from glue.core.component import Component
from glue.core.coordinates import WCSCoordinates
from ..utils import glue_data_has_spectral_axis, glue_data_to_spectrum1d, SpectralCoordinates
def test_conversion_utils_notspec():
data = Data(label='not spectrum')
assert not glue_data_has_spectral_axis(data)
def test_conversion_utils_1d():
# Set up simple spectral WCS
wcs = WCS(naxis=1)
wcs.wcs.ctype = ['VELO-LSR']
wcs.wcs.set()
# Set up glue Coordinates object
coords = WCSCoordinates(wcs=wcs)
data = Data(label='spectrum', coords=coords)
data.add_component(Component(np.array([3.4, 2.3, -1.1, 0.3]), units='Jy'), 'x')
assert glue_data_has_spectral_axis(data)
spec = glue_data_to_spectrum1d(data, data.id['x'])
assert_quantity_allclose(spec.spectral_axis, [1, 2, 3, 4] * u.m / u.s)
assert_quantity_allclose(spec.flux, [3.4, 2.3, -1.1, 0.3] * u.Jy)
def test_conversion_utils_3d():
# Set up simple spectral WCS
wcs = WCS(naxis=3)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VELO-LSR']
wcs.wcs.set()
# Set up glue Coordinates object
coords = WCSCoordinates(wcs=wcs)
data = Data(label='spectral-cube', coords=coords)
data.add_component(Component(np.ones((3, 4, 5)), units='Jy'), 'x')
assert glue_data_has_spectral_axis(data)
spec = glue_data_to_spectrum1d(data, data.id['x'], statistic='sum')
assert_quantity_allclose(spec.spectral_axis, [1, 2, 3] * u.m / u.s)
assert_quantity_allclose(spec.flux, [20, 20, 20] * u.Jy)
def test_conversion_utils_spectral_coordinates():
# Set up glue Coordinates object
coords = SpectralCoordinates([1, 4, 10] * u.micron)
data = Data(label='spectrum1d', coords=coords)
data.add_component(Component(np.array([3, 4, 5]), units='Jy'), 'x')
assert_allclose(data.coords.pixel2world([0, 0.5, 1, 1.5, 2]),
[[1, 2.5, 4, 7, 10]])
assert glue_data_has_spectral_axis(data)
spec = glue_data_to_spectrum1d(data, data.id['x'])
assert_quantity_allclose(spec.spectral_axis, [1, 4, 10] * u.micron)
assert_quantity_allclose(spec.flux, [3, 4, 5] * u.Jy)
| 2,398 |
robocoach1/photos/Checkvid.py
|
DSP209/Robo1
| 0 |
2025511
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 25 11:10:02 2017
@author: david
"""
import os
import numpy as np,cv2
fn = 'drop.avi'
cd = os.getcwd()
cap = cv2.VideoCapture(cd+'/'+fn,0)
video_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH ))
video_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT ))
fr = int(cap.get(cv2.CAP_PROP_FPS ))
fc = int(cap.get(cv2.CAP_PROP_FRAME_COUNT ))
fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
stat = cap.isOpened()
| 468 |
account/processing.py
|
fitahol/fitahol
| 2 |
2025513
|
#!/usr/bin/env python
# coding=utf-8
"""
__created__ = '13/1/2016'
__author__ = 'deling.ma'
"""
def get_username(account):
if account.isdigit():
return "phone", account
else:
username = "@".join([account.split("@")[0],
account.split("@")[-1].split(".")[0]])
return "email", username
| 347 |
test/unit/data_operations/test_text_preprocessing.py
|
rozlana-g/FEDOT
| 358 |
2023357
|
import numpy as np
from fedot.core.data.data import InputData
from fedot.core.pipelines.node import PrimaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum
def test_clean_text_preprocessing():
test_text = [
'This is the first document.',
'This document is the second document.',
'And this is the third one.',
'Is this the first document?',
]
input_data = InputData(features=np.array(test_text),
target=[0, 1, 1, 0],
idx=np.arange(0, len(test_text)),
task=Task(TaskTypesEnum.classification),
data_type=DataTypesEnum.text)
preprocessing_pipeline = Pipeline(PrimaryNode('text_clean'))
preprocessing_pipeline.fit(input_data)
predicted_output = preprocessing_pipeline.predict(input_data)
cleaned_text = predicted_output.predict
assert len(test_text) == len(cleaned_text)
| 1,074 |
app.py
|
BrendanWightman/WebProgrammingProject
| 1 |
2025228
|
# A very simple Flask Hello World app for you to get started with...
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello from Flask!'
| 186 |
rpy2json/project/nodes_sounds_videos.py
|
Grimalkin8675/rpy2html
| 1 |
2024504
|
import re
from os import path
from utils import remove_invalid_chars, replace_bools
def parse(renpy_nodes, renpy_ast, config):
"""
    Parse all nodes to a usable format. The Label node named 'start' will be the
    node with id '0'.
:param renpy_nodes: all nodes from renpy game (renpy.game.script.namemap)
:param renpy_ast: the renpy ast module (renpy.ast)
:returns: a dict with all nodes and sounds:
{
'nodes': {
'0': {
'class_name': 'Say',
'arguments': ['char_name', 'Hello World!', [1]]
},
...
},
'sounds': {
...
}
}
"""
res = {
'nodes': {},
'sounds': {},
'videos': {}
}
res['sounds']['main_menu_music'] = config.main_menu_music
start_node = label_by_name(renpy_nodes, renpy_ast.Label, 'start')
real_start = real_next(renpy_nodes, renpy_ast, start_node)
nexts = [real_start]
while len(nexts) != 0:
node = nexts[0]
nexts = nexts[1:]
if node and id(node) not in res['nodes']:
nexts += parse_node(renpy_nodes, renpy_ast, node, res)
id_start = id(real_start)
res['nodes'][0] = res['nodes'][id_start]
del res['nodes'][id_start]
return res
def label_by_name(renpy_nodes, renpy_Label, name):
for _key, node in renpy_nodes.iteritems():
if isinstance(node, renpy_Label) and node.name == name:
return node
def show_unknown_node_warning(node):
print('[WARNING] unknown node %s found, didn\'t continue this branch.' % (node.diff_info(), ))
# adds node converted to acc['nodes']
# if node uses a sound, adds it to acc['sounds']
# returns a list of nodes
def parse_node(renpy_nodes, renpy_ast, node, acc):
if isinstance(node, renpy_ast.Menu):
return menu_to_str(renpy_nodes, renpy_ast, node, acc)
elif isinstance(node, renpy_ast.Say):
return say_to_str(renpy_nodes, renpy_ast, node, acc)
elif isinstance(node, renpy_ast.If):
return if_to_str(renpy_nodes, renpy_ast, node, acc)
elif isinstance(node, renpy_ast.Python):
return python_to_str(renpy_nodes, renpy_ast, node, acc)
elif isinstance(node, renpy_ast.Scene):
return scene_to_str(renpy_nodes, renpy_ast, node, acc)
elif isinstance(node, renpy_ast.Show):
return show_to_str(renpy_nodes, renpy_ast, node, acc)
elif isinstance(node, renpy_ast.Hide):
return hide_to_str(renpy_nodes, renpy_ast, node, acc)
elif isinstance(node, renpy_ast.UserStatement):
return play_stop_to_str(renpy_nodes, renpy_ast, node, acc)
else:
show_unknown_node_warning(node)
return []
def real_next(renpy_nodes, renpy_ast, node):
while True:
# known nodes
if ( isinstance(node, renpy_ast.Menu)
or isinstance(node, renpy_ast.Say)
or isinstance(node, renpy_ast.If)
or isinstance(node, renpy_ast.Python)
or isinstance(node, renpy_ast.Scene)
or isinstance(node, renpy_ast.Show)
or isinstance(node, renpy_ast.Hide)
or isinstance(node, renpy_ast.UserStatement)):
return node
# don't keep jumps
elif isinstance(node, renpy_ast.Jump):
node = label_by_name(renpy_nodes, renpy_ast.Label, node.target)
# ignore useless nodes
elif ( isinstance(node, renpy_ast.Label)
or isinstance(node, renpy_ast.Translate)
or isinstance(node, renpy_ast.EndTranslate)
or isinstance(node, renpy_ast.Pass)
or isinstance(node, renpy_ast.Return)):
node = node.next
elif node:
show_unknown_node_warning(node)
break
else:
break
def menu_to_str(renpy_nodes, renpy_ast, node, acc):
nexts = []
menu_items = []
display_texts = []
for item in node.items:
(text, cond, lnext) = item
if lnext:
next = real_next(renpy_nodes, renpy_ast, lnext[0])
acc['nodes'][id(item)] = {
'class_name': 'MenuItem',
'arguments': [
text,
replace_bools(cond),
[str(id(next))] if next else []
]
}
nexts.append(next)
menu_items.append(item)
else:
display_texts.append(text)
id_menu_items = [str(id(menu_item)) for menu_item in menu_items]
acc['nodes'][id(node)] = {
'class_name': 'Menu',
'arguments': [
'\n'.join(display_texts),
id_menu_items
]
}
return nexts
def say_to_str(renpy_nodes, renpy_ast, node, acc):
next = real_next(renpy_nodes, renpy_ast, node.next)
acc['nodes'][id(node)] = {
'class_name': 'Say',
'arguments': [
node.who,
node.what,
[str(id(next))] if next else []
]
}
return [next]
def if_to_str(renpy_nodes, renpy_ast, node, acc):
else_found = False
for if_block in node.entries:
if if_block[0] == 'True':
else_found = True
if not else_found:
node.entries.append(('true', [node.next]))
nexts_of_blocks = []
nexts_of_if = []
for if_block in node.entries:
(cond, block) = if_block
id_if_block = id(if_block)
next_of_block = real_next(renpy_nodes, renpy_ast, block[0])
acc['nodes'][id_if_block] = {
'class_name': 'IfBlock',
'arguments': [
replace_bools(cond),
[str(id(next_of_block))] if next_of_block else []
]
}
nexts_of_blocks.append(next_of_block)
nexts_of_if.append(str(id_if_block))
acc['nodes'][id(node)] = {
'class_name': 'If',
'arguments': [nexts_of_if],
}
return nexts_of_blocks
VIDEO = re.compile(r'^renpy\.movie_cutscene\((.+)\)$')
def python_to_str(renpy_nodes, renpy_ast, node, acc):
next = real_next(renpy_nodes, renpy_ast, node.next)
match = re.search(VIDEO, node.code.source)
if match != None and len(match.groups()) == 1: # it's a video
file = match.group(1)[1:-1]
vid_name = remove_invalid_chars(file)
acc['videos'][vid_name] = file
acc['nodes'][id(node)] = {
'class_name': 'Video',
'arguments': [
vid_name,
[str(id(next))] if next else []
]
}
else:
acc['nodes'][id(node)] = {
'class_name': 'PyExpr',
'arguments': [
replace_bools(node.code.source),
[str(id(next))] if next else []
]
}
return [next]
def scene_to_str(renpy_nodes, renpy_ast, node, acc):
next = real_next(renpy_nodes, renpy_ast, node.next)
acc['nodes'][id(node)] = {
'class_name': 'Scene',
'arguments': [
node.imspec[0][0],
[str(id(next))] if next else []
]
}
return [next]
def show_to_str(renpy_nodes, renpy_ast, node, acc):
next = real_next(renpy_nodes, renpy_ast, node.next)
acc['nodes'][id(node)] = {
'class_name': 'Show',
'arguments': [
node.imspec[0][0],
[str(id(next))] if next else []
]
}
return [next]
def hide_to_str(renpy_nodes, renpy_ast, node, acc):
next = real_next(renpy_nodes, renpy_ast, node.next)
acc['nodes'][id(node)] = {
'class_name': 'Hide',
'arguments': [
node.imspec[0][0],
[str(id(next))] if next else []
]
}
return [next]
WORD = re.compile(r'([\w.]+|".*?")')
def play_stop_to_str(renpy_nodes, renpy_ast, node, acc):
cmd = re.findall(WORD, node.line)
next = real_next(renpy_nodes, renpy_ast, node.next)
id_nexts = [str(id(next))] if next else []
if cmd[0] == 'play' and len(cmd) >= 3:
channel = cmd[1]
file = cmd[2][1:-1]
snd_name = remove_invalid_chars(file)
acc['sounds'][snd_name] = file
acc['nodes'][id(node)] = {
'class_name': 'Play',
'arguments': [
channel,
snd_name,
id_nexts
]
}
return [next]
elif cmd[0] == 'voice' and len(cmd) >= 2:
file = cmd[1][1:-1]
snd_name = remove_invalid_chars(file)
acc['sounds'][snd_name] = file
acc['nodes'][id(node)] = {
'class_name': 'Play',
'arguments': [
'voice',
snd_name,
id_nexts
]
}
return [next]
elif cmd[0] == 'stop' and len(cmd) >= 2:
channel = cmd[1]
acc['nodes'][id(node)] = {
'class_name': 'Stop',
'arguments': [
channel,
id_nexts
]
}
return [next]
else:
print('[WARNING] unrecognized UserStatement: %s, didn\'t continue this branch.' % node.line)
| 9,073 |
tvrenamer/tests/core/test_episode.py
|
shad7/tvrenamer
| 1 |
2024796
|
import os
import mock
from tvrenamer.core import episode
from tvrenamer import exceptions as exc
from tvrenamer.tests import base
class EpisodeTest(base.BaseTest):
def setUp(self):
super(EpisodeTest, self).setUp()
self.media = self.create_tempfiles(
[('revenge.s04e12.hdtv.x264-2hd', 'dummy data')],
'.mp4')[0]
self.filename = os.path.basename(self.media)
self.dirname = os.path.dirname(self.media)
def test_str_repr(self):
ep = episode.Episode(self.media)
ep_str = ''
ep_str += self.media
ep_str += ' => ['
ep_str += self.dirname
ep_str += ' '
ep_str += self.filename
ep_str += '|None .mp4] '
ep_str += 'meta: ['
ep_str += ' S E[]] '
ep_str += 'formatted: /'
self.assertEqual(str(ep), ep_str)
self.assertEqual(repr(ep), ep_str)
def test_call(self):
ep = episode.Episode(self.media)
with mock.patch.object(ep, 'parse'):
with mock.patch.object(ep, 'enhance'):
with mock.patch.object(ep, 'format_name'):
with mock.patch.object(ep, 'rename'):
ep()
self.assertEqual(ep.state, episode.const.DONE)
ep = episode.Episode(self.media)
with mock.patch.object(ep, 'parse', side_effect=OSError):
ep()
self.assertEqual(ep.state, episode.const.FAILED)
ep = episode.Episode(self.media)
with mock.patch.object(ep, 'parse',
side_effect=exc.InvalidFilename):
ep()
self.assertEqual(ep.state, episode.const.FAILED)
def test_status(self):
ep = episode.Episode(self.media)
self.assertTrue(self.media in ep.status)
def test_parse(self):
ep = episode.Episode(self.media)
ep.parse()
self.assertEqual(ep.episode_numbers, [12])
self.assertEqual(ep.series_name, 'revenge')
self.assertEqual(ep.season_number, 4)
ep = episode.Episode(self.media)
with mock.patch.object(episode.parser, 'parse_filename',
return_value=None):
self.assertRaises(exc.InvalidFilename, ep.parse)
ep = episode.Episode(self.media)
with mock.patch.object(episode.parser, 'parse_filename',
return_value={'pattern': ''}):
self.assertRaises(exc.ConfigValueError, ep.parse)
ep = episode.Episode(self.media)
with mock.patch.object(episode.parser, 'parse_filename',
return_value={'pattern': '',
'episode_numbers': []}):
self.assertRaises(exc.ConfigValueError, ep.parse)
def test_enhance(self):
ep = episode.Episode(self.media)
ep.parse()
with mock.patch.object(ep.api, 'get_series_by_name',
return_value=(None, '')):
self.assertRaises(exc.ShowNotFound, ep.enhance)
with mock.patch.object(ep.api, 'get_series_by_name',
return_value=({}, '')):
with mock.patch.object(ep.api, 'get_series_name',
return_value='Revenge'):
with mock.patch.object(ep.api, 'get_episode_name',
return_value=(['Madness'], None)):
ep.enhance()
self.assertEqual(ep.series_name, 'Revenge')
self.assertEqual(ep.episode_names, ['Madness'])
ep = episode.Episode(self.media)
ep.parse()
with mock.patch.object(ep.api, 'get_series_by_name',
return_value=({}, '')):
with mock.patch.object(ep.api, 'get_series_name',
return_value='Revenge'):
with mock.patch.object(ep.api, 'get_episode_name',
return_value=(None, '')):
self.assertRaises(exc.EpisodeNotFound, ep.enhance)
def test_format_name(self):
ep = episode.Episode(self.media)
ep.series_name = 'Revenge'
ep.season_number = 4
ep.episode_numbers = [12]
ep.episode_names = ['Madness']
self.CONF.set_override('move_files_enabled', False)
ep.format_name()
self.assertEqual(ep.formatted_dirname, os.path.dirname(self.media))
self.assertEqual(ep.formatted_filename,
'Revenge - 04x12 - Madness.mp4')
self.CONF.set_override(
'filename_format_ep',
'S%(seasonnumber)02dE%(episode)s-%(episodename)s%(ext)s')
self.CONF.set_override('directory_name_format',
'%(seriesname)s/Season %(seasonnumber)02d')
self.CONF.set_override('move_files_enabled', True)
with mock.patch('tvrenamer.core.formatter.find_library',
return_value='/tmp'):
ep.format_name()
self.assertEqual(ep.formatted_filename, 'S04E12-Madness.mp4')
self.assertEqual(ep.formatted_dirname, '/tmp/Revenge/Season 04')
def test_rename(self):
ep = episode.Episode(self.media)
ep.series_name = 'Revenge'
ep.season_number = 4
ep.episode_numbers = [12]
ep.episode_names = ['Madness']
self.CONF.set_override('move_files_enabled', False)
ep.out_location = os.path.join(self.dirname,
'Revenge - 04x12 - Madness.mp4')
with mock.patch.object(episode.renamer, 'execute') as mock_renamer:
ep.rename()
mock_renamer.assert_called_with(
self.media,
os.path.join(self.dirname, 'Revenge - 04x12 - Madness.mp4'))
self.CONF.set_override('move_files_enabled', True)
ep.out_location = os.path.join(
'/tmp', '.', 'Revenge - 04x12 - Madness.mp4')
with mock.patch.object(episode.renamer, 'execute') as mock_renamer:
ep.rename()
mock_renamer.assert_called_with(
self.media,
os.path.join('/tmp', '.', 'Revenge - 04x12 - Madness.mp4'))
| 6,263 |
code/Solution_0046_permute.py
|
qizhenkang/myLeetCode
| 0 |
2023903
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 5 10:35:10 2021
@author: qizhe
"""
from typing import List
class Solution:
# def permute(self, nums: List[int]) -> List[List[int]]:
# def dfs(nums, size, depth, path, used, res):
# if depth == size:
# The problem was likely that path is stored in res by reference, so modifying path later also modified res
# res.append(path[:])
# Fix: append path[:] (a copy) instead of path
# return
# for i in range(size):
# if not used[i]:
# used[i] = True
# path.append(nums[i])
# # print(path)
# dfs(nums, size, depth + 1, path, used, res)
# used[i] = False
# path.pop()
# # print(res)
# size = len(nums)
# if len(nums) == 0:
# return []
# used = [False for _ in range(size)]
# res = []
# dfs(nums, size, 0, [], used, res)
# return res
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
def backtrack(first = 0):
# All positions have been filled
if first == n:
res.append(nums[:])
print('first = ',first)
print('res = ',res)
for i in range(first, n):
# print(first,i)
# Maintain the array in place by swapping
nums[first], nums[i] = nums[i], nums[first]
print('nums 1 = ',nums)
# Recurse to fill the next position
backtrack(first + 1)
print('nums 2 = ',nums)
# Undo the swap (backtrack)
nums[first], nums[i] = nums[i], nums[first]
n = len(nums)
res = []
backtrack()
return res
if __name__ == '__main__':
nums = [1, 2, 3]
solution = Solution()
res = solution.permute(nums)
print(res)
| 1,942 |
drapps/plugins/apps/evaluated.py
|
schwa-lab/dr-apps-python
| 3 |
2023589
|
# vim: set et nosi ai ts=2 sts=2 sw=2:
# -*- coding: utf-8 -*-
"""
Apps which evaluate a function for each doc and act upon the result.
"""
from __future__ import absolute_import, print_function, unicode_literals
import io
import os
import sys
from schwa import dr
import six
from drapps.api import App, Evaluator
from drapps.appargs import DESERIALISE_AP, OSTREAM_AP, ArgumentParser, get_evaluator_ap
class FormatApp(App):
"""
Print out a formatted evaluation of each document.
"""
# e.g. dr format json
# dr format django '{% if ... %}'
arg_parsers = (get_evaluator_ap(), DESERIALISE_AP)
def __call__(self):
evaluator = self.evaluator
for i, doc in enumerate(self.stream_reader):
print(evaluator(doc, i))
class GrepApp(App): # grep
"""
Filter the documents using an evaluator.
A string consisting of only whitespace or the word 'false' evaluates to false.
"""
arg_parsers = (get_evaluator_ap(), DESERIALISE_AP, OSTREAM_AP)
def __call__(self):
evaluator = self.evaluator
reader, writer = self.stream_reader_writer
for i, doc in enumerate(reader):
if evaluator.as_boolean(doc, i):
# TODO: avoid re-serialising
writer.write(doc)
class RandomEvaluator(Evaluator):
"""Shuffle the input randomly"""
ap = ArgumentParser()
ap.add_argument('--seed', dest='rand_seed', type=int, default=None)
arg_parsers = (ap,)
def __init__(self, argparser, args):
super(RandomEvaluator, self).__init__(argparser, args)
import random
self.gen_random = random.Random(self.args.rand_seed).random
def __call__(self, doc, ind):
return self.gen_random()
class SortApp(App):
"""
Sort the documents using an evaluator.
"""
arg_parsers = (get_evaluator_ap({'random': RandomEvaluator}), DESERIALISE_AP, OSTREAM_AP)
def __call__(self):
reader, schema = self.get_reader_and_schema()
tmp_out = io.BytesIO()
tmp_writer = dr.Writer(tmp_out, schema)
evaluator = self.evaluator
items = []
for i, doc in enumerate(reader):
# TODO: avoid re-serialising
doc_key = evaluator(doc, i)
tmp_writer.write(doc)
doc_data = tmp_out.getvalue()
# Rewind before truncating; truncate(0) alone leaves the position at the old end,
# so later writes would be padded with null bytes.
tmp_out.seek(0)
tmp_out.truncate(0)
items.append((doc_key, doc_data))
items.sort()
for doc_key, doc_data in items:
out = self.args.out_stream
if six.PY3:
out = out.buffer
out.write(doc_data)
class SetFieldApp(App):
"""
Set a named field on each document to a value.
"""
field_name_ap = ArgumentParser()
field_name_ap.add_argument('field_name', help='The field name to set')
arg_parsers = (field_name_ap, get_evaluator_ap(), DESERIALISE_AP, OSTREAM_AP)
def __call__(self):
attr = self.args.field_name
evaluator = self.evaluator
reader, writer = self.stream_reader_writer
for i, doc in enumerate(reader):
if attr not in doc._dr_s2p:
# TODO: externalise reflection methods
doc._dr_s2p[attr] = attr
doc._dr_fields[attr] = dr.Field(serial=attr)
setattr(doc, attr, evaluator(doc, i))
writer.write(doc)
class KFoldsEvaluator(Evaluator):
"""Distribute to each of k folds"""
ap = ArgumentParser()
ap.add_argument('kfolds', type=int)
arg_parsers = (ap,)
def __call__(self, doc, ind):
return ind % self.args.kfolds
class SplitApp(App):
"""
Split a stream into k files, or a separate file for each key determined per doc.
To perform stratified k-fold validation, first sort the corpus by the stratification label.
If the evaluation returns a list, the document is written to each key in the list.
"""
multioutput_ap = ArgumentParser()
multioutput_ap.add_argument('-t', '--template', dest='path_tpl', default='fold{n:03d}.dr', help='A template for output paths (default: %(default)s). {n} substitutes for fold number, {key} for evaluation output.')
multioutput_ap.add_argument('--overwrite', action='store_true', default=False, help='Overwrite an output file if it already exists.')
multioutput_ap.add_argument('--sparse', action='store_true', default=False, help='Use append mode to write files, and close the handle between writes')
multioutput_ap.add_argument('--make-dirs', action='store_true', default=False, help='Make directories when necessary')
arg_parsers = (DESERIALISE_AP, multioutput_ap, get_evaluator_ap({'k': KFoldsEvaluator}),)
def __init__(self, argparser, args):
if '{' not in args.path_tpl:
argparser.error('Output path template must include a substitution (e.g. {n:02d} or {key})')
super(SplitApp, self).__init__(argparser, args)
if self.args.sparse:
if self.args.overwrite:
argparser.error('--overwrite does not apply with --sparse')
if isinstance(self.evaluator, KFoldsEvaluator):
argparser.error('k-folds cannot be used with --sparse')
if any(expr in args.path_tpl for expr in ('{n}', '{n!', '{n:')): # FIXME: use regexp
argparser.error('--sparse must use filenames templated by key')
def __call__(self):
# TODO: clean up!!
evaluator = self.evaluator
if isinstance(evaluator, KFoldsEvaluator):
# avoid full deserialisation
# TODO: make more generic
reader = self.raw_stream_reader
from drapps.util import RawDocWriter
make_writer = RawDocWriter
else:
reader, schema = self.get_reader_and_schema()
make_writer = lambda out: dr.Writer(out, schema)
if self.args.make_dirs:
def fopen(path, mode):
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
cur = ''
for part in dirname.split(os.path.sep):
cur += part
if part and not os.path.exists(cur):
os.mkdir(cur)
cur += os.path.sep
return open(path, mode)
else:
fopen = open
def new_writer(key):
fold_num = len(writers)
path = self.args.path_tpl.format(n=fold_num, key=key)
if not self.args.overwrite and os.path.exists(path):
print('Path {0} already exists. Use --overwrite to overwrite.'.format(path), file=sys.stderr)
sys.exit(1)
print('Writing fold {k} to {path}'.format(k=fold_num, path=path), file=sys.stderr)
return make_writer(fopen(path, 'wb'))
if self.args.sparse:
get_writer = lambda key: make_writer(fopen(self.args.path_tpl.format(key=key), 'ab'))
else:
writers = {}
def get_writer(key):
try:
writer = writers[key]
except KeyError:
writer = writers[key] = new_writer(key)
return writer
for i, doc in enumerate(reader):
val = evaluator(doc, i)
for key in val if isinstance(val, list) else (val,):
writer = get_writer(key)
writer.write(doc)
FormatApp.register_name('format')
GrepApp.register_name('grep')
SortApp.register_name('sort')
SplitApp.register_name('split')
| 6,879 |
sandbox/dead_code/resolver-dot-generator/util.py
|
pombreda/pkgcore
| 1 |
2025290
|
# Copyright: 2006 <NAME> <<EMAIL>>
# License: GPL2/BSD
def mangle_name(arg):
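# Quote a node name for DOT output, escaping any embedded double quotes.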
return '"%s"' % str(arg).replace('"', '\\"')
def dump_edge(parent, child, text):
return "%s->%s [label=%s];" % (mangle_name(parent), mangle_name(child), mangle_name(text))
def dump_dot_file_from_graph(graph, filepath, graph_name="dumped_graph"):
if isinstance(filepath, basestring):
fd = open(filepath, "w")
else:
fd = filepath
if not hasattr(fd, "write"):
raise TypeError("filepath must be either a file instance or a string filepath: got %s" % filepath)
fd.write("digraph %s {\n" % graph_name)
for a,data in graph.atoms.iteritems():
for parent in data[0]:
if data[1]:
for matches in data[1]:
fd.write("\t%s\n" % dump_edge(parent, matches, a))
else:
fd.write("\t%s\n" % dump_edge(parent, a, a))
fd.write("\tnode [shape=circle];\n")
for x in graph.pkgs.keys():
fd.write("\t%s\n" % mangle_name(x))
# fd.write("\tnode [shape=circle];\n\t%s;\n" % " ".join(map(mangle_name, graph.pkgs.keys())))
l = list(graph.unresolved_atoms())
if l:
fd.write("\tnode [shape=box];\n\t%s;\n" % " ".join(map(mangle_name, graph.unresolved_atoms())))
del l
fd.write("}\n");
| 1,178 |
python/advance_ddos/server_client_ddos/config.py
|
looopTools/scripts
| 0 |
2025282
|
import yaml
import io
class Config:
site = None
interval = None
def __init__(self, site, interval):
self.site = site
self.interval = interval
@staticmethod
def _read_configuration_file(path):
with open(path, 'r') as stream:
try:
return yaml.load(stream)
except yaml.YAMLError as e:
print(e)
return None
@staticmethod
def build(path):
config = Config._read_configuration_file(path)['config']
return Config(config['site'], config['interval'])
| 605 |
day21/snake/scoreboard.py
|
nurmatthias/100DaysOfCode
| 0 |
2025054
|
from turtle import Turtle
ALIGNMENT = "center"
FONT = "Verdana"
FONT_SIZE = 12
class ScoreBoard(Turtle):
def __init__(self) -> None:
super().__init__()
self.score = 0
self.shape("circle")
self.color("white")
self.speed("fastest")
self.hideturtle()
self.penup()
self.update_scoreboard()
def increase_score(self):
self.score += 1
self.update_scoreboard()
def update_scoreboard(self):
self.clear()
self.goto(x=0, y=310)
self.write(f"Score: {self.score}", align=ALIGNMENT,
font=(FONT, FONT_SIZE, "normal"))
def game_over(self):
self.clear()
self.goto(0, 0)
self.write("GAME OVER", align=ALIGNMENT,
font=(FONT, FONT_SIZE, "normal"))
self.goto(0, -40)
self.write(f"Your score was {self.score}", align=ALIGNMENT,
font=(FONT, FONT_SIZE, "normal"))
def pause(self):
self.clear()
self.goto(0, 0)
self.write("GAME IS PAUSED", align=ALIGNMENT,
font=(FONT, FONT_SIZE, "normal"))
| 1,141 |
src/txpasslib/test/doubles.py
|
mithrandi/txpasslib
| 1 |
2022899
|
"""
Test doubles.
"""
from twisted.internet.interfaces import IReactorFromThreads
from zope.interface import implementer
@implementer(IReactorFromThreads)
class SynchronousReactorThreads(object):
"""
An implementation of ``IReactorFromThreads`` that calls things
synchronously in the same thread.
"""
def callFromThread(self, f, *args, **kwargs): # noqa
f(*args, **kwargs)
__all__ = ['SynchronousReactorThreads']
| 446 |
network/urls.py
|
kroos783/network
| 0 |
2023256
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("login", views.login_view, name="login"),
path("logout", views.logout_view, name="logout"),
path("register", views.register, name="register"),
path("following", views.following, name="following"),
path("user/<str:username>", views.userPage, name="userPage"),
# API Routes
path("posts/submit", views.submit_post, name="submit_post"),
path("posts/<str:postbox>", views.show_posts, name="show_post"),
path("follow/<str:userID>", views.follow, name="follow"),
path("like/<int:postID>", views.like, name="like"),
path("like/count/<int:postID>", views.like_count, name="like"),
path("like/countnotloggin/<int:postID>",
views.like_count_notloggin, name="like"),
path("edit/<int:postID>", views.editPost, name="infoEditPost"),
path("edit/submit", views.editPostInDB, name="editPost")
]
| 958 |
backend/fief/dependencies/tasks.py
|
fief-dev/fief
| 1 |
2024025
|
from fief.tasks import SendTask, send_task
async def get_send_task() -> SendTask:
return send_task
| 105 |
yolapy/services.py
|
yola/yolapy
| 0 |
2022787
|
from demands import HTTPServiceClient
from yolapy.configuration import config as defaults
from yolapy.resources import (
campaign,
cname_zone,
partner,
site,
siteimport,
subscription,
user,
wl_task,
)
class Yola(
HTTPServiceClient,
campaign.CampaignResourceMixin,
cname_zone.CnameZoneMixin,
partner.PartnerResourceMixin,
site.SiteResourceMixin,
siteimport.SiteImportResourceMixin,
subscription.SubscriptionResourceMixin,
user.UserResourceMixin,
wl_task.WLTaskResourceMixin,
):
"""Client for Yola's API.
If using yolapy.configuration::
configure(
url='https://wl.yola.net/',
auth=('username', 'password'))
yola = Yola()
yola.get_user('user_id')
Or configured manually::
yola = Yola(
url='https://wl.yola.net/',
auth=('username', 'password'))
yola.get_user('user_id')
When appropriate, successful responses will return parsed json objects.
Failures will raise instances of ``demands.HTTPServiceError``.
"""
def __init__(self, **kwargs):
"""Initialize with optional headers.
Auth and url defaults are pulled from yolapy.configuration.
Passed arguments will override configuration::
Yola(headers={'Header-Name': 'value'})
"""
config = {}
config.update(defaults)
config.update(kwargs)
assert(config['url'])
assert(config['auth'])
self.username = config['auth'][0]
super(Yola, self).__init__(**config)
| 1,593 |
antilles/utils/math.py
|
biomicrodev/antilles
| 0 |
2025172
|
import math
from typing import Tuple, Iterator
import numpy
def cart2pol(x: float, y: float, in_deg: bool = True) -> Tuple[float, float]:
r = math.sqrt(pow(x, 2) + pow(y, 2))
theta = math.atan2(y, x)
if in_deg:
theta = math.degrees(theta)
return r, theta
def pol2cart(r: float, theta: float, in_degs: bool = True) -> Tuple[float, float]:
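# Wrap the angle into a canonical range (degrees: [-180, 180)) before converting to Cartesian coordinates.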
if in_degs:
theta = (theta + 180) % 360 - 180
theta = math.radians(theta)
else:
theta = (theta + (math.pi / 2)) % math.pi - (math.pi / 2)
x = r * math.cos(theta)
y = r * math.sin(theta)
return x, y
def make_even_grid(n: int) -> Tuple[int, int]:
nx = int(math.ceil(math.sqrt(n)))
ny = int(math.ceil(float(n) / float(nx)))
return nx, ny
def init_arrow_coords(dims: Tuple[int, int], n: int) -> Iterator[Tuple[int, int]]:
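# Spread n anchor points over an evenly spaced nx-by-ny grid inside a w x h area, skipping the outer border.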
w, h = dims
nx, ny = make_even_grid(n)
xx = numpy.linspace(0, w, num=nx + 1, endpoint=False)[1:]
yy = numpy.linspace(0, h, num=ny + 1, endpoint=False)[1:]
yy, xx = numpy.meshgrid(yy, xx)
xx, yy = numpy.ravel(xx), numpy.ravel(yy)
xx, yy = xx[:n], yy[:n]
xx, yy = (int(round(x)) for x in xx), (int(round(y)) for y in yy)
return zip(xx, yy)
| 1,226 |
setup.py
|
aleksandermajos/BIGAI
| 1 |
2025262
|
from setuptools import find_packages, setup
setup(
name='BIGAI',
version='0.0.2.5',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
download_url='https://github.com/aleksandermajos/BIGAI',
license='LICENSE.txt',
description='An AI Library as a Baseline to create AI Projects',
long_description=open('README.md').read(),
install_requires=[
"torch>=1.8",
"pytorch-lightning>=1.2.5",
],
)
| 501 |
LeetCode/Answers/Leetcode-python-solution/020ValidParentheses.py
|
quantumlaser/code2016
| 0 |
2025164
|
# Problem: http://oj.leetcode.com/problems/valid-parentheses/
# Analysis: http://blog.csdn.net/lilong_dream/article/details/21694751
# <EMAIL>
class Solution:
# @return a boolean
def isValid(self, s):
if len(s) == 0:
return True
st = [s[0]]
i = 1
while i < len(s):
if len(st) == 0:
st.append(s[i])
else:
tmp = st.pop()
if self.isMatch(tmp, s[i]):
pass
else:
st.append(tmp)
st.append(s[i])
i += 1
if len(st) == 0:
return True
return False
def isMatch(self, s, p):
if (s == '(' and p == ')') or \
(s == '{' and p == '}') or \
(s == '[' and p == ']'):
return True
return False
if __name__ == "__main__":
slt = Solution()
s = "()"
print(slt.isValid(s))
| 1,033 |
tools/generate-stubs.py
|
CityOfZion/BigIntegerCpp
| 3 |
2025520
|
"""
Build stub files using just 1 interpreter (python 3.8 hardcoded)
and using a specified platform ('macosx', 'linux' or 'win')
Syntax:
generate-stubs <wheel dir> <platform> <extension name>
Example:
generate-stubs ./dist macosx pybiginteger
"""
import sys
import os
import subprocess
import re
def install_pkg(package_name, wheel_dir, platform):
for file in os.listdir(wheel_dir):
if 'cp38' in file and platform in file and file.endswith('.whl'):
# install without dependencies, because it depends on the stub files which we're building here
subprocess.run(['pip', 'install', wheel_dir + '/' + file, '--no-dependencies', '--force-reinstall'])
version = re.search(f"{package_name}-(.+?)-.*", file).group(1)
return version
def create_setup(package_name, package_version, wheel_dir):
with open(f"{wheel_dir}/setup.py", "w") as setuppy:
setuppy.write("""from setuptools import setup
setup(
name='{package_name}-stubs',
maintainer="<NAME>",
maintainer_email="<EMAIL>",
description="PEP 561 type stubs for {package_name}",
version='{package_version}',
packages=['{package_name}-stubs'],
# PEP 561 requires these
install_requires=['{package_name}=={package_version}'],
include_package_data=True
)
""".format(package_name=package_name, package_version=package_version))
with open(f"{wheel_dir}/MANIFEST.in", "w") as manifest_setup:
manifest_setup.write(f"""include {package_name}-stubs/*""")
def main():
wheel_dir = sys.argv[1]
platform = sys.argv[2]
package_name = sys.argv[3]
# prepare by installing the extension and the stubgen tool
subprocess.check_call(['python', '-m', 'pip', 'install', 'pybind11-stubgen'])
package_version = install_pkg(package_name, wheel_dir, platform)
# generate stubs
subprocess.check_call(['pybind11-stubgen', '--skip-signature-downgrade', '--ignore-invalid=all', '--no-setup-py', '-o', '.', package_name], cwd=wheel_dir)
# create setup.py with proper information
create_setup(package_name, package_version, wheel_dir)
# polish up stub files
subprocess.check_call(['python', './tools/fix-stubs.py', f"./{wheel_dir}/{package_name}-stubs/__init__.pyi"])
# build wheel
subprocess.check_call(['python', 'setup.py', 'bdist_wheel', '--dist-dir', '.'], cwd=wheel_dir)
if __name__ == "__main__":
main()
| 2,415 |
perception/features.py
|
esteng/guiding-multi-step
| 69 |
2025136
|
"""
Classes for features of a 3D object surface.
Author: <NAME>
"""
from abc import ABCMeta, abstractmethod
import numpy as np
class Feature:
""" Abstract class for features """
__metaclass__ = ABCMeta
def __init__(self):
pass
class LocalFeature(Feature):
""" Local (e.g. pointwise) features on shape surfaces.
Attributes
----------
descriptor : :obj:`numpy.ndarray`
vector to describe the point
reference_frame : :obj:`numpy.ndarray`
reference frame of the descriptor, as an array
point : :obj:`numpy.ndarray`
3D point on shape surface that descriptor corresponds to
normal : :obj:`numpy.ndarray`
3D surface normal on shape surface at corresponding point
"""
__metaclass__ = ABCMeta
def __init__(self, descriptor, rf, point, normal):
self.descriptor_ = descriptor
self.rf_ = rf
self.point_ = point
self.normal_ = normal
@property
def descriptor(self):
return self.descriptor_
@property
def reference_frame(self):
return self.rf_
@property
def keypoint(self):
return self.point_
@property
def normal(self):
return self.normal_
class GlobalFeature(Feature):
""" Global features of a full shape surface.
Attributes
----------
key : :obj:`str`
object key in database that descriptor corresponds to
descriptor : :obj:`numpy.ndarray`
vector to describe the object
pose : :obj:`autolab_core.RigidTransform`
pose of object for the descriptor, if relevant
"""
__metaclass__ = ABCMeta
def __init__(self, key, descriptor, pose=None):
self.key_ = key
self.descriptor_ = descriptor
self.pose_ = pose
@property
def key(self):
return self.key_
@property
def descriptor(self):
return self.descriptor_
@property
def pose(self):
return self.pose_
class SHOTFeature(LocalFeature):
""" Signature of Oriented Histogram (SHOT) features """
def __init__(self, descriptor, rf, point, normal):
LocalFeature.__init__(self, descriptor, rf, point, normal)
class MVCNNFeature(GlobalFeature):
""" Multi-View Convolutional Neural Network (MV-CNN) descriptor """
def __init__(self, key, descriptor, pose=None):
GlobalFeature.__init__(self, key, descriptor, pose)
class BagOfFeatures:
""" Wrapper for a list of features, created for the sake of future bag-of-words reps.
Attributes
----------
features : :obj:`list` of :obj:`Feature`
list of feature objects
"""
def __init__(self, features = None):
self.features_ = features
if self.features_ is None:
self.features_ = []
self.num_features_ = len(self.features_)
def add(self, feature):
""" Add a new feature to the bag.
Parameters
----------
feature : :obj:`Feature`
feature to add
"""
self.features_.append(feature)
self.num_features_ = len(self.features_)
def extend(self, features):
""" Add a list of features to the bag.
Parameters
----------
feature : :obj:`list` of :obj:`Feature`
features to add
"""
self.features_.extend(features)
self.num_features_ = len(self.features_)
def feature(self, index):
""" Returns a feature.
Parameters
----------
index : int
index of feature in list
Returns
-------
:obj:`Feature`
"""
if index < 0 or index >= self.num_features_:
raise ValueError('Index %d out of range' %(index))
return self.features_[index]
def feature_subset(self, indices):
""" Returns some subset of the features.
Parameters
----------
indices : :obj:`list` of :obj:`int`
indices of the features in the list
Returns
-------
:obj:`list` of :obj:`Feature`
"""
if isinstance(indices, np.ndarray):
indices = indices.tolist()
if not isinstance(indices, list):
raise ValueError('Can only index with lists')
return [self.features_[i] for i in indices]
@property
def num_features(self):
return self.num_features_
@property
def descriptors(self):
""" Make a nice array of the descriptors """
return np.array([f.descriptor for f in self.features_])
@property
def reference_frames(self):
""" Make a nice array of the reference frames """
return np.array([f.reference_frame for f in self.features_])
@property
def keypoints(self):
""" Make a nice array of the keypoints """
return np.array([f.keypoint for f in self.features_])
@property
def normals(self):
""" Make a nice array of the normals """
return np.array([f.normal for f in self.features_])
| 5,040 |
dataset_test.py
|
Cinkupis/chasing-arguments
| 0 |
2023554
|
import unittest
import datasets
import os
class TestDatasets(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestDatasets, self).__init__(*args, **kwargs)
self.ds = datasets.CheckDownloadUnzipData()
def test_download_glove(self):
self.ds.check_glove_840b()
self.assertEqual(os.path.isfile(datasets.glove_840b_zip_file), True)
def test_download_snli(self):
self.ds.check_snli()
self.assertEqual(os.path.isfile(datasets.snli_zip_file), True)
def test_unzip_data(self):
self.ds.unzip_all()
self.assertEqual(os.path.isfile(datasets.glove_vectors_840B_300d), True)
self.assertEqual(os.path.isfile(datasets.snli_full_dataset_file), True)
self.assertEqual(os.path.isfile(datasets.snli_test_file), True)
self.assertEqual(os.path.isfile(datasets.snli_dev_file), True)
if __name__ == '__main__':
unittest.main()
| 932 |
quiz_brain.py
|
bv94/quiz-app
| 0 |
2023237
|
import os
class Quizbrain:
def __init__(self, q_list):
self.question_number = 0
self.question_list = q_list
self.score = 0
def next_question(self):
# Use the current index before incrementing; the previous "- 1" offset made the quiz start at the last question.
current_question = self.question_list[self.question_number]
self.question_number += 1
return input(f'q.{self.question_number}: {current_question.text}(t,f)\n')
def evaluator(self, guess, answer):
guess = str(self.answer_corrector[guess])
print(answer)
if guess == answer:
input("that's correct")
# os.system("clear")
self.score += 1
return True
else:
input("that is wrong")
# os.system("clear")
return False
def still_has_questions(self):
return self.question_number < len(self.question_list)
answer_corrector = {
't': True,
'f': False,
"T": True,
'F': False
}
| 777 |
buildroot/package/lmsmpris/src/audiocontrol2.py
|
mgrosso/hifiberry-os
| 637 |
2024114
|
import signal
import configparser
import logging
from mpris import MPRISController
from metadata import MetadataConsole, MetadataScrobbler
from webserver import AudioControlWebserver
mpris = MPRISController()
def pause_all(signalNumber=None, frame=None):
"""
Pause all players on SIGUSR1
"""
if mpris is not None:
mpris.pause_all()
def print_state(signalNumber=None, frame=None):
"""
Display state on USR2
"""
if mpris is not None:
print("\n" + str(mpris))
def parse_config():
config = configparser.ConfigParser()
config.read('/etc/audiocontrol2.conf')
# Auto pause for mpris players
auto_pause = config.getboolean('mpris', 'auto_pause',
fallback=False)
logging.debug("Setting auto_pause for MPRIS players to %s",
auto_pause)
mpris.auto_pause = auto_pause
# Console metadata logger
if config.getboolean('metadata', 'logger-console', fallback=False):
logging.debug("Starting console logger")
mpris.register_metadata_display(MetadataConsole())
# Web server
if config.getboolean('webserver', 'webserver-enable', fallback=False):
logging.debug("Starting webserver")
port = config.getint('webserver',
'webserver-port',
fallback=9001)
ws = AudioControlWebserver(port=port)
ws.run_server()
mpris.register_metadata_display(ws)
# Scrobbler
scrobbler_network = config.get("scrobbler", "scrobbler-network",
fallback="lastfm")
scrobbler_apikey = config.get("scrobbler", "scrobbler-apikey")
scrobbler_apisecret = config.get("scrobbler", "scrobbler-apisecret")
scrobbler_username = config.get("scrobbler", "scrobbler-username")
scrobbler_password = config.get("scrobbler", "scrobbler-password")
if (scrobbler_apikey is not None) and \
(scrobbler_apisecret is not None) and \
(scrobbler_username is not None) and \
(scrobbler_password is not None):
try:
scrobbler = MetadataScrobbler(scrobbler_apikey,
scrobbler_apisecret,
scrobbler_username,
scrobbler_password,
None,
scrobbler_network)
mpris.register_metadata_display(scrobbler)
logging.info("Scrobbling to %s", scrobbler_network)
except Exception as e:
logging.error(e)
def main():
parse_config()
signal.signal(signal.SIGUSR1, pause_all)
signal.signal(signal.SIGUSR2, print_state)
# mpris.print_players()
mpris.main_loop()
main()
| 2,831 |
syntropy_sdk/models/agent_filters_object.py
|
SyntropyNet/syntropy-python-sdk
| 1 |
2024998
|
# coding: utf-8
"""
syntropy-controller
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AgentFiltersObject(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"agent_names": "list[TsoaPickAgentAgentNameOrAgentId_]",
"versions": "list[TsoaPickAgentAgentVersion_]",
"countries": "list[TsoaPickAgentAgentLocationCountry_]",
}
attribute_map = {
"agent_names": "agentNames",
"versions": "versions",
"countries": "countries",
}
def __init__(self, agent_names=None, versions=None, countries=None): # noqa: E501
"""AgentFiltersObject - a model defined in Swagger""" # noqa: E501
self._agent_names = None
self._versions = None
self._countries = None
self.discriminator = None
self.agent_names = agent_names
self.versions = versions
self.countries = countries
@property
def agent_names(self):
"""Gets the agent_names of this AgentFiltersObject. # noqa: E501
:return: The agent_names of this AgentFiltersObject. # noqa: E501
:rtype: list[TsoaPickAgentAgentNameOrAgentId_]
"""
return self._agent_names
@agent_names.setter
def agent_names(self, agent_names):
"""Sets the agent_names of this AgentFiltersObject.
:param agent_names: The agent_names of this AgentFiltersObject. # noqa: E501
:type: list[TsoaPickAgentAgentNameOrAgentId_]
"""
if agent_names is None:
raise ValueError(
"Invalid value for `agent_names`, must not be `None`"
) # noqa: E501
self._agent_names = agent_names
@property
def versions(self):
"""Gets the versions of this AgentFiltersObject. # noqa: E501
:return: The versions of this AgentFiltersObject. # noqa: E501
:rtype: list[TsoaPickAgentAgentVersion_]
"""
return self._versions
@versions.setter
def versions(self, versions):
"""Sets the versions of this AgentFiltersObject.
:param versions: The versions of this AgentFiltersObject. # noqa: E501
:type: list[TsoaPickAgentAgentVersion_]
"""
if versions is None:
raise ValueError(
"Invalid value for `versions`, must not be `None`"
) # noqa: E501
self._versions = versions
@property
def countries(self):
"""Gets the countries of this AgentFiltersObject. # noqa: E501
:return: The countries of this AgentFiltersObject. # noqa: E501
:rtype: list[TsoaPickAgentAgentLocationCountry_]
"""
return self._countries
@countries.setter
def countries(self, countries):
"""Sets the countries of this AgentFiltersObject.
:param countries: The countries of this AgentFiltersObject. # noqa: E501
:type: list[TsoaPickAgentAgentLocationCountry_]
"""
if countries is None:
raise ValueError(
"Invalid value for `countries`, must not be `None`"
) # noqa: E501
self._countries = countries
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(AgentFiltersObject, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AgentFiltersObject):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 5,314 |
src/dispenv/docker.py
|
pmbaumgartner/dispenv
| 3 |
2024480
|
import re
import shutil
from pathlib import Path
from subprocess import CompletedProcess, run
from typing import Any, Dict, List, NewType, Optional, Tuple
import srsly
from ._types import EnvData, URLString
from .checks import docker_installed, docker_running, run_docker_checks
from .consts import (
DEVCONTAINER_TEMPLATE,
DOCKERFILE_TEMPLATE,
DOCKERFILE_TEMPLATE_WITH_REQUIREMENTS,
)
from .github import get_requirements_from_gist
from wasabi import msg
def create(env_data: EnvData):
run_docker_checks()
folder_path = Path(env_data.folder_name).resolve()
msg.info(f"Creating Folder: {folder_path}")
folder_path.mkdir(parents=True, exist_ok=False)
msg.info(f"Creating Environment: {env_data.environment_name}")
(folder_path / ".devcontainer").mkdir(exist_ok=False)
(folder_path / ".devcontainer" / "devcontainer.json").write_text(
DEVCONTAINER_TEMPLATE.format(environment_name=env_data.environment_name)
)
dockerfile_path = folder_path / "Dockerfile"
if env_data.requirements_txt_gist:
get_requirements_from_gist(env_data.requirements_txt_gist, folder_path)
dockerfile_path.write_text(
DOCKERFILE_TEMPLATE_WITH_REQUIREMENTS.format(
python_version=env_data.python_version
)
)
else:
dockerfile_path.write_text(
DOCKERFILE_TEMPLATE.format(python_version=env_data.python_version)
)
if env_data.options.build_image:
msg.info("Building Docker Image")
run(
[
"docker",
"build",
"-f",
str(dockerfile_path),
"-t",
str(env_data.environment_name),
str(folder_path),
]
)
msg.good("Built Docker Image")
srsly.write_yaml(folder_path / ".dispenv.yaml", env_data.dict())
def cleanup(dispenv_data: Dict[str, Any]) -> None:
msg.info("Removing Folder")
folder_path = Path(dispenv_data["folder_name"]).resolve()
run(["rm", "-rf", str(folder_path)], capture_output=True)
msg.info("Stopping containers running image.")
docker_ps_output = run(["docker", "ps", "-a"], capture_output=True)
container_ids = get_containers_running_image(
docker_ps_output, dispenv_data["environment_name"]
)
for cid in container_ids:
run(["docker", "stop", cid], capture_output=True)
run(["docker", "rm", cid], capture_output=True)
msg.info("Removing image.")
docker_ps_output = run(["docker", "images"], capture_output=True)
for image in get_images(docker_ps_output, dispenv_data["environment_name"]):
run(["docker", "rmi", image])
msg.good("Cleanup Complete.")
def _imagecheck(input_image: str, reference_image: str):
# vscode-dev container start with `vsc`
input_image = input_image.strip()
vsc_image = input_image.startswith(f"vsc-{reference_image}")
default_image = input_image == reference_image
return vsc_image or default_image
def get_containers_running_image(
docker_ps_process: CompletedProcess, image_name: str
) -> List[str]:
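# Parse the 'docker ps -a' output: skip the header row; column 0 is the container id, column 1 the image name.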
lines = [
line.split()
for line in docker_ps_process.stdout.decode().split("\n")[1:]
if line.strip()
]
container_ids_running_image = [
line[0].strip() for line in lines if _imagecheck(line[1], image_name)
]
return container_ids_running_image
def get_images(docker_images_process, image_name):
lines = [
line.split()
for line in docker_images_process.stdout.decode().split("\n")[1:]
if line.strip()
]
images = [line[0].strip() for line in lines if _imagecheck(line[0], image_name)]
return images
| 3,735 |
Python/Stepik/Beginner/Nested_loops/digital-root.py
|
SergeyOcheretenko/PythonLearning
| 0 |
2024294
|
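# Digital root: repeatedly replace n with the sum of its digits until a single digit remains.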
n = int(input())
sm = 0
while True:
while n != 0:
sm += n % 10
n //= 10
if sm > 9:
n = sm
sm = 0
else:
print(sm)
break
| 179 |
pyjpegtbx/functions.py
|
xigua0106/pyjpegtbx
| 1 |
2025265
|
#!/usr/bin/env python
#encoding=utf-8
import ctypes
import platform
from .structs import (
jmp_buf, jpeg_error_mgr, j_decompress_ptr, j_compress_ptr,
JSAMPARRAY, jvirt_barray_ptr
)
__all__ = [
'cfopen', 'cfclose',
'csetjmp', 'clongjmp',
'jfuncs',
'funcs_metadata',
]
_all_libs = (
('libjpeg.dll', 'libjpeg.so', 'libjpeg.dylib'),
('c.dll', 'libc.so', 'libc.dylib'),
)
def __loadLib(liblst):
found = False
for libname in liblst:
try:
_lib = ctypes.cdll.LoadLibrary(libname)
found = True
return _lib
except OSError:
pass
if not found:
raise ImportError("ERROR: fail to load the dynamic library.")
if platform.system() == "Windows":
_jpeg = ctypes.CDLL("libjpeg")
_c = ctypes.cdll.msvcrt
else:
_jpeg = __loadLib(_all_libs[0])
_c = __loadLib(_all_libs[1])
def jround_up(a, b):
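# Round a up to the nearest multiple of b.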
a += b - 1
return a - (a % b)
cfopen = _c.fopen
cfopen.restype = ctypes.c_void_p
cfopen.argtypes = (
ctypes.POINTER(ctypes.c_char), ctypes.POINTER(ctypes.c_char)
)
cfclose = _c.fclose
cfclose.restype = None
cfclose.argtypes = (ctypes.c_void_p, )
# csetjmp = _c.setjmp
# csetjmp.restype = ctypes.c_int
# csetjmp.argtypes = (jmp_buf, )
# clongjmp = _c.longjmp
# clongjmp.restype = None
# clongjmp.argtypes = (jmp_buf, ctypes.c_int)
jfuncs = {}
def register_jpeg_function(funcname, restype, argtypes, asFuncname=None):
func = _jpeg.__getattr__(funcname)
func.restype = restype
func.argtypes = argtypes
if asFuncname is None:
asFuncname = funcname
jfuncs[asFuncname] = func
funcs_metadata = (
('jpeg_std_error',
ctypes.POINTER(jpeg_error_mgr),
(ctypes.POINTER(jpeg_error_mgr),),
'jStdError'),
('jpeg_CreateDecompress',
None,
(j_decompress_ptr, ctypes.c_int, ctypes.c_size_t),
'jCreaDecompress'),
('jpeg_CreateCompress',
None,
(j_compress_ptr, ctypes.c_int, ctypes.c_size_t),
'jCreaCompress'),
('jpeg_stdio_src',
None,
(j_decompress_ptr, ctypes.c_void_p),
'jStdSrc'),
('jpeg_stdio_dest',
None,
(j_compress_ptr, ctypes.c_void_p),
'jStdDest'),
('jpeg_mem_src',
None,
(j_decompress_ptr, ctypes.c_char_p, ctypes.c_ulong),
'jMemSrc'),
('jpeg_mem_dest',
None,
(j_compress_ptr,
ctypes.POINTER(ctypes.POINTER(ctypes.c_char)),
ctypes.POINTER(ctypes.c_long)),
'jMemDest'),
('jpeg_start_compress',
None,
(j_compress_ptr, ctypes.c_int),
'jStrtCompress'),
('jpeg_start_decompress',
ctypes.c_int,
(j_decompress_ptr, ),
'jStrtDecompress'),
('jpeg_set_defaults',
None,
(j_compress_ptr, ),
'jSetDefaults'),
('jpeg_set_quality',
None,
(j_compress_ptr, ctypes.c_int, ctypes.c_int),
'jSetQuality'),
('jpeg_simple_progression',
None,
(j_compress_ptr, ),
'jSimProgress'),
('jpeg_read_header',
None,
(j_decompress_ptr, ctypes.c_bool),
'jReadHeader'),
('jpeg_write_scanlines',
ctypes.c_uint,
(j_compress_ptr, JSAMPARRAY, ctypes.c_uint),
'jWrtScanlines'),
('jpeg_read_scanlines',
ctypes.c_uint,
(j_decompress_ptr, JSAMPARRAY, ctypes.c_uint),
'jReadScanlines'),
('jpeg_write_coefficients',
None,
(j_compress_ptr, ctypes.POINTER(jvirt_barray_ptr)),
'jWrtCoefs'),
('jpeg_read_coefficients',
jvirt_barray_ptr,
(j_decompress_ptr, ),
'jReadCoefs'),
('jpeg_finish_compress',
ctypes.c_int,
(j_compress_ptr, ),
'jFinCompress'),
('jpeg_finish_decompress',
ctypes.c_int,
(j_decompress_ptr, ),
'jFinDecompress'),
('jpeg_destroy_compress',
None,
(j_compress_ptr, ),
'jDestCompress'),
('jpeg_destroy_decompress',
None,
(j_decompress_ptr, ),
'jDestDecompress'),
)
jpeg_alloc_quant_table = _jpeg.jpeg_alloc_quant_table
jpeg_alloc_huff_table = _jpeg.jpeg_alloc_huff_table
for funcname, res, args, shortname in funcs_metadata:
register_jpeg_function(funcname, res, args, shortname)
| 4,319 |
Recursion/Sorting/selection_sort.py
|
sounak95/100_days_of_code
| 0 |
2025453
|
def selection_sort(arr, r,c,max):
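# r: length of the still-unsorted prefix, c: index currently scanned, max: index of the largest value seen so far.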
if r == 0:
return
if c < r:
if arr[c]>arr[max]:
selection_sort(arr,r,c+1,c)
else:
selection_sort(arr,r,c+1,max)
else:
arr[r-1],arr[max]= arr[max], arr[r-1]
selection_sort(arr,r-1,0,0)
if __name__ == "__main__":
arr=[4,3,2,1]
selection_sort(arr,len(arr),0, 0)
print(arr)
| 395 |
tests/unit_scarlett_os.py
|
bossjones/scarlett-os
| 5 |
2022890
|
# -*- coding: utf-8 -*-
import doctest
import sys
import time
import unittest
import scarlett_os
class _TextTestResult(unittest.TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = "=" * 70
separator2 = "-" * 70
def __init__(self, stream, descriptions, verbosity):
unittest.TestResult.__init__(self)
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
self.currentTestCase = None
def getDescription(self, test):
if self.descriptions:
return test.shortDescription() or str(test)
else:
return str(test)
def startTest(self, test):
unittest.TestResult.startTest(self, test)
if self.showAll:
if self.currentTestCase != test.__class__:
self.currentTestCase = test.__class__
self.stream.writeln()
self.stream.writeln("[%s]" % self.currentTestCase.__name__)
self.stream.write(" " + self.getDescription(test))
self.stream.write(" ... ")
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write(".")
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write("E")
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write("F")
def addSkip(self, test, reason):
unittest.TestResult.addSkip(self, test, reason)
if self.showAll:
self.stream.writeln("SKIP : " + reason)
elif self.dots:
self.stream.write("S")
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList("ERROR", self.errors)
self.printErrorList("FAIL", self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln(
"%s: [%s] --> %s "
% (flavour, test.__class__.__name__, self.getDescription(test))
)
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
class _WritelnDecorator:
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self, stream):
self.stream = stream
def __getattr__(self, attr):
return getattr(self.stream, attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write("\n") # text-mode streams translate to \r\n if needed
class TestRunner:
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
def __init__(self, stream=sys.stderr, descriptions=1, verbosity=2):
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
def _makeResult(self):
return _TextTestResult(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln(
"Ran %d test%s in %.3fs" % (run, run != 1 and "s" or "", timeTaken)
)
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = map(len, (result.failures, result.errors))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed:
self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
else:
self.stream.writeln("OK")
return result
def run_test_module(test_modules_list=None, test_prefix=None):
suite = unittest.TestSuite()
finder = doctest.DocTestFinder(exclude_empty=False) # finder for doctest
if test_prefix:
unittest.TestLoader.testMethodPrefix = test_prefix
if not test_modules_list:
test_modules_list = []
elif not isinstance(test_modules_list, list):
test_modules_list = [test_modules_list]
test_modules_list.append("__main__")
for test in test_modules_list:
# Doctest
suite.addTest(doctest.DocTestSuite(test, test_finder=finder))
# unittest
suite.addTest(unittest.loader.TestLoader().loadTestsFromModule(test))
TestRunner().run(suite)
| 5,266 |
exercises/knowledge_databases.py
|
omar19-meet/y2s18-databases
| 0 |
2025517
|
from knowledge_model import Base, Knowledge
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///knowledge.db')
Base.metadata.create_all(engine)
DBSession = sessionmaker(bind=engine)
session = DBSession()
def add_article(topic_name,article_topic,article_title,rating):
knowledge_object=Knowledge(
topic_name=topic_name,
article_topic=article_topic,
article_title=article_title,
rating=rating)
session.add(knowledge_object)
session.commit()
def query_all_articles():
knowledge=session.query(Knowledge).all()
return knowledge
def query_article_by_topic(name):
knowledge=session.query(Knowledge).filter_by(topic_name=name).all()
return knowledge
def query_article_by_rating(threshold):
knowledge=session.query(Knowledge).filter(Knowledge.rating< threshold).all()
return knowledge
def query_article_by_primary_key(id):
knowledge=session.query(Knowledge).filter_by(article_id=id).first()
return knowledge
def delete_article_by_topic(name):
session.query(Knowledge).filter_by(article_topic=name).delete()
session.commit()
def delete_all_articles():
session.query(Knowledge).delete()
session.commit()
def edit_article_rating(updated_rating, topic_name):
knowledge=session.query(Knowledge).filter_by(topic_name=topic_name).first()
knowledge.rating=updated_rating
session.commit()
def delete_article_by_rating(threshold):
session.query(Knowledge).filter(Knowledge.rating<threshold).delete()
session.commit()
def query_top_five():
# The original nested loops never terminated when a rating value was missing and returned every row;
# order by rating (highest first) and keep the top five instead.
knowledge = session.query(Knowledge).order_by(Knowledge.rating.desc()).limit(5).all()
return knowledge
#add_article("jazz","<NAME>","The Louis Armstrong Foundation", 6)
#edit_article_rating(9,"jazz")
#print(query_all_articles())
print(query_top_five())
| 1,825 |
setup.py
|
Edanflame/vnpy_femas
| 0 |
2023669
|
import platform
from setuptools import Extension, setup
def get_ext_modules() -> list:
"""
Get the third-party extension modules.
The wrapper API must be compiled on Windows.
Linux and Mac are not supported yet.
"""
extra_compile_flags = ["-O2", "-MT"]
extra_link_args = []
runtime_library_dirs = []
vnfemasmd = Extension(
"vnpy_femas.api.vnfemasmd",
[
"vnpy_femas/api/vnfemas/vnfemasmd/vnfemasmd.cpp",
],
include_dirs=["vnpy_femas/api/include",
"vnpy_femas/api/vnfemas"],
define_macros=[],
undef_macros=[],
library_dirs=["vnpy_femas/api/libs", "vnpy_femas/api"],
libraries=["USTPmduserapiAF", "USTPtraderapiAF"],
extra_compile_args=extra_compile_flags,
extra_link_args=extra_link_args,
runtime_library_dirs=runtime_library_dirs,
depends=[],
language="cpp",
)
vnfemastd = Extension(
"vnpy_femas.api.vnfemastd",
[
"vnpy_femas/api/vnfemas/vnfemastd/vnfemastd.cpp",
],
include_dirs=["vnpy_femas/api/include",
"vnpy_femas/api/vnfemas"],
define_macros=[],
undef_macros=[],
library_dirs=["vnpy_femas/api/libs", "vnpy_femas/api"],
libraries=["USTPmduserapiAF", "USTPtraderapiAF"],
extra_compile_args=extra_compile_flags,
extra_link_args=extra_link_args,
runtime_library_dirs=runtime_library_dirs,
depends=[],
language="cpp",
)
return [vnfemastd, vnfemasmd]
setup(
ext_modules=get_ext_modules(),
)
| 1,547 |
ont-contracts/demo/test.py
|
siovanus/chainlink
| 1 |
2025355
|
from ontology.builtins import concat
from ontology.interop.Ontology.Runtime import Base58ToAddress
from ontology.interop.System.App import RegisterAppCall, DynamicAppCall
from ontology.interop.System.ExecutionEngine import GetExecutingScriptHash
from ontology.interop.System.Runtime import CheckWitness, Notify
from ontology.interop.System.Storage import GetContext, Put, Get
from ontology.libont import bytearray_reverse
CURRENT_PRICE = 'CurrentPrice'
OWNER = Base58ToAddress('AbG3ZgFrMK6fqwXWR1WkQ1d1EYVunCwknu')
ChainlinkCall = RegisterAppCall('ed6bb0abbe24e5603a7f2a5c44e056f3eaeb5949', 'operation', 'args')
ChainlinkClientCall = RegisterAppCall('fb11d3b30a54ae147e86f57d9e554578f68a0041', 'operation', 'args')
Link = RegisterAppCall('bfb52e4b8a5b49099e1ac0ef55789053f2ea347d', 'operation', 'args')
OracleCall = RegisterAppCall('04dc7f8a0ff88de0784ef742650a1d79495565ae', 'operation', 'args')
CBORCall = RegisterAppCall('3f75e2814021abed8a616da8d408d1347cac988f', 'operation', 'args')
ContractAddress = GetExecutingScriptHash()
def Main(operation, args):
if operation == 'requestEthereumPrice':
assert (len(args) == 3)
oracle = args[0]
jobId = args[1]
payment = args[2]
return requestEthereumPrice(oracle, jobId, payment)
return False
def requestEthereumPrice(oracle, jobId, payment):
# assert (CheckWitness(OWNER))
req = ChainlinkClientCall('buildChainlinkRequest', [jobId, ContractAddress, 'fullfill'])
req = ChainlinkCall('add', [req, "url", "https://etherprice.com/api"])
req = ChainlinkCall('addStringArray', [req, "path", ["recent", "usd"]])
# Notify([OWNER, oracle, req, payment])
assert (ChainlinkClientCall('sendChainlinkRequestTo', [OWNER, oracle, req, payment]))
return [OWNER, oracle, req, payment]
def addStringArray(request, key, values):
request = CBORCall('encodeString', [request, key])
request = CBORCall('startArray', request)
# Iterate over the values themselves; range() over a list is not valid.
for value in values:
request = CBORCall('encodeString', [request, value])
request = CBORCall('endSequence', request)
return request
def DynamicCallFunction(callAddress, callbackFunctionId, params):
res = DynamicAppCall(callAddress, callbackFunctionId, params)
if res and res == b'\x01':
return True
else:
return False
def DynamicCallFunctionResult(callAddress, callbackFunctionId, params):
return DynamicAppCall(callAddress, callbackFunctionId, params)
| 2,460 |
lc/0750_NumOfCornerRectangles.py
|
xiangshiyin/coding-challenge
| 0 |
2025182
|
class Solution:
def countCornerRectangles(self, grid: List[List[int]]) -> int:
m = len(grid)
n = len(grid[0])
if m == 1 or n == 1:
return 0
# step 1: traverse the list and build lookup tables of 1s in each row
tb = [
set([j for j in range(n) if grid[i][j] == 1])
for i in range(m)
]
# step 2: traverse each row again, search corner rectangles
counter = 0
for i in range(1, m):
if len(tb[i]) > 1:
for j in range(i):
overlap = len(tb[j] & tb[i])
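# Any two columns shared by rows j and i form a corner rectangle, giving C(overlap, 2) new rectangles.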
counter += overlap * (overlap - 1) // 2
return counter
| 718 |
lib/spack/spack/test/cmd/init_py_functions.py
|
LiamBindle/spack
| 2,360 |
2024787
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
from spack.cmd import (
CommandNameError,
PythonNameError,
cmd_name,
python_name,
require_cmd_name,
require_python_name,
)
def test_require_python_name():
"""Python module names should not contain dashes---ensure that
require_python_name() raises the appropriate exception if one is
detected.
"""
require_python_name("okey_dokey")
with pytest.raises(PythonNameError):
require_python_name("okey-dokey")
require_python_name(python_name("okey-dokey"))
def test_require_cmd_name():
"""By convention, Spack command names should contain dashes rather than
underscores---ensure that require_cmd_name() raises the appropriate
exception if underscores are detected.
"""
require_cmd_name("okey-dokey")
with pytest.raises(CommandNameError):
require_cmd_name("okey_dokey")
require_cmd_name(cmd_name("okey_dokey"))
| 1,114 |
python/geeksforgeeks/trees/check_sum.py
|
othonreyes/code_problems
| 0 |
2024903
|
"""
https://www.geeksforgeeks.org/check-for-children-sum-property-in-a-binary-tree/
"""
class Node:
def __init__(self, v):
self.left = None
self.right = None
self.value = v
def check_sum(n:Node) -> int:
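# Return the subtree root's value if the subtree satisfies the children-sum property, else -1 (which then fails the parent's check).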
if not n:
return 0
if not n.left and not n.right:
return n.value
if (check_sum(n.left) + check_sum(n.right)) == n.value:
return n.value
return -1
def check_sum_root(root:Node) -> bool:
if not root:
return False
val = check_sum(root)
if val != root.value:
return False
return True
if __name__ == "__main__":
root = Node(10)
root.left = Node(8)
root.left.left = Node(3)
root.left.right = Node(5)
root.right = Node(2)
root.right.right = Node(2)
print(check_sum_root(root))
root = Node(10)
root.left = Node(8)
root.left.left = Node(11)
root.left.right = Node(5)
root.right = Node(2)
root.right.right = Node(2)
print(check_sum_root(root))
| 919 |
src/utils/tools/testSVM.py
|
nArrow4/AutoAim
| 2 |
2023634
|
'''
@Email: <EMAIL>
@Author: <NAME>
@Github: nArrow4
@Date: 2021-09-12 11:48:08
'''
import cv2
svm = cv2.ml.SVM_load('src/utils/tools/svm_numbers.xml.bak')
import os
import numpy as np
pic_root = '/home/zhiyu/AutoAim/src/utils/data/pictures/red_armor'
classes = ["0", "1", "2", "3", "4", "5"]
data_image = []
data_label = []
for class_ in classes:
dir_ = os.path.join(pic_root, str(class_))
# print(dir_)
# data_label.extend([class_ for i in range(len(os.listdir(dir_)))])
for file in os.listdir(dir_):
# print(file)
image = cv2.imread(os.path.join(dir_, file))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
hist = cv2.equalizeHist(gray)
res = hist.flatten()
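# Keep only 32x32 crops (1024 flattened grayscale values); images of any other size are skipped.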
if len(res) == 1024:
# data_image.append(np.array(res.flatten()))
# data_label.append(np.array(class_))
data_image.append(res.flatten())
data_label.append(float(class_))
# cv2.imshow("frame", hist)
# cv2.waitKey(0)
data_image = np.array(data_image, dtype=np.float32)
data_label = np.array(data_label)
x_test = data_image
y_pred = svm.predict(x_test)[1]
y_test = data_label
# print(y_test)
# print(y_pred)
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(y_test, y_pred, target_names=classes))
print(confusion_matrix(y_test, y_pred))
| 1,380 |
model_utils.py
|
pnellesen/python_nanodegree
| 0 |
2024780
|
import numpy as np
import torch
from torch import nn
from torch import optim
from torchvision import datasets, transforms, models
import torch.nn.functional as F
import utils
# save trained model to a checkpoint for loading later
def save_model(model,arch,epochs,gpu,learnrate,save_dir,output_size):
train_data,valid_data = get_datasets()
model.class_to_idx = train_data.class_to_idx
if (arch == 'resnet18'):
classifier = model.fc
else:
classifier = model.classifier
checkpoint = {
'arch':arch,
'input_size': model.input_size,
'output_size': output_size,
'classifier_layers': [each for each in classifier],
'state_dict': model.state_dict(),
'optimizer_state_dict':model.optimizer.state_dict(),
'epoch':epochs,
'class_to_idx':model.class_to_idx,
}
torch.save(checkpoint, save_dir + '/checkpoint.pth')
return
# Train our model. This is based on training algorithm from the 1st part of the project
def train_model(arch,model,learnrate,epochs,device='cpu'):
criterion = nn.NLLLoss()
steps = 0
running_loss = 0
print_every = 40
# Only train the classifier parameters, feature parameters are frozen
if (arch == "resnet18"):
model.optimizer = optim.Adam(model.fc.parameters(), lr=learnrate)
else:
model.optimizer = optim.Adam(model.classifier.parameters(), lr=learnrate)
model.to(device)
model.train()
trainloader, validloader = get_dataloaders()
print("Begin training\ntrainloader size: {}".format(len(trainloader.dataset)))
for ep in range(epochs):
for ii, (inputs, labels) in enumerate(trainloader):
steps += 1
# Move input and label tensors to the GPU
inputs, labels = inputs.to(device), labels.to(device)
# DO NOT FORGET THIS NEXT LINE!!
model.optimizer.zero_grad()
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
model.optimizer.step()
# Validations/Checks
running_loss += loss.item()
if steps % print_every == 0:
# Make sure network is in eval mode for inference
model.eval()
# Turn off gradients for validation, saves memory and computations
with torch.no_grad():
test_loss, accuracy = validate(model, validloader, device)
print("Epoch: {}/{}.. ".format(ep+1, epochs),
"Training Loss: {:.3f}.. ".format(running_loss/print_every),
"Validation Loss: {:.3f}.. ".format(test_loss/len(validloader)),
"Validation Accuracy: {:.3f}".format(accuracy/len(validloader))
)
running_loss = 0
# Make sure training is back on
model.train()
print("Training complete.")
return model
# Based on validation code from Transfer Learning:
def validate(model, imageloader, device = 'cpu'):
test_loss = 0
accuracy = 0
criterion = nn.NLLLoss()
model.eval()
for images, labels in imageloader:
model,labels,images = model.to(device), labels.to(device), images.to(device)
output = model.forward(images)
test_loss += criterion(output, labels).item()
ps = torch.exp(output)
equality = (labels.data == ps.max(dim=1)[1])
if (device == 'cuda'):
accuracy += equality.type(torch.cuda.FloatTensor).mean()
else:
accuracy += equality.type(torch.FloatTensor).mean()
return test_loss, accuracy
def load_model(arch=None, hidden_units=256, output_size=102):
if (arch == None):
print("load_model: no architecture specified!")
return
else:
# My understanding is that the number of inputs to our custom classifier layers needs to be identical
# to the inputs to the pretrained model classifiers, so I'm pulling that from the first "in_features"
# parameter of each classifier for the various models and setting it to my "input_size" variable before
# generating my custom classifier
if (arch == "alexnet"):
model = models.alexnet(pretrained=True)
for param in model.parameters():
param.requires_grad = False
input_size=model.classifier[1].in_features
model.classifier = get_classifier_layers(input_size=input_size,hidden_units=hidden_units,output_size=output_size)
model.input_size = model.classifier[0].in_features
elif (arch == "resnet18"):
model = models.resnet18(pretrained=True)
for param in model.parameters():
param.requires_grad = False
input_size=model.fc.in_features
model.fc = get_classifier_layers(input_size=input_size,hidden_units=hidden_units,output_size=output_size)
model.input_size = model.fc[0].in_features
else:
model = models.vgg13(pretrained=True)
for param in model.parameters():
param.requires_grad = False
input_size=model.classifier[0].in_features
model.classifier = get_classifier_layers(input_size=input_size,hidden_units=hidden_units,output_size=output_size)
model.input_size = model.classifier[0].in_features
return model
def get_data_dirs(base_dir='flowers'):
train_dir = base_dir + '/train'
valid_dir = base_dir + '/valid'
test_dir = base_dir + '/test'
return train_dir, valid_dir
def get_datasets():
train_dir,valid_dir = get_data_dirs()
train_transforms, valid_transforms = get_transforms()
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
valid_data = datasets.ImageFolder(valid_dir, transform=valid_transforms)
return train_data,valid_data
def get_dataloaders():
train_dir, valid_dir = get_data_dirs()
train_data, valid_data = get_datasets()
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=32)
return trainloader, validloader
def get_transforms():
means, sds = utils.get_means_sd()
train_transforms = transforms.Compose([
transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(means, sds)
])
valid_transforms = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(means, sds)
])
return train_transforms, valid_transforms
def get_classifier_layers(input_size=25088,hidden_units=256,output_size=102,layer_count=3):
    # NOTE: output_size is the number of classes being trained for, e.g. 102 for the flower classifier
    # hidden layers are built with in/out feature counts interpolated evenly between hidden_units
    # and output_size; the total number of hidden layers is set by the "layer_count" parameter,
    # and the last hidden layer doubles as the final output layer
input_list = (np.flip(np.linspace(output_size, hidden_units, num=layer_count),axis=0)).astype(int)
# add the first input_layer. input_size should ideally be determined from
# the output size of the model architecture features (e.g. VGG has 25088 outputs)
hidden_layers = nn.ModuleList([nn.Linear(int(input_size), int(input_list[0]))])
# Add the rest of the input layers
layer_sizes = zip(input_list[:-1], input_list[1:])
for (h1, h2) in layer_sizes:
hidden_layers.extend([nn.ReLU()])
hidden_layers.extend([nn.Dropout(p=0.5)])
hidden_layers.extend([nn.Linear(int(h1), int(h2))])
# Add our softMax function
hidden_layers.extend([(nn.LogSoftmax(dim=1))])
# Convert to a nn.Sequential
# https://discuss.pytorch.org/t/append-for-nn-sequential-or-directly-converting-nn-modulelist-to-nn-sequential/7104
sequential = nn.Sequential(*hidden_layers)
return sequential
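# --- Added illustration (assumes numpy is imported as np in this module, as used above):
# the layer widths produced by the np.linspace interpolation for the default arguments.
def _example_classifier_layer_sizes():
    sizes = np.flip(np.linspace(102, 256, num=3), axis=0).astype(int)
    print(sizes)  # [256 179 102]: hidden widths shrink from hidden_units toward output_size
    classifier = get_classifier_layers(input_size=25088, hidden_units=256, output_size=102)
    print(classifier)  # nn.Sequential of Linear/ReLU/Dropout blocks ending in LogSoftmax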
| 8,443 |
databuddy/databuddy.py
|
SuryaSankar/databuddy
| 0 |
2024212
|
"""Main module."""
from contextlib import contextmanager
from datetime import datetime, timedelta
from flask_sqlalchemy_session import flask_scoped_session
import pandas as pd
from sqlalchemy import func
from sqlalchemy.orm import scoped_session, sessionmaker
from toolspy.datetime_tools import n_days_ago
from .utils.datetime_utils import tz_converted_date, tz_convert
class SqlaQueryBuilder(object):
def __init__(self, engine, flask_app=None, timedelta_mins_from_utc=0):
self.engine = engine
self.timedelta_mins_from_utc = timedelta_mins_from_utc
self.sessionmaker = sessionmaker(bind=self.engine)
self.app = flask_app
if flask_app:
self.session = flask_scoped_session(self.sessionmaker, flask_app)
else:
self.session = scoped_session(self.sessionmaker)
@contextmanager
def scoped_session(self, commit=False):
"""Provide a transactional scope around a series of operations."""
session = self.session()
try:
yield session
if commit:
session.commit()
except:
if commit:
session.rollback()
raise
finally:
session.close()
def local_time(self):
return datetime.utcnow() + timedelta(
minutes=self.timedelta_mins_from_utc)
def local_tz_converted_date(self, datetime_col):
return tz_converted_date(
datetime_col, self.timedelta_mins_from_utc)
def local_tz_convert(self, datetime_col):
return tz_convert(
datetime_col, self.timedelta_mins_from_utc)
def local_n_days_ago(self, n):
return n_days_ago(
n, timedelta_mins_from_utc=self.timedelta_mins_from_utc)
def construct_query(
self, fields_to_query, joins=None, filters=None):
q = self.session.query(*fields_to_query)
if filters:
q = q.filter(*filters)
return q
def construct_interval_query(
self, interval_field_name,
interval_field_label, interval_timestamp_format,
session=None,
fields_to_query=None, filters=None):
interval_field = func.date_format(
self.local_tz_converted_date(
interval_field_name),
interval_timestamp_format)
fields_to_query = [
interval_field.label(interval_field_label)
] + (fields_to_query or [])
        if not session:
            session = self.session
        q = session.query(*fields_to_query)
if filters:
q = q.filter(*filters)
q = q.group_by(interval_field)
# q = q.order_by(interval_field)
return q
def convert_query_to_df(self, query, index_col):
return pd.read_sql(
query, self.engine, index_col=index_col)
def convert_interval_query_to_df(
self, query, interval_field_label,
interval_timestamp_format):
return pd.read_sql(
query, self.engine,
parse_dates={
interval_field_label: interval_timestamp_format},
index_col=interval_field_label)
def construct_interval_df(
self, interval_field_name,
interval_field_label, interval_timestamp_format,
session=None,
fields_to_query=None, filters=None):
return self.convert_interval_query_to_df(
self.construct_interval_query(
interval_field_name,
interval_field_label,
interval_timestamp_format,
session=session,
fields_to_query=fields_to_query, filters=filters
).subquery(),
interval_field_label,
interval_timestamp_format
)
def construct_daily_query(
self, day_field_name, session=None,
fields_to_query=None,
filters=None, day_field_label='day'):
return self.construct_interval_query(
interval_field_name=day_field_name,
interval_field_label=day_field_label,
interval_timestamp_format='%Y-%m-%d',
session=session,
fields_to_query=fields_to_query, filters=filters
)
def convert_daily_query_to_df(
self, query, day_field_label='day'):
return self.convert_interval_query_to_df(
query, interval_field_label=day_field_label,
interval_timestamp_format='%Y-%m-%d')
def construct_daily_df(
self, day_field_name, fields_to_query=None,
session=None,
filters=None, day_field_label='day'):
return self.construct_interval_df(
interval_field_name=day_field_name,
interval_field_label=day_field_label,
interval_timestamp_format='%Y-%m-%d',
session=session,
fields_to_query=fields_to_query, filters=filters
)
def construct_monthly_query(
self, month_field_name, fields_to_query=None,
session=None, filters=None, month_field_label='month'):
return self.construct_interval_query(
interval_field_name=month_field_name,
interval_field_label=month_field_label,
interval_timestamp_format='%Y-%m',
session=session,
fields_to_query=fields_to_query, filters=filters
)
def convert_monthly_query_to_df(self, query, month_field_label='month'):
return pd.read_sql(
query, self.engine,
parse_dates={month_field_label: '%Y-%m'},
index_col=month_field_label)
def construct_monthly_df(
self, month_field_name, fields_to_query=None,
session=None,
filters=None, month_field_label='month'):
return self.convert_monthly_query_to_df(
self.construct_monthly_query(
month_field_name,
session=session,
fields_to_query=fields_to_query,
filters=filters,
month_field_label=month_field_label
).subquery(),
month_field_label=month_field_label
)
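# --- Added usage sketch (illustrative only): the connection URL and the `Order` model below
# are assumptions, and a MySQL engine is assumed because construct_interval_query relies on
# MySQL's DATE_FORMAT via func.date_format.
def _example_daily_dataframe():
    from sqlalchemy import create_engine
    engine = create_engine("mysql+pymysql://user:password@localhost/shop")  # assumed URL
    builder = SqlaQueryBuilder(engine, timedelta_mins_from_utc=330)
    with builder.scoped_session() as session:
        # A hypothetical per-day count of Order rows, indexed by the parsed 'day' column:
        # daily_orders = builder.construct_daily_df(
        #     day_field_name=Order.created_at,
        #     fields_to_query=[func.count(Order.id).label("orders")],
        #     session=session,
        # )
        pass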
| 6,203 |
GanApp/static/vendor/core/DataLoader.py
|
Hecodes98/ProyectosBlanda
| 0 |
2025421
|
from __future__ import print_function, division
import scipy
from glob import glob
import numpy as np
import scipy.misc
class DataLoader:
def __init__(self, img_resolution=(256, 256)):
self.img_resolution=img_resolution
def load_data(self):
img = self.imread("img.jpg")
#img = scipy.misc.imresize(img, self.img_resolution)
imgs=[]
imgs.append(img)
imgs = np.array(imgs)/127.5 - 1.
return imgs
def load_img(self, path):
img = self.imread(path)
img = scipy.misc.imresize(img, self.img_resolution)
img = img/127.5 - 1.
return img[np.newaxis, :, :, :]
    def imread(self, path):
        # scipy.misc.imread was removed in SciPy 1.2; this assumes an older SciPy with Pillow installed
        return scipy.misc.imread(path, mode='RGB').astype(float)
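# --- Added note (illustrative): the `/ 127.5 - 1.` arithmetic above maps 8-bit pixel values
# from [0, 255] into [-1, 1], the range GAN generators are commonly trained on.
def _example_normalization():
    pixels = np.array([0.0, 127.5, 255.0])
    print(pixels / 127.5 - 1.)  # -> [-1.  0.  1.]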
| 792 |
tiktok_dl/utils.py
|
skyme5/tiktok-dl
| 0 |
2025287
|
import re
from datetime import datetime
from loguru import logger
from requests.exceptions import InvalidURL
def format_utctime(time: int, fmt: str) -> str:
"""Format unixtimestamp to custom time format string.
Args:
time (int): unixtimestamp.
fmt (str): time format string.
Returns:
str: unixtimestamp formatted to custom fmt.
"""
return datetime.utcfromtimestamp(time).strftime(fmt)
# sentinel used to detect whether the caller explicitly supplied a default value
_NO_DEFAULT = object()
def search_regex(
    pattern, string: str, name: str, default=_NO_DEFAULT, fatal=True, flags=0, group=None
):
"""Perform a regex search on the given string, using a single or a list of patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, type(re.compile("")))):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
    elif default is not _NO_DEFAULT:
        return default
elif fatal:
raise re.error("Unable to extract %s" % name)
else:
logger.error("unable to extract {}", name)
return None
def valid_url_re():
"""TikTok URL RegExp.
Captures id of the TikTok Video.
"""
return re.compile(
r"https?://www\.tiktokv?\.com/(?:@[\w\._]+|share)/video/(?P<id>\d+)"
)
def match_id(url: str, valid_re):
"""Get id of the TikTok Video.
Args:
url (str): TikTok Video URL.
valid_re (re): Instance of re.
Raises:
InvalidURL: Given url is Invalid.
re.error: RegExp was unable to extract any id.
Returns:
str: id of the TikTok Video.
"""
m = valid_re.match(url)
if m is None:
raise InvalidURL("Url is invalid {}".format(url))
if m.group("id") is None:
raise re.error("unable to find video id {}".format(url))
return str(m.group("id"))
def try_get(src, getter, expected_type=None):
"""Getter for Object with type checking.
Args:
src (object): Object for getter.
getter (lambda): Lambda expression for getting item from Object.
expected_type (type, optional): Expected type from the getter. Defaults to None.
Returns:
expected_type: Value of getter for Object.
"""
if not isinstance(getter, (list, tuple)):
getter = [getter]
for get in getter:
try:
v = get(src)
except (AttributeError, KeyError, TypeError, IndexError):
pass
else:
if expected_type is None or isinstance(v, expected_type):
return v
def str_or_none(v, default=None):
    """Return str(v), or the default if v is None."""
    return default if v is None else str(v)
def int_or_none(v, default=None, get_attr=None):
"""Check if input is int.
Args:
v (int): Input to check.
default (type, optional): Expected type of get_attr. Defaults to None.
get_attr (getter, optional): Getter to use. Defaults to None.
Returns:
int or None: Return int if valid or None.
"""
if get_attr:
if v is not None:
v = getattr(v, get_attr, None)
if v == "":
v = None
if v is None:
return default
try:
return int(v)
except (ValueError, TypeError):
return default
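# --- Added usage sketch for the defensive getters above (the payload shape is an assumption):
def _example_getters():
    data = {"stats": {"diggCount": "1024"}}
    count = try_get(data, lambda d: d["stats"]["diggCount"], str)
    print(int_or_none(count))                   # -> 1024
    print(int_or_none(None, default=0))         # -> 0
    print(str_or_none(None, default="unknown")) # -> "unknown"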
| 3,626 |
evaluate_net.py
|
ryanreadbooks/VotingNet6DPose
| 3 |
2025077
|
from typing import List, Tuple
import numpy as np
import cv2 as cv
import torch
from torch.backends import cudnn
import torch.nn.functional as F
from nets import VotingNet
from evaluator import VoteProcedure
from utils import draw_linemod_mask_v2, draw_3d_bbox, draw_points, draw_vector_field
from datasets import Linemod, LinemodDatasetProvider
from configs import constants
from configs.configuration import regular_config
from PIL import Image
from utils.output_extractor import OutputExtractor, LinemodOutputExtractor
import torch.utils.data as Data
import torchvision.transforms as transforms
if __name__ == '__main__':
cudnn.benchmark = True
cudnn.deterministic = True
tra = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=constants.IMAGE_MEAN, std=constants.IMAGE_STD)])
# test_dataset = Linemod(constants.DATASET_PATH_ROOT, train=False, category='cat', dataset_size=1, transform=tra)
# test_dataloader = Data.DataLoader(test_dataset, batch_size=1, pin_memory=True)
linemod_dataset = Linemod(train=False, transform=tra)
test_dataloader = Data.DataLoader(linemod_dataset, batch_size=1, pin_memory=True)
# net = VotingNet()
net = VotingNet()
last_state = torch.load('/home/ryan/Codes/VotingNet6DPose/log_info/models/linemod_cat_fps_debug_epoch500_loss0.056774.pth')
net.load_state_dict(last_state)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
net.to(device)
net.eval()
print('Model loaded into {}, evaluation starts...'.format(device))
for i, data in enumerate(test_dataloader):
eval_img, mask_label, vmap_label, label, test_img_path = data
print('Mask_label shape: ', mask_label.shape) # shape (1, 480, 640)
eval_img = eval_img.to(device)
out = net(eval_img)
pred_mask: torch.Tensor = out[0]
pred_vector_map: torch.Tensor = out[1]
pred_mask, pred_vector_map = pred_mask.cpu().detach().numpy(), pred_vector_map.cpu().detach().numpy()
# find center of the mask
mask = np.where(pred_mask >= 0.5, 1, 0).astype(np.uint8)
population = np.where(mask[0][0] >= 1)[:2]
population: List[Tuple] = list(zip(population[1], population[0])) # the set of coordinates, format List[(x,y)]
center = np.asarray(population).mean(axis=0)
center_x, center_y = int(center[0]), int(center[1])
region_x = 100
region_y = 100
kps_from_img_process = list()
# visualization
region = 20
max_detected_kp = 10
for k in range(regular_config.num_keypoint):
# print(vmap_label[0][k * 2: (k + 1) * 2].shape)
# print(pred_vector_map[0][k * 2: (k + 1) * 2].shape)
gt_vmap_img = draw_vector_field(vmap_label[0][k * 2: (k + 1) * 2])
pred_vmap_img = draw_vector_field(pred_vector_map[0][k * 2: (k + 1) * 2])
#
            #    # crop out the region around the mask
# region_start_y = max(0, center_y - region_y)
# region_start_x = max(0, center_x - region_x)
# region_pred = pred_vmap_img[region_start_y: min(center_y + region_y, 480 - 1), region_start_x: min(center_x + region_x, 640 - 1)]
            #    # get the candidate keypoints
# track = cv.goodFeaturesToTrack(cv.cvtColor(region_pred, cv.COLOR_RGB2GRAY), max_detected_kp, 0.05, 10)
# try:
# kps = track.reshape(max_detected_kp, 2)
# except:
# kps = track
# pass
            #    # filter the detected keypoints
            #    # kp here is in cropped-region coordinates and still needs converting back to the original image frame
# for kp in kps:
# has_color = set()
            #        # a neighborhood of ~10 nearby pixels
# kp_x, kp_y = int(kp[0]), int(kp[1])
# neighborhood = region_pred[kp_y - region: kp_y + region, kp_x - region: kp_x + region]
            #        # check whether all 8 colors appear in the neighborhood; if so, treat it as a keypoint
# height, width = neighborhood.shape[0], neighborhood.shape[1]
# for v in range(height):
# for u in range(width):
# color = tuple(neighborhood[v, u, :])
# has_color.add(color)
# print('length of kps:', len(has_color), kp)
# if len(has_color) == 8:
# kp[0] += region_start_x
# kp[1] += region_start_y
# print(kp)
# kps_from_img_process.append(kp)
# has_color.clear()
# break
Image.fromarray(gt_vmap_img, 'RGB').save('/home/ryan/Codes/VotingNet6DPose/log_info/results/gt_vmap_{:d}.png'.format(k))
Image.fromarray(pred_vmap_img, 'RGB').save('/home/ryan/Codes/VotingNet6DPose/log_info/results/pred_vmap_{:d}.png'.format(k))
# kps_from_img_process_nparr = np.asarray(kps_from_img_process)
# print('kps_from_img_process: \n', kps_from_img_process_nparr)
        # check how large the loss is between the prediction and the ground truth
# print('pred_mask.shape', pred_mask.shape)
# print('mask_label.shape', mask_label.shape)
# print('pred_vector_map.shape', pred_vector_map.shape)
# print('vmap_label.shape', vmap_label.shape)
# mask_loss = F.cross_entropy(pred_mask, mask_label.type(dtype=torch.long).to(device))
# vector_map_loss = F.smooth_l1_loss(pred_vector_map, vmap_label.to(device))
# print('Loss: mask loss == {:.10f}, vector map loss == {:.6f}'.format(mask_loss.item(), vector_map_loss.item()))
print('==============================')
# pred_vector_map: np.ndarray = pred_vector_map.detach()
# print('Network output mask shape', pred_mask.shape)
# print('Network output vector map shape', pred_vector_map.shape)
        # per-pixel probabilities
# pred_mask = torch.softmax(pred_mask, dim=1)
        # take the argmax over channels to get the predicted class of each pixel
binary_mask = np.where(pred_mask >= 0.5, 255, 0).astype(np.uint8)
print('Binary mask shape', binary_mask.shape) # shape (480, 640)
        # binarize the mask for display
# binary_mask = torch.where(binary_mask == torch.tensor(0.0, device=device), torch.tensor(255).to(device), torch.tensor(0).to(device))
# binary_mask_np = binary_mask.cpu().detach().numpy().astype(np.uint8)
        # save the binarized mask as an image
Image.fromarray(binary_mask[0][0], 'L').save('/home/ryan/Codes/VotingNet6DPose/log_info/results/predicted_mask_cat.png')
        # draw the ground-truth mask
gt_mask_label = draw_linemod_mask_v2(mask_label.cpu().numpy()[0])
# gt_mask_label = draw_linemod_label(mask_label.cpu().numpy()[0])
gt_mask_label.save('/home/ryan/Codes/VotingNet6DPose/log_info/results/gt_mask.jpg')
        # try the voting procedure
        # take the part of the vector map belonging to the cat class, shape (18, h, w)
cat_vector_map: np.ndarray = pred_vector_map[0]
        # build a binary 0/1 mask: 0 means the pixel is background, 1 means it belongs to the object
cat_binary_mask = np.where(binary_mask == 255, 1, 0)
        # create a voting procedure
the_voting_room = VoteProcedure((480, 640))
        # run it to obtain the keypoint locations directly
pred_keypoints: np.ndarray = the_voting_room.provide_keypoints(cat_binary_mask, cat_vector_map, 0.9, True)
print('Predicted Keypoints:\n', pred_keypoints)
# draw the 3d bbox to check
        # load the original image from disk
print(test_img_path)
print(type(test_img_path))
test_img_path: str = test_img_path[0]
test_image = Image.open(test_img_path)
test_image_points = test_image.copy()
test_image.save('/home/ryan/Codes/VotingNet6DPose/log_info/results/test_original_image.jpg')
# test_image_with_boxes = draw_3d_bbox(test_image, pred_keypoints[1:], 'blue')
test_image_label_path = test_img_path.replace('JPEGImages', 'labels')
test_image_label_path = test_image_label_path.replace('jpg', 'txt')
gt_keypoints = LinemodDatasetProvider.provide_keypoints_coordinates(test_image_label_path)[1].numpy()
print('GT keypoints:\n', gt_keypoints)
# test_image_with_boxes = draw_3d_bbox(test_image_with_boxes, gt_keypoints[1:], 'green')
        # save the result
# test_image_with_boxes.save('/home/ryan/Codes/VotingNet6DPose/log_info/results/result_image.jpg')
point_image = draw_points(test_image_points, pred_keypoints, color='blue')
draw_points(point_image, gt_keypoints, color='green').save('/home/ryan/Codes/VotingNet6DPose/log_info/results/result_points_img.jpg')
if i == 0:
break
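# --- Added toy example (illustrative, not part of the evaluation script): the mask-centre
# computation used above, run on a tiny synthetic mask instead of a network prediction.
def _example_mask_center():
    toy_mask = np.zeros((5, 5), dtype=np.uint8)
    toy_mask[2:4, 1:3] = 1
    ys, xs = np.where(toy_mask >= 1)
    coords = list(zip(xs, ys))              # (x, y) pairs, as in the loop above
    print(np.asarray(coords).mean(axis=0))  # -> [1.5 2.5]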
| 8,438 |
bot/modules/roles.py
|
monkeydg/POG-bot
| 2 |
2023805
|
# @CHECK 2.0 features OK
import modules.config as cfg
from discord import Status
_roles_dict = dict()
_guild = None
def init(client):
global _guild
_guild = client.get_channel(cfg.channels["rules"]).guild
for role in cfg.roles.keys():
_roles_dict[role] = _guild.get_role(cfg.roles[role])
def is_admin(member):
""" Check if user is admin
"""
if member is None:
return False
return _roles_dict["admin"] in member.roles
def is_muted(member):
if member is None:
return False
return _roles_dict["muted"] in member.roles
async def remove_roles(p_id):
memb = _guild.get_member(p_id)
if memb is None:
return
if _roles_dict["registered"] in memb.roles:
await memb.remove_roles(_roles_dict["registered"])
if _roles_dict["notify"] in memb.roles:
await memb.remove_roles(_roles_dict["notify"])
async def role_update(player):
if player.is_timeout:
await remove_roles(player.id)
return
if player.is_away:
await remove_roles(player.id)
return
await perms_muted(False, player.id)
memb = _guild.get_member(player.id)
if memb is None:
return
if player.is_notify and memb.status not in (Status.offline, Status.dnd) and not (player.is_lobbied or player.match):
if _roles_dict["notify"] not in memb.roles:
await memb.add_roles(_roles_dict["notify"])
if _roles_dict["registered"] in memb.roles:
await memb.remove_roles(_roles_dict["registered"])
else:
if _roles_dict["registered"] not in memb.roles:
await memb.add_roles(_roles_dict["registered"])
if _roles_dict["notify"] in memb.roles:
await memb.remove_roles(_roles_dict["notify"])
async def perms_muted(value, p_id):
memb = _guild.get_member(p_id)
if memb is None:
return
channel = _guild.get_channel(cfg.channels["muted"])
if value:
over = _guild.get_channel(cfg.channels["lobby"]).overwrites_for(_roles_dict["registered"])
if memb not in channel.overwrites:
await channel.set_permissions(memb, overwrite=over)
else:
if memb in channel.overwrites:
await channel.set_permissions(memb, overwrite=None)
async def modify_match_channel(channel, view):
ov_notify = channel.overwrites_for(_roles_dict["notify"])
ov_registered = channel.overwrites_for(_roles_dict["registered"])
ov_notify.view_channel = view
ov_notify.send_messages = view
ov_registered.view_channel = view
ov_registered.send_messages = view
await channel.set_permissions(_roles_dict["notify"], overwrite=ov_notify)
await channel.set_permissions(_roles_dict["registered"], overwrite=ov_registered)
# await channel.edit(name=f"pog-match-{match.id}")
async def channel_freeze(value, id):
channel = _guild.get_channel(id)
ov_notify = channel.overwrites_for(_roles_dict["notify"])
ov_registered = channel.overwrites_for(_roles_dict["registered"])
ov_notify.send_messages = not value
ov_registered.send_messages = not value
await channel.set_permissions(_roles_dict["notify"], overwrite=ov_notify)
await channel.set_permissions(_roles_dict["registered"], overwrite=ov_registered)
| 3,274 |
pytorch/dataset_handlers/cellari_dataset.py
|
mistermoutan/ModelsGenesis
| 0 |
2024770
|
import os
import numpy as np
import matplotlib.pyplot as plt
def make_dir(dir: str):
if not os.path.exists(dir):
os.makedirs(dir)
class CellariHeartDataset:
def __init__(self, data_dir: str):
self.data_dir = data_dir
make_dir(os.path.join(data_dir, "x_cubes_full_test/"))
make_dir(os.path.join(data_dir, "y_cubes_full_test/"))
self.data_folders = os.listdir(data_dir)
self.input_folders = [
os.path.join(data_dir, i, "input_data", "input_data_raw")
for i in self.data_folders
if ("x_cubes" not in i and "y_cubes" not in i)
]
        self.target_folders = [
            os.path.join(data_dir, i, "input_data", "input_masks_annotated")
            for i in self.data_folders
            if ("x_cubes" not in i and "y_cubes" not in i)
        ]
print(self.input_folders, "\n", self.target_folders)
def make_cubes(self):
for input_folder, target_folder in zip(self.input_folders, self.target_folders):
if "heartdata4-test-dataset,165" not in input_folder:
continue
xs = os.listdir(input_folder)
ys = os.listdir(target_folder)
for idx, i in enumerate(xs):
splits = i.split("_")
a = "_".join(i for i in splits[:-1])
b = int(splits[-1][:-4]) # slice nr
xs[idx] = (a, b, i)
for idx, i in enumerate(ys):
splits = i.split("_")
a = "_".join(i for i in splits[:-1])
b = int(splits[-1][:-4])
ys[idx] = (a, b, i)
ys.sort()
xs.sort()
ys = [i[-1] for i in ys]
xs = [i[-1] for i in xs]
assert len(xs) % 12 == 0 and len(ys) % 12 == 0 and len(xs) == len(ys)
nr_cubes = int(len(xs) / 12)
print("{} CUBES".format(nr_cubes))
np_arrays_x = [[np.zeros((480, 480, 12))] for i in range(nr_cubes)]
np_arrays_y = [[np.zeros((480, 480, 12))] for i in range(nr_cubes)]
for idx, (x_, y_) in enumerate(zip(xs, ys)):
assert x_ == y_, "files have same name rapaz \n {} {}".format(x_, y_)
x = plt.imread(os.path.join(os.path.join(input_folder, x_)))
y = plt.imread(os.path.join(os.path.join(target_folder, y_)))
y = self._get_proper_slice_of_y(y, os.path.join(target_folder, y_))
np_arrays_x[idx // 12][0][:, :, idx % 12] = x
if len(np_arrays_x[idx // 12]) == 1:
np_arrays_x[idx // 12].append(x_)
np_arrays_y[idx // 12][0][:, :, idx % 12] = y
if len(np_arrays_y[idx // 12]) == 1:
np_arrays_y[idx // 12].append(y_)
# save cubes as numpy arrays
for array_name_tuple_x, array_name_tuple_y in zip(np_arrays_x, np_arrays_y):
x_array = array_name_tuple_x[0]
x_file_name = array_name_tuple_x[1]
y_array = array_name_tuple_y[0]
y_file_name = array_name_tuple_y[1]
assert y_file_name == x_file_name
split = x_file_name.split("_")
name_to_save = "_".join(i for i in split[:-2])
np.save(os.path.join(self.data_dir, "x_cubes_full_test/", "{}.npy".format(name_to_save)), x_array)
np.save(os.path.join(self.data_dir, "y_cubes_full_test/", "{}.npy".format(name_to_save)), y_array)
def _get_proper_slice_of_y(self, y_array, y_name=None):
assert y_array.shape == (480, 480, 4)
if 1 in y_array[:, :, 1]:
assert False not in (y_array[:, :, 1] == y_array[:, :, 3]), "{}".format(y_name)
assert 1 not in y_array[:, :, 0] and False not in (y_array[:, :, 0] == y_array[:, :, 2])
else:
for i in range(y_array.shape[-1] - 1):
assert False not in (y_array[:, :, i] == y_array[:, :, i + 1])
assert np.count_nonzero(y_array) == 0
return y_array[:, :, 1]
if __name__ == "__main__":
d = CellariHeartDataset("/home/moutan/Programming/thesis/ModelGenesis_Fork/ModelsGenesis/pytorch/datasets/heart_mri/datasets")
d.make_cubes()
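# --- Added toy sketch (illustrative): the cube-assembly idea used in make_cubes above --
# twelve (480, 480) slices are written into the last axis of one (480, 480, 12) volume.
def _example_stack_slices():
    slices = [np.full((480, 480), fill_value=i, dtype=np.float32) for i in range(12)]
    cube = np.zeros((480, 480, 12), dtype=np.float32)
    for idx, frame in enumerate(slices):
        cube[:, :, idx % 12] = frame
    print(cube.shape)  # -> (480, 480, 12)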
| 4,219 |
P4.py
|
red4game/python_games
| 1 |
2025504
|
import os
os.system("color A")
state = "not_finished"
X = 1
line = 6
row = 0
# define the contents of the board
# case = [["■"] * columns for _ in range(rows)]
case = [["■"] * 12 for _ in range(12)]
# game-state flag used to check whether a player has won
while state == "not_finished":
    # display the board and the column numbers
print("player", X, "choose a column")
print("")
print(" 1 2 3 4 5 6 7 ")
    # refresh the board display
print("_________________")
print("║",case[1][1], case[2][1], case[3][1], case[4][1], case[5][1], case[6][1], case[7][1], "║")
print("║",case[1][2], case[2][2], case[3][2], case[4][2], case[5][2], case[6][2], case[7][2], "║")
print("║",case[1][3], case[2][3], case[3][3], case[4][3], case[5][3], case[6][3], case[7][3], "║")
print("║",case[1][4], case[2][4], case[3][4], case[4][4], case[5][4], case[6][4], case[7][4], "║")
print("║",case[1][5], case[2][5], case[3][5], case[4][5], case[5][5], case[6][5], case[7][5], "║")
print("║",case[1][6], case[2][6], case[3][6], case[4][6], case[5][6], case[6][6], case[7][6], "║")
    # choose the column to play
print("")
print("please choose a value on your keyboard to choose a column")
play = input()
while not (play == "1" or play == "2" or play == "3" or play == "4" or play == "5" or play == "6" or play == "7"):
print("your value is incorrect please choose another")
play = input()
play = int(play)
    # drop the piece into the lowest free cell of the column
while case[play][line] == "X" or case[play][line] == "O":
line -= 1
if X == 1:
case[play][line] = "X"
if X == 2:
case[play][line] = "O"
    # check the neighboring pieces and the victory conditions for player 1
if X == 1:
for tic in range(4):
if case[play+tic][line+tic] == "X":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play-tic][line+tic] == "X":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play+tic][line-tic] == "X":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play-tic][line-tic] == "X":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play+tic][line] == "X":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play-tic][line] == "X":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play][line+tic] == "X":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play][line-tic] == "X":
row += 1
if row == 4:
state = "finished"
else:
row = 0
    # check the neighboring pieces and the victory conditions for player 2
if X == 2:
for tic in range(4):
if case[play+tic][line+tic] == "O":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play-tic][line+tic] == "O":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play+tic][line-tic] == "O":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play-tic][line-tic] == "O":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play+tic][line] == "O":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play-tic][line] == "O":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play][line+tic] == "O":
row += 1
if row == 4:
state = "finished"
else:
row = 0
for tic in range(4):
if case[play][line-tic] == "O":
row += 1
if row == 4:
state = "finished"
else:
row = 0
    # reset the row height
line = 6
    # switch to the other player
if X == 1:
X = 2
elif X == 2:
X = 1
# display the final board of the game and the winner's number.
print("_________________")
print("║",case[1][1], case[2][1], case[3][1], case[4][1], case[5][1], case[6][1], case[7][1], "║")
print("║",case[1][2], case[2][2], case[3][2], case[4][2], case[5][2], case[6][2], case[7][2], "║")
print("║",case[1][3], case[2][3], case[3][3], case[4][3], case[5][3], case[6][3], case[7][3], "║")
print("║",case[1][4], case[2][4], case[3][4], case[4][4], case[5][4], case[6][4], case[7][4], "║")
print("║",case[1][5], case[2][5], case[3][5], case[4][5], case[5][5], case[6][5], case[7][5], "║")
print("║",case[1][6], case[2][6], case[3][6], case[4][6], case[5][6], case[6][6], case[7][6], "║")
print("")
print("")
if X == 2:
print("player 1 wins")
if X == 1:
print("player 2 wins")
print("GG WP")
| 6,307 |
src/measure.py
|
Thefalas/disksMD
| 0 |
2025120
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 28 09:54:51 2018
@author: malopez
"""
import math
def distance(i, j, pos):
""" Returns the distance between two particles i, j as a numpy array """
#i = int(i)
#j = int(j)
dist = pos[i] - pos[j]
return dist
def distanceModulus(i, j, pos_X, pos_Y):
""" Measures the distance modulus between two particles i, j """
i = int(i)
j = int(j)
dist_X = pos_X[j] - pos_X[i]
dist_Y = pos_Y[j] - pos_Y[i]
dist = math.sqrt(dist_X**2 + dist_Y**2)
return dist
def relativeVelocity(i, j, vel):
""" Measures the relative velocity between two particles i, j as a numpy
array to operate with it as a vector later on """
#i = int(i)
#j = int(j)
rel_v = vel[i] - vel[j]
return rel_v
class MeasureClass():
def __init__(self, pos, vel, periodicWalls, periodicSideWalls, size_X, size_Y):
self.pos = pos
self.vel = vel
self.size_X = size_X
self.size_Y = size_Y
self.periodicWalls = periodicWalls
self.periodicSideWalls = periodicSideWalls
def distance(self, i, j):
""" Returns the distance between two particles i, j as a numpy array """
#i = int(i)
#j = int(j)
dist = self.pos[i] - self.pos[j]
return dist
def relativeVelocity(self, i, j):
""" Measures the relative velocity between two particles i, j as a numpy
array to operate with it as a vector later on """
#i = int(i)
#j = int(j)
rel_v = self.vel[i] - self.vel[j]
return rel_v
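# --- Added usage sketch (illustrative): the free functions above act on plain coordinate arrays.
def _example_measures():
    import numpy as np
    pos = np.array([[0.0, 0.0], [3.0, 4.0]])
    vel = np.array([[1.0, 0.0], [0.0, 1.0]])
    print(distance(0, 1, pos))                          # -> [-3. -4.]
    print(distanceModulus(0, 1, pos[:, 0], pos[:, 1]))  # -> 5.0
    print(relativeVelocity(0, 1, vel))                  # -> [ 1. -1.]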
| 1,604 |
main/migrations/0008_activity_hidden_from_results_and_more.py
|
cablespaghetti/running-club-challenge
| 0 |
2022721
|
# Generated by Django 4.0 on 2021-12-19 16:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0007_remove_activity_evidence_image'),
]
operations = [
migrations.AddField(
model_name='activity',
name='hidden_from_results',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='activity',
name='age_grade',
field=models.FloatField(editable=False),
),
]
| 569 |