max_stars_repo_path (string, 4-182 chars) | max_stars_repo_name (string, 6-116 chars) | max_stars_count (int64, 0-191k) | id (string, 7 chars) | content (string, 100-10k chars) | size (int64, 100-10k)
---|---|---|---|---|---|
models/roleItem.py
|
salvuswarez/tabManRedux
| 1 |
2023263
|
import logging
# setup logger for module
_log = logging.getLogger(__name__)
class Role_Item():
__role_name = str()
__name = str()
__description = str()
__permissions = list()
def __init__(self,name,permissions,dscr=""):
self.__name = name
self.__description = dscr
self.__permissions = permissions
self.__load_item()
@property
def permissions(self):
return self.__permissions
@property
def name(self):
return self.__name
@property
def description(self):
return self.__description
@description.setter
def description(self,dscr):
self.__description = dscr
def __load_item(self):
#load default settings here.
pass
def add_permission(self,permission):
self.__permissions.append(permission)
def remove_permission(self,permission):
pass
def reset_permissions(self):
pass
# def apply_to(self,recipient):
# #this will likely need to apply to either a user or a group item
# pass
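# Illustrative usage sketch (editor's assumption, not part of the original file):
#   role = Role_Item("admin", ["read", "write"], dscr="administrator role")
#   role.add_permission("delete")
#   role.permissions  # -> ["read", "write", "delete"]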
| 1,094 |
mottak-arkiv-service/app/routers/dto/BevaringOverforing.py
|
omBratteng/mottak
| 0 |
2023038
|
from pydantic import BaseModel
from app.domain.models.BevaringOverforing import BevaringOverforingStatus
class BevaringOverforing(BaseModel):
status: BevaringOverforingStatus
| 182 |
app/utils/exceptions.py
|
mgajewskik/website_scraper_api
| 0 |
2022920
|
from fastapi import HTTPException
def raise_not_found(msg: str = "Website not found."):
raise HTTPException(status_code=404, detail=msg)
def raise_unprocessable_entity(msg: str = "Unprocessable entity."):
raise HTTPException(status_code=422, detail=msg)
def raise_bad_request(msg: str = "Bad request."):
raise HTTPException(status_code=400, detail=msg)
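# Hedged usage sketch (not part of the original module): inside a FastAPI path
# operation one might write, for example,
#   if website is None:
#       raise_not_found(f"Website {website_id} not found.")
# and FastAPI turns the raised HTTPException into a JSON 404 response carrying
# the given detail message. The names website/website_id are hypothetical.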
| 371 |
Q-7/surface_area_and_volume_of_cuboids.py
|
gribja/c1pq
| 0 |
2023876
|
width=22
length=23
height=24
surface_area=2*(width*length)+2*(length*height)+2*(height*width)
print("Surface Area of the cuboid is %s" % surface_area)
volume=width*length*height
print("The volume of the cuboid is %s" % volume)
| 227 |
Stochastic_engine/stochastic_engine.py
|
romulus97/HYDROWIRES
| 0 |
2023670
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 19 09:59:48 2018
@author: YSu
"""
############################################################################
# HISTORICAL WEATHER AND STREAMFLOW ANALYSIS
# Perform statistical analysis of historical meteorological data
# Note: this script ('calculate_cov') only needs to be run once; after
# that, stochastic input generation can occur as many times as desired.
import time
starttime = time.time()
#############################################################################
#
#############################################################################
# DAILY HYDROPOWER SIMULATION
# Now specify a smaller subset of stochastic data to run (must be <= stoch years)
# FOR FORECAST MODE, THIS NEEDS TO BE > 1
sim_years = 12
# Run ORCA to get California storage dam releases
import main
main.sim(sim_years)
print('ORCA')
#
# California Hydropower model
import CA_hydropower_daily_forecast
CA_hydropower_daily_forecast.hydro(sim_years)
print('CA hydropower')
#Willamette operational model
import Willamette_launch_forecast
Willamette_launch_forecast.launch(sim_years)
print('Willamette')
#
#
# Federal Columbia River Power System Model (mass balance in Python)
import ICF_calc_new
ICF_calc_new.calc(sim_years)
import FCRPS_forecast
FCRPS_forecast.simulate(sim_years)
print('FCRPS')
#############################################################################
##
##############################################################################
### HOURLY WIND AND SOLAR POWER PRODUCTION
#
## WIND
# Specify installed capacity of wind power for each zone
PNW_cap = 6445
CAISO_cap = 4915
# Generate synthetic hourly wind power production time series for the BPA and
# CAISO zones for the entire simulation period
import wind_speed2_wind_power
wind_speed2_wind_power.wind_sim(sim_years,PNW_cap,CAISO_cap)
print('wind')
# SOLAR
# Specify installed capacity of solar power for each zone
CAISO_solar_cap = 9890
# Generate synthetic hourly solar power production time series for
# the CAISO zone for the entire simulation period
import solar_production_simulation
solar_production_simulation.solar_sim(sim_years,CAISO_solar_cap)
print('solar')
###############################################################################
##
###############################################################################
## ELECTRICITY DEMAND AND TRANSMISSION PATH FLOWS
#
## Calculate daily peak and hourly electricity demand for each zone and daily
## flows of electricity along each WECC path that exchanges electricity between
## core UC/ED model (CAISO, Mid-C markets) and other WECC zones
#
import demand_pathflows_forecast
print('paths')
###############################################################################
##
###############################################################################
## NATURAL GAS PRICES
# NOTE: NEED SCRIPT HERE TO SIMULATE STOCHASTIC NATURAL GAS PRICES
# *OR*
# ESTIMATE STATIC GAS PRICES FOR EACH ZONE
import numpy as np
ng = np.ones((sim_years*365,5))
ng[:,0] = ng[:,0]*4.47
ng[:,1] = ng[:,1]*4.47
ng[:,2] = ng[:,2]*4.66
ng[:,3] = ng[:,3]*4.66
ng[:,4] = ng[:,4]*5.13
import pandas as pd
NG = pd.DataFrame(ng)
NG.columns = ['SCE','SDGE','PGE_valley','PGE_bay','PNW']
NG.to_excel('Gas_prices/NG.xlsx')
elapsed = time.time() - starttime
print(elapsed)
#
#
#
#
| 3,353 |
Infosys Assignments/21.py
|
DivyaMaddipudi/Infosyscc
| 0 |
2022737
|
l1 = ["sofaset", "diningtable", "tvstand", "cupboard"]
print("items available are: ",l1)
l2= [20000,8500,4599,13920]
req = input("enter the furniture:")
quantity = int(input("enter quantity:"))
bill = 0
if quantity > 0 and req in l1:
l = l1.index(req)
bill = l2[l] * quantity
print(bill)
elif req not in l1:
print("invalid furniture and bill is",0)
| 371 |
setup.py
|
DAIRLab/contact-nets
| 16 |
2022901
|
from setuptools import setup, find_namespace_packages
setup(
name='contactnets',
packages=find_namespace_packages(include=['contactnets.*']),
version='0.1',
install_requires=[
'pillow',
'osqp',
'mplcursors',
'torch',
'tensorflow==1.14.0',
'tensorboard==2.1.0',
'tensorboardX==1.9',
'vpython',
'torchtestcase',
'pygame',
'pyomo',
'click',
'sk-video',
'pyopengl',
'pyopengl_accelerate',
'glfw',
'gitpython',
'psutil',
'moviepy',
'imageio',
'tabulate',
'grpcio'
])
| 659 |
aiida_zeopp/parsers/plain.py
|
yakutovicha/aiida-zeopp
| 0 |
2023548
|
# pylint: disable=useless-super-delegation
from __future__ import absolute_import
import re
import six
from six.moves import map
from six.moves import range
class KeywordParser(object):
keywords = {
'keyword1': float,
'keyword2': int,
}
@classmethod
def parse(cls, string):
""" Parse zeo++ keyword format
Example string:
keyword1: 12234.32312 keyword2: 1
parameters
----------
string: string
string with keywords
return
------
results: dict
dictionary of output values
"""
results = {}
regex = r"{}: ([\d\.]*)"
for keyword, ktype in six.iteritems(cls.keywords):
regex_rep = regex.format(re.escape(keyword))
match = re.search(regex_rep, string)
if match is None:
raise ValueError("Keyword {} not specified".format(keyword))
value = match.group(1)
if value == "":
value = 0
# uncomment this when #1 is fixed
#raise ValueError(
# "No value specified for keyword {}".format(keyword))
results[keyword] = ktype(value)
return results
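# Illustrative example (editor's sketch, not in the original file): parsing the
# docstring's sample string with the base class is expected to give
#   KeywordParser.parse("keyword1: 12234.32312 keyword2: 1")
#   -> {'keyword1': 12234.32312, 'keyword2': 1}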
@classmethod
def parse_aiida(cls, string):
from aiida.orm.data.parameter import ParameterData
return ParameterData(dict=cls.parse(string))
class PoreVolumeParser(KeywordParser):
keywords = {
'Unitcell_volume': float,
'Density': float,
'POAV_A^3': float,
'POAV_Volume_fraction': float,
'POAV_cm^3/g': float,
'PONAV_A^3': float,
'PONAV_Volume_fraction': float,
'PONAV_cm^3/g': float,
}
@classmethod
def parse(cls, string):
""" Parse zeo++ .volpo format
Example volpo string:
@ EDI.volpo Unitcell_volume: 307.484 Density: 1.62239
POAV_A^3: 131.284 POAV_Volume_fraction: 0.42696 POAV_cm^3/g: 0.263168
PONAV_A^3: 0 PONAV_Volume_fraction: 0 PONAV_cm^3/g: 0
parameters
----------
string: string
string in volpo format
return
------
results: dict
dictionary of output values
"""
return super(PoreVolumeParser, cls).parse(string)
class AVolumeParser(KeywordParser):
keywords = {
'Unitcell_volume': float,
'Density': float,
'AV_A^3': float,
'AV_Volume_fraction': float,
'AV_cm^3/g': float,
'NAV_A^3': float,
'NAV_Volume_fraction': float,
'NAV_cm^3/g': float,
}
@classmethod
def parse(cls, string):
""" Parse zeo++ .vol format
Example vol string:
@ EDI.vol Unitcell_volume: 307.484 Density: 1.62239
AV_A^3: 22.6493 AV_Volume_fraction: 0.07366 AV_cm^3/g: 0.0454022
NAV_A^3: 0 NAV_Volume_fraction: 0 NAV_cm^3/g: 0
parameters
----------
string: string
string in vol format
return
------
results: dict
dictionary of output values
"""
return super(AVolumeParser, cls).parse(string)
class SurfaceAreaParser(KeywordParser):
keywords = {
'Unitcell_volume': float,
'Density': float,
'ASA_A^2': float,
'ASA_m^2/cm^3': float,
'ASA_m^2/g': float,
'NASA_A^2': float,
'NASA_m^2/cm^3': float,
'NASA_m^2/g': float,
'Number_of_channels': int,
'Channel_surface_area_A^2': float,
'Number_of_pockets': int,
'Pocket_surface_area_A^2': float,
}
@classmethod
def parse(cls, string):
""" Parse zeo++ .sa format
Example sa string:
@ HKUST-1.sa Unitcell_volume: 18280.8 Density: 0.879097 ASA_A^2:
3545.59 ASA_m^2/cm^3: 1939.51 ASA_m^2/g: 2206.26 NASA_A^2: 0
NASA_m^2/cm^3: 0 NASA_m^2/g: 0 Number_of_channels: 1
Channel_surface_area_A^2: 3545.59 Number_of_pockets: 0
Pocket_surface_area_A^2:
parameters
----------
string: string
string in sa format
return
------
results: dict
dictionary of output values
"""
return super(SurfaceAreaParser, cls).parse(string)
class ResParser(KeywordParser):
keywords = (
'Largest_included_sphere',
'Largest_free_sphere',
'Largest_included_free_sphere',
)
@classmethod
def parse(cls, string):
""" Parse zeo++ .res format
Example res string:
HKUST-1.res 13.19937 6.74621 13.19937
Containing the diameters of
* the largest included sphere
* the largest free sphere
* the largest included sphere along free sphere path
parameters
----------
string: string
string in res format
return
------
res: dict
dictionary of output values
"""
res = {}
values = string.split()
if len(values) != 4:
raise ValueError("Found more than 4 fields in .res format")
for i in (0, 1, 2):
res[cls.keywords[i]] = float(values[i + 1])
return res
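# Illustrative example (editor's sketch, not in the original file): the sample
# .res line from the docstring above would parse as
#   ResParser.parse("HKUST-1.res 13.19937 6.74621 13.19937")
#   -> {'Largest_included_sphere': 13.19937,
#       'Largest_free_sphere': 6.74621,
#       'Largest_included_free_sphere': 13.19937}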
class ChannelParser(object):
@classmethod
def parse(cls, string): # pylint: disable=too-many-locals
""" Parse zeo++ .chan format
Example chan string::
P8bal_P1.chan 2 channels identified of dimensionality 3 3
Channel 0 9.92223 3.85084 9.92223
Channel 1 9.92222 3.85084 9.92222
P8bal_P1.chan summary(Max_of_columns_above) 9.92223 3.85084 9.92223 probe_rad: 1.8 probe_diam: 3.6
parameters
----------
string: string
string in chan format
return
------
results: list
dictionary of output values
"""
lines = string.splitlines()
# remove empty lines
lines = [l for l in lines if l.strip()]
nlines = len(lines)
# parse header line
match = re.search(
r'(\d+) channels identified of dimensionality([\d\s]*)', lines[0])
if not match:
raise ValueError(
"The following string was not recognized as a valid header of the .chan format:\n"
+ lines[0])
nchannels = int(match.group(1))
if nchannels == 0:
dimensionalities = []
else:
dimensionalities = list(map(int, match.group(2).split()))
if nchannels != len(dimensionalities):
raise ValueError(
"Number of channels {} does not match number of dimensionalities {}"
.format(nchannels, len(dimensionalities)))
if nchannels != nlines - 2:
raise ValueError(
"Number of lines in file {} does not equal number of channels {}+2"
.format(nlines, nchannels))
# parse remaining lines (last line is discarded)
dis, dfs, difs = [], [], []
for i in range(1, nchannels + 1):
_c, _i, di, df, dif = lines[i].split()
dis.append(float(di))
dfs.append(float(df))
difs.append(float(dif))
pm_dict = {
'Channels': {
'Largest_included_spheres': dis,
'Largest_free_spheres': dfs,
'Largest_included_free_spheres': difs,
'Dimensionalities': dimensionalities,
}
}
return pm_dict
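# Illustrative example (editor's sketch, not in the original file): for the
# two-channel .chan sample in the docstring above, parse() would return
#   {'Channels': {'Largest_included_spheres': [9.92223, 9.92222],
#                 'Largest_free_spheres': [3.85084, 3.85084],
#                 'Largest_included_free_spheres': [9.92223, 9.92222],
#                 'Dimensionalities': [3, 3]}}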
@classmethod
def parse_aiida(cls, string):
from aiida.orm.data.parameter import ParameterData
return ParameterData(dict=cls.parse(string))
| 7,774 |
src/perf/data_pipe_perf/py_trio_queue_perf.py
|
random-python/data_pipe
| 14 |
2022966
|
"""
"""
import gc
import time
import trio
count = int(1e5) # number of objects to transfer
async def buffer_perf():
gc.collect() # start with clean memory
source = [index for index in range(count)] # pre-allocate data source
target = [None for index in range(count)] # pre-allocate data target
async def producer(writer):
async with writer:
for value in source:
await writer.send(value)
async def consumer(reader):
async with reader:
index = 0
async for value in reader:
target[index] = value
index += 1
async def transfer():
async with trio.open_nursery() as nursery:
writer, reader = trio.open_memory_channel(256)
nursery.start_soon(producer, writer)
nursery.start_soon(consumer, reader)
time_start = time.time()
await transfer()
time_finish = time.time()
time_diff = time_finish - time_start
assert source == target # verify data integrity
return time_diff
def invoke_perf(session_size:int=3):
for session in range(session_size):
print(f"--- session={session} ---")
time_diff = trio.run(buffer_perf) # total test time
time_unit = int(1e6 * time_diff / count) # per-unit test time, microseconds
print(f"count={count} time_diff={time_diff:.3f} time_unit={time_unit} micro")
invoke_perf()
| 1,440 |
src/linux_things/media_server/rename_files.py
|
gmihaila/raspberry_projects
| 0 |
2023438
|
"""
Need to rename and structure files in the following order:
/TV Shows
/My Show
/Season 01
My Show - s01e01.format
My Show - s01e02-03.format
Run:
python src/linux_things/media_server/rename_files.py --show_name 'My Show' --target_path 'where to move' --use_path 'where is the show' --dry_run
"""
import os
import re
import argparse
import warnings
from utils import move_folder
def main():
parser = argparse.ArgumentParser(description='Description')
parser.add_argument('--show_name', help='Name of the TV Show.', type=str)
parser.add_argument('--target_path', help='Where to move the renamed files.', type=str)
parser.add_argument('--use_path', help='Where to find the files that need to be renamed.', type=str)
parser.add_argument('--dry_run', help='Perform dry run before moving files.', default=False, action='store_true')
# parse arguments
args = parser.parse_args()
print(args)
# Add show name to target path
target_path = os.path.join(args.target_path, args.show_name)
os.mkdir(target_path) if not os.path.isdir(target_path) else print(
f"Folder '{args.show_name}' already exists in '{target_path}'")
# Deal with each season.
folder_content = os.listdir(args.use_path)
# Keep only folders and not files.
all_folders = [folder for folder in folder_content if os.path.isdir(os.path.join(args.use_path, folder))]
for folder in all_folders:
folder_path = os.path.join(args.use_path, folder)
season = re.search(r'[sS]\d{1,2}', os.path.basename(folder_path)).group()
season = re.sub('[sS]', '', season).lstrip('0')
season = f'0{season}' if len(season) < 2 else season
print(f"For season '{season}'")
addon_target_path = f'Season {season}'
seaon_target_path = os.path.join(target_path, addon_target_path)
os.mkdir(seaon_target_path) if not os.path.isdir(seaon_target_path) else print(
f"Folder '{addon_target_path}' already existst in '{target_path}'")
files = os.listdir(folder_path)
# import pdb; pdb.set_trace()
move_folder(files, args.show_name, season, folder_path, seaon_target_path, args.dry_run)
if args.dry_run:
warnings.warn("This was a dry run! No files were moved yet! Don't use --dry_run in order to move files!",
Warning)
if __name__ == '__main__':
main()
| 2,430 |
excel/liangai/mod2017.py
|
ty68/alped
| 0 |
2023511
|
from xlutils.copy import copy
import xlrd
# Open an Excel workbook and assign it to book; book is an opened workbook object
book = xlrd.open_workbook('2017.xlsx')
# Use xlutils.copy to copy book into newbook
newbook = copy(book)
# Get the first sheet from the original book
orisheet = book.sheet_by_index(0)
# Get the corresponding sheet from the copied book
sheet = newbook.get_sheet(0)
# Get the number of rows and columns from the original sheet
rows = orisheet.nrows
cols = orisheet.ncols
print('This sheet has {0} rows and {1} columns'.format(rows,cols))
#
# Loop over the rows and modify the data in each one
for row in range(1,29996):
# Read the value of the specified cell from the original sheet via .value
celldata = orisheet.cell(row, 1).value
# celldata is a str; use the string method replace to mask the specified substring
newdata = celldata.replace(celldata[6:],'*'* len(celldata[6:]))
# Write newdata into the specified cell of the sheet in the copied book
sheet.write(row, 1, newdata)
# Save the data written to the sheet
newbook.save('new2017.xls')
print('Row {1}: old ID-card value {2}, new data inserted: {0}'.format(newdata,row,celldata))
| 815 |
post_it/migrations/0004_remove_session_session_id.py
|
constantin-kuehne/IdeaPostProduction
| 0 |
2023252
|
# Generated by Django 3.1.2 on 2020-11-28 18:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('post_it', '0003_post_it_zindex'),
]
operations = [
migrations.RemoveField(
model_name='session',
name='session_id',
),
]
| 329 |
example.py
|
kiok46/swipetodelete
| 18 |
2023198
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.scrollview import ScrollView
from kivy.properties import ObjectProperty,ListProperty,NumericProperty,BooleanProperty
from kivy.garden.swipetodelete import SwipeBehavior
from kivy.lang import Builder
kvdemo='''
<DragWidget>:
size_hint: (None,None)
size: (625,100)
swipe_rectangle: self.x, self.y , self.width, self.height
swipe_timeout: 1000000
swipe_distance: 1
spacing:5
BoxLayout:
orientation: "horizontal"
spacing:5
Label:
size_hint: .4,1
text: "Timing :"
font_size: 16
canvas.before:
Color:
rgba: 0.398,.310,.562,1
Rectangle:
size: self.size
pos: self.pos
Spinner:
background_color: 0.398,.310,.562,1
font_size: 16
text: 'Select'
values: {'08:00 AM','09:00 AM','10:00 AM','11:00 AM'}
Button:
id: btn
text: "Ok"
background_color: 0.398,.310,.562,1
size_hint: .2,1
<SwipeToDeleteContainer>:
layout_container: layout_container
size_hint: (None, None)
size: (675, 520)
pos_hint: {'x':.065,'y':.065}
do_scroll_x: False
bar_width:'5dp'
GridLayout:
cols: 1
padding: 20
spacing: 20
canvas.before:
Color:
rgba: 0.933,.956,.956,1
Rectangle:
pos: self.pos
size: self.size
size_hint_y: None
id: layout_container
Button:
markup: True
text: "[color=000000]Swipe To Delete (Click Me!)[/color]"
font_size: 32
size_hint: (None,None)
size: (625,80)
on_release: root.add_new()
DragWidget:
left_percentage: 10
right_percentage: 10
DragWidget:
left_percentage: 70
right_percentage: 70
DragWidget:
animation_type: 'in_bounce'
animation_duration: 2
DragWidget:
remove_from_right: False
remove_from_left: False
DragWidget:
'''
class DragWidget(SwipeBehavior,BoxLayout):
def on_touch_down(self, touch):
if self.collide_point(touch.x, touch.y):
self.move_to = self.x,self.y
return super(DragWidget, self).on_touch_down(touch)
def on_touch_move(self, touch):
if self.collide_point(touch.x, touch.y):
self.reduce_opacity()
return super(DragWidget, self).on_touch_move(touch)
def on_touch_up(self, touch):
if self.collide_point(touch.x, touch.y):
self.check_for_left()
self.check_for_right()
return super(DragWidget, self).on_touch_up(touch)
class SwipeToDeleteContainer(ScrollView):
layout_container = ObjectProperty(None)
'''The container which contains dragable widgets.
:attr:`layout_container` is a :class:`~kivy.properties.ObjectProperty`,
defaults to None.
'''
def __init__(self,**kwargs):
super(SwipeToDeleteContainer,self).__init__(**kwargs)
self.layout_container.bind(minimum_height=self.layout_container.setter('height'))
def add_new(self):
self.ids.layout_container.add_widget(DragWidget())
class MainApp(App):
def build(self):
Builder.load_string(kvdemo)
return SwipeToDeleteContainer()
if __name__ == '__main__':
app = MainApp()
app.run()
| 3,650 |
sanic_restplus/cors.py
|
alex-ip/sanic-restplus
| 0 |
2023832
|
# -*- coding: utf-8 -*-
#
from asyncio import iscoroutinefunction
from datetime import timedelta
from functools import update_wrapper
from sanic.constants import HTTP_METHODS
from sanic.request import Request as sanic_request
from sanic.response import HTTPResponse
from sanic.views import HTTPMethodView
def crossdomain(origin=None, methods=None, headers=None, expose_headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True, credentials=False):
if methods is not None:
m = list(sorted(x.upper() for x in methods))
if 'OPTIONS' not in m:
m.append('OPTIONS')
methods = ', '.join(m)
if headers is not None and not isinstance(headers, str):
headers = ', '.join(x.upper() for x in headers)
if expose_headers is not None and not isinstance(expose_headers, str):
expose_headers = ', '.join(x.upper() for x in expose_headers)
if not isinstance(origin, str):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
# Todo:
# This is wrong for now, we need a way to find
# only the methods the httpmethodview contains
return ', '.join(HTTP_METHODS)
def decorator(f):
async def wrapped_function(*args, **kwargs):
orig_args = list(args)
args_len = len(orig_args)
rt = RuntimeError("Must only use crossdomain decorator on a function that takes 'request' as "
"first or second argument")
if args_len < 1:
#weird, no args
raise rt
elif args_len < 2:
request = orig_args.pop(0)
args = (request,)
else:
next_arg = orig_args.pop(0)
args = list()
if isinstance(next_arg, HTTPMethodView) or issubclass(next_arg, HTTPMethodView):
args.append(next_arg) # self or cls
next_arg = orig_args.pop(0)
request = next_arg
args.append(request)
args.extend(orig_args)
args = tuple(args)
if not isinstance(request, sanic_request):
raise rt
do_await = iscoroutinefunction(f)
if automatic_options and request.method == 'OPTIONS':
resp = HTTPResponse()
else:
resp = f(*args, **kwargs)
if do_await:
resp = await resp
if isinstance(resp, str):
resp = HTTPResponse(resp)
elif isinstance(resp, tuple):
if len(resp) < 2:
resp = HTTPResponse(resp[0])
elif len(resp) < 3:
resp = HTTPResponse(resp[0], status=resp[1])
else:
resp = HTTPResponse(resp[0], status=resp[1], headers=resp[2])
if not isinstance(resp, HTTPResponse):
raise RuntimeError("crossorigin wrapper did not get a valid response from the wrapped function")
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if credentials:
h['Access-Control-Allow-Credentials'] = 'true'
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
if expose_headers is not None:
h['Access-Control-Expose-Headers'] = expose_headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
| 3,950 |
tools/specification_parser/lint_json_output.py
|
open-feature/spec
| 16 |
2022735
|
from os.path import curdir, abspath, join, splitext
from os import walk
import json
import sys
def main(dir):
jsons = []
for root_path, _, file_paths, in walk(dir):
for file_path in file_paths:
absolute_file_path = join(root_path, file_path)
_, file_extension = splitext(absolute_file_path)
if file_extension == ".json":
jsons.append(absolute_file_path)
errors = 0
for j in jsons:
with open(j) as jfile:
spec = json.load(jfile)
entries = spec  # entries aliases spec, so appending children below also walks nested children
for entry in spec:
for child in entry.get('children', []):
entries.append(child)
try:
for entry in entries:
if entry.get('RFC 2119 keyword') is None and \
'condition' not in entry['id'].lower():
print(f"{j}: Rule {entry['id']} is missing a RFC 2119 keyword", file=sys.stderr)
errors += 1
except Exception as k:
print(f"Non json-spec formatted file found: {j}", file=sys.stderr)
sys.exit(errors)
def has_errors(entry):
pass
if __name__ == '__main__':
main(sys.argv[1])
| 1,259 |
orc8r/gateway/python/magma/ctraced/trace_manager.py
|
chandra-77/magma
| 1 |
2023788
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import errno
import logging
import os
import pathlib
import subprocess
import time
from typing import List
from subprocess import SubprocessError
from .command_builder import get_trace_builder
from collections import namedtuple
_TRACE_FILE_NAME = "call_trace"
_TRACE_FILE_NAME_POSTPROCESSED = "call_trace_postprocessed"
_TRACE_FILE_EXT = "pcapng"
_MAX_FILESIZE = 4000 # ~ 4 MiB for a trace
_POSTPROCESSING_TIMEOUT = 10 # 10 seconds for TShark to apply display filters
EndTraceResult = namedtuple('EndTraceResult', ['success', 'data'])
class TraceManager:
"""
TraceManager is a wrapper for TShark specifically for starting and
stopping call/interface/subscriber traces.
Only a single trace can be captured at a time.
"""
def __init__(self, config):
self._is_active = False # is call trace being captured
self._proc = None
self._trace_directory = config.get("trace_directory",
"/var/opt/magma/trace") # type: str
# Specify southbound interfaces
self._trace_interfaces = config.get("trace_interfaces",
["eth0"]) # type: List[str]
# Should specify absolute path of trace filename if trace is active
self._trace_filename = "" # type: str
self._trace_filename_postprocessed = "" # type: str
# TShark display filters are saved to postprocess packet capture files
self._display_filters = "" # type: str
self._tool_name = config.get("trace_tool", "tshark") # type: str
self._trace_builder = get_trace_builder(self._tool_name)
def start_trace(
self,
capture_filters: str,
display_filters: str,
) -> bool:
"""Start a call trace.
Note:
The output file location is appended to the custom run options,
matching trace_directory in ctraced.yml
Args:
capture_filters: Capture filters for running TShark.
Equivalent to the -f option of TShark.
Syntax based on BPF (Berkeley Packet Filter)
display_filters: Display filters for running TShark.
Equivalent to the -Y option of TShark.
Returns:
True if successfully started call trace
"""
if self._is_active:
logging.error("TraceManager: Failed to start trace: "
"Trace already active")
return False
self._build_trace_filename()
command = self._trace_builder.build_trace_command(
self._trace_interfaces,
_MAX_FILESIZE,
self._trace_filename,
capture_filters,
)
self._display_filters = display_filters
return self._execute_start_trace_command(command)
def end_trace(self) -> EndTraceResult:
"""Ends call trace, if currently active.
Returns:
success: True if call trace finished without issue
data: Call trace file in bytes
"""
# If trace is active, then stop it
if self._is_active:
stopped = self._stop_trace()
if not stopped:
return EndTraceResult(False, None)
while True:
if self._ensure_trace_file_exists():
logging.info("TraceManager: Trace file written!")
break
logging.info("TraceManager: Waiting 1s for trace file to be "
"written...")
time.sleep(1)
# Perform postprocessing of capture file with TShark display filters
if len(self._display_filters) > 0:
succeeded = self._postprocess_trace()
if not succeeded:
return EndTraceResult(False, None)
# Read trace data into bytes
with open(self._trace_filename_postprocessed, "rb") as trace_file:
data = trace_file.read() # type: bytes
else:
# Read trace data into bytes
with open(self._trace_filename, "rb") as trace_file:
data = trace_file.read() # type: bytes
# Ensure the tmp trace file is deleted
self._ensure_tmp_file_deleted()
self._trace_filename = ""
self._is_active = False
logging.info("TraceManager: Call trace has ended")
# Everything cleaned up, return bytes
return EndTraceResult(True, data)
def _stop_trace(self) -> bool:
# poll() refreshes returncode; it stays None while the tracing process is still running
self._proc.poll()
return_code = self._proc.returncode
if self._proc.returncode is None:
logging.info("TraceManager: Ending call trace")
self._proc.terminate()
else:
logging.info("TraceManager: Tracing process return code: %s",
return_code)
logging.debug("TraceManager: Trace logs:")
logging.debug("<" * 25)
while True:
line = self._proc.stdout.readline()
if not line:
break
logging.debug("| %s", str(line.rstrip()))
logging.debug(">" * 25)
if return_code is not None:
self._is_active = False
return False
return True
def _postprocess_trace(self) -> bool:
command = self._trace_builder.build_postprocess_command(
self._trace_filename,
self._display_filters,
self._trace_filename_postprocessed
)
logging.info("TraceManager: Starting postprocess, command: [%s]",
' '.join(command))
try:
self._proc = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self._is_active = False
logging.error("TraceManager: Failed to postprocess trace: %s",
str(e))
return False
self._proc.wait()
logging.debug("<" * 25)
while True:
line = self._proc.stdout.readline()
if not line:
break
logging.debug("| %s", str(line.rstrip()))
logging.debug(">" * 25)
logging.info("TraceManager: Finished postprocess")
return True
def _build_trace_filename(self):
# Example filename path:
# /var/opt/magma/trace/call_trace_1607358641.pcapng
self._trace_filename = "{0}/{1}_{2}.{3}".format(
self._trace_directory,
_TRACE_FILE_NAME,
int(time.time()),
_TRACE_FILE_EXT)
self._trace_filename_postprocessed = "{0}/{1}_{2}.{3}".format(
self._trace_directory,
_TRACE_FILE_NAME_POSTPROCESSED,
int(time.time()),
_TRACE_FILE_EXT)
def _execute_start_trace_command(self, command: List[str]) -> bool:
"""Executes a command to start a call trace
Args:
command: Shell command with each token ordered in the list.
example: ["tshark", "-i", "eth0"] would be for "tshark -i eth0"
Returns:
True if successfully executed command
"""
logging.info("TraceManager: Starting trace with %s, command: [%s]",
self._tool_name, ' '.join(command))
self._ensure_trace_directory_exists()
# TODO(andreilee): Handle edge case where only one instance of the
# process can be running, and may have been started
# by something external as well.
try:
self._proc = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except SubprocessError as e:
logging.error("TraceManager: Failed to start trace: %s", str(e))
return False
self._is_active = True
logging.info("TraceManager: Successfully started trace with %s",
self._tool_name)
return True
def _ensure_trace_file_exists(self) -> bool:
return os.path.isfile(self._trace_filename)
def _ensure_tmp_file_deleted(self):
"""Ensure that tmp trace file is deleted.
Uses exception handling rather than a check for file existence to avoid
TOCTTOU bug
"""
try:
os.remove(self._trace_filename)
if os.path.isfile(self._trace_filename_postprocessed):
os.remove(self._trace_filename_postprocessed)
except OSError as e:
if e.errno != errno.ENOENT:
logging.error("TraceManager: Error when deleting tmp trace "
"file: %s", str(e))
def _ensure_trace_directory_exists(self) -> None:
pathlib.Path(self._trace_directory).mkdir(parents=True, exist_ok=True)
| 9,427 |
olympe-final/usr/lib/python3.8/site-packages/olympe/app.py
|
mbyzhang/olympe-ros-docker
| 73 |
2023254
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (C) 2019 Parrot Drones SAS
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Parrot Company nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# PARROT COMPANY BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from __future__ import unicode_literals
from __future__ import print_function
import argparse
import os
import sys
import olympe
from sphinx.cmd.build import main as sphinx_build
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'-v', '--version',
action="store_true",
help=u'Displays version'
)
parser.add_argument(
'--gendoc',
dest="doc_out_directory",
help="Generate olympe documentation"
)
parser.add_argument(
'--gendoc_context_path',
dest="doc_context",
help="Documentation context path"
)
ns = parser.parse_args()
args = vars(ns)
if args['doc_out_directory']:
cmd = ["-b", "html"]
if args["doc_context"]:
cmd += ["-D", "custom_html_context_path={}".format(args["doc_context"])]
cmd += ["{}/doc".format(os.path.dirname(olympe.__file__))]
cmd += [args['doc_out_directory']]
sys.exit(sphinx_build(cmd))
if 'version' in args and args['version']:
print(olympe.VERSION_STRING)
sys.exit(0)
| 2,699 |
loss.py
|
shirsenh/SRResGAN-Improved-Perceptual
| 0 |
2023184
|
import torch
import torch.nn as nn
from torch.autograd import Variable
class GeneratorLoss(nn.Module):
def __init__(self, vgg_network, writer, steps):
super(GeneratorLoss, self).__init__()
self.vgg_network = vgg_network
# self.dis_network = dis_network
self.writer = writer
self.steps = steps
self.mse_loss = nn.MSELoss().cuda()
self.bce_loss = nn.BCELoss().cuda()
self.huber_loss = nn.SmoothL1Loss().cuda()
def forward(self, out_labels, out_images, target_images, opt):
# self.steps += out_images.shape[0]
# print("Image loss: {}".format(image_loss.item()))
overall_loss = 0
self.ones_const = Variable(torch.ones(out_images.size()[0])).cuda()
image_loss = self.huber_loss(out_images, target_images)
self.writer.add_scalar("Image Loss", image_loss, self.steps)
overall_loss += opt.mse_loss_coefficient * image_loss
if opt.adversarial_loss:
adversarial_loss = self.bce_loss(out_labels, self.ones_const)
self.writer.add_scalar("Gen Adversarial Loss", adversarial_loss, self.steps)
overall_loss += opt.adversarial_loss_coefficient*adversarial_loss
if opt.vgg_loss:
vgg_perception_loss = self.mse_loss(self.vgg_network(out_images), self.vgg_network(target_images))
self.writer.add_scalar("VGG Perception Loss", vgg_perception_loss, self.steps)
overall_loss += opt.vgg_loss_coefficient*vgg_perception_loss
return overall_loss
| 1,555 |
_site/MCAT_pkg/MCAT_pkg/model_assessment.py
|
rjeeda/rjeeda.github.io
| 0 |
2023889
|
import numpy as np
import pandas as pd
import scipy
import scipy.stats as st
import bebi103
import bokeh.io
def ecdf(x, data):
"""Give the value of an ECDF at arbitrary points x
Parameters
__________
x : array
points to calculate ECDF for
data : array
input data to generate the ECDF based on
Returns
__________
output : array of ECDF values for each point in x
"""
y = np.arange(len(data) + 1) / len(data)
return y[np.searchsorted(np.sort(data), x, side="right")]
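# Worked example (editor's sketch, not part of the original file): for
# data = [1, 2, 3], y is [0, 1/3, 2/3, 1] and searchsorted(sorted data, 2,
# side="right") is 2, so ecdf(2, [1, 2, 3]) evaluates to 2/3.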
def AIC(params, log_likelihood_fun, data):
"""Calculates the AIC (akaike criterion)
Parameters
_________
params : tuple
MLE parameters for distribution
log_likelihood_fun : function
calculates the log likelihood for the desired distribution
data : array
empirical dataset to calculate log likelihood with respect to
Returns
_________
output : float
AIC value
"""
L = log_likelihood_fun(params, data)
return -2*(L) + 2*len(params)
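# Worked example (editor's sketch, not part of the original file): for a model
# with two fitted parameters and a log likelihood of -100 at the MLE,
#   AIC = -2*(-100) + 2*2 = 204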
def predictive_ecdf(data, gen_function, params, size = 1000, title = None, xlabel = "Time to Catastrophe (s)",
color = "blue", data_color = "gray"):
""" Compares ECDF of theoretical distribution to experimental
Parameters
__________
data : array
input data array
gen_function : function
generative function to sample from
params : tuple
parameters to use for generative function
size : int (optional), default = 1000
number of samples to draw from the generative distribution
title : str (optional)
if given, used as the plot title
xlabel : str (optional), default = "Time to Catastrophe (s)"
label for the x-axis
color : str (optional), default = "blue"
color of the theoretical (predictive) ECDF
data_color : str (optional), default = "gray"
color of the empirical data ECDF
"""
single_samples = np.array([gen_function(*params, size = len(data))
for _ in range (size)])
n_theor = np.arange(0, single_samples.max() + 1)
p = bebi103.viz.predictive_ecdf(
samples = single_samples,
data = data,
discrete = True,
color = color,
data_color=data_color,
x_axis_label=xlabel,
y_axis_label="ECDF",
)
if(title != None):
p.title.text = title
return p
def QQ_plot(data, gen_function, params, size = 1000, axis_label = None, title = None, color = "green"):
""" creates a QQ_plot comparing the empirical and theoretical value
Parameters
__________
data : array
input data
gen_function : function
function to generate points from the desired distribution
params : tuple
MLE parameters
size : int (optional), default = 1000
number of samples to generate from the generative distribution
axis_label : string (optional)
if given, axis_label is used as the label for both axes of the returned
plot
title : string (optional)
if given, used as the title for the returned plot
color : str (optional), default = "green"
if given, used as the patch and line color in bebi103.viz.qqplot
Returns
________
output : p (bokeh figure)
"""
single_samples = np.array([gen_function(*params, size = len(data))
for _ in range (size)])
p = bebi103.viz.qqplot(
data=data,
samples=single_samples,
patch_kwargs = {"color":color},
line_kwargs = {"color":color}
)
if(axis_label != None):
p.xaxis.axis_label = axis_label
p.yaxis.axis_label = axis_label
if(title != None):
p.title.text = title
return p
| 3,444 |
codes/models/layers/subpixel.py
|
ishine/AFILM
| 12 |
2024140
|
import tensorflow as tf
def SubPixel1D(I, r):
"""One-dimensional subpixel upsampling layer
Calls a tensorflow function that directly implements this functionality.
We assume input has dim (batch, width, r)
"""
with tf.name_scope('subpixel'):
X = tf.transpose(I, [2,1,0]) # (r, w, b)
X = tf.compat.v1.batch_to_space_nd(X, [r], [[0,0]]) # (1, r*w, b)
X = tf.transpose(X, [2,1,0])
return X
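# Shape sketch (editor's note, not in the original file): for an input tensor I
# of shape (batch, width, r), SubPixel1D(I, r) returns a tensor of shape
# (batch, r*width, 1), i.e. the r channels are interleaved along the width axis.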
| 426 |
workspace/core/settings.py
|
jacksmith15/workspace-cli
| 2 |
2023147
|
from __future__ import annotations
import os
from dataclasses import dataclass
from functools import lru_cache
@dataclass
class Settings:
filename: str = "workspace.json"
@classmethod
def from_env(cls):
env_prefix = "WORKSPACE_"
params = {
name: os.environ[env_prefix + name.upper()]
for name, field in cls.__dataclass_fields__.items()
if env_prefix + name.upper() in os.environ and field.init
}
return cls(**params)
@lru_cache(maxsize=None)
def get_settings() -> Settings:
return Settings.from_env()
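# Illustrative usage (editor's sketch, not in the original file): with the
# environment variable WORKSPACE_FILENAME=custom.json set,
#   Settings.from_env()  # -> Settings(filename="custom.json")
#   get_settings()       # same result, cached for subsequent calls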
| 591 |
empower_vbs_emulator/main.py
|
5g-empower/empower-vbs-emulator
| 0 |
2023697
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Main module."""
import sys
import json
from argparse import ArgumentParser
import tornado.ioloop
from empower_vbs_emulator.vbs import VBS
def main():
"""Main module."""
# parse arguments
parser = ArgumentParser()
parser.add_argument("-a", "--address",
dest="address",
type=str,
default="127.0.0.1",
help="The 5G-EmPOWER runtime address")
parser.add_argument("-p", "--port",
dest="port",
type=int,
default=5533,
help="The 5G-EmPOWER runtime port")
parser.add_argument("-s", "--scenario",
dest="scenario",
type=str,
required=True,
help="Path to JSON file describing the scenario")
parsed, _ = parser.parse_known_args(sys.argv[1:])
scenario = {}
# load json
with open(parsed.scenario) as json_file:
scenario = json.load(json_file)
# instantiate VBS with configuration scenario
vbs = VBS(address=parsed.address, port=parsed.port, scenario=scenario)
# start VBS
vbs.start()
# start tornado loop
tornado.ioloop.IOLoop.instance().start()
| 1,911 |
main.py
|
sumitsk/HER
| 4 |
2024104
|
import torch
import os
import numpy as np
import datetime
from copy import deepcopy
import utils
from arguments import get_args
from learner import Learner
from policy import Policy
import logger
from tensorboard_logger import configure, log_value
if __name__ == '__main__':
args = get_args()
logid = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f") if args.logid is None else str(args.logid)
logdir = os.path.join('save', logid)
utils.check_logdir(logdir)
logger.configure(logdir)
configure(logdir)
params = vars(args)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
params['device'] = device
params['cached_env'] = utils.get_cached_env(params['env_name'])
policy = Policy(params)
train_envs = utils.make_parallel_envs(params['env_name'], params['seed'], params['num_processes'])
trainer_params = deepcopy(params)
trainer_params['envs'] = train_envs
trainer_params['exploit'] = False
trainer = Learner(policy, trainer_params)
eval_seed = np.random.randint(0, 100)
eval_num_processes = params['num_processes']
eval_envs = utils.make_parallel_envs(params['env_name'], eval_seed, eval_num_processes)
evaluator_params = deepcopy(params)
evaluator_params['num_processes'] = eval_num_processes
evaluator_params['envs'] = eval_envs
evaluator_params['exploit'] = True
evaluator = Learner(policy, evaluator_params)
n_test_rollouts = 10
best_success_rate = -1
latest_policy_path = os.path.join(logger.get_dir(), 'policy_latest.pt')
best_policy_path = os.path.join(logger.get_dir(), 'policy_best.pt')
periodic_policy_path = os.path.join(logger.get_dir(), 'policy_{}.pt')
n_batches = trainer_params['n_batches']*trainer_params['num_processes']
for epoch in range(params['n_epochs']):
trainer.clear_history()
policy.set_train_mode()
for i in range(params['n_cycles']):
episode = trainer.generate_rollouts()
policy.store_episode(episode)
for _ in range(n_batches):
critic_loss, policy_loss = policy.train()
step = epoch*params['n_cycles']+i
log_value('critic_loss', critic_loss, step)
log_value('policy_loss', policy_loss, step)
# policy.update_target_net()
evaluator.clear_history()
policy.set_eval_mode()
for _ in range(n_test_rollouts):
evaluator.generate_rollouts()
# log statistics
logger.record_tabular('epoch', epoch)
test_stats = evaluator.logs()
for key, val in test_stats.items():
logger.record_tabular('test/'+key, val)
train_stats = trainer.logs()
for key, val in train_stats.items():
logger.record_tabular('train/'+key, val)
for key, val in policy.logs():
logger.record_tabular(key, val)
logger.dump_tabular()
log_value('train_success_rate', train_stats['success_rate'], epoch)
log_value('test_success_rate', test_stats['success_rate'], epoch)
success_rate = test_stats['success_rate']
if success_rate >= best_success_rate:
best_success_rate = success_rate
logger.info('New best success rate: {}. Saving policy to {} ...'.format(best_success_rate, best_policy_path))
policy.save(epoch, best_policy_path)
policy.save(epoch, latest_policy_path)
if epoch % params['save_every'] == 0:
policy_path = periodic_policy_path.format(epoch)
logger.info('Saving periodic policy to {} ...'.format(policy_path))
policy.save(epoch, policy_path)
| 3,683 |
PersonalWebsite/main/migrations/0001_initial.py
|
justokarell/PersonalWebsite
| 0 |
2023136
|
# Generated by Django 3.0.4 on 2020-03-31 12:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AreaMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('map_title', models.CharField(max_length=200)),
('map_updated', models.DateTimeField(verbose_name='last updated')),
],
),
]
| 576 |
doc/_static/draw_tree.py
|
U8NWXD/vivarium
| 13 |
2023668
|
import networkx as nx
from matplotlib import pyplot as plt
def main():
G = nx.Graph()
edges = [
('root', 'cell'),
('root', 'global'),
('root', 'injector'),
('root', 'glucose_phosphorylation'),
('root', 'my_deriver'),
('cell', 'ATP'),
('cell', 'ADP'),
('cell', 'G6P'),
('cell', 'GLC'),
('cell', 'HK'),
('global', 'initial_mass'),
('global', 'mass'),
]
fig, ax = plt.subplots(figsize=(15, 5))
G.add_edges_from(edges)
nx.draw_networkx(
G,
pos=nx.nx_pydot.graphviz_layout(G, prog="dot"),
with_labels=True,
node_size=2000,
font_size=14,
ax=ax
)
plt.savefig('tree.png')
if __name__ == '__main__':
main()
| 782 |
main.py
|
CocoaPods/keith-as-a-service
| 1 |
2022895
|
from flask import Flask, request
from flask.views import MethodView
import hashlib
import hmac
import json
import os
import requests
import sys
class GithubHook(MethodView):
def post(self):
try:
signature = request.headers["X-Hub-Signature"]
except Exception, e:
print "No hub signature %s : %s" % (request.headers,
request.get_json())
return "This is sketchy, I'm leaving"
try:
secret = os.environ["hook_secret"]
hash_string = hmac.new(secret.encode('utf-8'),
msg=request.data,
digestmod=hashlib.sha1).hexdigest()
expected_hash = "sha1=" + hash_string
except Exception, e:
print "Building expected hash failed: %s" % e
return "Building expected hash failed", 500
if signature != expected_hash:
return "Wrong hash, gtfo"
request_json = request.get_json()
pull_request = request_json.get("pull_request")
if request_json.get("action") != "opened":
return "Meh, only care about opens"
links = pull_request.get("_links")
pull_request_link = links.get("self").get("href")
comments_url = links.get("comments").get("href")
message_file = os.path.join(os.path.dirname(__file__), "message.md")
with open(message_file, "r") as file:
message = file.read()
comment_body = {"body": message}
close_state = {"state": "closed"}
auth = (os.environ["github_user"], os.environ["github_pass"])
try:
requests.post(comments_url,
data=json.dumps(comment_body),
auth=auth)
requests.patch(pull_request_link,
data=json.dumps(close_state),
auth=auth)
except Exception, e:
print "Exception: ", e
return "42"
app = Flask(__name__)
app.add_url_rule('/', view_func=GithubHook.as_view('counter'))
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Usage: %s <PORT>" % sys.argv[0]
sys.exit(1)
app.run(port=int(sys.argv[1]), host="0.0.0.0")
| 2,298 |
CodesComplete/CleanCode/descriptor.py
|
vinimmelo/python
| 0 |
2022965
|
class NonDataDescriptor:
def __get__(self, instance, value):
if instance is None:
return self
print('Descriptor test!')
return 42
class DataDescriptor:
def __get__(self, instance, value):
if instance is None:
return self
print('Descriptor test!')
return 42
def __set__(self, instance, value):
print('Setting descriptor')
instance.__dict__["descriptor"] = value
class Dog:
descriptor = DataDescriptor()
def __init__(self, legs, eyes):
self.legs = legs
self.eyes = eyes
def bark(self):
return f"Legs: {self.legs} Eyes: {self.legs}"
| 672 |
test_frame/test_broker/test_publish.py
|
DJMIN/funboost
| 333 |
2023418
|
import time
from test_frame.test_broker.test_consume import f
f.clear()
for i in range(1000000):
# time.sleep(0.2)
if i == 0:
print(time.strftime("%H:%M:%S"), 'publishing the first message')
if i == 99999:
print(time.strftime("%H:%M:%S"), 'publishing message number 100000')
f.push(i, i * 2)
if __name__ == '__main__':
pass
# f.multi_process_pub_params_list([{'a':i,'b':2*i} for i in range(100000)],process_num=5)
| 411 |
Python/StringTyping.py
|
Zardosh/code-forces-solutions
| 0 |
2022786
|
n = int(input())
s = input()
word = ''
biggest_repetition = 1
for i in range(len(s)):
word += s[i]
if word in s[i + 1:i + len(word) + 1]:
biggest_repetition = max(biggest_repetition, len(word))
print(len(s) - biggest_repetition + 1)
| 252 |
app/app.py
|
Ollen/FileCarver
| 1 |
2023698
|
import sys, os, time, math  # math.ceil is used in startBurn
sys.path.append('../controller')
from head_file import *
from init_drive import *
from model_app import *
from burn_drive import *
from flask import Flask, render_template, request, redirect, jsonify, Response
app = Flask(__name__)
# Global Variables
doCopy = -1
selectedDrive = None
selectRawPath = None
copyRawPath = None
copyRawName = None
fileList = None
filePrefix = ""
scanOption = None
extractLocation = None
fullDriveList = None
selectedShredOption = None
drive = None
# Start page of the application
@app.route("/")
def start():
return render_template('start.html')
# Select a drive page.
@app.route("/select", methods=['GET','POST'])
def select():
global fullDriveList
fullDriveList = listDrive()
return render_template('select.html', drive_list = fullDriveList)
# Select a raw image Page
@app.route("/selectRaw", methods=['GET','POST'])
def selectRaw():
return render_template('selectRaw.html')
@app.route("/selectShred", methods=['GET', 'POST'])
def selectShred():
global fullDriveList
fullDriveList = listDrive()
return render_template('selectShred.html', drive_list = fullDriveList)
@app.route("/burnDrive", methods=['GET', 'POST'])
def burnDrive():
global selectedDrive
global selectedShredOption
global drive
selectedDrive = request.form['drive']
selectedShredOption = request.form['option']
drive = getDrive(fullDriveList, int(selectedDrive))
return render_template('loadShred.html')
@app.route("/startBurn", methods=['GET', 'POST'])
def startBurn():
print("=-=-=-=-=--=-=-=-= START BURN -=-=-=-=-=--==-=-", file=sys.stderr)
rawTotal = getDriveTotal(drive)
print(drive, file=sys.stderr)
print(rawTotal, file=sys.stderr)
print(selectedShredOption, file=sys.stderr)
rand_data = "0123456789ABCDEF"
rand_char = "ABCDEF"
rand_numb = "1234567890"
block_total = int(math.ceil(rawTotal / 8192))
if selectedShredOption == "0":
return Response(cleanDrive(drive, block_total, 8192, "0") ,mimetype= 'text/event-stream')
if selectedShredOption == "1":
return Response(cleanDrive(drive, block_total, 8192, rand_data) ,mimetype= 'text/event-stream')
# Set the user input in the select page
@app.route("/setSelect", methods=['GET', 'POST'])
def setSelectGlobal():
global drive
driveText = request.form['drive']
path = request.form['copyPath']
name = request.form['copyName']
copy = request.form['copy']
setSelect(driveText, copy, path, name)
# print(selectedDrive, file=sys.stderr)
# print(copyRawPath, file=sys.stderr)
# print(copyRawName, file=sys.stderr)
# print(option, file=sys.stderr)
# sys.stdout.flush()
drive = getDrive(fullDriveList, int(selectedDrive))
print(drive, file=sys.stderr)
if not name or not path :
print('redirect to config', file=sys.stderr)
return redirect('http://localhost:5000/config')
else:
print('redirect to copy', file=sys.stderr)
return redirect('http://localhost:5000/copy')
@app.route("/setSelectRaw", methods=['GET', 'POST'])
def setRaw():
global drive
drive = request.form['rawPath']
return redirect('http://localhost:5000/config')
# Configure extraction page.
@app.route("/config", methods=['GET','POST'])
def config():
return render_template('config.html')
# Copy raw image loading page.
@app.route("/copy", methods=['GET','POST'])
def copy():
return render_template('loadCopy.html')
# Extract retrieved files loading page.
@app.route("/extract", methods=['GET','POST'])
def extract():
setConfig(request.form.getlist('checklist'),request.form['scanOption'],
request.form['extractLocation'], request.form['filePrefix'])
#print(drive, file=sys.stderr)
print(extractLocation, file=sys.stderr)
print(fileList, file=sys.stderr)
print(scanOption, file=sys.stderr)
if scanOption == "1":
print('[+] Initializing Fast Scan', file=sys.stderr)
elif scanOption == "2":
print('[+] Initializing Normal Scan', file=sys.stderr)
elif scanOption == "3":
print('[+] Initializing Deep Scan', file=sys.stderr)
rawExtractLocation = extractLocation.replace('\\','\\\\')
print(rawExtractLocation, file=sys.stderr)
return render_template('loadExtract.html', copy = doCopy, location = rawExtractLocation)
# Extract directly from drive
@app.route('/scanExtract')
def scanExtract():
print (drive, file=sys.stderr)
(lst_srt, lst_end, lst_buf) = compileRegs(fileList)
if scanOption == '1':
print ('Extracting (Fast-Scan): ', file=sys.stderr)
fullPrefix = namingFile(extractLocation, filePrefix)
return Response(fastReadImage(drive, fullPrefix, lst_srt, lst_end, fileList, lst_buf),
mimetype= 'text/event-stream')
elif scanOption == '2':
print ('Extracting (Standard-Scan): ', file=sys.stderr)
fullPrefix = namingFile(extractLocation, filePrefix)
return Response(standardReadImage(drive, fullPrefix, lst_srt, lst_end, fileList, lst_buf),
mimetype= 'text/event-stream')
elif scanOption == '3':
print ('Extracting (Deep-Scan): ', file=sys.stderr)
fullPrefix = namingFile(extractLocation, filePrefix)
return Response(deepReadImage(drive, fullPrefix, lst_srt, lst_end, fileList, lst_buf),
mimetype= 'text/event-stream')
# Extract from copied raw Image
@app.route('/scanExtractCopy')
def scanCopy():
rawPath = copyRawPath + copyRawName + ".dd"
print (rawPath, file=sys.stderr)
(lst_srt, lst_end, lst_buf) = compileRegs(fileList)
if scanOption == '1':
print ('Extracting (Fast-Scan): ', file=sys.stderr)
fullPrefix = namingFile(extractLocation, filePrefix)
return Response(fastReadImage(rawPath, fullPrefix, lst_srt, lst_end, fileList, lst_buf),
mimetype= 'text/event-stream')
elif scanOption == '2':
print ('Extracting (Standard-Scan): ', file=sys.stderr)
fullPrefix = namingFile(extractLocation, filePrefix)
return Response(standardReadImage(rawPath, fullPrefix, lst_srt, lst_end, fileList, lst_buf),
mimetype= 'text/event-stream')
elif scanOption == '3':
print ('Extracting (Deep-Scan): ', file=sys.stderr)
fullPrefix = namingFile(extractLocation, filePrefix)
return Response(deepReadImage(rawPath, fullPrefix, lst_srt, lst_end, fileList, lst_buf),
mimetype= 'text/event-stream')
@app.route('/scanExtractRaw')
def scanExtractRaw():
print ('ScanExtractRaw', file=sys.stderr)
print (drive, file=sys.stderr)
(lst_srt, lst_end, lst_buf) = compileRegs(fileList)
if scanOption == '1':
print ('Extracting (Fast-Scan): ', file=sys.stderr)
fullPrefix = namingFile(extractLocation, filePrefix)
return Response(fastReadImage(drive, fullPrefix, lst_srt, lst_end, fileList, lst_buf),
mimetype= 'text/event-stream')
elif scanOption == '2':
print ('Extracting (Standard-Scan): ', file=sys.stderr)
fullPrefix = namingFile(extractLocation, filePrefix)
return Response(standardReadImage(drive, fullPrefix, lst_srt, lst_end, fileList, lst_buf),
mimetype= 'text/event-stream')
elif scanOption == '3':
print ('Extracting (Deep-Scan): ', file=sys.stderr)
fullPrefix = namingFile(extractLocation, filePrefix)
return Response(deepReadImage(drive, fullPrefix, lst_srt, lst_end, fileList, lst_buf),
mimetype= 'text/event-stream')
# Copy Raw Image
@app.route("/copyImage")
def copyImage():
return Response(toRawImage(copyRawName, copyRawPath, drive), mimetype= 'text/event-stream')
# Set the drive, rawpath, and rawname global variables.
def setSelect(drive, copy, rawPath = None, rawName = None):
global selectedDrive
global copyRawPath
global copyRawName
global doCopy
selectedDrive = drive
doCopy = copy
copyRawName = rawName
copyRawPath = rawPath
def setConfig(listFile, option, location, prefix = ""):
global fileList
global filePrefix
global scanOption
global extractLocation
fileList = listFile
filePrefix = prefix
scanOption = option
extractLocation = location
if __name__ == "__main__":
print('Starting...')
sys.stdout.flush()
app.run()
| 8,337 |
conf/settings/local.py
|
pincoin/iclover
| 1 |
2024196
|
from .base import *
DEBUG = Secret.DEBUG
# SECURITY WARNING: Keep them secret!
SECRET_KEY = Secret.SECRET_KEY
ALLOWED_HOSTS = Secret.ALLOWED_HOSTS
DATABASES = Secret.DATABASES
if DEBUG:
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/assets/'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets/')
STATICFILES_DIRS = [
]
# Media files (Uploaded files)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# logging
LOGGING = {
'version': 1,
# Disable the pre-existing logging configuration?
'disable_existing_loggers': False,
# Formatters
# A log record is ultimately rendered as text
# These define the format of that text
# Multiple formats can be defined
'formatters': {
'format1': {
'format': '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s',
'datefmt': '%d/%b/%Y %H:%M:%S'
},
'format2': {
'format': '%(levelname)s %(message)s'
},
},
        # Handlers
        # Define what is done with each log record.
        # Multiple handlers can be defined.
'handlers': {
            # Print to the console (terminal)
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'format2',
}
},
        # Loggers
        # Log record stores,
        # defined per logger name.
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
},
},
}
else:
# AWS Setting
AWS_REGION = Secret.AWS_REGION
AWS_STORAGE_BUCKET_NAME = Secret.AWS_STORAGE_BUCKET_NAME
AWS_ACCESS_KEY_ID = Secret.AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY = Secret.AWS_SECRET_ACCESS_KEY
AWS_QUERYSTRING_AUTH = False
AWS_S3_HOST = f's3.{AWS_REGION}.amazonaws.com'
AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'
AWS_DEFAULT_ACL = None
# Static Setting
STATIC_URL = '/assets/'
# Media Setting
MEDIA_URL = '/media/'
DEFAULT_FILE_STORAGE = 'conf.settings.storage.MediaStorage'
STATICFILES_STORAGE = 'conf.settings.storage.StaticStorage'
MEDIAFILES_LOCATION = 'media'
STATICFILES_LOCATION = 'assets'
REST_FRAMEWORK_TOKEN = 'e8b0ddbbe7152d35771a20c6669d1c2016175580'
# Our own apps
INSTALLED_APPS += [
'core',
'member',
'design',
'managing',
'api',
'imagekit',
'mathfilters',
'storages',
]
SESSION_COOKIE_AGE = 609600
SESSION_SAVE_EVERY_REQUEST = True
| 2,496 |
mlxtk/simulation_set/cmd_qdel.py
|
f-koehler/mlxtk
| 2 |
2023341
|
import argparse
from mlxtk.cwd import WorkingDir
from mlxtk.simulation_set.base import SimulationSetBase
def cmd_qdel(self: SimulationSetBase, args: argparse.Namespace):
del args
if not self.working_dir.exists():
self.logger.warning(
"working dir %s does not exist, do nothing", self.working_dir
        )
        return
with WorkingDir(self.working_dir):
for simulation in self.simulations:
simulation.main(["qdel"])
| 460 |
calling_linear_pig.py
|
Aurametrix/HDFS
| 1 |
2024178
|
#!/usr/bin/env python
import os
import sys
import shutil
import random
import tempfile
import Queue
from org.apache.pig.scripting import Pig
from org.codehaus.jackson.map import ObjectMapper
EPS = 10e-6 # maximum distance between consecutive weights for convergence
pig_script = sys.argv[1] # pig script to run iteratively
data_dir = sys.argv[2] # directory where intermediate weights will be written
features = sys.argv[3] # location, inside data_dir, where the data to fit exists
num_features = sys.argv[4] # number of features
#
# Cleanup data dir
#
cmd = "rmr %s/weight-*" % data_dir
Pig.fs(cmd)
#
# Initialize weights
#
w0_fields = []
weights = []
for i in xrange(int(num_features)):
weights.append(str(random.random()))
w0_fields.append({"name":"w%s" % i,"type":25,"schema":None}) # See Pig's DataType.java
path = tempfile.mkdtemp()
w0 = open("%s/part-r-00000" % path, 'w')
w0.write("\t".join(weights)+"\n")
w0.close()
#
# Create schema for weights, place under weight-0 dir
#
w0_schema = {"fields":w0_fields,"version":0,"sortKeys":[],"sortKeyOrders":[]}
w0_schema_file = open("%s/.pig_schema" % path, 'w')
ObjectMapper().writeValue(w0_schema_file, w0_schema);
w0_schema_file.close()
#
# Copy initial weights to fs
#
copyFromLocal = "copyFromLocal %s %s/%s" % (path, data_dir, "weight-0")
Pig.fs(copyFromLocal)
#
# Iterate until converged
#
features = "%s/%s" % (data_dir,features)
script = Pig.compileFromFile(pig_script)
weight_queue = Queue.Queue(25) # for moving average
avg_weight = [0.0 for i in xrange(int(num_features))]
converged = False
prev = 0
weight_dir = tempfile.mkdtemp()
while not converged:
input_weights = "%s/weight-%s" % (data_dir,prev)
output_weights = "%s/weight-%s" % (data_dir,prev+1)
bound = script.bind({'input_weights':input_weights,'output_weights':output_weights,'data':features})
bound.runSingle()
#
# Copy schema for weights to each output
#
copyOutputSchema = "cp %s/.pig_schema %s/.pig_schema" % (input_weights, output_weights)
Pig.fs(copyOutputSchema)
#
# The first few iterations the weights bounce all over the place
#
if (prev > 1):
copyToLocalPrev = "copyToLocal %s/part-r-00000 %s/weight-%s" % (input_weights, weight_dir, prev)
copyToLocalNext = "copyToLocal %s/part-r-00000 %s/weight-%s" % (output_weights, weight_dir, prev+1)
Pig.fs(copyToLocalPrev)
Pig.fs(copyToLocalNext)
localPrev = "%s/weight-%s" % (weight_dir, prev)
localNext = "%s/weight-%s" % (weight_dir, prev+1)
x1 = open(localPrev,'r').readlines()[0]
x2 = open(localNext,'r').readlines()[0]
x1 = [float(x.strip()) for x in x1.split("\t")]
x2 = [float(x.strip()) for x in x2.split("\t")]
weight_queue.put(x1)
avg_weight = [x[1] + (x[0] - x[1])/(prev-1.0) for x in zip(x1,avg_weight)]
#
# Make sure to collect enough weights into the average before
# checking for convergence
#
if (prev > 25):
first_weight = weight_queue.get()
avg_weight = [(x[0] - x[1]/25.0 + x[2]/25.0) for x in zip(avg_weight, first_weight, x1)]
#
# Compute distance from weight centroid to new weight
#
d = sum([(pair[0] - pair[1])**2 for pair in zip(x2,avg_weight)])
converged = (d < EPS)
os.remove(localPrev)
os.remove(localNext)
prev += 1
#
# Cleanup
#
shutil.rmtree(path)
shutil.rmtree(weight_dir)
| 3,661 |
fedhf/component/sampler/__init__.py
|
beiyuouo/fedhf
| 2 |
2023403
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : fedhf\component\sampler\__init__.py
# @Time : 2022-05-03 16:00:35
# @Author : <NAME>
# @Email : <EMAIL>
# @License : Apache License 2.0
__all__ = ["build_sampler", "RandomSampler", "NonIIDSampler", "sampler_factory", "BaseSampler"]
from .base_sampler import BaseSampler
from .random_sampler import RandomSampler
from .noniid_sampler import NonIIDSampler
sampler_factory = {
'random': RandomSampler,
'non-iid': NonIIDSampler,
}
def build_sampler(sam_name: str):
if sam_name not in sampler_factory.keys():
raise ValueError(f'Sampler {sam_name} not found.')
sampler = sampler_factory[sam_name]
return sampler
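# Illustrative usage sketch (not part of the original module); the constructor
# arguments of the concrete sampler classes are assumptions here:
#
#     sampler_cls = build_sampler("non-iid")
#     sampler = sampler_cls(args)  # hypothetical args object expected by fedhf samplers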
| 717 |
src/ia/random_playeria.py
|
yoyonel/2018_papayoo
| 0 |
2024105
|
"""
"""
import random
#
from ia.playeria import PlayerIA
class RandomPlayerIA(PlayerIA):
def __init__(self,
player_id,
player_name=None):
"""
:param player_id:
:param player_name:
"""
super().__init__(player_id, player_name)
def do_discards(self, discard):
"""
:param discard:
:type discard: DiscardCards
:return:
:rtype: list[Cards]
"""
def _choose_discard_cards():
id_cards = list(range(len(self._cards)))
random.shuffle(id_cards)
return [
self._cards[id_card]
for id_card in id_cards[:discard.nb_cards]
]
#
discard_cards = _choose_discard_cards()
#
self.remove_cards(discard_cards)
#
return discard_cards
def get_from_discards(self, cards):
"""
:param cards:
:type cards: list[Cards]
:return:
"""
# self.add_cards(cards)
self.add_discard_cards(cards)
def play(self, cards_already_played):
"""
:param cards_already_played:
:type cards_already_played: list[tuple(int, Cards)]
:return:
:rtype: Cards
"""
cards_playable = self._cards
# filter cards we can play
if cards_already_played:
first_card_play = cards_already_played[0]
filter_suit = first_card_play.suit
filter_cards_playable = [
card
for card in self._cards
if card.suit == filter_suit
]
if filter_cards_playable:
cards_playable = filter_cards_playable
#
play_card = random.choice(cards_playable)
self.remove_cards([play_card])
return play_card
| 1,872 |
applications/Ma-Net/networks/decoder.py
|
Simon-liusheng/PaddleVideo
| 1 |
2023664
|
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from utils.api import kaiming_normal_
class Decoder(nn.Layer):
def __init__(self, num_classes, backbone, BatchNorm):
super(Decoder, self).__init__()
if backbone == 'resnet' or backbone == 'drn' or backbone == 'resnet_edge':
low_level_inplanes = 256
elif backbone == 'xception':
low_level_inplanes = 128
elif backbone == 'mobilenet':
low_level_inplanes = 24
else:
raise NotImplementedError
self.conv1 = nn.Conv2D(low_level_inplanes, 48, 1, bias_attr=False)
self.bn1 = BatchNorm(48)
self.relu = nn.ReLU(True)
self.last_conv = nn.Sequential(
nn.Conv2D(304,
256,
kernel_size=3,
stride=1,
padding=1,
bias_attr=False), BatchNorm(256), nn.ReLU(True),
nn.Sequential(),
nn.Conv2D(256,
256,
kernel_size=3,
stride=1,
padding=1,
bias_attr=False), BatchNorm(256), nn.ReLU(True),
nn.Sequential())
self._init_weight()
def forward(self, x, low_level_feat):
low_level_feat = self.conv1(low_level_feat)
low_level_feat = self.bn1(low_level_feat)
low_level_feat = self.relu(low_level_feat)
x = F.interpolate(x,
size=low_level_feat.shape[2:],
mode='bilinear',
align_corners=True)
x = paddle.concat((x, low_level_feat), axis=1)
x = self.last_conv(x)
return x
def _init_weight(self):
for m in self.sublayers():
if isinstance(m, nn.Conv2D):
kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2D):
from utils.api import fill_
fill_(m.weight, 1)
from utils.api import zero_
zero_(m.bias)
def build_decoder(num_classes, backbone, BatchNorm):
return Decoder(num_classes, backbone, BatchNorm)
| 2,219 |
Shared/dtos/GameActionDto.py
|
GeorgeVelikov/Werewolf-Framework
| 1 |
2023203
|
class GameActionDto():
    def __init__(self, gameIdentifier, player, targetPlayerIdentifier, additionalData = None):
        self.__gameIdentifier = gameIdentifier
        self.__player = player
        self.__targetPlayerIdentifier = targetPlayerIdentifier
        self.__additionalData = additionalData
    @property
    def GameIdentifier(self):
        return self.__gameIdentifier
    @property
    def Player(self):
        return self.__player
    @property
    def TargetPlayerIdentifier(self):
        return self.__targetPlayerIdentifier
    @property
    def AdditionalData(self):
        return self.__additionalData
| 642 |
oswin_tempest_plugin/exceptions.py
|
openstack/oswin-tempest-plugin
| 6 |
2024010
|
# Copyright 2017 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import exceptions
class ResizeException(exceptions.TempestException):
message = ("Server %(server_id)s failed to resize to the given "
"flavor %(flavor)s")
class NotFoundException(exceptions.TempestException):
message = "Resource %(resource)s (%(res_type)s) was not found."
class WSManException(exceptions.TempestException):
    message = ('Command "%(cmd)s" on host %(host)s failed with the '
'return code %(return_code)s. std_out: %(std_out)s, '
'std_err: %(std_err)s')
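# Illustrative usage sketch (not part of the original module); it assumes the
# usual tempest.lib behaviour of interpolating keyword arguments into the
# exception's `message` attribute:
#
#     raise ResizeException(server_id=server['id'], flavor=flavor['id'])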
| 1,194 |
Feature Detection/edge_corner.py
|
tuhinmallick/cv_course
| 2 |
2024058
|
"""
Created on May 9, 2020.
@authors:
<NAME> <<EMAIL>> https://github.com/starasteh/
<NAME> <<EMAIL>> https://github.com/aminheydarshahi/
"""
import cv2
import numpy as np
from skimage.feature import peak_local_max
from scipy.signal import argrelextrema
def show(name, img, x, y):
windowStartX = 10
windowStartY = 50
windowXoffset = 5
windowYoffset = 40
w = img.shape[0] + windowXoffset
h = img.shape[1] + windowYoffset
cv2.namedWindow(name)
cv2.moveWindow(name, windowStartX + w * x, windowStartY + h * y)
cv2.imshow(name, img)
cv2.waitKey(0)
def harrisResponseImage(img):
'''
Compute the spatial derivatives in x and y direction.
:param img: input image
:return: Harris response of the image
'''
dIdx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
dIdy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
show("dI/dx", abs(dIdx), 1, 0)
show("dI/dy", abs(dIdy), 2, 0)
##########################################################
## Compute Ixx, Iyy, and Ixy with
## Ixx = (dI/dx) * (dI/dx),
## Iyy = (dI/dy) * (dI/dy),
## Ixy = (dI/dx) * (dI/dy).
## Note: The multiplication between the images is element-wise (not a matrix
## multiplication)!!
Ixx = dIdx ** 2
Iyy = dIdy ** 2
Ixy = dIdx * dIdy
show("Ixx", abs(Ixx), 0, 1)
show("Iyy", abs(Iyy), 1, 1)
show("Ixy", abs(Ixy), 2, 1)
##########################################################
## Compute the images A,B, and C by blurring the
## images Ixx, Iyy, and Ixy with a
## Gaussian filter of size 3x3 and standard deviation of 1.
kernelSize = (3, 3)
sdev = 1
A = cv2.GaussianBlur(Ixx, kernelSize, sdev)
B = cv2.GaussianBlur(Iyy, kernelSize, sdev)
C = cv2.GaussianBlur(Ixy, kernelSize, sdev)
show("A", abs(A) * 5, 0, 1)
show("B", abs(B) * 5, 1, 1)
show("C", abs(C) * 5, 2, 1)
##########################################################
## Compute the harris response with the following formula:
## R = Det - k * Trace*Trace
## Det = A * B - C * C
## Trace = A + B
k = 0.06
trace = A + B
det = A * B - C * C
response = det - k * (trace ** 2)
## Normalize the response image
dbg = (response - np.min(response)) / (np.max(response) - np.min(response))
dbg = dbg.astype(np.float32)
show("Harris Response", dbg, 0, 2)
##########################################################
cv2.imwrite("dIdx.png", (abs(dIdx) * 255.0))
cv2.imwrite("dIdy.png", (abs(dIdy) * 255.0))
cv2.imwrite("A.png", (abs(A) * 5 * 255.0))
cv2.imwrite("B.png", (abs(B) * 5 * 255.0))
cv2.imwrite("C.png", (abs(C) * 5 * 255.0))
cv2.imwrite("response.png", np.uint8(dbg * 255.0))
return response
def harrisKeypoints(response, threshold=0.1):
'''
Generate a keypoint for a pixel,
if the response is larger than the threshold
and it is a local maximum.
Don't generate keypoints at the image border.
Note 1: Keypoints are stored with (x,y) and images are accessed with (y,x)!!
Note 2: with changing k in the R equation, we detect different number of corners.
k = 0.005 is the best according to this image.
:param response: Harris response of an image
:param threshold: Minimum intensity of peaks
:return: list of the keypoints
'''
points = []
maxima = peak_local_max(response, min_distance=1, threshold_abs=threshold)
for maximum in maxima:
points.append(cv2.KeyPoint(maximum[1], maximum[0], 1))
return points
def harrisEdges(input, response, edge_threshold=-0.01):
'''
Set edge pixels to red.
A pixel belongs to an edge, if the response is smaller than a threshold
and it is a minimum in x or y direction.
Don't generate edges at the image border.
:param input: input image
:param response: harris response of the image
:param edge_threshold: Maximum intensity
'''
result = input.copy()
# 1) if the response is smaller than a threshold
response = np.where(response > edge_threshold, np.inf, response)
# 2) it is a minimum in x or y direction
    # scan every column for minima along x and every row for minima along y
    for y in range(response.shape[1]):
        minima_x = argrelextrema(response[:, y], np.less)
        result[minima_x, y] = (0, 0, 255)
    for x in range(response.shape[0]):
        minima_y = argrelextrema(response[x], np.less)
        result[x, minima_y] = (0, 0, 255)
return result
def main():
input_img = cv2.imread('blox.jpg') ## read the image
input_gray = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY) ## convert to grayscale
input_gray = (input_gray - np.min(input_gray)) / (np.max(input_gray) - np.min(input_gray)) ## normalize
input_gray = input_gray.astype(np.float32) ## convert to float32 for filtering
## Obtain Harris Response, corners and edges
response = harrisResponseImage(input_gray)
points = harrisKeypoints(response)
edges = harrisEdges(input_img, response)
imgKeypoints1 = cv2.drawKeypoints(input_img, points, outImage=None, color=(0, 255, 0))
show("Harris Keypoints", imgKeypoints1, 1, 2)
show("Harris Edges", edges, 2, 2)
cv2.imwrite("edges.png", edges)
cv2.imwrite("corners.png", imgKeypoints1)
if __name__ == '__main__':
main()
| 5,268 |
setup.py
|
maximmenshikov/sphinx_redactor_theme
| 0 |
2024074
|
# -*- coding: utf-8 -*-
from setuptools import setup
from sphinx_redactor_theme import __version__
setup(
name='sphinx_redactor_theme',
version='0.0.2.dev0',
url='https://github.com/testthedocs/sphinx_redactor_theme',
license='MIT',
author='TestTheDocs Community',
author_email='<EMAIL>',
description='Sphinx theme for redactor docs.',
long_description=open('README.rst').read(),
zip_safe=False,
packages=['sphinx_redactor_theme'],
package_data={
'sphinx_redactor_theme': [
'theme.conf',
'*.html',
'static/css/*.css',
'static/js/*.js',
'static/font/*.*'
]
},
entry_points={
'sphinx.html_themes': [
'sphinx_redactor_theme = sphinx_redactor_theme',
]
},
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Topic :: Documentation',
'Topic :: Software Development :: Documentation',
],
)
| 1,318 |
tests/__init__.py
|
Storj/metadisk-client-python
| 0 |
2024239
|
import unittest
from . test_rest import * # NOQA
from . test_api_usage import * # NOQA
if __name__ == "__main__":
unittest.main()
| 123 |
src/models/score.py
|
leudom/german-fake-news-classifier
| 0 |
2022816
|
# -*- coding: utf-8 -*-
"""FakeNewsClassifier Scoring Script (Entry Point)
This script serves as an entry point to ACI Deployment
"""
# %% Imports
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
import os
import json
import numpy as np
from joblib import load
from azureml.core.model import Model
#%%
from dotenv import find_dotenv, load_dotenv
load_dotenv(find_dotenv())
#MODEL_PATH = os.path.join(os.getenv('PROJECT_DIR'), 'bin', 'models', 'model.pkl')
#model = load(MODEL_PATH)
#type(model)
#X = ["Das ist ein Test, ob dieser Text als Fake News klassifiziert wird", "Ein weiterer Test"]
#results = model.predict_proba(X)
#results.tolist()
# %% Define entry point
def init():
global model
model_path = Model.get_model_path(os.getenv('MODEL_NAME'))
logging.info('Model path is %s' % str(model_path))
model = load(model_path)
logging.info('Model successfully loaded')
def run(data):
try:
data = json.loads(data)
result = model.predict_proba(data['data'])
return {'data': result.tolist(), 'message': "Successfully classified news"}
except Exception as e:
error = str(e)
return {'data': error, 'message': "Failed to classify news"}
| 1,239 |
bigbuild/tests/test_commands.py
|
datadesk/django-bigbuild
| 28 |
2024012
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
import bigbuild
from bigbuild.tests import TestBase
from bigbuild.tests import BUILD_DIR
from django.test import override_settings
from bigbuild.models import PageList, Page
from bigbuild import get_archive_directory
from django.core.management import call_command
from django.core.management.base import CommandError
logging.disable(logging.CRITICAL)
class TestCommands(TestBase):
def test_build(self):
Page.create(slug="yet-another-fake-page")
Page.create(slug="my-archived-page")
call_command("build")
self.assertTrue(os.path.exists(BUILD_DIR))
expected_index = os.path.join(
BUILD_DIR,
PageList()['yet-another-fake-page'].get_absolute_url().lstrip("/"),
'index.html'
)
self.assertTrue(os.path.exists(expected_index))
PageList()[0].build()
with override_settings(BUILD_DIR=''):
bigbuild.get_build_directory()
@override_settings(BIGBUILD_GIT_BRANCH='test', BIGBUILD_BRANCH_BUILD=True)
def test_branch_build(self):
call_command("build")
self.assertTrue(os.path.exists(os.path.join(BUILD_DIR, 'test')))
self.assertTrue('test' in bigbuild.get_base_url())
@override_settings(BIGBUILD_BRANCH_BUILD=False)
def test_base_url(self):
bigbuild.get_base_url()
@override_settings(BAKERY_GZIP=True)
def test_gzip(self):
call_command("build")
def test_validatepages(self):
call_command("validatepages")
def test_createpage(self):
call_command("createpage", "test-page")
with self.assertRaises(ValueError):
call_command("createpage", "test-page")
call_command("createpage", "test-page", force=True)
def test_archivepage(self):
p = Page.create(slug='test-archived-page')
call_command("archivepage", p.slug)
with self.assertRaises(CommandError):
call_command("archivepage", p.slug)
with self.assertRaises(CommandError):
call_command("archivepage", "hello-wtf")
call_command("unarchivepage", "test-archived-page")
def test_cache(self):
"""
Test the page caching
"""
before = PageList()
cache_path = os.path.join(get_archive_directory(), '.cache')
if os.path.exists(cache_path):
os.remove(cache_path)
call_command("cachepages")
after = PageList()
self.assertEqual(before[0].slug, after[0].slug)
call_command("cachepages")
| 2,590 |
model/keypoint_detector.py
|
netjerikhet/ai-human-emotions
| 0 |
2022805
|
from torch import nn
import torch
import torch.nn.functional as F
from model.util import Hourglass, make_coordinate_grid, AntiAliasInterpolation2d
import numpy as np
from scipy.spatial import ConvexHull
class KPDetector(nn.Module):
"""
Detecting the keypoints. Return keypoint positions and jacobian near each point
"""
def __init__(self, block_expansion, num_kp, num_channels, max_features,
num_blocks, temperature, estimate_jacobian=False, scale_factor=1,
single_jacobian_map=False, pad=0):
super(KPDetector, self).__init__()
self.predictor = Hourglass(block_expansion, in_features=num_channels,
max_features=max_features, num_blocks=num_blocks)
self.kp = nn.Conv2d(in_channels=self.predictor.out_filters, out_channels=num_kp, kernel_size=(7,7),
padding=pad)
if estimate_jacobian:
self.num_jacobian_maps = 1 if single_jacobian_map else num_kp
self.jacobian = nn.Conv2d(in_channels=self.predictor.out_filters,
out_channels=4 * self.num_jacobian_maps, kernel_size=(7, 7), padding=pad)
self.jacobian.weight.data.zero_()
self.jacobian.bias.data.copy_(torch.tensor([1, 0, 0, 1] * self.num_jacobian_maps, dtype=torch.float))
else:
self.jacobian = None
self.temperature = temperature
self.scale_factor = scale_factor
if self.scale_factor != 1:
self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)
def gaussian2kp(self, heatmap):
"""
Extract the mean and from a heatmap
"""
shape = heatmap.shape
heatmap = heatmap.unsqueeze(-1)
grid = make_coordinate_grid(shape[2:], heatmap.type()).unsqueeze_(0).unsqueeze_(0)
value = (heatmap * grid).sum(dim=(2, 3))
kp = {'value': value}
return kp
def forward(self, x):
if self.scale_factor != 1:
x = self.down(x)
feature_map = self.predictor(x)
prediction = self.kp(feature_map)
final_shape = prediction.shape
heatmap = prediction.view(final_shape[0], final_shape[1], -1)
heatmap = F.softmax(heatmap / self.temperature, dim=2)
heatmap = heatmap.view(*final_shape)
out = self.gaussian2kp(heatmap)
if self.jacobian is not None:
jacobian_map = self.jacobian(feature_map)
jacobian_map = jacobian_map.reshape(final_shape[0], self.num_jacobian_maps, 4, final_shape[2],
final_shape[3])
heatmap = heatmap.unsqueeze(2)
jacobian = heatmap * jacobian_map
jacobian = jacobian.view(final_shape[0], final_shape[1], 4, -1)
jacobian = jacobian.sum(dim=-1)
jacobian = jacobian.view(jacobian.shape[0], jacobian.shape[1], 2, 2)
out['jacobian'] = jacobian
return out
def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
use_relative_movement=False, use_relative_jacobian=False):
if adapt_movement_scale:
source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume
driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume
adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
else:
adapt_movement_scale = 1
kp_new = {k: v for k, v in kp_driving.items()}
if use_relative_movement:
kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])
kp_value_diff *= adapt_movement_scale
kp_new['value'] = kp_value_diff + kp_source['value']
if use_relative_jacobian:
jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))
kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])
return kp_new
| 3,937 |
grn_learn/grn_learn/viz.py
|
manuflores/grn_learn
| 1 |
2023613
|
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import squarify
from matplotlib import rcParams
import matplotlib as mpl
from math import pi
def bokeh_style():
'''
Formats bokeh plotting enviroment.
Based on the RPgroup PBoC style created by <NAME> and <NAME>.
'''
theme_json = {'attrs':{'Axis': {
'axis_label_text_font': 'Helvetica',
'axis_label_text_font_style': 'normal'
},
'Legend': {
'border_line_width': 1.5,
'background_fill_alpha': 0.5
},
'Text': {
'text_font_style': 'normal',
'text_font': 'Helvetica',
'text_font_size': 18
},
'Title': {
#'background_fill_color': '#FFEDC0',
'text_font_style': 'normal',
'align': 'center',
'text_font': 'Helvetica',
'offset': 2,
}}}
return theme_json
# +
# ------- PLOTTING FUNCTIONS -------------------------
def set_plotting_style():
"""
Plotting style parameters, based on the RP group.
"""
tw = 1.5
rc = {'lines.linewidth': 2,
'axes.labelsize': 18,
'axes.titlesize': 21,
'xtick.major' : 16,
'ytick.major' : 16,
'xtick.major.width': tw,
'xtick.minor.width': tw,
'ytick.major.width': tw,
'ytick.minor.width': tw,
'xtick.labelsize': 'large',
'ytick.labelsize': 'large',
'font.family': 'sans',
'weight':'bold',
'grid.linestyle': ':',
'grid.linewidth': 1.5,
'grid.color': '#ffffff',
'mathtext.fontset': 'stixsans',
'mathtext.sf': 'fantasy',
'legend.frameon': True,
'legend.fontsize': 12,
"xtick.direction": "in","ytick.direction": "in"}
plt.rc('text.latex', preamble=r'\usepackage{sfmath}')
plt.rc('mathtext', fontset='stixsans', sf='sans')
sns.set_style('ticks', rc=rc)
#sns.set_palette("colorblind", color_codes=True)
sns.set_context('notebook', rc=rc)
rcParams['axes.titlepad'] = 20
def ecdf(x, plot = None, label = None):
'''
Compute and plot ECDF.
----------------------
Inputs
x: array or list, distribution of a random variable
plot: bool, if True return the plot of the ECDF
label: string, label for the plot
Outputs
x_sorted : sorted x array
ecdf : array containing the ECDF of x
'''
x_sorted = np.sort(x)
n = len (x)
ecdf = np.linspace(0, 1, len(x_sorted))
if label is None and plot is True:
plt.scatter(x_sorted, ecdf, alpha = 0.7)
elif label is not None and plot is True:
plt.scatter(x_sorted, ecdf, alpha = 0.7, label = label)
return x_sorted, ecdf
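# Illustrative usage sketch for ecdf (not part of the original module):
#
#     data = np.random.normal(size=200)
#     x_sorted, ecdf_vals = ecdf(data, plot=True, label='normal sample')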
def make_treemap(x_keys, x_counts):
'''
Wrapper function to plot treemap using the squarify module.
-------------------------------------------
x_keys = names of the different categories
x_counts = counts of the given categories
'''
norm = mpl.colors.Normalize(vmin=min(x_counts), vmax=max(x_counts))
colors = [mpl.cm.Greens(norm(value)) for value in x_counts]
plt.figure(figsize=(14,8))
squarify.plot(label= x_keys, sizes= x_counts, color = colors, alpha=.6)
plt.axis('off');
def make_radar_chart(x_keys, x_counts):
'''
Wrapper function to make radar chart.
------------------------------------------
x_keys = names of the different categories
x_counts = counts of the given categories
'''
categories = list(x_keys)
N = len(categories)
if N > 30:
        print('There are too many categories to visualize in a radar chart.')
else:
values = list(x_counts)
values.append(values[0])
values_sum = np.sum(values[:-1])
percentages= [(val/values_sum)*100 for val in values]
#angles
angles = [n / float(N) * 2 * pi for n in range(N)]
angles += angles[:1]
sns.set_style('whitegrid')
# Initialize figure
plt.figure(1, figsize=(7, 7))
# Initialise the polar plot
ax = plt.subplot(111, polar=True)
# Draw one ax per variable + add labels labels yet
plt.xticks(angles[:-1], categories, color='grey', size=12)
#Set first variable to the vertical axis
ax.set_theta_offset(pi / 2)
#Set clockwise rotation
ax.set_theta_direction(-1)
#Set yticks to gray color
        ytick_1 = np.round(max(percentages) / 3)
        ytick_2 = np.round((max(percentages) / 3) * 2)
        ytick_3 = np.round(max(percentages) / 3) * 3
plt.yticks([ytick_1, ytick_2, ytick_3], [ytick_1, ytick_2, ytick_3],
color="grey", size=10)
plt.ylim(0, int(max(percentages)) + 4)
# Plot data
ax.plot(angles, percentages, linewidth=1,color = 'lightgreen')
# Fill area
ax.fill(angles, percentages, 'lightgreen', alpha=0.3);
def plot_distplot_feature(data, col_name):
"""
Get a histogram with the y axis in log-scale
"""
plt.hist(data[col_name].values, bins = int(data.shape[0]/10000),
color = 'dodgerblue')
plt.yscale('log')
plt.xlabel(col_name)
plt.ylabel('frequency')
def plot_boxplot_feature(data, col_name, hue_col_name):
"""
Get a boxplot with the variable in the x axis, in log scale.
You also need to provide a hue column name.
"""
sns.boxplot(data = data, x = col_name, y = hue_col_name, palette = 'RdBu')
plt.xscale('log')
def palette(cmap = None):
palette = sns.cubehelix_palette(start = 0, rot=0, hue = 1, light = 0.9, dark = 0.15)
if cmap == True:
palette = sns.cubehelix_palette(start = 0, rot=0, hue = 1, light = 0.9, dark = 0.15, as_cmap = True)
return palette
| 6,014 |
tests/expect-fail/recipe-174786.py
|
JohannesBuchner/pystrict3
| 1 |
2023477
|
import random
h,t,sumh,sumt=0,0,0,0
for j in range(1,101):
for i in range(1,101):
x=random.randint(1,2)
if (x==1):
h=h+1
else:
t=t+1
print("Heads are:", h, "Tails are", t)
sumh=sumh+h
sumt=sumt+t
h,t=0,0
print("Heads are:", sumh, "Tails are", sumt)
| 317 |
tests/test_provider_jeremmfr_iptables.py
|
mjuenema/python-terrascript
| 507 |
2022771
|
# tests/test_provider_jeremmfr_iptables.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:19:14 UTC)
def test_provider_import():
import terrascript.provider.jeremmfr.iptables
def test_resource_import():
from terrascript.resource.jeremmfr.iptables import iptables_nat
from terrascript.resource.jeremmfr.iptables import iptables_nat_ipv6
from terrascript.resource.jeremmfr.iptables import iptables_project
from terrascript.resource.jeremmfr.iptables import iptables_project_ipv6
from terrascript.resource.jeremmfr.iptables import iptables_raw
from terrascript.resource.jeremmfr.iptables import iptables_raw_ipv6
from terrascript.resource.jeremmfr.iptables import iptables_rules
from terrascript.resource.jeremmfr.iptables import iptables_rules_ipv6
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.jeremmfr.iptables
#
# t = terrascript.provider.jeremmfr.iptables.iptables()
# s = str(t)
#
# assert 'https://github.com/jeremmfr/terraform-provider-iptables' in s
# assert '1.2.0' in s
| 1,227 |
machine-learning/small-tasks/f-score.py
|
nothingelsematters/university
| 1 |
2023179
|
def harmonic_mean(a, b):
return 0 if a + b == 0 else 2 * a * b / (a + b)
size = int(input())
elements = [list(map(int, input().split())) for x in range(size)]
row = [sum(i) for i in elements]
column = [sum(map(lambda x: x[i], elements)) for i in range(size)]
full = sum(row)
precision = 0
recall = 0
score = 0
for i in range(size):
local_precision = 0 if row[i] == 0 else elements[i][i] / row[i]
local_recall = 0 if column[i] == 0 else elements[i][i] / column[i]
weight = row[i]
precision += local_precision * weight
recall += local_recall * weight
score += harmonic_mean(local_precision, local_recall) * weight
# macro
print(harmonic_mean(precision, recall) / full)
# micro
print(score / full)
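# Illustrative input sketch (not part of the original script): for a 2-class
# confusion matrix, stdin would look like
#     2
#     5 1
#     2 4
# after which the script prints the macro F-score and then the micro F-score.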
| 729 |
pyccuweather/utils.py
|
chrisvoncsefalvay/pyccuweather
| 4 |
2022938
|
# coding=utf-8
"""
Pyccuweather
The Python Accuweather API
(c) <NAME>, 2015.
http://www.github.com/chrisvoncsefalvay/pyccuweather/
"""
import json
from time import gmtime
from datetime import date
def wloads(content):
"""
Decodes incoming JSON with UTF-8.
:param content: JSON formatted content
:return: JSON formatted content loaded as object and decoded with UTF-8.
"""
return json.loads(content.decode('utf-8'))
def get_woy(epochdate):
"""
Converts an epoch date into week of year.
:param epochdate: Epoch date
:return: week of year
"""
_time = gmtime(epochdate)
return date(_time.tm_year, _time.tm_mon, _time.tm_mday).isocalendar()[1]
| 701 |
export_to_csv.py
|
minhht-0134/redmine_sample
| 0 |
2023224
|
import pandas
def export(data, file_name):
df = pandas.DataFrame(data=data)
df.to_csv(file_name)
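# Illustrative usage sketch (not part of the original module):
#
#     export({"name": ["a", "b"], "age": [1, 2]}, "people.csv")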
| 107 |
helpers/preprocess.py
|
alishazal/qalb
| 2 |
2023847
|
import sys
import unicodedata as ud
import re
def allNonAscii(word):
for char in word:
if ord(char) < 128:
return False
return True
def copyNonAscii(input_line, copy_marker, output_line, is_prediction):
input_words = input_line.split()
if not is_prediction: output_words = output_line.split()
for i in range(len(input_words)):
if allNonAscii(input_words[i]):
input_words[i] = copy_marker
if not is_prediction: output_words[i] = copy_marker
if is_prediction: return " ".join(input_words)
return " ".join(input_words), " ".join(output_words)
def isPunctuation(word):
for char in word:
if char not in ".,?!'\":;-()[]}{":
return False
return True
def copyTextEmojiAndPunctuation(input_line, copy_marker, output_line, is_prediction):
input_words = input_line.split()
if not is_prediction: output_words = output_line.split()
for i in range(len(input_words)):
if isPunctuation(input_words[i]):
input_words[i] = copy_marker
if not is_prediction: output_words[i] = copy_marker
else:
# Handling <3 separately first
match = re.search(r'(<3)+', input_words[i], re.IGNORECASE)
if match and match.group(0) == input_words[i]:
input_words[i] = copy_marker
if not is_prediction: output_words[i] = copy_marker
match = re.search(r'[=:;8xX>^()$*@][-_.\'"]*[XxOoPpsSDVv)(\][/\\Cc3><}{@|:;=0*L$^~]+', input_words[i], re.IGNORECASE)
if match:
emoticon = match.group(0)
if emoticon == input_words[i]:
input_words[i] = copy_marker
if not is_prediction: output_words[i] = copy_marker
if is_prediction: return " ".join(input_words)
return " ".join(input_words), " ".join(output_words)
def removeAccents(line):
newLine = []
for word in line.split(" "):
nfkd_form = ud.normalize('NFKD', word)
res = "".join([c for c in nfkd_form if not ud.combining(c)])
newLine.append(res.replace(' ', ''))
return " ".join(newLine)
def compress(line, limit):
ans = ""
currChar = ""
currCharCounter = 1
compressThese = '23567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_ '
for i in line:
if i == currChar:
currCharCounter += 1
else:
currChar = i
currCharCounter = 1
if currCharCounter < limit + 1 or i not in compressThese:
ans += i
return ans
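# Illustrative behaviour sketch (not part of the original module): with limit=2,
# runs of compressible characters are capped at two repeats, e.g.
#
#     compress("heeeelloooo", 2)  # -> "heelloo"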
def copy_tokens(input_line, output_line, copy_marker):
input_line = input_line.split()
output_line = output_line.split()
new_output = []
for word in range(len(output_line)):
if input_line[word] == output_line[word]: new_output.append(copy_marker)
else: new_output.append(output_line[word])
return " ".join(new_output)
def preprocess(all_input_lines, all_output_lines, is_train, is_predict, alignment, copy_unchanged_tokens,
copy_marker, writing_system):
if is_train:
for line in range(len(all_input_lines)):
input_line = all_input_lines[line].strip()
output_line = all_output_lines[line].strip()
#convert unchanged words to a special character (default: #) to protect them in output
if is_train and copy_unchanged_tokens and alignment == "word":
output_line = copy_tokens(input_line, output_line, copy_marker)
#preprocessing
input_line = compress(input_line, 2)
input_line = input_line.lower()
input_line = removeAccents(input_line)
if alignment == "word":
if writing_system == "latin":
input_line, output_line = copyNonAscii(input_line, copy_marker, output_line, False)
input_line, output_line = copyTextEmojiAndPunctuation(input_line, copy_marker, output_line, False)
all_input_lines[line] = input_line
all_output_lines[line] = output_line
return all_input_lines, all_output_lines
if is_predict:
for line in range(len(all_input_lines)):
input_line = all_input_lines[line].strip()
#preprocessing
input_line = compress(input_line, 2)
input_line = input_line.lower()
input_line = removeAccents(input_line)
if alignment == "word":
if writing_system == "latin":
input_line = copyNonAscii(input_line, copy_marker, None, True)
input_line = copyTextEmojiAndPunctuation(input_line, copy_marker, None, True)
all_input_lines[line] = input_line
return all_input_lines
| 4,826 |
adding_more_strings.py
|
agolla0440/my_python_workbook
| 0 |
2023830
|
"""
In this program I will be adding multiple strings into one string.
"""
def add_strings(first, second, third, fourth, fifth, sixth, seventh):
value = first + " " + second + " " + third + " " + fourth + " " + fifth + " " + sixth + " " + seventh
return value
statement = add_strings("Jeff", "Bezos", "is", "the", "CEO", "of", "amazon")
print(statement, ".")
print("$" * 100000)
| 388 |
macaroon/macaroon/playback/__init__.py
|
javihernandez/accerciser-mirror
| 2 |
2024130
|
from sequence import *
from wait_actions import *
from keypress_actions import *
from sequence_step import *
| 109 |
bitcoin/TamanioHorarioBloques.py
|
JaimePerez89/Bitcoin
| 0 |
2022793
|
import datetime
from bitcoin import PlotColumns
def media_size_bloque_hr(block_data, representar='N'):
    '''
    Function that groups the size of each block by its start hour and prints
    the hourly mean size on screen.
    If requested via the "representar" parameter, the results are also shown
    graphically as a bar chart alongside the message.
    :param block_data: dictionary with the block information (result of running
    the reading function).
    :param representar: 'Y' the function shows the plot on screen together
    with the message.
    'N' (default value) the function does not show the plot
    together with the message
    :return: results (dict): dictionary with the results
    message on screen with info about the mean block size as a function
    of the start hour
    'A4_TamañoMedioBloques.png' in the project folder 'img'
    '''
    # Initialise two lists that will be used to plot the results
    hora_ejex = []
    tamanio_medio_y = []
    # Initialise a dictionary that will be returned as the result
    results = {}
    # Initialise a dictionary where the per-hour block sizes will be stored
media_bloque = {}
for i in block_data:
        # Get the block size
        size = block_data[i]['size']
        # Get the block start time
        time = block_data[i]['time']
        time = datetime.datetime.fromtimestamp(time)
        hora = time.hour
        # Insert the size into a list inside a dictionary whose key
        # identifies the hour.
        # To avoid creating every key of the dictionary in advance,
        # use a try-except to create the key when it does not exist yet.
        try:
            # If the key already exists, append the size to its list
            media_bloque[hora].append(size)
        except:
            # If the key does not exist, an error is raised and this code runs,
            # creating the key and starting a list with the size value
            media_bloque[hora] = [size]
    # Iterate over the results dictionary to print the mean of the sizes.
    # To get an ordered output, iterate hour by hour instead of over the keys.
    for j in range(24):
        # Get the mean value for that hour
        try:
            valor_medio = sum(media_bloque[j]) / len(media_bloque[j])
        except:
            # If there are no blocks for that hour, set the mean
            # value to zero
            valor_medio = 0
print("El valor medio de los tamaños para la hora {} es {}"
.format(j, valor_medio))
        # Besides printing the result, add the info to the lists that will
        # be used to plot the axes of the chart
        hora_ejex.append(j)
        tamanio_medio_y.append(valor_medio)
        # Add the info to a dictionary that will be returned as the result
        results[j] = valor_medio
    # Define the data for the x axis and the y axis
x = hora_ejex
y = tamanio_medio_y
titulo_x = 'Hora del día'
titulo_y = 'Tamaño medio de los bloques'
plot_title = 'TAMAÑO MEDIO DE LOS BLOQUES PARA CADA HORA'
    size = (20, 10)  # Parameter with the size of the resulting plot
plot_name = 'A4_TamañoMedioBloques.png'
    # Call the function that draws the plot
PlotColumns.plot_col_graph(x, y, titulo_x, titulo_y, plot_title,
size, plot_name, representar)
return results
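# Illustrative usage sketch (not part of the original module); `block_data` is
# assumed to be the block dictionary produced by the project's reading function:
#
#     results = media_size_bloque_hr(block_data, representar='Y')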
| 3,704 |
Todo/todo_app/models.py
|
LukaszRemkowicz/ToDo_Django
| 0 |
2023311
|
from django.db import models
from django.conf import settings
User = settings.AUTH_USER_MODEL
"""Profile, maybe for later use. Not used now"""
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
todo_link = models.CharField(max_length=100)
def __str__(self):
return self.user.username
class TodoList(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
shared = models.BooleanField(default=False)
class TodoDetails(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
text = models.CharField(max_length=30, null=True)
description = models.CharField(max_length=400, null=True)
link = models.CharField(max_length=400, null=True)
complete = models.BooleanField(default=False)
date_added = models.DateField(auto_now_add=True)
todo = models.ForeignKey(TodoList, on_delete=models.CASCADE)
def __str__(self):
return f'{self.todo}'
class SharedRelationModel(models.Model):
todo = models.ForeignKey(TodoList, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return f'{self.user} {self.todo}'
| 1,256 |
job_hunter_crawler/job_hunter/spiders/spider.py
|
HowardHowonYu/Crawling_project
| 0 |
2022821
|
import scrapy
import requests
from scrapy.http import TextResponse
import time
import datetime
from job_hunter.items import JobHunterItem
class Spider(scrapy.Spider):
name = "JobkoreaCrawler"
allow_domain = ["https://www.jobkorea.co.kr/"]
start_urls = []
    # Set up so that arguments can be passed in.
    # careerType=1 means entry-level. To also crawl experienced positions later, arguments can be added to the constructor.
def __init__(self, serach_keyword="데이터 분석", careerType=1, page=1, **kwargs):
self.start_urls = ["https://www.jobkorea.co.kr/Search/?stext={}&careerType={}&tabType=recruit&Page_No={}".format(serach_keyword,careerType,page)]
super().__init__(**kwargs)
    # Blocked even with a 5-second delay
    def parse(self, response):
        # JobKorea blocks the crawler's IP, so a delay was added.
time.sleep(30)
total_pages = int(response.xpath('//*[@id="content"]/div/div/div[1]/div/div[2]/div[2]/div/div[3]/ul/li[2]/span/text()')[0].extract())
for page in range(1, total_pages):
            # Swap only the last character of the string to build a URL for each of the total_pages pages and yield it to get_content().
page_url = self.start_urls[0][:-1]+"{}".format(page)
yield scrapy.Request(page_url, callback=self.get_content)
def get_content(self, response):
        # JobKorea blocks the crawler's IP, so a delay was added.
time.sleep(30)
links = response.xpath('//*[@id="content"]/div/div/div[1]/div/div[2]/div[2]/div/div[1]/ul/li/div/div[2]/a/@href').extract()
        # Build the links of the job postings on each page and yield them to get_details().
links = ["http://www.jobkorea.co.kr" + link for link in links if "gamejob.co.kr" not in link if "&siteCode=WN" not in link]
for link in links:
yield scrapy.Request(link, callback=self.get_details)
def get_details(self, response):
time.sleep(30)
item = JobHunterItem()
item['date'] = datetime.datetime.now()
item["company_name"] = response.xpath('//*[@id="container"]/section/div/article/div[1]/h3/span/text()')[0].extract().strip()
try:
item["deadline"] = str(datetime.datetime.now().year) + "." + response.xpath('//*[@id="tab02"]/div/article[1]/div/dl[2]/dd[2]/span/text()')[0].extract()[5:]
except:
item["deadline"] = "수시채용"
item['link'] = response.url
item["position"] = response.xpath('//*[@id="container"]/section/div/article/div[1]/h3/text()')[1].extract().strip()
item['location'] = ",".join(response.xpath('//*[@id="container"]/section/div/article/div[2]/div/dl/dd/a/text()').extract())
item["keyword"] = ", ".join(response.xpath('//*[@id="artKeywordSearch"]/ul/li/button/text()').extract())
for_select_salary_condition = " ".join(response.xpath('//*[@id="container"]/section/div/article/div[2]/div[2]/dl/dd/span[@class="tahoma"]/text()').extract()).strip().split(" ")[0]
if len(for_select_salary_condition) <= 2:
item["salary_condition"] = "회사 내규에 따름"
else :
item["salary_condition"] = for_select_salary_condition + "만원"
        # Go into the job posting link to fetch more detailed information about the business field.
        # An error occurs here; needs to be fixed.
# url = "http://www.jobkorea.co.kr" + response.xpath('//*/article[contains(@class, "artReadCoInfo") and contains(@class, "divReadBx")]/div/div/p/a/@href')[0].extract()
# req = requests.get(url)
# response_detail_page = TextResponse(req.url,body=req.text,encoding='utf-8')
item["business"] = response.xpath('//*[@id="container"]/section/div/article/div[2]/div[3]/dl/dd/text()')[0].extract().strip()
yield item
| 3,756 |
asset_app/admin.py
|
orlandofv/sianna
| 0 |
2022942
|
from django.contrib import admin
from .models import (Component, Company,
Maintenance, MaintenanceSchedule, Division, Branch, Position,
Group, System, Type, SubType, Vendor, Allocation)
class ComponentAdmin(admin.ModelAdmin):
list_display = ('name', 'manufacturer', 'stock_code', 'notes',)
class MaintenanceAdmin(admin.ModelAdmin):
list_display = ('frequency', 'schedule', 'type',
'time_allocated', 'action')
class MaintenanceScheduleAdmin(admin.ModelAdmin):
list_display = ('name', 'maintenance')
class CompanyAdmin(admin.ModelAdmin):
list_display = ('name', 'address', 'manager', 'contacts', 'notes',)
class DivisionAdmin(admin.ModelAdmin):
list_display = ('name', 'company', 'address', 'contacts', 'notes',)
class BranchAdmin(admin.ModelAdmin):
list_display = ('name', 'division', 'notes',)
class PositionAdmin(admin.ModelAdmin):
list_display = ('name', 'branch', 'notes',)
class GroupAdmin(admin.ModelAdmin):
list_display = ('name', 'notes',)
class SystemAdmin(admin.ModelAdmin):
list_display = ('name', 'group', 'notes',)
class TypeAdmin(admin.ModelAdmin):
list_display = ('name', 'system', 'notes',)
class SubtypeAdmin(admin.ModelAdmin):
list_display = ('name', 'type', 'notes',)
class AllocationAdmin(admin.ModelAdmin):
list_display = ('allocation_no', 'component', 'date_allocated','serial_number',
'status')
class VendorAdmin(admin.ModelAdmin):
list_display = ('name', 'notes')
admin.site.register(Component, ComponentAdmin)
admin.site.register(Company, CompanyAdmin)
admin.site.register(Division, DivisionAdmin)
admin.site.register(Branch, BranchAdmin)
admin.site.register(Position, PositionAdmin)
admin.site.register(Group, GroupAdmin)
admin.site.register(System, SystemAdmin)
admin.site.register(Type, TypeAdmin)
admin.site.register(SubType, SubtypeAdmin)
admin.site.register(Allocation, AllocationAdmin)
admin.site.register(Maintenance, MaintenanceAdmin)
admin.site.register(MaintenanceSchedule, MaintenanceScheduleAdmin)
admin.site.register(Vendor, VendorAdmin)
| 2,075 |
DarkPrior.py
|
Avashist1998/Morning_view_mode
| 1 |
2024215
|
import cv2
import numpy as np
from utils import read_image, rgb_min_image, min_filter
class DarkPrior():
def __init__(self, epsilon=10**-8):
self.epsilon = epsilon
def dark_channel(self, image):
# output the dark channel as the image
new_image = image.copy()
# perfroming the 15 x 15 min filter
min_image = min_filter(new_image)
# perfroming the color min operation
dark_prior = rgb_min_image(min_image)
return dark_prior
def transmission_map(self, image,A,w):
#finds the transmission map for the image
image_new = np.divide(image,A).astype(float)
# finding the dark channel of the divide image
new_dark = self.dark_channel(image_new)
# Saling and subtracting the image
transmission = 1 - w*new_dark
return transmission
def A_estimator(self, image,dark_prior):
#Used the information extracted from the dark prior
#find a value for A
image_copy = image.copy()
[row,col,dem] = image_copy.shape
dark_copy = dark_prior.copy()
# finding the number of 0.01% values
num = np.round(row*col*0.001).astype(int)
        # take the flat indices (not the values) of the brightest dark-channel pixels
        j = np.argsort(np.asarray(dark_copy).reshape(-1))[::-1][:num]
# getting the location of the top 0.01%
ind = np.unravel_index(j[0], dark_copy.shape)
# Pefroming a search for the max value in the group
max_val = image_copy[ind[0],ind[1],:]
for element in j:
ind = np.unravel_index(element, dark_copy.shape)
if (sum(max_val[:]) < sum(image_copy[ind[0],ind[1],:])):
max_val[:] = image_copy[ind[0],ind[1],:]
# creating a color image of the max value
A = image_copy
A[:,:,:] = max_val[:]
return A
def Radience_cal(self, image,A,Transmission_map,t_not):
#Used information from the transmit map to remove haze from the image.
image_copy = image.copy()
Transmission_map_copy = (Transmission_map.copy()).astype(float)
# Pefroming the min operation between Ttransmission map and 0.1
divisor = np.maximum(Transmission_map_copy,t_not)
radience = (image.copy()).astype(float)
# Perfroming the eqution 3 for every color channel
for i in range(3):
radience[:,:,i] = np.divide(((image_copy[:,:,i]).astype(float) - A[0,0,i]),divisor) + A[0,0,i]
# Capping all of the out of bound values
#radience = radience - np.min(radience)
#radience = 255*(radience/np.max(radience))
radience[radience>255]=255
radience[radience<0]=0
return radience.astype('uint8')
def guided_filter(self, image,guide,diameter,epsilon):
w_size = diameter+1
# Exatrcation the mean of the image by blurring
meanI=cv2.blur(image,(w_size,w_size))
mean_Guide=cv2.blur(guide,(w_size,w_size))
# Extracting the auto correlation
II=image**2
corrI=cv2.blur(II,(w_size,w_size))
# Finding the correlation between image and guide
I_guide=image*guide
corrIG=cv2.blur(I_guide,(w_size,w_size))
# using the mean of the image to find the variance of each point
varI=corrI-meanI**2
covIG=corrIG-meanI*mean_Guide
#covIG normalized with a epsilon factor
a=covIG/(varI+epsilon)
#a is used to find the b
b=mean_Guide-a*meanI
meanA=cv2.blur(a,(w_size,w_size))
meanB=cv2.blur(b,(w_size,w_size))
# using the mean of a b to fix refine the transmission map
transmission_rate=meanA*image+meanB
# normalizaing of the transimational map
transmission_rate = transmission_rate/np.max(transmission_rate)
return transmission_rate
    def Haze_Remover(self, path=None, image=None):
        '''
        This function is used to dehaze an image from an image path or from a cv2 image object
        '''
        if path is None and image is None:
            print("Neither a path nor an image was passed to the function. Please provide an image or a path to the model")
            return None
        else:
            if image is None:
                image = read_image(path)
            min_image = min_filter(image)
            dark_prior = rgb_min_image(min_image)
            A = self.A_estimator(image, dark_prior)
            img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            Transmition_image = self.transmission_map(image, A, 0.95)
            refine_Transmission_image = self.guided_filter(img_gray.astype(np.float32), Transmition_image.astype(np.float32), 100, self.epsilon)
            refine_radience_image = self.Radience_cal(image, A, refine_Transmission_image, 0.1)
            self.output = {'Input': image, 'Min_Image': min_image, 'A': A, 'Gray_Image': img_gray,
                           'Transmition_Map': Transmition_image, 'Refine_Transmition_Map': refine_Transmission_image,
                           'DeHaze_Image': refine_radience_image}
            return self.output
def Save_image(self, path='output.jpg', key='DeHaze_Image'):
'''
Input is path/filename
Key is the file you want to save
key = [Input, Min_Image, A, Gray_Image, Transmition_Map, Refine_Transmition_Map, DeHaze_Image]
saves the image to the path
'''
        cv2.imwrite(path, self.output[key])
        print('file name {} has been saved'.format(path))
| 5,450 |
src/agent/k8s-rest-agent/src/api/models/user.py
|
tianxuanhong/cello
| 865 |
2024153
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.db.models.signals import post_save
from api.utils.db_functions import make_uuid
class User(AbstractUser):
roles = []
id = models.UUIDField(
primary_key=True,
help_text="ID of user",
default=make_uuid,
editable=True,
)
username = models.CharField(default="", max_length=128, unique=True)
def __str__(self):
return self.username
class Profile(models.Model):
user = models.OneToOneField(
User, related_name="profile", on_delete=models.CASCADE
)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "%s's profile" % self.user
class Meta:
ordering = ("-created_at",)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
# Create your models here.
| 1,001 |
spirl/data/block_stacking/src/robosuite/__init__.py
|
kouroshHakha/fist
| 8 |
2024120
|
import os
from spirl.data.block_stacking.src.robosuite.environments.base import make
from spirl.data.block_stacking.src.robosuite.environments.sawyer_lift import SawyerLift
from spirl.data.block_stacking.src.robosuite.environments.sawyer_stack import SawyerStack
from spirl.data.block_stacking.src.robosuite.environments.sawyer_pick_place import SawyerPickPlace
from spirl.data.block_stacking.src.robosuite.environments.sawyer_nut_assembly import SawyerNutAssembly
from spirl.data.block_stacking.src.robosuite.environments.baxter_lift import BaxterLift
from spirl.data.block_stacking.src.robosuite.environments.baxter_peg_in_hole import BaxterPegInHole
from spirl.data.block_stacking.src.robosuite.environments.baxter_modified import BaxterChange
__version__ = "0.3.0"
__logo__ = """
; / ,--.
["] ["] ,< |__**|
/[_]\ [~]\/ |// |
] [ OOO /o|__|
"""
| 898 |
kornia/augmentation/_2d/__init__.py
|
Ishticode/kornia
| 418 |
2023289
|
from kornia.augmentation._2d.geometric import *
from kornia.augmentation._2d.intensity import *
from kornia.augmentation._2d.mix import *
| 138 |
workers/admin.py
|
addiebarron/django-workers
| 18 |
2023708
|
from django.contrib import admin
from .models import Task
@admin.register(Task)
class TaskAdmin(admin.ModelAdmin):
list_display = ('handler', 'run_at', 'schedule', 'status')
list_filter = ('status',)
| 211 |
tests/unit/core/adapters/test_cmd_runner.py
|
meaningfy-ws/ted-sws
| 1 |
2024144
|
#!/usr/bin/python3
"""
"""
from ted_sws.core.adapters.cmd_runner import CmdRunner
def test_cmd_runner(caplog):
cmd_runner = CmdRunner(name="TEST_CMD_RUNNER")
cmd_runner.run()
assert "CMD :: BEGIN" in caplog.text
assert "CMD :: END" in caplog.text
| 267 |
src/python/dart/engine/redshift/actions/stop_datastore.py
|
RetailMeNotSandbox/dart
| 18 |
2024203
|
import logging
from dart.engine.redshift.admin.cluster import RedshiftCluster
_logger = logging.getLogger(__name__)
def stop_datastore(redshift_engine, datastore, action):
"""
:type redshift_engine: dart.engine.redshift.redshift.RedshiftEngine
:type datastore: dart.model.datastore.Datastore
:type action: dart.model.action.Action
"""
cluster = RedshiftCluster(redshift_engine, datastore)
cluster.stop_cluster()
action = redshift_engine.dart.patch_action(action, progress=.1)
cluster.wait_for_cluster_deleted()
redshift_engine.dart.patch_action(action, progress=1)
| 609 |
ibsec/__main__.py
|
dissolved/ibsec
| 0 |
2022944
|
from fire import Fire
from .ibsec import lookup
def main():
Fire(lookup)
if __name__ == '__main__':
main()
| 117 |
src/arch/x86/isa/insts/simd512/floating_point/arithmetic/vsubps.py
|
jyhuang91/gem5-avx
| 2 |
2023236
|
microcode = '''
def macroop VSUBPS_XMM_XMM {
vsubf dest=xmm0, src1=xmm0v, src2=xmm0m, size=4, VL=16
vclear dest=xmm2, destVL=16
};
def macroop VSUBPS_XMM_M {
ldfp128 ufp1, seg, sib, "DISPLACEMENT + 0", dataSize=16
vsubf dest=xmm0, src1=xmm0v, src2=ufp1, size=4, VL=16
vclear dest=xmm2, destVL=16
};
def macroop VSUBPS_XMM_P {
rdip t7
ldfp128 ufp1, seg, riprel, "DISPLACEMENT + 0", dataSize=16
vsubf dest=xmm0, src1=xmm0v, src2=ufp1, size=4, VL=16
vclear dest=xmm2, destVL=16
};
def macroop VSUBPS_YMM_YMM {
vsubf dest=xmm0, src1=xmm0v, src2=xmm0m, size=4, VL=32
vclear dest=xmm4, destVL=32
};
def macroop VSUBPS_YMM_M {
ldfp256 ufp1, seg, sib, "DISPLACEMENT + 0", dataSize=32
vsubf dest=xmm0, src1=xmm0v, src2=ufp1, size=4, VL=32
vclear dest=xmm4, destVL=32
};
def macroop VSUBPS_YMM_P {
rdip t7
ldfp256 ufp1, seg, riprel, "DISPLACEMENT + 0", dataSize=32
vsubf dest=xmm0, src1=xmm0v, src2=ufp1, size=4, VL=32
vclear dest=xmm4, destVL=32
};
def macroop VSUBPS_ZMM_ZMM {
vsubf dest=xmm0, src1=xmm0v, src2=xmm0m, size=4, VL=64
};
def macroop VSUBPS_ZMM_M {
ldfp512 ufp1, seg, sib, "DISPLACEMENT + 0", dataSize=64
vsubf dest=xmm0, src1=xmm0v, src2=ufp1, size=4, VL=64
};
def macroop VSUBPS_ZMM_P {
rdip t7
ldfp512 ufp1, seg, riprel, "DISPLACEMENT + 0", dataSize=64
vsubf dest=xmm0, src1=xmm0v, src2=ufp1, size=4, VL=64
};
'''
| 1,428 |
Interface/modules.py
|
kamilbaskut/Midterm-Project
| 0 |
2023928
|
import cv2
import tensorflow as tf
import os
import numpy as np
import matplotlib.pyplot as plt
import skimage.io as io
from skimage.transform import resize
from skimage.io import imread
import keras
from keras.metrics import categorical_crossentropy
from keras.models import model_from_json
from keras.models import load_model
from keras.optimizers import SGD, Adam, Adamax
from keras.preprocessing import image
# figure out which optimizer works best for the given model so it can be compiled
def test_model(dataset, model, directory):
if(len(directory) > 0):
img_size = 0
classes = []
opt = ''
        # The image size is chosen based on the dataset
        if(dataset == 'HuSHeM'):
            img_size = 131
            classes = ['Normal', 'Tapered', 'Pyriform', 'Amorphous']
            if(model == 'GoogleNet'): # for case 1
                opt = 'sgd'
            elif(model == 'MobileNet'): # for case 2
                opt = 'adamax'
            else: # for case 3
                opt = 'adam'
        else:
            img_size = 170
            classes = ['Abnormal', 'Boya', 'Sperm']
            if(model == 'GoogleNet'): # for case 4
                opt = 'adamax'
            elif(model == 'MobileNet'): # for case 5
                opt = 'sgd'
            else: # for case 6
                opt = 'adamax'
        # Load the JSON file
json_file = open(dataset+model+'.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
        # Build the model
        loaded_model = model_from_json(loaded_model_json)
        # Load the model weights
        loaded_model.load_weights(dataset+model+'.h5')
        # Compile the new model
loaded_model.compile(loss="categorical_crossentropy",
optimizer=opt, metrics=['accuracy'])
# Resim okunuyor...
img = io.imread(directory)
img = resize(img, (img_size, img_size, 3), anti_aliasing=True)
image = np.asarray(img)
image = np.expand_dims(image, axis=0)
        # Run the model on the image
        pred = loaded_model.predict(image)
        # Class with the highest score
        y_pred = np.argmax(pred)
        # Reshape into the form pred[count]
        pred.shape = (len(classes), 1)
        # Convert to text together with the class labels
        text = ""
x = 0.0
for n in range(len(pred)):
x += pred[n]
for n in range(len(pred)):
text += (classes[n]+str((pred[n]/x)*100)+'%\n')
return text, classes[y_pred]
else:
return "Lütfen image yolunu belirtiniz", "Null"
| 2,655 |
corescrape/threads/corescrape_thread.py
|
anewmanvs/corescrape
| 0 |
2024159
|
"""
Core Scrape Threading
Thread control for this package.
"""
import signal
from warnings import warn
from queue import Queue
from threading import Thread
from . import corescrape_event
from core import CoreScrape
from core.exceptions import CoreScrapeTimeout
# pylint: disable=invalid-name, too-few-public-methods, multiple-statements
# pylint: disable=bare-except, too-many-arguments, too-many-instance-attributes
def alarm_handler(signum, frame):
"""Handles the alarm."""
raise CoreScrapeTimeout
class CoreScrapeThread(CoreScrape):
"""
Core Scrape Thread.
Uses multiples threads to request pages and parse its content.
A valid rotator must be passed to produce each request using a new proxy
and make it less likely to be red flagged as a bot or scrapper by internet
service providers. The user could pass a parser (CoreScrape class or custom
class with a 'parse' method) to parse the response and avoid having the need
to store the whole page for postprocessing.
This controller also gives the user the option to set up a timer, in seconds,
to raise a timeout. The timer is set if the user provided an integer to param
'timeout' during 'start_threads' method processing. The timer is unset in
'wait_for_threads' method.
Params:
nthreads: int. Desired number of threads. Once the method 'start_threads' is
called, the controller will try to split the given input into chunks of
number 'nthreads'. If it is not possible to split in 'nthreads' chunks,
then the actual number of threads is available in 'actualnthreads'.
rotator: corescrape.proxy.Rotator (preferably). Uses this rotator to make
requests using different proxies and user agents. There is always the
possibility to pass the 'requests' module to this parameter, but that is
not advised as the control of proxies and user-agents is not automatic.
parser: corescrape.pgparser.SimpleParser, based on or None. Uses this to
parse the page content and extract the useful information, making it
less memory expensive. If no argument is given, the thread controller
will return a list of the full pages collected.
timeout: int or None. Time in seconds to configure the timeout process.
Set up a timer to raise an event and stop the threads once the time is
reached.
logoperator: corescrape.logs.LogOperator or None. Log to be fed with process
runtime information.
"""
def __init__(self, nthreads, rotator, parser=None, timeout=None,
logoperator=None):
"""Constructor."""
if timeout is not None and not isinstance(timeout, int):
raise TypeError("Param. 'timeout' must be 'int' or 'NoneType'")
# inputs
self.nthreads = nthreads
self.actualnthreads = nthreads
self.rotator = rotator
self.parser = parser
self.timeout = timeout # CAREFUL! This is not timeout for requests
self.timeoutset = False
# control attrs
self.queue = Queue()
self.event = corescrape_event.CoreScrapeEvent(logoperator=logoperator)
self.threads = []
super().__init__(logoperator=logoperator)
def __split(self, a):
"""
Tries to split the input into chunks for each thread.
Input must be a list.
"""
if not isinstance(a, list):
raise TypeError("Param 'a' must be 'list'")
n = self.nthreads # desired number of threads
k, m = divmod(len(a), n)
split = [a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n)]
split = [part for part in split if part] # drops empty chunks
# actual number of threads. Sometimes differs from 'nthreads'
self.actualnthreads = len(split)
return split
def __warn_wait_threads(self):
"""Produce warning to wait for threads if needed."""
if self.threads:
warn(
'There are threads running. Wait for them to stop before calling '
'this method'
)
return True
return False
def __set_timeout(self):
"""
If seconds for timeout were informed in the constructor, will set an alarm
for timeout. Once timeout is reached, the iteration is broken and return
as expected.
"""
if self.timeout:
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(self.timeout)
self.log('CoreScrapeThread set the timeout for {} seconds.'.format(
self.timeout), tmsg='info')
self.timeoutset = True
def __disarm_timeout(self):
"""Turn off the timeout."""
if self.timeoutset:
self.timeoutset = False
signal.alarm(0)
self.log('CoreScrapeThread disarmed the timeout.', tmsg='info')
def __check_am_i_the_last(self):
"""Check if this thread is the last and if it should set an event."""
condition = self.queue.qsize() + 1 >= self.actualnthreads
condition = condition and self.event.state.is_EXECUTING()
if condition:
self.event.state.set_DUTY_FREE()
def __iterate(self, threadid, data, *args):
"""Do iterations in threads, each one calling the passed code."""
# pylint: disable=unused-argument
self.log('Starting iteration in threadid {} for {} items'.format(
threadid, len(data)))
res = []
for url in data:
# the reason here does not matter. If it is set, break out
if self.event.is_set(): break
try:
page = self.rotator.request(url, self.event, threadid=threadid)
except:
self.event.state.set_ABORT_THREAD()
break
if page is None: continue # not able to retrieve the page
if self.parser is None:
res.append(page)
self.log('Storing whole response for {}. Thread {}'.format(
url, threadid))
elif page.status_code == 404:
self.log('URL {} returned a 404. Thread {}'.format(url, threadid),
tmsg='warning')
res.append({url: None}) # points it was collected but useless
else:
_res = self.parser.parse(page, threadid=threadid)
if not _res:
self.log('URL {} could not be parsed. Thread {}'.format(
url, threadid))
continue # no info collected, must go on
self.log('URL {} collected. Thread {}'.format(url, threadid),
tmsg='header')
res.append({url: _res})
self.__check_am_i_the_last()
return res
def start_threads(self, to_split_params, *fixed_args):
"""Starts threads."""
def test_if_urls(p):
return [a.startswith('http://') or a.startswith('https://') for a in p]
# pylint: disable=no-value-for-parameter
abort = self.__warn_wait_threads()
if abort:
return False
if not all(test_if_urls(to_split_params)):
raise ValueError('List of strings must begin with protocol')
self.log('Starting threads for {} items'.format(len(to_split_params)))
self.threads = []
self.event.state.set_EXECUTING()
for threadid, split in enumerate(self.__split(to_split_params)):
pargs = (threadid, split, *fixed_args)
thread = Thread(
target=lambda q, *args: q.put(self.__iterate(*args)),
args=(self.queue, *pargs)
)
thread.start()
self.threads.append(thread)
self.__set_timeout()
return True
def wait_for_threads(self):
"""Wait lock for threads."""
try:
self.event.wait()
except KeyboardInterrupt:
self.event.state.set_ABORT_USER()
except CoreScrapeTimeout:
self.event.state.set_TIMEOUT()
finally:
self.__disarm_timeout()
for thread in self.threads:
thread.join()
self.event.clear()
self.threads = []
def join_responses(self):
"""Join responses from the threads."""
abort = self.__warn_wait_threads()
if abort:
return []
res = []
while not self.queue.empty():
res += self.queue.get()
return res
def is_sentenced(self):
"""
Informs if the thread controller is sentenced due to the last event state.
"""
sentenced = self.event.state.is_sentenced()
if sentenced:
self.event.state.set_FINISHED()
return sentenced
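# --- Hedged usage sketch (not part of the original module) ---
# The Rotator / SimpleParser constructors below are assumptions taken from the class
# docstring (corescrape.proxy.Rotator, corescrape.pgparser.SimpleParser); only the
# CoreScrapeThread methods shown are defined in this file.
#
#   rotator = Rotator(...)                      # configured proxies / user agents
#   controller = CoreScrapeThread(nthreads=8, rotator=rotator,
#                                 parser=SimpleParser(), timeout=600)
#   urls = ['https://example.com/a', 'https://example.com/b']
#   if controller.start_threads(urls):
#       controller.wait_for_threads()           # blocks until duty-free, abort or timeout
#       pages_or_parsed = controller.join_responses()
#       stop = controller.is_sentenced()        # True if the run should not continue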
| 8,973 |
src/main.py
|
thealah/imdb-import
| 0 |
2022900
|
#!/usr/bin/python3
import os
import logging
import psycopg2
import psycopg2.extras
import schema
from relation_type import RelationType
import sys
import configargparse
from contextlib import contextmanager
from pathlib import Path
from batch_iterator import batch_iterator
# Configure loging
root = logging.getLogger()
root.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(message)s - %(name)s - %(levelname)s', "%m-%d %H:%M:%S")
ch.setFormatter(formatter)
root.addHandler(ch)
logger = logging.getLogger(__name__)
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
def configure():
config_dir = os.environ["NOMAD_TASK_DIR"] if "NOMAD_TASK_DIR" in os.environ else ".."
config_path = str(Path(config_dir) / "config.yaml")
parser = configargparse.ArgumentParser(default_config_files=[config_path], description="Get data for config")
parser.add_argument("--rds_server", env_var="RDS_SERVER", type=str, help="RDS Server", required=True)
parser.add_argument("--rds_database", env_var="RDS_DATABASE", type=str, help="RDS Database", required=True)
parser.add_argument("--rds_user", env_var="RDS_USER", type=str, help="RDS User", required=True)
parser.add_argument("--rds_password", env_var="RDS_PASSWORD", type=str, help="RDS Password", required=True)
parser.add_argument("--ingestion_type", env_var="INGESTION_TYPE", type=str, help="Ingestion Type", required=True)
args, unknown = parser.parse_known_args()
if unknown:
logger.info("received unknown arguments " + unknown)
return args
@contextmanager
def open_cursor(rds_server, rds_database, rds_user, rds_password, readonly=True):
connection = psycopg2.connect(dbname=rds_database, user=rds_user, password=<PASSWORD>, host=rds_server)
connection.set_session(readonly=readonly, autocommit=True)
cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
yield cursor
cursor.close()
connection.close()
def iterate_over_file(filename, read_line_function):
with open(filename) as file_handle:
i = 0
for line in file_handle:
if i > 0:
yield read_line_function(line)
i += 1
def iterate_over_principals():
for titlePrincipals in iterate_over_file("title.principals.tsv", schema.read_title_principals_line):
for name_id in titlePrincipals["nameIds"]:
name_id = name_id.strip()
title_id = titlePrincipals["titleId"].strip()
yield {
"nameId": name_id,
"titleId": title_id,
"relationType": RelationType.PRINCIPAL.value
}
def main(
rds_server,
rds_database,
rds_user,
rds_password,
ingestion_type
):
ingestion_type = ingestion_type.lower()
logger.info(ingestion_type)
with open_cursor(rds_server, rds_database, rds_user, rds_password, readonly=False) as cursor:
total = 0
def title_exists(title):
return title["titleId"] in title_ids
if ingestion_type == "titles":
for titles in batch_iterator(iterate_over_file("title.basics.tsv", schema.read_title_line)):
schema.store_titles(cursor, titles)
total += len(titles)
if total % 100000 == 0:
print("Titles inserted: " + str(total))
return
if ingestion_type == "ratings":
cursor.execute("truncate imdb.titleRatingsIngestion")
for ratings in batch_iterator(iterate_over_file("title.ratings.tsv", schema.read_title_ratings_line)):
schema.store_title_ratings_ingestion(cursor, ratings)
for i in range(100):
logger.info("Running title ratings ingestion insert partition :" + str(i))
cursor.execute("""
insert into imdb.titleRatings (titleId, averageRating, numVotes)
select i.titleId, i.averageRating, i.numVotes
from imdb.titleRatingsIngestion i
inner join imdb.titleBasics tb
on tb.titleId = i.titleId
where i.numVotes %% 100 = %(iterator_i)s
ON CONFLICT (titleId)
DO UPDATE SET
averageRating = excluded.averageRating,
numVotes = excluded.numVotes;
""", {
"iterator_i": i
})
cursor.execute("truncate imdb.titleRatingsIngestion")
return
if ingestion_type == "names":
cursor.execute("truncate imdb.titleNameIngestion")
for names in batch_iterator(iterate_over_file("name.basics.tsv", schema.read_name_line)):
schema.store_names(cursor, names)
known_for_title_ids = [{
"titleId": known_for_title.strip(),
"nameId": name["nameId"],
"relationType": RelationType.KNOWN_FOR.value
} for name in names for known_for_title in name["knownForTitles"]]
for name_title_ids in batch_iterator(known_for_title_ids):
schema.store_title_names_ingestion(cursor, name_title_ids)
total += len(names)
if total % 100000 == 0:
print("Names inserted: " + str(total))
for i in range(1000):
logger.info("Running title name ingestion insert partition :" + str(i))
cursor.execute("""
insert into imdb.titleName (titleId, nameId, relationType)
select i.titleId, i.nameId, i.relationType
from imdb.titleNameIngestion i
inner join imdb.titleBasics tb
on tb.titleId = i.titleId
inner join imdb.nameBasics nb
on nb.nameId = i.nameId
where i.id %% 1000 = %(iterator_i)s
ON CONFLICT (titleId, nameId, relationType) DO NOTHING;
""", {
"iterator_i": i
})
cursor.execute("truncate imdb.titleNameIngestion")
return
if ingestion_type == "principals":
cursor.execute("truncate imdb.titleNameIngestion")
for title_principals in batch_iterator(iterate_over_principals()):
schema.store_title_names_ingestion(cursor, title_principals)
for i in range(1000):
logger.info("Running title name ingestion insert partition :" + str(i))
cursor.execute("""
insert into imdb.titleName (titleId, nameId, relationType)
select i.titleId, i.nameId, i.relationType
from imdb.titleNameIngestion i
inner join imdb.titleBasics tb
on tb.titleId = i.titleId
inner join imdb.nameBasics nb
on nb.nameId = i.nameId
where i.id %% 1000 = %(iterator_i)s
ON CONFLICT (titleId, nameId, relationType) DO NOTHING;
""", {
"iterator_i": i
})
cursor.execute("truncate imdb.titleNameIngestion")
if __name__ == "__main__":
main(**configure().__dict__)
| 7,584 |
tools/gen_region_bin.py
|
JadeArkadian/reshade
| 2 |
2024033
|
import urllib.request, json
f_out = open("region.bin", "wb")
url = "https://api.guildwars2.com/v2/maps"
response = urllib.request.urlopen(url)
data = json.loads(response.read())
barray = [0]*(data[-1] + 1)
print("Size : " + str(len(barray)))
for id in data:
print("%.2f" % ((id/len(barray))*100) + "%", end="\r")
map_url = url + "/" + str(id)
map_response = urllib.request.urlopen(map_url)
map_data = json.loads(map_response.read())
if "region_id" not in map_data:
continue
barray[id] = map_data["region_id"]
f_out.write(bytearray(barray))
f_out.close()
print("Done.")
| 603 |
routes/router.py
|
harlericho/python_api_programming
| 1 |
2024077
|
from flask import Blueprint, request, redirect, jsonify
from flask.helpers import flash
from utils.db import db
from models.programming import Programming
programming = Blueprint('router', __name__)
@programming.route('/')
def index():
try:
data = Programming.query.all()
list = []
for i in data:
file = {
'id': i.id,
'names': i.names,
'description': i.description,
}
list.append(file)
return jsonify({'programming': list})
except Exception as e:
return jsonify({'error': str(e)})
@programming.route('/<int:id>', methods=['GET'])
def get(id):
try:
data = Programming.query.get(id)
if data != None:
file = {
'id': data.id,
'names': data.names,
'description': data.description,
}
return jsonify({'programming': file})
else:
return jsonify({'error': 'Programming not found'})
except Exception as e:
return jsonify({'error': str(e)})
@programming.route('/', methods=['POST'])
def create():
try:
json = request.get_json()
if request.method == 'POST':
names = json['names']
description = json['description']
data = Programming(names, description)
db.session.add(data)
db.session.commit()
return jsonify({'programming': 'Programming created'})
except Exception as e:
return jsonify({'error': str(e)})
@programming.route('/<int:id>', methods=['PUT'])
def update(id):
try:
file = Programming.query.get(id)
if file != None:
json = request.get_json()
if request.method == 'PUT':
file.names = json['names']
file.description = json['description']
db.session.commit()
return jsonify({'programming': 'Programming updated'})
else:
return jsonify({'error': 'Programming not found'})
except Exception as e:
return jsonify({'error': str(e)})
@programming.route('/<int:id>', methods=['DELETE'])
def delete(id):
try:
file = Programming.query.get(id)
if file != None:
db.session.delete(file)
db.session.commit()
return jsonify({'programming': 'Programming deleted'})
else:
return jsonify({'error': 'Programming not found'})
except Exception as e:
return jsonify({'error': str(e)})
| 2,565 |
_version.py
|
Slater-Victoroff/pyjaco
| 0 |
2023258
|
from os.path import join, dirname
from platform import system, version
from re import findall, match, IGNORECASE
from subprocess import Popen, PIPE
version_file = join(dirname(__file__),'VERSION')
class GITExecutable(object):
"""
Attempts to find git.
Returns the best guess
at the path for a git
executable.
"""
def __new__(self):
cmd = 'which'
if system() == 'Windows':
try:
major, minor = [int(x) for x in version().split('.',2)[0:2]]
# version number information
# http://msdn.microsoft.com/en-us/library/ms724834%28v=VS.85%29.aspx
# (6, 0) is Vista or Windows Server 2008
# (6, 1) is Windows 7 or Server 2008 RC2
if (major, minor) > (6, 0):
# Windows 7 brings the
# where command with
# functionality similar
# to which on unix.
cmd = 'where'
except:
pass
try:
p = Popen([cmd, 'git'], stdout=PIPE, stderr=PIPE)
p.stderr.close()
path = p.stdout.next().strip()
except:
path = None
return path or 'git'
GIT_EXECUTABLE = GITExecutable()
def get_version():
file_version = get_file_version()
version = get_repo_version() or file_version
if version is None:
raise ValueError('Could not determine version.')
if version != file_version:
put_file_version(version)
return version
def get_file_version():
try:
with open(version_file, 'rb') as fp:
version = fp.next().strip()
except:
version = None
return version
def put_file_version(version):
with open(version_file, 'wb') as fp:
fp.write(version)
def get_repo_version():
"""
Repo tags are assumed to be in the format:
Major.Minor
Example:
0.1
Function returns a version string of the form:
Major.Minor.Patch+CommitHash
Example:
0.1.1+c58ec0d
"""
try:
p = Popen([GIT_EXECUTABLE, 'describe'], stdout=PIPE, stderr=PIPE)
p.stderr.close()
parts = findall('[a-zA-Z0-9]+',p.stdout.next().strip())
if parts:
version = "%s.%s" % (parts[0],parts[1])
if len(parts) > 2:
version = "%s.%s+%s" % (version,parts[2],parts[3][1:])
else:
version = "%s.0" % version
else:
raise ValueError('git describe did not return a valid version string.')
except:
version = None
return version
def parse_version(version):
"""
input version string of the form:
'Major.Minor.Patch+CommitHash'
like:
'0.1.5+95ffef4'
------ or ------
'0.1.0'
returns version_info tuple of the form:
(major,minor,patch,hash)
like:
(0, 1, 5, '95ffef4')
-------- or --------
(0, 1, 0, '')
"""
matches = match(
        '(?P<major>[0-9]+)\.(?P<minor>[0-9]+)\.(?P<patch>[0-9]+)([+g](?P<hash>[a-z0-9]*))?',
version,
IGNORECASE
)
if matches:
major = int(matches.group('major'))
minor = int(matches.group('minor'))
patch = int(matches.group('patch'))
hash = matches.group('hash') or ''
return (major,minor,patch,hash)
else:
raise ValueError("Version string, '%s' could not be parsed. It should be of the form: 'Major.Minor.Patch+CommitHash'." % version)
if __name__ == '__main__':
print get_version()
| 3,736 |
http_proxy.py
|
darkless456/Python
| 0 |
2024070
|
# http_proxy.py
import socket
import _thread # thread
import urllib.parse # urlparse
import select
BUFLEN = 8192
class Proxy(object):
def __init__(self,conn,addr):
self.source = conn
self.request = ""
self.headers = {}
self.destnation = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.run()
def get_headers(self):
header = ''
while True:
            header += self.source.recv(BUFLEN).decode()
index = header.find('\n')
if index > 0:
break
firstLine = header[:index]
self.request = header[index+1:]
self.headers['method'],self.headers['path'],self.headers['protocol'] = firstLine.split()
def conn_destnation(self):
url = urllib.parse.urlparse(self.headers['path'])
hostname = url[1]
port = '80'
        if hostname.find(':') > 0:
            addr, port = hostname.split(':')
        else:
            addr = hostname
port = int(port)
ip = socket.gethostbyname(addr)
print(ip,port)
self.destnation.connect((ip,port))
        data = '%s %s %s\r\n' % (self.headers['method'], self.headers['path'], self.headers['protocol'])
        self.destnation.send((data + self.request).encode())
        print(data + self.request)
def renderto(self):
readsocket = [self.destnation]
while True:
data = ''
(rlist,wlist,elist) = select.select(readsocket,[],[],3)
if rlist:
data = rlist[0].recv(BUFLEN)
if len(data) > 0:
self.source.send(data)
else:
break
def run(self):
self.get_headers()
self.conn_destnation()
self.renderto()
class Server(object):
def __init__(self,host,port,handler = Proxy):
self.host = host
self.port = port
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((host, port))
self.server.listen(5)
self.handler = handler
def start(self):
while True:
try:
conn, addr = self.server.accept()
_thread.start_new_thread(self.handler, (conn, addr))
except:
pass
if __name__ == '__main__':
s = Server('127.0.0.1', 8080)
s.start()
| 2,424 |
document/signals.py
|
Zarah-Project/zarah-db-api
| 0 |
2023756
|
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from document.indexer import DocumentIndexer
from document.models import Document
@receiver([post_save], sender=Document)
def do_index(sender, instance, **kwargs):
indexer = DocumentIndexer(instance)
indexer.index()
@receiver([pre_delete], sender=Document)
def remove_index(sender, instance, **kwargs):
indexer = DocumentIndexer(instance)
indexer.remove_record()
| 480 |
upho/phonon/sf_fitter.py
|
sheikhahnaf/upho
| 0 |
2023498
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import h5py
import numpy as np
from scipy.optimize import curve_fit
from upho.analysis.functions import FittingFunctionFactory
from upho.irreps.irreps import extract_degeneracy_from_ir_label
__author__ = '<NAME>'
class SFFitter(object):
def __init__(self, filename='sf.hdf5', name='gaussian'):
self._name = name
with h5py.File(filename, 'r') as f:
self._band_data = f
self._run()
def _run(self):
band_data = self._band_data
npaths, npoints = band_data['paths'].shape[:2]
frequencies = band_data['frequencies']
frequencies = np.array(frequencies)
self._is_squared = np.array(band_data['is_squared'])
filename_sf = 'sf_fit.hdf5'
with h5py.File(filename_sf, 'w') as f:
self.print_header(f)
for ipath in range(npaths):
for ip in range(npoints):
print(ipath, ip)
group = '{}/{}/'.format(ipath, ip)
peak_positions, widths, norms, fiterrs, sf_fittings = (
self._fit_spectral_functions(
frequencies,
point_data=band_data[group],
)
)
self._write(f, group, peak_positions, widths, norms, fiterrs, sf_fittings)
def _fit_spectral_functions(self, frequencies, point_data, prec=1e-6):
partial_sf_s = point_data['partial_sf_s']
num_irreps = np.array(point_data['num_irreps'])
dfreq = frequencies[1] - frequencies[0]
fitting_function = FittingFunctionFactory(
name=self._name,
is_normalized=False).create()
peak_positions = []
widths = []
norms = []
fiterrs = []
sf_fittings = []
for i in range(num_irreps):
sf = partial_sf_s[:, i]
if np.sum(sf) < prec:
peak_position = np.nan
width = np.nan
norm = np.nan
fiterr = np.nan
sf_fitting = np.full(frequencies.shape, np.nan)
else:
peak_position = self._create_initial_peak_position(frequencies, sf)
width = self._create_initial_width()
if self._is_squared:
norm = self._create_initial_norm(frequencies, sf)
else:
ir_label = str(point_data['ir_labels'][i], encoding='ascii')
norm = float(extract_degeneracy_from_ir_label(ir_label))
def f(x, p, w):
return fitting_function(x, p, w, norm)
p0 = [peak_position, width]
maxfev = create_maxfev(p0)
fit_params, pcov = curve_fit(
f, frequencies, sf, p0=p0, maxfev=maxfev)
fiterr = np.sqrt(np.sum((f(frequencies, *fit_params) - sf) ** 2)) * dfreq
peak_position = fit_params[0]
width = fit_params[1]
norm = fit_params[2] if len(fit_params) == 3 else norm
sf_fitting = f(frequencies, *fit_params)
peak_positions.append(peak_position)
widths .append(width)
norms .append(norm)
fiterrs.append(fiterr)
sf_fittings.append(sf_fitting)
peak_positions = np.array(peak_positions)
widths = np.array(widths)
norms = np.array(norms)
fiterrs = np.asarray(fiterrs)
sf_fittings = np.asarray(sf_fittings)
return peak_positions, widths, norms, fiterrs, sf_fittings
def _create_initial_peak_position(self, frequencies, sf, prec=1e-12):
position = frequencies[np.argmax(sf)]
# "curve_fit" does not work well for extremely small initial guess.
# To avoid this problem, "position" is rounded.
# See also "http://stackoverflow.com/questions/15624070"
if abs(position) < prec:
position = 0.0
return position
def _create_initial_width(self):
width = 0.1
return width
def _create_initial_norm(self, frequencies, sf):
dfreq = frequencies[1] - frequencies[0]
norm = np.sum(sf) * dfreq
return norm
def print_header(self, file_output):
file_output.create_dataset('function' , data=self._name)
file_output.create_dataset('is_squared', data=self._is_squared)
file_output.create_dataset('paths' , data=self._band_data['paths'])
file_output['frequencies'] = self._band_data['frequencies'][...]
def _write(self, file_out, group_name, peak_positions_s, widths_s, norms_s, fiterrs, sf_fittings):
group = file_out.create_group(group_name)
keys = [
'natoms_primitive',
'elements',
'distance',
'pointgroup_symbol',
'num_irreps',
'ir_labels',
]
for k in keys:
file_out.create_dataset(
group_name + k, data=np.array(self._band_data[group_name + k])
)
group.create_dataset('peaks_s', data=peak_positions_s)
group.create_dataset('widths_s', data=widths_s)
group.create_dataset('norms_s', data=norms_s)
group['fitting_errors'] = fiterrs
group['partial_sf_s'] = sf_fittings
group['total_sf'] = np.nansum(sf_fittings, axis=0)
def create_maxfev(p0):
maxfev = 20000 * (len(p0) + 1)
return maxfev
| 5,734 |
tests/unit/preorders/test_forms.py
|
etienne86/oc_p13_team_spirit
| 0 |
2022808
|
"""Contain the unit tests related to the forms in app ``preorders``."""
from django.core.files.uploadedfile import UploadedFile
from django.test import TestCase
from teamspirit.catalogs.models import Catalog, Product
from teamspirit.core.models import Address
from teamspirit.preorders.forms import AddToCartForm, DropFromCartForm
from teamspirit.preorders.models import ShoppingCart, ShoppingCartLine
from teamspirit.profiles.models import Personal
from teamspirit.users.models import User
class PreordersFormsTestCase(TestCase):
"""Test the forms in the app ``preorders``."""
def setUp(self):
super().setUp()
# a user in database
self.address = Address.objects.create(
label_first="1 rue de l'impasse",
label_second="",
postal_code="75000",
city="Paris",
country="France"
)
self.personal = Personal.objects.create(
phone_number="01 02 03 04 05",
address=self.address
)
self.user = User.objects.create_user(
email="<EMAIL>",
password="<PASSWORD>",
first_name="Toto",
last_name="<NAME>",
personal=self.personal
)
# log this user in
self.client.login(email="<EMAIL>", password="<PASSWORD>")
# some other data
self.shopping_cart = ShoppingCart.objects.create(
user=self.user,
)
self.catalog = Catalog.objects.create(
name="Catalogue de vêtements",
)
self.image = UploadedFile()
self.product = Product.objects.create(
name="Débardeur homme",
image=self.image,
is_available=True,
is_free=False,
price=25,
catalog=self.catalog,
)
self.shopping_cart_line = ShoppingCartLine.objects.create(
shopping_cart=self.shopping_cart,
product=self.product,
quantity=2,
size='XS'
)
def test_add_to_cart_form_success(self):
"""Unit test - app ``preorders`` - form ``AddToCartForm``
Test the 'product add to cart' form with success.
"""
# count the records in database: before
records_before = ShoppingCartLine.objects.all().count()
# process the form
form_data = {
'shopping_cart': self.shopping_cart,
'product': self.product,
'quantity': 1,
'size': 'M',
}
form = AddToCartForm(data=form_data)
self.assertTrue(form.is_valid())
form.save()
expected_shopping_cart_line = ShoppingCartLine(
shopping_cart=self.shopping_cart,
product=self.product,
quantity=1,
size='M'
)
# count the records in database: after
records_after = ShoppingCartLine.objects.all().count()
# is one record added in database?
self.assertEqual(records_after, records_before + 1)
# is this last record as expected?
last_record = ShoppingCartLine.objects.all()[records_after - 1]
self.assertEqual(
last_record.shopping_cart,
expected_shopping_cart_line.shopping_cart
)
self.assertEqual(
last_record.product,
expected_shopping_cart_line.product
)
self.assertEqual(
last_record.quantity,
expected_shopping_cart_line.quantity
)
self.assertEqual(
last_record.size,
expected_shopping_cart_line.size
)
def test_drop_from_cart_form_success(self):
"""Unit test - app ``preorders`` - form ``DropFromCartForm``
Test the 'product drop from cart' form with success.
"""
# count the records in database: before
records_before = ShoppingCartLine.objects.all().count()
# process the form
form = DropFromCartForm(
data={},
request_user=self.user,
line_id=self.shopping_cart_line.id,
shopping_cart_line=self.shopping_cart_line
)
self.assertTrue(form.is_valid())
# count the records in database: after
records_after = ShoppingCartLine.objects.all().count()
        # is one record removed from the database?
self.assertEqual(records_after, records_before - 1)
| 4,406 |
bin/eoddcreatereport.py
|
remotesensinginfo/eodatadown
| 3 |
2024134
|
#!/usr/bin/env python
"""
EODataDown - Setup/Update the system.
"""
# This file is part of 'EODataDown'
# A tool for automating Earth Observation Data Downloading.
#
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Purpose: Command line tool for running the system.
#
# Author: <NAME>
# Email: <EMAIL>
# Date: 26/11/2019
# Version: 1.0
#
# History:
# Version 1.0 - Created.
import argparse
import logging
import os
import os.path
import datetime
import rsgislib
import eodatadown.eodatadownrun
from eodatadown import EODATADOWN_SENSORS_LIST
logger = logging.getLogger('eoddcreatereport.py')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str, default="", help="Path to the JSON config file.")
parser.add_argument("-o", "--output", type=str, required=True, help="The output PDF report file.")
parser.add_argument("--start", type=str, required=True, help="The start date (recent), with format YYYYMMDD.")
parser.add_argument("--end", type=str, required=True, help="The start date (earliest), with format YYYYMMDD.")
parser.add_argument("-s", "--sensor", type=str, required=False, choices=EODATADOWN_SENSORS_LIST,
help='''Specify the sensor for which this process should be executed (Optional)''')
parser.add_argument("-p", "--platform", type=str, required=False,
help='''Specify the platform for which this process should be executed (Optional)''')
parser.add_argument("--order_desc", action='store_true', default=False,
help="Specify that the scenes should be in descending order.")
parser.add_argument("--record_db", action='store_true', default=False,
help="Specify that the report should be stored in database.")
args = parser.parse_args()
config_file = args.config
main_config_value = os.getenv('EDD_MAIN_CFG', None)
if (config_file == '') and (main_config_value is not None):
config_file = main_config_value
print("'" + config_file + "'")
if not os.path.exists(config_file):
logger.info("The config file does not exist: '" + config_file + "'")
raise Exception("Config file does not exist")
t = rsgislib.RSGISTime()
t.start(True)
start_date = datetime.datetime.strptime(args.start, '%Y%m%d').date()
end_date = datetime.datetime.strptime(args.end, '%Y%m%d').date()
eodatadown.eodatadownrun.create_date_report(config_file, args.output, start_date, end_date, args.sensor,
args.platform, args.order_desc, args.record_db)
t.end(reportDiff=True, preceedStr='EODataDown processing completed ', postStr=' - eoddcreatereport.py.')
| 3,273 |
python/cvi_toolkit/model/onnx_model.py
|
sophgo/tpu_compiler
| 3 |
2023171
|
from .base_model import model_base
import onnxruntime
import onnx
from onnx import helper
import os
import numpy as np
class OnnxModel(model_base):
def __init__(self):
self.net = None
def get_shape(self, sess):
"""
        ONNX Runtime output shapes may be wrong, so we print them here to check
"""
for output in sess.get_outputs():
print("output name {}\n".format(output.name))
print("output shape {}".format(output.shape))
def load_model(self, model_file, wegiht_file=None):
self.model_file = model_file
self.net = onnxruntime.InferenceSession(model_file)
self.onnx_model = onnx.load(model_file)
    def inference(self, inputs):
        return self._inference(inputs)
    def _inference(self, inputs, onnx_model=None):
if onnx_model:
ort_session = onnxruntime.InferenceSession(onnx_model)
# self.get_shape(ort_session)
ort_outs = ort_session.run(None, inputs)
else:
ort_outs = self.net.run(None, inputs)
return ort_outs
def get_all_tensor(self, input_data):
output_keys = ['output']
onnx_model = self.onnx_model
for node in onnx_model.graph.node:
_intermediate_tensor_name = list(node.output)
intermediate_tensor_name = ",".join(_intermediate_tensor_name)
intermediate_layer_value_info = helper.ValueInfoProto()
intermediate_layer_value_info.name = intermediate_tensor_name
onnx_model.graph.output.append(intermediate_layer_value_info)
output_keys.append(intermediate_layer_value_info.name + '_' + node.op_type)
dump_all_onnx = "all_{}".format(self.model_file.split("/")[-1])
onnx.save(onnx_model, dump_all_onnx)
print("dump multi-output onnx all tensor at ", dump_all_onnx)
        # dump all inferenced tensors
        ort_outs = self._inference(input_data, onnx_model=dump_all_onnx)
output_dict = dict(zip(output_keys, map(np.ndarray.flatten, ort_outs)))
return output_dict
def get_inputs(self, ):
return self.net.get_inputs()
def get_op_info(self):
raise RuntimeError("Todo")
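# --- Hedged usage sketch (hypothetical file and tensor names) ---
# 'model.onnx' is a placeholder path; the input dict key is taken from the session itself.
if __name__ == '__main__':
    m = OnnxModel()
    m.load_model('model.onnx')
    first_input = m.get_inputs()[0]
    shape = [d if isinstance(d, int) else 1 for d in first_input.shape]
    dummy = {first_input.name: np.zeros(shape, dtype=np.float32)}
    outputs = m.inference(dummy)           # plain forward pass
    all_tensors = m.get_all_tensor(dummy)  # flattened dict of every intermediate tensor
    print(len(outputs), len(all_tensors))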
| 2,213 |
web_tests/the_rules.py
|
SalamiArmy/InfoBoet
| 1 |
2023968
|
def run(thorin, incoming):
return """The rules are:
1. A robot may not injure a human being or, through inaction, allow a human being to come to harm.
2. A robot must obey orders given it by human beings except where such orders would conflict with the First Law.
3. A robot must protect its own existence as long as such protection does not conflict with the First or Second Law."""
| 402 |
docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyMMFD/__init__.py
|
liujiamingustc/phd
| 3 |
2022837
|
#!/usr/bin/env python
try:
from pyMMFD import MMFD
__all__ = ['MMFD']
except:
__all__ = []
#end
| 109 |
AbstractSearchEngine/indexing/UnifiedSearch.py
|
canuse/arXiv_explorer
| 0 |
2024122
|
from AbstractSearchEngine.indexing.BM25 import BM25
from AbstractSearchEngine.utils.ranking import borda_count
from AbstractSearchEngine.indexing.TFIDF import TFIDF
from functools import lru_cache
@lru_cache()
def search_by_words(word_list):
"""[summary]
Args:
word_list ([type]): [description]
Returns:
        list: list of article_id. Scores are not needed.
"""
bm25_result, wc1 = BM25.search_by_words(word_list, 200)
tfidf_result, wc2 = TFIDF.search_by_words(word_list, 200)
result = borda_count([bm25_result, tfidf_result])
return [x[0] for x in result][:200], max(wc1, wc2)
def query_expansion(word_list, nrel=10, nexp=2, allow_dup=True):
"""[summary]
Args:
word_list ([type]): [description]
nrel (int, optional): [description]. Defaults to 10.
nexp (int, optional): [description]. Defaults to 2.
allow_dup (bool, optional): [description]. Defaults to True.
Returns:
        list: list of expanded queries. Expected to return stemmed strings such as ["apple banana fruit", "apple banana orange", ...]
"""
bm25_result = BM25.query_expansion(word_list, nrel=nrel, nexp=nexp, allow_dup=allow_dup)
tfidf_result = TFIDF.query_expansion(word_list, nrel=nrel, nexp=nexp, allow_dup=allow_dup)
result = borda_count([bm25_result, tfidf_result], algorithm='vote')
return result
@lru_cache()
def get_relative_article(arxivID_list, nart=10):
"""[summary]
Args:
arxivID_list ([type]): [description]
nart (int, optional): [description]. Defaults to 10.
Returns:
        list: list of article_id. Scores are not needed.
"""
bm25_result = BM25.get_relative_article(arxivID_list, nart=10)
tfidf_result = TFIDF.get_relative_article(arxivID_list, nart=10)
result = borda_count([bm25_result, tfidf_result])
return [x[0] for x in result]
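# --- Hedged usage sketch ---
# The word lists and arXiv id below are illustrative only. Note that the functions
# decorated with lru_cache must be called with hashable (tuple) arguments.
#
#   article_ids, word_count = search_by_words(('appl', 'banana'))
#   expanded_queries = query_expansion(['appl', 'banana'], nrel=10, nexp=2)
#   related_ids = get_relative_article(('1801.00001',), nart=10)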
| 1,824 |
main.py
|
SuhelMehta9/TrackSystemLogin
| 2 |
2023172
|
from time import sleep
import socket
from capture_image import captureImage
from config import *
# Capture image
image_files = captureImage(config["IMAGE_DIR"])
# Check internet connectivity
while True:
try:
host = socket.gethostbyname("www.google.com")
s = socket.create_connection((host, 80), 2)
s.close()
break
except:
sleep(20)
pass
if config["SEND_EMAIL"]:
# Send email
from send_mail import sendMail
sendMail(
config["FROM_EMAIL_ADDRESS"],
config["PASSWORD_OF_FROM_EMAIL_ADDRESS"],
config["TO_EMAIL_ADDRESS"],
config["EMAIL_SUBJECT"],
config["EMAIL_BODY"],
config["SMTP_SERVER_ADDRESS"],
config["SMTP_SSL_PORT"],
image_files,
)
if config["GIT_COMMIT"]:
# Create git commit
from git_commit import gitCommit
if (
config["GITHUB_USERNAME_OR_API_TOKEN"]
and config["GITHUB_USER_PASSWORD"]
):
gitCommit(
config["GITHUB_REPOSITORY"],
image_files,
config["GIT_COMMIT_MESSAGE"],
config["GITHUB_USERNAME_OR_API_TOKEN"],
config["GITHUB_USER_PASSWORD"],
)
elif config["GITHUB_USERNAME_OR_API_TOKEN"]:
gitCommit(
config["GITHUB_REPOSITORY"],
image_files,
config["GIT_COMMIT_MESSAGE"],
config["GITHUB_USERNAME_OR_API_TOKEN"],
)
| 1,439 |
Beginner_Contest/115/C_Christmas_Eve/main.py
|
Tao4free/atcoder
| 0 |
2024146
|
def readraw(typ=None):
if typ is None:
return input().strip()
else:
return typ(input().strip())
def readarray(typ=None):
inpt_list = input().split()
if typ is None:
return [int(x) if x.isdigit() else x for x in inpt_list]
else:
return list(map(typ, inpt_list))
if __name__ == "__main__":
n, k = readarray()
tree = [readraw(int) for _ in range(n)]
tree.sort()
ans = min(tree[i+k-1] - tree[i] for i in range(n-k+1))
print(ans)
| 503 |
13-work-with-basic-auth/python/basic_auth_2.py
|
tourdedave/selenium-tips
| 251 |
2022627
|
# -*- coding: utf-8 -*-
"""
Implementation of http://elementalselenium.com/tips/13-work-with-basic-auth
"""
import unittest
from selenium import webdriver
class BasicAuth1(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.get('http://admin:[email protected]/basic_auth')
def tearDown(self):
self.driver.quit()
def test_visit_basic_auth_secured_page(self):
driver = self.driver
driver.get('http://the-internet.herokuapp.com/basic_auth')
page_message = driver.find_element_by_css_selector('.example p').text
assert page_message == 'Congratulations! You must have the proper credentials.'
if __name__ == "__main__":
unittest.main()
| 756 |
nbdev_colab_helper/core.py
|
pete88b/nbdev_colab_helper
| 5 |
2023327
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['IN_COLAB', 'setup_git', 'git_push', 'read_config', 'setup_project', 'init_notebook']
# Cell
IN_COLAB = 'google.colab' in str(get_ipython())
# Cell
import os, subprocess, urllib, shlex
def _run_commands(commands, password=None):
"Run a list of commands making sure we mask `password` when printing commands"
for cmd in commands:
process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
output, err = process.communicate()
if password: cmd = cmd.replace(password, '*****')
print(cmd)
if output or err:
print(' ', output.decode('utf8').strip() if output else '', err or '')
def setup_git(git_url:str, git_branch:str, name:str, password:str, email:str):
"Link your mounted drive to GitHub"
password = urllib.parse.quote(password)
_run_commands([
f"git config --global user.email {email}",
f"git config --global user.name {name}",
f"git init",
f"git remote rm origin",
f"git remote add origin {git_url.replace('://git', f'://{name}:{password}@git')}",
f"git pull origin {git_branch}", # TODO: do we need --allow-unrelated-histories?
f"git checkout {git_branch}",
f"git push --set-upstream origin {git_branch}"],
password)
def git_push(git_branch:str, message:str):
"Convert notebooks to scripts and then push to the library"
_run_commands([
f'nbdev_install_git_hooks',
f'nbdev_build_lib',
f'git add *', # TODO: allow specific files to be pushed
f'git commit -m "{message}"',
f'git push origin {git_branch}']) # TODO: show message when push fails!
# Cell
from configparser import ConfigParser
from pathlib import Path
def read_config(project_name):
config_path = Path('/content/drive/My Drive/nbdev_colab_projects.ini')
config = ConfigParser()
config.read(config_path)
if project_name not in config:
print(f'Error: [{project_name}] section not found in {config_path}')
print(f'Please add a section for [{project_name}] and run `setup_project` again')
print('See https://pete88b.github.io/nbdev_colab_helper/core.html for details')
return config, None
return config, config[project_name]
# Cell
def setup_project(project_name):
"Set-up the colab runtime for `project_name`"
assert IN_COLAB, "You do not appear to be running in Colab"
if not Path('/content/drive/My Drive').exists():
print('Connecting to google drive')
from google.colab import drive
drive.mount('/content/drive')
config, project_config = read_config(project_name)
if project_config is None: return config, project_config
project_path = Path(project_config['project_parent'])/project_name
git_url, git_branch = project_config['git_url'], project_config['git_branch']
if project_path.is_dir():
print(f'Clone of {project_name} already exists in {project_path.parent}')
else:
project_path.parent.mkdir(parents=True, exist_ok=True)
_run_commands([f'git clone {git_url} "{project_path}"'])
get_ipython().magic(f'cd {project_path}')
_run_commands(['pip install fastscript==1.0.0 fastcore==1.0.8 nbdev==1.0.14'])
setup_git(git_url, git_branch, project_config['git_user_name'],
project_config['git_user_password'], project_config['git_user_email'])
return config, project_config
# Cell
def init_notebook(project_name):
print('Connecting to google drive')
from google.colab import drive
drive.mount('/content/drive')
config, project_config = read_config(project_name)
if project_config is None: return config, project_config
project_path = Path(project_config['project_parent'])/project_name
get_ipython().magic(f'cd {project_path}')
_run_commands(['pip install fastscript==1.0.0 fastcore==1.0.8 nbdev==1.0.14'])
from nbdev.imports import Config
get_ipython().magic(f'cd {Config().nbs_path}') # TODO is there a better way to know which dir the nb is in?
# TODO: de-duplicate with setup_project
# TODO: Read `requirements` section in `settings.ini` and install all reqs here
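# --- Hedged example of the expected config file (all values are placeholders) ---
# read_config() looks for /content/drive/My Drive/nbdev_colab_projects.ini with one
# section per project; the keys below are the ones setup_project() actually reads.
#
#   [my_project]
#   project_parent = /content/drive/My Drive/projects
#   git_url = https://github.com/someuser/my_project.git
#   git_branch = master
#   git_user_name = someuser
#   git_user_password = personal-access-token
#   git_user_email = [email protected]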
| 4,084 |
library/forms.py
|
Mohirbeck/Libraryapp
| 0 |
2024152
|
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from django.forms.widgets import RadioSelect
from .models import Book
from django import forms
from django.core.validators import MinLengthValidator
class BookForm(forms.ModelForm):
class Meta:
model = Book
fields = "__all__"
class StudentForm(forms.Form):
first_name = forms.CharField(max_length=255)
last_name = forms.CharField(max_length=255)
group = forms.CharField(max_length=31)
faculty = forms.CharField(max_length=63)
username = forms.CharField(max_length=63)
pwd1 = forms.CharField(max_length=127,
required=True,
widget=forms.PasswordInput,
validators=[MinLengthValidator(8)],
label="Password")
pwd2 = forms.CharField(max_length=127,
required=True,
widget=forms.PasswordInput,
validators=[MinLengthValidator(8)],
label="Confirm Password")
def clean_pwd2(self):
if self.cleaned_data['pwd1'] != self.cleaned_data['pwd2']:
raise ValidationError('Passwords must be the same!')
else:
SpecialSym = ['$', '@', '#', '%', '/']
if len(self.cleaned_data['pwd2']) < 8:
raise ValidationError('length should be at least 8')
else:
if not any(char.isdigit() for char in self.cleaned_data['pwd2']):
raise ValidationError(
'Password should have at least one numeral')
else:
if not any(char.isupper() for char in self.cleaned_data['pwd2']):
raise ValidationError(
'Password should have at least one uppercase letter')
else:
if not any(char.islower() for char in self.cleaned_data['pwd2']):
raise ValidationError(
'Password should have at least one lowercase letter')
else:
if not any(char in SpecialSym for char in self.cleaned_data['pwd2']):
raise ValidationError(
                                    'Password should have at least one of the symbols $@#%/')
return self.cleaned_data['pwd2']
| 2,487 |
soli/aria/models/plant.py
|
rcdixon/soli
| 0 |
2023331
|
from aria.models import Crop
from aria.models.validation.plant import *
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
class Plant(models.Model):
class Meta:
db_table = "plant"
app_label = "aria"
crop = models.ForeignKey(Crop, on_delete=models.CASCADE)
pattern = models.CharField(
choices=GROW_STYLE,
max_length=1,
blank=False,
default=None
)
spacing = models.SmallIntegerField(
validators=[
MinValueValidator(0)
]
)
frost = models.CharField(
choices=FROST,
max_length=1,
blank=False,
default=None
)
date = models.SmallIntegerField(
default=1,
validators=[
MinValueValidator(-180),
MaxValueValidator(180)
]
)
location = models.CharField(
choices=LOCATION,
max_length=1,
blank=False,
default=None
)
transplant = models.ForeignKey(
"self",
null=True,
db_column="pl_transplant",
on_delete=models.CASCADE)
temperature = models.SmallIntegerField(
null=True,
validators=[
MinValueValidator(-50),
MaxValueValidator(50)
]
)
germination = models.SmallIntegerField(
null=True,
validators=[
MinValueValidator(1),
MaxValueValidator(365)
]
)
depth = models.SmallIntegerField(
null=True,
validators=[
MinValueValidator(0),
MaxValueValidator(1000)
]
)
| 1,643 |
example_style_transfer/python/modules/forward.py
|
natxopedreira/ofxTensorFlow2
| 65 |
2023973
|
"""
Feed-forward network to generate the stylized result.
"""
import tensorflow as tf
class instance_norm(tf.keras.layers.Layer):
def __init__(self, epsilon=1e-3):
super(instance_norm, self).__init__()
self.epsilon = epsilon
def build(self, input_shape):
self.beta = tf.Variable(tf.zeros([input_shape[3]]))
self.gamma = tf.Variable(tf.ones([input_shape[3]]))
def call(self, inputs):
mean, var = tf.nn.moments(inputs, axes=[1, 2], keepdims=True)
x = tf.divide(tf.subtract(inputs, mean), tf.sqrt(tf.add(var, self.epsilon)))
return self.gamma * x + self.beta
class conv_2d(tf.keras.layers.Layer):
def __init__(self, filters, kernel, stride):
super(conv_2d, self).__init__()
pad = kernel // 2
self.paddings = tf.constant([[0, 0], [pad, pad],[pad, pad], [0, 0]])
self.conv2d = tf.keras.layers.Conv2D(filters, kernel, stride, use_bias=False, padding='valid')
self.instance_norm = instance_norm()
def call(self, inputs, relu=True):
x = tf.pad(inputs, self.paddings, mode='REFLECT')
x = self.conv2d(x)
x = self.instance_norm(x)
if relu:
x = tf.nn.relu(x)
return x
class resize_conv_2d(tf.keras.layers.Layer):
def __init__(self, filters, kernel, stride):
super(resize_conv_2d, self).__init__()
self.conv = conv_2d(filters, kernel, stride)
self.instance_norm = instance_norm()
self.stride = stride
def call(self, inputs):
new_h = inputs.shape[1] * self.stride * 2
new_w = inputs.shape[2] * self.stride * 2
x = tf.image.resize(inputs, [new_h, new_w], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
x = self.conv(x)
# return x
""" Redundant """
x = self.instance_norm(x)
return tf.nn.relu(x)
class tran_conv_2d(tf.keras.layers.Layer):
def __init__(self, filters, kernel, stride):
super(tran_conv_2d, self).__init__()
self.tran_conv = tf.keras.layers.Conv2DTranspose(filters, kernel, stride, padding='same')
self.instance_norm = instance_norm()
def call(self, inputs):
x = self.tran_conv(inputs)
x = self.instance_norm(x)
return tf.nn.relu(x)
class residual(tf.keras.layers.Layer):
def __init__(self, filters, kernel, stride):
super(residual, self).__init__()
self.conv1 = conv_2d(filters, kernel, stride)
self.conv2 = conv_2d(filters, kernel, stride)
def call(self, inputs):
x = self.conv1(inputs)
return inputs + self.conv2(x, relu=False)
class feed_forward(tf.keras.models.Model):
def __init__(self):
super(feed_forward, self).__init__()
# [filters, kernel, stride]
self.conv1 = conv_2d(32, 9, 1)
self.conv2 = conv_2d(64, 3, 2)
self.conv3 = conv_2d(128, 3, 2)
self.resid1 = residual(128, 3, 1)
self.resid2 = residual(128, 3, 1)
self.resid3 = residual(128, 3, 1)
self.resid4 = residual(128, 3, 1)
self.resid5 = residual(128, 3, 1)
#self.tran_conv1 = tran_conv_2d(64, 3, 2)
#self.tran_conv2 = tran_conv_2d(32, 3, 2)
self.resize_conv1 = resize_conv_2d(64, 3, 2)
self.resize_conv2 = resize_conv_2d(32, 3, 2)
self.conv4 = conv_2d(3, 9, 1)
def call(self, inputs):
x = self.conv1(inputs)
x = self.conv2(x)
x = self.conv3(x)
x = self.resid1(x)
x = self.resid2(x)
x = self.resid3(x)
x = self.resid4(x)
x = self.resid5(x)
x = self.resize_conv1(x)
x = self.resize_conv2(x)
x = self.conv4(x, relu=False)
return (tf.nn.tanh(x) * 150 + 255. / 2) # for better convergence
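# --- Hedged usage sketch (input size is illustrative) ---
# Builds the transform network and runs one forward pass on a dummy batch; with a
# 256x256 input the two stride-2 convolutions followed by the two resize-convolutions
# return a stylized image of the same spatial size.
if __name__ == '__main__':
    model = feed_forward()
    dummy = tf.random.uniform([1, 256, 256, 3])
    stylized = model(dummy)
    print(stylized.shape)  # expected: (1, 256, 256, 3)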
| 3,860 |
abc/abc119/abc119c.py
|
c-yan/atcoder
| 1 |
2023261
|
from itertools import product
N, A, B, C, *l = map(int, open(0).read().split())
result = float('inf')
for p in product(range(4), repeat=N):
c = [0, 0, 0, 0]
v = [0, 0, 0, 0]
for i in range(N):
c[p[i]] += 1
v[p[i]] += l[i]
if min(c[:3]) == 0:
continue
t = abs(A - v[0]) + abs(B - v[1]) + abs(C - v[2]) + (sum(c[:3]) - 3) * 10
result = min(result, t)
print(result)
| 413 |
PyFBA/tests/test_compound.py
|
SysSynBio/PyFBA
| 24 |
2024198
|
import unittest
import PyFBA
"""
A class to test the compound class.
"""
class TestCompound(unittest.TestCase):
def setUp(self):
"""This method is called before every test_ method"""
self.compound = PyFBA.metabolism.Compound("t1", "test compound")
self.compound.abbreviation = "Cool"
self.compound.add_attribute('What', "Everything")
self.compound_with_loc = PyFBA.metabolism.CompoundWithLocation.from_compound(self.compound, "extracellular")
def test_equals(self):
"""Test that our equals function works"""
othercompound = PyFBA.metabolism.Compound("t2", "test compound")
self.assertEqual(self.compound, othercompound)
othercompound.name = "Another compound"
self.assertNotEqual(self.compound, othercompound)
def test_hash(self):
"""Test that our hash function works"""
self.assertEqual(hash(self.compound), hash(("t1", "test compound")))
def test_in_reactions(self):
"""Test which reactions the compound is in"""
self.compound.add_reactions({"a", "b", "c"})
self.compound.add_reactions({"d"})
self.assertTrue(self.compound.has_reaction("a"))
self.assertEqual(self.compound.number_of_reactions(), 4)
# check that it is a set
self.compound.add_reactions({"a", "b", "c"})
self.assertEqual(self.compound.number_of_reactions(), 4)
# check that we are provided with a set
self.assertRaises(
TypeError,
self.compound.add_reactions,
"A reaction"
)
def test_abbreviation(self):
""" Did we get the new abbreviation?"""
self.assertEqual(self.compound.abbreviation, "Cool")
def test_adding_attributes(self):
""" Did we add the new attributes"""
self.assertEqual(self.compound.get_attribute("What"), "Everything")
def test_compound_with_location(self):
"""Test the location of a compound"""
self.assertEqual(self.compound_with_loc.location, 'extracellular')
def test_comp_with_loc_copied(self):
"""Test we copied all attributes properly"""
self.assertEqual(self.compound_with_loc.get_attribute("What"), "Everything")
| 2,234 |
pure1_check_new_array.py
|
sile16/pypure_examples
| 0 |
2024183
|
from pypureclient import pure1
import json
# app id is from Pure1 REST settings. (need to be set as admin to use)
app_id = "pure1:apikey:0ichbc6p0crIhWQm"
key_file = "mattapp_priv.pem"
# leave password blank if none
private_key_password = ""
# create client and authenticate
pure1Client = pure1.Client(private_key_file=key_file,
private_key_password=private_key_password,
app_id=app_id)
# get Arrays
response = pure1Client.get_arrays()
resources = []
# check to see if the response was valid and then pull out items
if response is not None:
resources = list(response.items)
# load previous saved arrays from file:
try:
existing_arrays = {}
with open("pure1_arrays.json") as f:
existing_arrays = json.load(f)
except Exception:
pass
# go through the list of arrays from Pure1
# because arrays are keyed by id, we can tell new arrays apart from renamed ones
for a in resources:
if a.id not in existing_arrays:
existing_arrays[a.id] = a.name
print("New array found {}".format(a.name))
# check to see if array was renamed
elif existing_arrays[a.id] != a.name:
print("Array: {} was renamed to -> {} but is not new.".format(
existing_arrays[a.id],
a.name))
# save new set of arrays to file
with open("pure1_arrays.json", "w") as f:
json.dump(existing_arrays, f)
| 1,371 |
src/test.py
|
dreamer121121/F3Net
| 0 |
2024221
|
#!/usr/bin/python3
#coding=utf-8
import os
import sys
sys.path.insert(0, '../')
sys.dont_write_bytecode = True
import cv2
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import dataset
from net import F3Net
from transform import *
import pydensecrf.densecrf as dcrf
class Test(object):
def __init__(self, Dataset, Network, path):
## dataset
self.cfg = Dataset.Config(datapath=path, snapshot=args.model, mode='test')
self.data = Dataset.Data(self.cfg)
self.loader = DataLoader(self.data, batch_size=1, shuffle=False, num_workers=8)
## network
self.net = Network(self.cfg)
self.net.train(False)
self.net.to('cuda:1')
self.path = path
def save(self):
with torch.no_grad():
import datetime
#start = datetime.datetime.now()
cnt = 1
total = datetime.datetime(1999,1,1)
for image, mask, shape, name in self.loader:
#image.shape (1,3,352,352)
#shape: init img shape ,which is for pre_mask to match the size of init img
image = image.to('cuda:1').float()
start = datetime.datetime.now()
out1u, out2u, out2r, out3r, out4r, out5r = self.net(image, shape)
total += datetime.datetime.now()-start
print("inference time: ",(total-datetime.datetime(1999,1,1))/cnt)
out = out2u
pred = (torch.sigmoid(out[0,0])*255).cpu().numpy()
#
# Q = None
# if args.crf:
# Q = self.dense_crf(user_img.numpy().astype(np.uint8),pred)
head = '../eval/maps/F3Net/'+ self.cfg.datapath.split('/')[-1]
if not os.path.exists(head):
os.makedirs(head)
cv2.imwrite(head+'/'+name[0]+'.png', np.round(pred))
# import sys
# sys.exit(0)
#print("inference time: ",(datetime.datetime.now()-start)/cnt)
cnt +=1
@torch.no_grad()
def save_fig(self):
normalize = Normalize(mean=self.cfg.mean, std=self.cfg.std)
resize = Resize(352, 352)
totensor = ToTensor()
fr = open(self.path+'/test.txt','r')
file_list = fr.readlines()
fr.close()
import datetime
cnt = 1
total = datetime.datetime(1999, 1, 1)
for name in file_list:
name = name.replace('\n','')
user_image = cv2.imread(self.path+'/image/'+name+'.jpg')
start = datetime.datetime.now()
input_data = user_image[:,:,::-1].astype(np.float32)
shape = [torch.tensor([int(input_data.shape[0])]),torch.tensor([int(input_data.shape[1])])]
input_data = normalize(input_data)
input_data = resize(input_data)
input_data = totensor(input_data)
input_data = input_data[np.newaxis,:,:,:]
image = input_data.to('cuda:1').float()
user_image = torch.from_numpy(user_image).to('cuda:1').float()
alpha = torch.ones(user_image.size()[0],user_image.size()[1],1).to('cuda:1')*255
user_image = torch.cat((user_image,alpha),dim=2)
out1u, out2u, out2r, out3r, out4r, out5r = self.net(image, shape)
out = out2u
pred = (torch.sigmoid(out[0, 0]))
# if args.crf:
# Q = self.dense_crf(user_image.astype(np.uint8), pred.cpu().numpy())
# print('--Q--', Q)
# cv2.imwrite('./crf_test.png',np.round(Q*255))
# import sys
# sys.exit(0)
mask = pred.unsqueeze(dim=2)
outimg = torch.mul(mask, user_image).detach().cpu().numpy()
# for w in range(outimg.shape[0]):
# for h in range(outimg.shape[1]):
# if all(outimg[w][h] == [0, 0, 0]):
# alpha[w][h] = 0
# else:
# alpha[w][h] = 255 # 看看能否优化速度
#
# outimg = np.dstack([outimg, alpha])
head = '../eval/results/F3Net/'+ self.cfg.datapath.split('/')[-1]
if not os.path.exists(head):
os.makedirs(head)
cv2.imwrite(head+'/'+name+'.png', np.round(outimg))
total += datetime.datetime.now() - start
print("inference time: ", (total - datetime.datetime(1999, 1, 1)) / cnt)
cnt += 1
    def dense_crf(self, img, output_probs):  # img: input image; output_probs: network prediction
        h = output_probs.shape[0]  # height
        w = output_probs.shape[1]  # width
        output_probs = np.expand_dims(output_probs, 0)
        output_probs = np.append(1 - output_probs, output_probs, axis=0)
        d = dcrf.DenseCRF2D(w, h, 2)  # NLABELS=2, two-class labelling (object vs. background)
        U = -np.log(output_probs)  # unary potentials
        U = U.reshape((2, -1))  # NLABELS=2, two classes
        U = np.ascontiguousarray(U)  # ensure a contiguous array
        img = np.ascontiguousarray(img)
        d.setUnaryEnergy(U)  # set the unary potentials
        img = img.squeeze()
        d.addPairwiseGaussian(sxy=20, compat=3)  # Gaussian pairwise potential
        d.addPairwiseBilateral(sxy=30, srgb=20, rgbim=img, compat=10)  # bilateral pairwise potential
        Q = d.inference(5)  # run 5 iterations of inference
        Q = np.argmax(np.array(Q), axis=0).reshape((h, w))  # per-pixel argmax label map
        return Q
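# Hedged usage sketch (not part of the original script): the commented-out calls above suggest
# dense_crf() was meant to refine the sigmoid prediction before saving. One possible wiring
# inside save(), assuming `image_bgr` is the original uint8 image re-read from disk with
# cv2.imread (the loader only yields the normalized tensor) and `pred` is the 0-255 mask
# computed above, could look like:
#
#     if args.crf:
#         probs = pred / 255.0                               # back to [0, 1] probabilities
#         Q = self.dense_crf(image_bgr.astype(np.uint8), probs)
#         pred = Q.astype(np.float32) * 255                  # hard CRF labels back to 0/255
#     cv2.imwrite(head + '/' + name[0] + '.png', np.round(pred))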
if __name__=='__main__':
#for path in ['../data/ECSSD', '../data/PASCAL-S', '../data/DUTS', '../data/HKU-IS', '../data/DUT-OMRON']:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--mode',
type=str,
default='mask')
parser.add_argument('--dataset',
type=str,
nargs='+',
default=[],
)
parser.add_argument('--model',
type=str)
parser.add_argument('--crf',
action='store_true')
args = parser.parse_args()
for path in args.dataset:
print("="*30+"path:"+"="*30,path)
t = Test(dataset, F3Net, '../data/'+path)
if args.mode == 'mask':
t.save()
elif args.mode == 'fig':
t.save_fig()
# t.show()
| 6,488 |
asetk/atomistic/__init__.py
|
ltalirz/asetk
| 18 |
2023957
|
"""Provide atomistic classes
Describes atomistic objects, such as atoms, energy levels, spectra etc.
"""
| 106 |
orders/migrations/0024_recivedorders.py
|
M4yankChoudhary/Pizza
| 1 |
2023225
|
# Generated by Django 3.0.6 on 2020-05-30 09:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0023_auto_20200529_1351'),
]
operations = [
migrations.CreateModel(
name='RecivedOrders',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=65)),
('all_items', models.TextField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
],
),
]
| 654 |
cytopy/tests/conftest.py
|
JANHMS/CytoPy
| 41 |
2023639
|
from cytopy.tests import assets
from ..data.population import Population
from ..data.project import Project
from ..data.experiment import FileGroup
from mongoengine.connection import connect, disconnect
import pandas as pd
import numpy as np
import pytest
import shutil
import sys
import os
@pytest.fixture(scope='session', autouse=True)
def setup():
"""
Setup testing database
Yields
-------
None
"""
sys.path.append("/home/ross/CytoPy")
os.mkdir(f"{os.getcwd()}/test_data")
connect("test", host="mongomock://localhost", alias="core")
yield
shutil.rmtree(f"{os.getcwd()}/test_data", ignore_errors=True)
disconnect(alias="core")
@pytest.fixture
def example_populated_experiment():
"""
Generate an example Experiment populated with a single FileGroup "test sample"
Yields
-------
Experiment
"""
test_project = Project(project_id="test", data_directory=f"{os.getcwd()}/test_data")
exp = test_project.add_experiment(experiment_id="test experiment",
panel_definition=f"{assets.__path__._path[0]}/test_panel.xlsx")
exp.add_fcs_files(sample_id="test sample",
primary=f"{assets.__path__._path[0]}/test.FCS",
controls={"test_ctrl": f"{assets.__path__._path[0]}/test.FCS"},
compensate=False)
yield exp
test_project.reload()
test_project.delete()
os.mkdir(f"{os.getcwd()}/test_data")
def reload_filegroup(project_id: str,
exp_id: str,
sample_id: str):
"""
Reload a FileGroup
Parameters
----------
project_id: str
exp_id: str
sample_id: str
Returns
-------
FileGroup
"""
fg = (Project.objects(project_id=project_id)
.get()
.get_experiment(exp_id)
.get_sample(sample_id))
return fg
def create_example_populations(filegroup: FileGroup,
n_populations: int = 3):
"""
    Given a FileGroup, add the given number of example populations.
Parameters
----------
filegroup: FileGroup
n_populations: int (default=3)
Total number of populations to generate (must be at least 2)
Returns
-------
FileGroup
"""
for pname, parent in zip([f"pop{i + 1}" for i in range(n_populations)],
["root"] + [f"pop{i + 1}" for i in range(n_populations - 1)]):
parent_df = filegroup.load_population_df(population=parent,
transform="logicle")
x = parent_df["FS Lin"].median()
idx = parent_df[parent_df["FS Lin"] >= x].index.values
p = Population(population_name=pname,
n=len(idx),
parent=parent,
index=idx,
source="gate")
filegroup.add_population(population=p)
filegroup.save()
return filegroup
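# Hedged example (not in the original suite) of how the fixture and helper above might be
# combined in a test; it only relies on APIs already used in this file (get_sample,
# load_population_df), and the population names follow the pop1..popN scheme created above.
#
#     def test_example_populations(example_populated_experiment):
#         fg = example_populated_experiment.get_sample("test sample")
#         fg = create_example_populations(fg, n_populations=3)
#         df = fg.load_population_df(population="pop3", transform="logicle")
#         assert len(df) > 0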
def create_logicle_like(u: list, s: list, size: list):
assert len(u) == len(s), "s and u should be equal length"
lognormal = [np.random.lognormal(mean=u[i], sigma=s[i], size=int(size[i]))
for i in range(len(u))]
return np.concatenate(lognormal)
def create_linear_data():
x = np.concatenate([np.random.normal(loc=3.2, scale=0.8, size=100000),
np.random.normal(loc=0.95, scale=1.1, size=100000)])
y = np.concatenate([np.random.normal(loc=3.1, scale=0.85, size=100000),
np.random.normal(loc=0.5, scale=1.4, size=100000)])
return pd.DataFrame({"x": x, "y": y})
def create_lognormal_data():
x = np.concatenate([np.random.normal(loc=4.2, scale=0.8, size=50000),
np.random.lognormal(mean=4.2, sigma=0.8, size=50000),
np.random.lognormal(mean=7.2, sigma=0.8, size=50000),
np.random.lognormal(mean=0.8, sigma=0.95, size=50000)])
y = np.concatenate([np.random.normal(loc=3.2, scale=0.8, size=50000),
np.random.lognormal(mean=4.1, sigma=0.8, size=50000),
np.random.lognormal(mean=6.2, sigma=0.8, size=50000),
np.random.lognormal(mean=1.4, sigma=0.7, size=50000)])
return pd.DataFrame({"x": x, "y": y})
| 4,329 |
Proyecto_final/plot.py
|
maherreramu/Algoritmos
| 0 |
2024285
|
import networkx as nx
import matplotlib.pyplot as plt
from graph import G
plot = nx.spring_layout(G)
nx.draw_networkx_nodes(G, plot)
nx.draw_networkx_edges(G, plot, edgelist=G.edges())
nx.draw_networkx_labels(G, plot)
plt.show()
| 230 |
Python/Linear Regression in Python/Linear Regression.py
|
tanisha-bhadani/hacktoberfest2021-1
| 380 |
2023607
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#importing the required packages
import pandas as pd
import seaborn as sns
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
#reading the data
dataset= pd.read_excel("C:/Users/ASUS/Linear Regression.xlsx", sheet_name="Linear Regression")
#defined a function to take the independent variables as input
def reg(col_name):
#gaining insights about the data
dataset.head()
dataset.isnull().sum()
dataset.describe()
dataset.corr()
print("for", col_name)
sns.pairplot(dataset)
print('---------------------------------------------------------------')
#dividing the data into x and y
x=dataset[[col_name]]
y=dataset[['price']]
#splitting the training data and testing data
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=10)
    model = LinearRegression()
    #fitting the model
    model.fit(x_train, y_train)
    #predicting on the unseen data
    prediction = model.predict(x_test)
    #getting the RMSE and r2 values and printing them
    RMSE = np.sqrt(mean_squared_error(y_test, prediction))
    r2 = r2_score(y_test, prediction)
    #print("for", col_name)
    print("R Square value is", r2)
    print("Root mean squared error is", RMSE)
    #predicting and printing the estimated price
    unseen_pred = model.predict(np.array([[2]]))
    print("The estimated price is", unseen_pred)
print('---------------------------------------------------------------')
#passing values (the independent variables) to the function
reg('sqft_living')
reg('bedrooms')
reg('bathrooms')
reg('floors')
# In[ ]:
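# Hedged extension (not in the original notebook): the same dataset can also be fitted with all
# four predictors at once instead of one simple regression per column; the column names below
# are the ones already used above.
X_all = dataset[['sqft_living', 'bedrooms', 'bathrooms', 'floors']]
y_all = dataset[['price']]
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=0.3, random_state=10)
multi_model = LinearRegression().fit(X_train, y_train)
print("Multivariate R Square value is", r2_score(y_test, multi_model.predict(X_test)))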
| 1,750 |
src/ef/field/expression.py
|
fizmat/ef_python
| 1 |
2023749
|
import ast
import operator as op
import inject
import numpy
from simpleeval import SimpleEval
from ef.field import Field
from ef.util.inject import safe_default_inject
class FieldExpression(Field):
@safe_default_inject
@inject.params(xp=numpy)
def __init__(self, name, electric_or_magnetic, expression_x, expression_y, expression_z, xp=numpy):
super().__init__(name, electric_or_magnetic)
self._xp = xp
self.expression_x = expression_x
self.expression_y = expression_y
self.expression_z = expression_z
self._ev = SimpleEval(functions={"sin": xp.sin,
"cos": xp.cos,
"sqrt": xp.sqrt},
operators={ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
ast.Div: op.truediv, ast.FloorDiv: op.floordiv,
ast.Pow: op.pow, ast.Mod: op.mod,
ast.Eq: op.eq, ast.NotEq: op.ne,
ast.Gt: op.gt, ast.Lt: op.lt,
ast.GtE: op.ge, ast.LtE: op.le,
ast.Not: op.not_,
ast.USub: op.neg, ast.UAdd: op.pos,
ast.In: lambda x, y: op.contains(y, x),
ast.NotIn: lambda x, y: not op.contains(y, x),
ast.Is: lambda x, y: x is y,
ast.IsNot: lambda x, y: x is not y,
}
)
# todo: inherit SimpleEval and define math functions inside
# todo: add r, theta, phi names
def get_at_points(self, positions, time: float) -> numpy.ndarray:
positions = self._xp.asarray(positions)
self._ev.names["t"] = time
self._ev.names["x"] = positions[:, 0]
self._ev.names["y"] = positions[:, 1]
self._ev.names["z"] = positions[:, 2]
result = self._xp.empty_like(positions)
result[:, 0] = self._ev.eval(self.expression_x)
result[:, 1] = self._ev.eval(self.expression_y)
result[:, 2] = self._ev.eval(self.expression_z)
return result
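# Hedged usage sketch (assumed values, not part of the original module): the three expressions
# are plain strings in x, y, z and t that SimpleEval evaluates component-wise, as wired up in
# __init__ above. Whether "electric" is the exact token expected by the Field base class is an
# assumption here.
#
#     field = FieldExpression("demo", "electric", "sin(t)", "0", "x + y")
#     points = numpy.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
#     values = field.get_at_points(points, time=0.5)   # -> array of shape (2, 3)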
| 2,370 |
python/run.py
|
bretmulvey/azure-iot-central-arm-sdk-samples
| 0 |
2023822
|
import os
import random
from azure.identity._credentials.browser import InteractiveBrowserCredential
from azure.mgmt.iotcentral import IotCentralClient
from azure.mgmt.iotcentral.models import App, AppSkuInfo, SystemAssignedServiceIdentityType
region = "westus"
resourceGroup = "myResourceGroup"
skuName = "ST2"
# Get subscription info from environment.
tenantId = os.environ.get("AZURE_TENANT_ID")
subscriptionId = os.environ.get("AZURE_SUBSCRIPTION_ID")
if (tenantId is None) or (subscriptionId is None):
    raise Exception("Expected AZURE_TENANT_ID and AZURE_SUBSCRIPTION_ID environment variables.")
# Make IOTC client.
credential = InteractiveBrowserCredential(tenant_id = tenantId)
iotc = IotCentralClient(credential, subscriptionId)
# Choose app name.
appName = f"pysdk-{random.randint(100000, 999999)}-{random.randint(100000, 999999)}"
# Define the app configuration.
app = App(location = region, sku = AppSkuInfo(name = skuName))
app.subdomain = appName
app.display_name = appName
app.identity = { "type": "SystemAssigned" }
# Create the app.
print(f"\nCreating {appName}. Check browser window for login.")
poller = iotc.apps.begin_create_or_update(resourceGroup, appName, app)
result = poller.result()
if result.state != "created":
raise Exception("Expected 'created' state.")
print("OK")
# Make sure it's idempotent.
print(f"\nUpdating {appName}.")
poller = iotc.apps.begin_create_or_update(resourceGroup, appName, app)
result = poller.result()
if result.state != "created":
raise Exception("Expected 'created' state.")
print("OK")
# List all the apps in the resource group.
print(f"\nListing IoT Central apps in '{resourceGroup}'")
appsInGroup = iotc.apps.list_by_resource_group(resourceGroup)
for item in appsInGroup:
print(item)
# Update the app tags.
print(f"\nUpdating {appName} tags.")
tag = "mytag"
value = "myvalue"
app.tags = { tag: value }
poller = iotc.apps.begin_create_or_update(resourceGroup, appName, app)
result = poller.result()
if result.tags[tag] != value:
raise Exception("Expected updated tag.")
print("OK")
# Delete the app.
print(f"\nDeleting {appName}")
poller = iotc.apps.begin_delete(resourceGroup, appName)
result = poller.result()
if result:
print(result)
raise Exception("Expected 'None'.")
print("OK")
# Verify that we can no longer create an app in a geography (as was previously possible).
print("\nMake sure we can't use geography.")
app.location = "unitedstates"
appName = "us-" + appName
app.subdomain = app.display_name = appName
try:
poller = iotc.apps.begin_create_or_update(resourceGroup, appName, app)
result = poller.result()
print("It worked but it shouldn't have!")
except Exception:
print("OK")
# Verify that S1 SKU is no longer allowed.
print("\nMake sure we can't use S1 SKU.")
appNameS1 = "s1-" + appName
app = App(location = region, sku = AppSkuInfo(name = "S1"))
app.subdomain = appNameS1
app.display_name = appNameS1
try:
poller = iotc.apps.begin_create_or_update(resourceGroup, appNameS1, app)
result = poller.result()
print("It worked but it shouldn't have!")
except Exception:
print("OK")
| 3,063 |
wave_visualizer.py
|
Animenosekai/AudioVisualization.py
| 0 |
2022682
|
import pyaudio
import struct
import matplotlib.pyplot as plt
import numpy as np
mic = pyaudio.PyAudio()
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 5000
CHUNK = int(RATE/5)
stream = mic.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK)
fig, ax = plt.subplots(figsize=(14,6))
x = np.arange(0, 2 * CHUNK, 2)
ax.set_ylim(-200, 200)
ax.set_xlim(0, CHUNK) #make sure our x axis matches our chunk size
line, = ax.plot(x, np.random.rand(CHUNK))
while True:
data = stream.read(CHUNK)
data = np.frombuffer(data, np.int16)
line.set_ydata(data)
fig.canvas.draw()
fig.canvas.flush_events()
plt.pause(0.01)
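# Hedged note (not in the original script): the loop above runs until the process is killed.
# One way to release the audio device cleanly would be to wrap it in try/finally, e.g.:
#
#     try:
#         while True:
#             ...  # read a chunk and update the plot, as above
#     finally:
#         stream.stop_stream()
#         stream.close()
#         mic.terminate()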
| 672 |