max_stars_repo_path (string, 4–182 chars) | max_stars_repo_name (string, 6–116 chars) | max_stars_count (int64, 0–191k) | id (string, 7 chars) | content (string, 100–10k chars) | size (int64, 100–10k) |
---|---|---|---|---|---|
Jan Bodnar - zetcode/Interface/event_object.py
|
jgorman3691/PyQt5Tutorials
| 0 |
2169740
|
#! python3
import sys, PyQt5
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QApplication, QGridLayout, QLabel
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
grid = QGridLayout()
x = 0
y = 0
self.text = f'x: {x}, y: {y}'
self.label = QLabel(self.text, self)
grid.addWidget(self.label, 0, 0, Qt.AlignTop)
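# Mouse tracking below makes mouseMoveEvent fire even while no button is pressed.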
self.setMouseTracking(True)
self.setLayout(grid)
self.setGeometry(300, 300, 450, 300)
self.setWindowTitle('Event Object')
self.show()
def mouseMoveEvent(self, e):
x = e.x()
y = e.y()
text = f'x: {x}, y: {y}'
self.label.setText(text)
def main():
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 953 |
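The schema at the top of this table describes per-file metadata plus the raw source text. As a minimal, hypothetical sketch (the export filename and Parquet format are assumptions, not part of this page), rows with these columns could be loaded and filtered like so:

import pandas as pd

# Hypothetical export file; only the schema and sample rows are shown above.
df = pd.read_parquet("train.parquet")

# Keep starred, small files and peek at one source snippet.
starred = df[(df["max_stars_count"] > 0) & (df["size"] < 10_000)]
print(starred[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
print(starred.iloc[0]["content"][:200])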
pygazebo/msg/log_playback_control_pb2.py
|
CryptoCopter/pygazebo
| 0 |
2168907
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: log_playback_control.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import time_pb2 as time__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='log_playback_control.proto',
package='gazebo.msgs',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1alog_playback_control.proto\x12\x0bgazebo.msgs\x1a\ntime.proto\"\x8c\x01\n\x12LogPlaybackControl\x12\r\n\x05pause\x18\x01 \x01(\x08\x12\x12\n\nmulti_step\x18\x02 \x01(\x11\x12\x0e\n\x06rewind\x18\x03 \x01(\x08\x12\x0f\n\x07\x66orward\x18\x04 \x01(\x08\x12\x1f\n\x04seek\x18\x05 \x01(\x0b\x32\x11.gazebo.msgs.Time\x12\x11\n\trt_factor\x18\x06 \x01(\x01'
,
dependencies=[time__pb2.DESCRIPTOR,])
_LOGPLAYBACKCONTROL = _descriptor.Descriptor(
name='LogPlaybackControl',
full_name='gazebo.msgs.LogPlaybackControl',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='pause', full_name='gazebo.msgs.LogPlaybackControl.pause', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='multi_step', full_name='gazebo.msgs.LogPlaybackControl.multi_step', index=1,
number=2, type=17, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rewind', full_name='gazebo.msgs.LogPlaybackControl.rewind', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='forward', full_name='gazebo.msgs.LogPlaybackControl.forward', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='seek', full_name='gazebo.msgs.LogPlaybackControl.seek', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rt_factor', full_name='gazebo.msgs.LogPlaybackControl.rt_factor', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=56,
serialized_end=196,
)
_LOGPLAYBACKCONTROL.fields_by_name['seek'].message_type = time__pb2._TIME
DESCRIPTOR.message_types_by_name['LogPlaybackControl'] = _LOGPLAYBACKCONTROL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
LogPlaybackControl = _reflection.GeneratedProtocolMessageType('LogPlaybackControl', (_message.Message,), {
'DESCRIPTOR' : _LOGPLAYBACKCONTROL,
'__module__' : 'log_playback_control_pb2'
# @@protoc_insertion_point(class_scope:gazebo.msgs.LogPlaybackControl)
})
_sym_db.RegisterMessage(LogPlaybackControl)
# @@protoc_insertion_point(module_scope)
| 4,626 |
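A brief usage sketch for the generated message above, assuming the module is importable as log_playback_control_pb2 (the field names come from the descriptor):

import log_playback_control_pb2

# Build a playback-control message, serialize it, then parse it back.
msg = log_playback_control_pb2.LogPlaybackControl()
msg.pause = True
msg.multi_step = 3
msg.rt_factor = 2.0
payload = msg.SerializeToString()

decoded = log_playback_control_pb2.LogPlaybackControl()
decoded.ParseFromString(payload)
assert decoded.pause and decoded.multi_step == 3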
chromium/chrome/browser/media/router/media_router.gyp
|
wedataintelligence/vivaldi-source
| 0 |
2169434
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'media_router.gypi',
],
'targets': [
{
# GN version: //chrome/browser/media/router:router
'target_name': 'media_router',
'type': 'static_library',
'include_dirs': [
'<(DEPTH)',
],
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/chrome/common_constants.gyp:common_constants',
'<(DEPTH)/components/components.gyp:keyed_service_content',
'<(DEPTH)/components/components.gyp:keyed_service_core',
'<(DEPTH)/skia/skia.gyp:skia',
'<(DEPTH)/url/url.gyp:url_lib',
],
'sources': [
'<@(media_router_sources)',
],
'conditions': [
[ 'OS!="android" and OS!="ios"', {
'dependencies': [
# media_router_type_converters.h needs the generated file.
'media_router_mojo_gen',
'media_router_mojo',
'<(DEPTH)/extensions/extensions.gyp:extensions_browser',
],
'sources': [
'<@(media_router_non_android_sources)',
]
}],
]
},
{
# Mojo compiler for the Media Router internal API.
'target_name': 'media_router_mojo_gen',
'type': 'none',
'sources': [
'media_router.mojom',
],
'includes': [
'../../../../third_party/mojo/mojom_bindings_generator.gypi',
],
},
{
'target_name': 'media_router_mojo',
'type': 'static_library',
'dependencies': [
'media_router_mojo_gen',
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/chrome/browser/media/router/media_router.mojom.cc',
'<(SHARED_INTERMEDIATE_DIR)/chrome/browser/media/router/media_router.mojom.h',
],
},
{
# GN version: //chrome/browser/media/router:test_support
'target_name': 'media_router_test_support',
'type': 'static_library',
'include_dirs': [
'<(DEPTH)',
],
'dependencies': [
'media_router',
'media_router_mojo',
'media_router_mojo_gen',
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/testing/gmock.gyp:gmock',
],
'sources': [
'<@(media_router_test_support_sources)',
],
},
],
}
| 2,402 |
2.part/Colored Dartboard.py
|
ferrerinicolas/python_samples
| 0 |
2168709
|
print("this is my third commit")
speed(5)
radius = 100
circle_quantity = 4
# this function makes circles
def make_circle():
pendown()
color(color_choice)
begin_fill()
circle(radius)
end_fill()
penup()
#This function will move tracy to next circle
def move_down():
penup()
left(90)
forward(25)
right(90)
pendown()
penup()
setposition(0,-100)
#Here we take the colors from input
for i in range(4):
color_choice = input("Which color for the circle?: ")
make_circle()
radius = radius - 25
move_down()
| 575 |
usersettings/admin.py
|
christianwgd/photos
| 0 |
2168529
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import UserSettings, Theme
@admin.register(Theme)
class ThemeAdmin(admin.ModelAdmin):
list_display = ['name', 'cssfile', ]
@admin.register(UserSettings)
class UserSettingsAdmin(admin.ModelAdmin):
list_display = ['user', 'limit', 'recent']
| 365 |
backend/expenses/admin.py
|
arineto/home_expenses
| 0 |
2169128
|
from django.contrib import admin
from rangefilter.filter import DateTimeRangeFilter
from .models import Category, Expense
class CategoryAdmin(admin.ModelAdmin):
list_display = ("name",)
search_fields = ("name",)
ordering = ("name",)
def settle_up(_, request, queryset):
queryset.update(is_settled=True)
class ExpenseAdmin(admin.ModelAdmin):
list_display = ("description", "value", "is_settled", "user", "category", "date")
search_fields = ("description", "value")
list_filter = (("date", DateTimeRangeFilter), "is_settled", "user__email", "category")
ordering = ("-date",)
actions = [settle_up]
admin.site.register(Category, CategoryAdmin)
admin.site.register(Expense, ExpenseAdmin)
| 729 |
resources/block_requests.py
|
gwasserfall/matcha
| 3 |
2169210
|
from flask import request
from flask_restful import Resource, abort
from flask_jwt_extended import get_jwt_identity
from helpers import jwt_refresh_required
from helpers.genders import genders
from helpers.email import send_validation_email
from models.user import User, get_full_user
from models.validation import Validation
from models.block_request import BlockRequest
from models.matches import Match
from helpers import Arguments
import traceback
class BlocksResource(Resource):
@jwt_refresh_required
def get(self, username):
current_user = get_jwt_identity()
blocked = BlockRequest.check_blocked(current_user["id"], username)
return blocked or {"blocked_them" : False, "blocked_them" : False}, 200
class BlockRequestsListResource(Resource):
@jwt_refresh_required
def get(self):
current_user = get_jwt_identity()
user = User.get(id=current_user["id"])
if not user.is_admin:
return {"message" : "You do not have admin access."}, 401
else:
return BlockRequest.get_all(), 200
@jwt_refresh_required
def post(self):
current_user = get_jwt_identity()
args = Arguments(request.json)
args.integer("reported_id")
args.string("reason")
args.validate()
block_request = BlockRequest(dict(args))
block_request.reporter_id = current_user["id"]
try:
block_request.save()
return {"message" : "User reported."}, 200
except Exception as e:
return {"message" : str(e)}, 400
class BlockRequestResource(Resource):
@jwt_refresh_required
def put(self, id):
current_user = get_jwt_identity()
user = User.get(id=current_user["id"])
if not user.is_admin:
return {"message" : "You are not authorised to review block requests"}, 401
else:
args = Arguments(request.json)
args.boolean("blocked")
args.string("admin_comments")
args.validate()
data = dict(args)
data["id"] = id
block_request = BlockRequest.get(id=data.get("id", None))
if block_request:
if block_request.blocked:
match = Match.check_match(block_request.reporter_id, block_request.reported_id)
if match["liked"] or match["matched"]:
my_like = Match.get(matcher_id=block_request.reporter_id, matchee_id=block_request.reported_id)
their_like = Match.get(matcher_id=block_request.reported_id, matchee_id=block_request.reporter_id)
if match["liked"] and match["matched"]:
try:
my_like.delete()
their_like.delete()
except Exception as e:
return {"message" : str(e)}, 500
elif match["liked"] and not match["matched"]:
try:
my_like.delete()
except Exception as e:
return {"message" : str(e)}, 500
block_request.reviewed = True
block_request.blocked = data["blocked"]
block_request.admin_comments = data["admin_comments"]
try:
block_request.save()
msg = "Request reviewed. User blocked." if block_request.blocked == 1 else "Request reviewed. User NOT blocked."
return {"message" : "{}".format(msg)}, 200
except Exception as e:
return {"message" : str(e)}, 400
else:
return {"messgae" : "The block request you are trying to update does not exist"}, 400
| 3,930 |
AI/FaceDetect/VisualizeDetect.py
|
NeKoSaNnn/AICloudAlbum
| 2 |
2168321
|
from matplotlib import pyplot as plt
def VisualizeBlocks(filename, blocks):
img = plt.imread(filename)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for block in blocks:
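# Each block is ordered (top, right, bottom, left) in pixel coordinates.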
top = block[0]
right = block[1]
bottom = block[2]
left = block[3]
start = (left, top)
width = right-left
height = bottom-top
rect = plt.Rectangle(start, width, height, fill=False, edgecolor='red', linewidth=2)
ax.add_patch(rect)
plt.imshow(img)
plt.show()
# cv2.imwrite('new_test_3.jpg', img)
| 568 |
tests/test_cm.py
|
JulienPalard/cloudmesh
| 0 |
2168978
|
""" run with
nosetests -v --nocapture test_cm.py
"""
from __future__ import print_function
from cloudmesh_base.util import HEADING
from cloudmesh_base.logger import LOGGER, LOGGING_ON, LOGGING_OFF
log = LOGGER(__file__)
import sys
import os
if os.path.isdir(os.path.expanduser("~/.cloudmesh")):
print("ERROR", "you must not have a .cloudmesh dir to run this test")
sys.exit()
class Test:
def get_user(self):
import cloudmesh
config = cloudmesh.load("user")
self.username = config.get("cloudmesh.hpc.username")
def setup(self):
self.project_id = "fg82"
def tearDown(self):
pass
def test_01_git(self):
os.system("git pull")
def test_02_init(self):
HEADING()
os.system("./install system")
os.system("./install requirements")
def test_03_user(self):
HEADING()
os.system("./install new")
def test_04_cloudmesh_install(self):
HEADING()
os.system("./install cloudmesh")
def test_05_fetch(self):
HEADING()
os.system("cm-iu user fetch")
os.system("cm-iu user create")
def test_06_mongo(self):
HEADING()
os.system("fab mongo.reset")
def test_07_key(self):
HEADING()
self.get_user()
os.system('cm "key add --keyname={0}-key ~/.ssh/id_rsa.pub"'.format(self.username))
def test_08_project(self):
os.system("cm project default {0}".format(self.project_id))
def test_09_help(self):
HEADING()
import cloudmesh
cloud_commands = [
"cloud",
"group",
"inventory",
"rain",
"storm",
"yaml",
"keys",
"defaults",
"image",
"list",
"register",
"user",
"debug",
"project",
"flavor",
"init",
"metric",
"security_group",
"vm",
"loglevel",
]
success = True
for command in cloud_commands:
execution = "help {0}".format(command)
print("testing", execution, end=' ')
try:
result = cloudmesh.shell(execution)
except Exception as e:
success = False
print(e)
if "Usage" not in result:
print(command, "ERROR", result)
success = False
else:
success = success
print("ok")
assert success
def test_10_cloud_on(self):
HEADING()
os.system("cm cloud on india")
| 2,757 |
ceptic/network.py
|
Kosinkadink/ceptic
| 2 |
2167908
|
import select
from sys import version_info
from ceptic.common import CepticException
class SocketCepticException(CepticException):
"""
General SocketCeptic Exception, inherits from CepticException
"""
pass
if version_info < (3, 0):
# Python 2
import socket
class SocketCeptic(object):
"""
Wrapper for normal or ssl socket; adds necessary CEPtic functionality to sending and receiving.
Usage: wrapped_socket = SocketCeptic(existing_socket)
"""
def __init__(self):
self.s = None
@classmethod
def wrap_socket(cls, s):
wrapped = SocketCeptic()
wrapped.s = s
return wrapped
def send(self, msg):
"""
Send message, prefixed by a 16-byte length
:param msg: string or bytes to send
:return: None
"""
# if there is nothing to send, then don't just send size
if not msg:
return
total_size = format(len(msg), ">16")
self.send_raw(total_size)
self.send_raw(msg)
def sendall(self, msg):
"""
Send message, wrapper for SocketCeptic.send
:param msg: string or bytes to send
:return: None
"""
return self.send(msg)
def send_raw(self, msg):
"""
Send message without prefix
:param msg: string or bytes to send
:return: None
"""
# if there is nothing to send, then do nothing
if not msg:
return
sent = 0
while sent < len(msg):
try:
sent += self.s.send(msg[sent:])
except socket.error as e:
raise SocketCepticException("connection was closed: {}".format(str(e)))
def recv(self, byte_amount, decode=True):
"""
Receive message, first the 16-byte length prefix, then the message of corresponding length. No more than the
specified amount of bytes will be received, but based on the received length less bytes could be received
:param byte_amount: integer
:param decode: does nothing, included for Python3 cross-compatibility
:return: received bytes, readable as a string
"""
try:
size_to_recv = self.recv_raw(16)
size_to_recv = int(size_to_recv.strip())
except ValueError:
raise SocketCepticException("no data received (EOF)")
amount = byte_amount
if size_to_recv < amount:
amount = size_to_recv
return self.recv_raw(amount)
def recv_raw(self, byte_amount, decode=True):
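"""
Receive exactly byte_amount bytes, stopping early on EOF
:param byte_amount: integer
:param decode: does nothing, included for Python3 cross-compatibility
:return: received bytes, readable as a string
"""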
recv_amount = 0
text = ""
try:
while recv_amount < byte_amount:
part = self.s.recv(byte_amount - recv_amount)
recv_amount += len(part)
text += part
if not part:
break
except (EOFError, OSError):
raise SocketCepticException("no data received (EOF)")
except socket.error as e:
raise SocketCepticException("connection was closed: {}".format(str(e)))
return text
def get_socket(self):
"""
Return raw socket instance
:return: basic socket instance (socket.socket)
"""
return self.s
def close(self):
"""
Close socket
:return: None
"""
self.s.close()
else:
# Python 3
class SocketCeptic(object):
"""
Wrapper for normal or ssl socket; adds necessary CEPtic functionality to sending and receiving.
Usage: wrapped_socket = SocketCeptic.wrap_socket(existing_socket)
"""
def __init__(self):
self.s = None
@classmethod
def wrap_socket(cls, s):
wrapped = SocketCeptic()
wrapped.s = s
return wrapped
def send(self, msg):
"""
Send message, prefixed by a 16-byte length
:param msg: string or bytes to send
:return: None
"""
# if there is nothing to send, then don't just send size
if not msg:
return
total_size = format(len(msg), ">16")
# send length and body
self.send_raw(total_size)
self.send_raw(msg)
def sendall(self, msg):
"""
Send message, wrapper for SocketCeptic.send
:param msg: string or bytes to send
:return: None
"""
return self.send(msg)
def send_raw(self, msg):
"""
Send message without prefix
:param msg: string or bytes to send
:return: None
"""
# if there is nothing to send, then don't just send size
if not msg:
return
# if it is already in bytes, do not encode it
sent = 0
while sent < len(msg):
try:
sent += self.s.send(msg[sent:].encode())
except AttributeError:
sent += self.s.send(msg[sent:])
except ConnectionResetError as e:
raise SocketCepticException("connection was closed: {}".format(str(e)))
def recv(self, byte_amount, decode=True):
"""
Receive message, first the 16-byte length prefix, then the message of corresponding length. No more than the
specified amount of bytes will be received, but based on the received length less bytes could be received
:param byte_amount: integer
:param decode: boolean for whether return will be str (True) or bytes (False)
:return: received bytes, readable as a string
"""
try:
size_to_recv = self.recv_raw(16)
size_to_recv = int(size_to_recv.strip())
except ValueError:
raise SocketCepticException("no data received (EOF)")
amount = byte_amount
if size_to_recv < byte_amount:
amount = size_to_recv
return self.recv_raw(amount, decode)
def recv_raw(self, byte_amount, decode=True):
"""
Receive message of corresponding length. No more than the
specified amount of bytes will be received
:param byte_amount: integer
:param decode: boolean for whether return will be str (True) or bytes (False)
:return: received bytes, readable as a string
"""
recv_amount = 0
text = bytes()
try:
while recv_amount < byte_amount:
part = self.s.recv(byte_amount - recv_amount)
recv_amount += len(part)
text += part
if not part:
break
except (EOFError, OSError):
raise SocketCepticException("no data received (EOF)")
except ConnectionResetError as e:
raise SocketCepticException("connection was closed: {}".format(str(e)))
if decode:
return text.decode()
return text
def get_socket(self):
"""
Return raw socket instance
:return: basic socket instance (socket.socket)
"""
return self.s
def close(self):
"""
Close socket
:return: None
"""
self.s.close()
def select_ceptic(read_list, write_list, error_list, timeout):
"""
CEPtic wrapper version of the select function
:param read_list: see select.select
:param write_list: see select.select
:param error_list: see select.select
:param timeout: see select.select
:return: see select.select
"""
read_dict = {}
write_dict = {}
error_dict = {}
# fill out dicts with socket:SocketCeptic pairs
for sCep in read_list:
read_dict.setdefault(sCep.get_socket(), sCep)
for sCep in write_list:
write_dict.setdefault(sCep.get_socket(), sCep)
for sCep in error_list:
error_dict.setdefault(sCep.get_socket(), sCep)
ready_to_read, ready_to_write, in_error = select.select(read_dict.keys(), write_dict.keys(), error_dict.keys(),
timeout)
# lists returned back
ready_read = []
ready_write = []
have_error = []
# fill out lists with corresponding SocketCeptics
for sock in ready_to_read:
ready_read.append(read_dict[sock])
for sock in ready_to_write:
ready_write.append(write_dict[sock])
for sock in in_error:
have_error.append(error_dict[sock])
return ready_read, ready_write, have_error
| 9,159 |
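A minimal usage sketch for the wrapper above, assuming the module is importable as ceptic.network and that a Unix socketpair is available:

import socket
from ceptic.network import SocketCeptic

raw_a, raw_b = socket.socketpair()   # assumes a Unix-like platform
sock_a = SocketCeptic.wrap_socket(raw_a)
sock_b = SocketCeptic.wrap_socket(raw_b)

sock_a.send("hello")    # sends the 16-byte length prefix, then the body
print(sock_b.recv(1024))   # prints "hello"

sock_a.close()
sock_b.close()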
mbs/tests/test_views.py
|
hhuuggoo/multiuserblazeserver
| 7 |
2169315
|
import tempfile
import shutil
from os.path import exists, join
import json
from nose.tools import with_setup
from mbs.app import setup_app
from mbs.settings import settings
from . import config_file, data_file
test = None
data = None
t = None
datadir = None
def setup_function():
global app
global test
global datadir
global t
datadir = tempfile.mkdtemp()
config = config_file("config.py")
app = setup_app(config_file=config)
test = app.test_client()
data = settings.data
settings.data_directory = datadir
def teardown_function():
global app
global test
global data
global datadir
if exists(datadir):
shutil.rmtree(datadir)
app = None
test = None
data = None
datadir = None
old = None
def setup_auth_test():
global old
setup_function()
old = settings.auth_backend.can_write
def reject(path, username):
return False
settings.auth_backend.can_write = reject
def teardown_auth_test():
teardown_function()
global old
settings.auth_backend.can_write = old
@with_setup(setup_function, teardown_function)
def test_upload():
with open(data_file('test.csv')) as f:
resp = test.post("/upload",
data={'file' : (f, 'test.csv')}
)
assert resp.status_code == 200
result = json.loads(resp.data.decode('utf-8'))
assert result['path'] == "defaultuser/test.csv"
assert exists(join(settings.data_directory, result['path']))
@with_setup(setup_auth_test, teardown_auth_test)
def test_upload_without_permissions():
with open(data_file('test.csv')) as f:
resp = test.post("/upload",
data={'file' : (f, 'test.csv')}
)
assert resp.status_code == 403
assert not exists(join(settings.data_directory, "defaultuser", "test.csv"))
@with_setup(setup_function, teardown_function)
def test_configure():
resp = test.post("/configure",
data=json.dumps(
{'kwargs' : {'delimiter' : '\t'},
'uri' : "defaultuser/test.csv"
}),
headers={'content-type' : 'application/json'}
)
assert resp.status_code == 200
result = resp.data == 'success'
assert settings.storage['defaultuser/test.csv'] == {u'delimiter': u'\t'}
@with_setup(setup_auth_test, teardown_auth_test)
def test_configure_without_permissions():
#monkey patch auth backend to disallow upload
resp = test.post("/configure",
data=json.dumps(
{'kwargs' : {'delimiter' : '\t'},
'uri' : "defaultuser/test.csv"
}),
headers={'content-type' : 'application/json'}
)
assert resp.status_code == 403
| 2,846 |
trade_remedies_api/contacts/migrations/0004_auto_20190207_2037.py
|
uktrade/trade-remedies-api
| 1 |
2166659
|
# Generated by Django 2.0.1 on 2019-02-07 20:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("organisations", "0003_auto_20181211_1642"),
("cases", "0023_remove_submission_deficiency_document"),
("contacts", "0003_casecontact_organisation"),
]
operations = [
migrations.AlterUniqueTogether(
name="casecontact", unique_together={("case", "contact", "organisation")},
),
]
| 489 |
xfields/beam_elements/electronlens_interpolated.py
|
kparasch/xfields
| 0 |
2168892
|
import numpy as np
import xobjects as xo
import xtrack as xt
from ..fieldmaps import TriLinearInterpolatedFieldMap
from ..fieldmaps import TriCubicInterpolatedFieldMap
from ..fieldmaps import TriCubicInterpolatedFieldMapData
from ..fieldmaps import TriLinearInterpolatedFieldMapData
from ..general import _pkg_root
class ElectronLensInterpolated(xt.BeamElement):
_xofields={
'current': xo.Float64,
'length': xo.Float64,
'voltage': xo.Float64,
"fieldmap": TriCubicInterpolatedFieldMapData,
}
def __init__(self,
_context=None,
_buffer=None,
_offset=None,
length=None,
fieldmap=None,
x_range=None, y_range=None,
nx=None, ny=None,
dx=None, dy=None,
x_grid=None, y_grid=None,
rho=None,
current=None, voltage=None,
):
if _buffer is not None:
_context = _buffer.context
if _context is None:
_context = xo.context_default
# Choose a small number of slices with a large range in zeta.
# By copying the same potential on each slice, the
# interpolation becomes 2D, doesn't depend on the
# longitudinal variable.
nz = 11
z_range=(-1,1)
fieldmap = TriLinearInterpolatedFieldMap(x_range=x_range, y_range=y_range,
z_range=z_range, nx=nx, ny=ny, nz=nz,
dx=dx, dy=dy,
x_grid=x_grid, y_grid=y_grid,
solver="FFTSolver2p5D")
for ii in range(nz):
fieldmap.rho[:,:,ii] = rho
fieldmap.update_phi_from_rho()
tc_fieldmap = TriCubicInterpolatedFieldMap(x_grid=fieldmap._x_grid,
y_grid=fieldmap._y_grid,
z_grid=fieldmap._z_grid,
)
nx = tc_fieldmap.nx
ny = tc_fieldmap.ny
nz = tc_fieldmap.nz
dx = tc_fieldmap.dx
dy = tc_fieldmap.dy
dz = tc_fieldmap.dz
scale = [1., dx, dy, dz,
dx * dy, dx * dz, dy * dz,
dx * dy * dz]
phi = fieldmap.phi
#print(phi.shape)
##########################################################################
# dphi_dx = np.zeros_like(phi)
# dphi_dy = np.zeros_like(phi)
# dphi_dxdy = np.zeros_like(phi)
# dphi_dx[1:-1,:] = 0.5*(phi[2:,:] - phi[:-2,:])
# dphi_dy[:,1:-1] = 0.5*(phi[:,2:] - phi[:,:-2])
# dphi_dxdy[1:-1,:] = 0.5*(dphi_dy[2:,:] - dphi_dy[:-2,:])
# print("Setting derivatives: ")
# kk=0.
# for ix in range(nx):
# if (ix)/nx > kk:
# while (ix)/nx > kk:
# kk += 0.1
# print(f"{int(np.round(100*kk)):d}%..")
# for iy in range(ny):
# for iz in range(nz):
# index = 8 * ix + 8 * nx * iy + 8 * nx * ny * iz
# tc_fieldmap._phi_taylor[index+0] = phi[ix, iy, 0]
# tc_fieldmap._phi_taylor[index+1] = dphi_dx[ix, iy, 0]
# tc_fieldmap._phi_taylor[index+2] = dphi_dy[ix, iy, 0]
# tc_fieldmap._phi_taylor[index+3] = 0.
# tc_fieldmap._phi_taylor[index+4] = dphi_dxdy[ix, iy, 0]
# tc_fieldmap._phi_taylor[index+5] = 0.
# tc_fieldmap._phi_taylor[index+6] = 0.
# tc_fieldmap._phi_taylor[index+7] = 0.
##########################################################################
## Optimized version of above block ##########################################
phi_slice = np.zeros([phi.shape[0], phi.shape[1], 8])
for iz in range(nz):
phi_slice[:,:,0] = phi[:,:, iz]
phi_slice[1:-1,:,1] = 0.5*(phi[2:,:,iz] - phi[:-2,:,iz])
phi_slice[:,1:-1,2] = 0.5*(phi[:,2:,iz] - phi[:,:-2,iz])
phi_slice[1:-1,:,4] = 0.5*(phi_slice[2:,:,2] - phi_slice[:-2,:,2])
flat_slice = phi_slice.transpose(1,0,2).flatten()
len_slice = len(flat_slice)
index_offset = 8 * nx * ny * iz
tc_fieldmap._phi_taylor[index_offset:index_offset+len_slice] = _context.nplike_lib.asarray(flat_slice)
##############################################################################
self.xoinitialize(
_context=_context,
_buffer=_buffer,
_offset=_offset,
current=current,
length=length,
voltage=voltage,
fieldmap=tc_fieldmap)
srcs = []
srcs.append(_pkg_root.joinpath('headers/constants.h'))
srcs.append(_pkg_root.joinpath('fieldmaps/interpolated_src/tricubic_coefficients.h'))
srcs.append(_pkg_root.joinpath('fieldmaps/interpolated_src/cubic_interpolators.h'))
srcs.append(_pkg_root.joinpath('beam_elements/electronlens_src/electronlens_interpolated.h'))
ElectronLensInterpolated.XoStruct.extra_sources = srcs
| 5,317 |
ace/data/__init__.py
|
iory/ACEngine
| 5 |
2169709
|
# flake8: noqa
from .ace_utils import get_ace
from .ace_utils import get_english_resource_grammar
from .ace_utils import get_jacy_grammar
| 139 |
scripts/datalad_helper_scripts/batch_remove_deprecated_URLs.py
|
emmetaobrien/conp-dataset
| 18 |
2163213
|
import getopt
import json
import os
import re
import sys
import traceback
import git
def parse_input(argv):
"""
Displays the script's help section and parses the options given to the script.
:param argv: command line arguments
:type argv: array
:return: parsed and validated script options
:rtype: dict
"""
script_options = {}
description = (
"\nThis script can be used to remove from git-annex a series of URLs matching"
" a specific pattern.\n"
"\t- To run the script and print out the URLs that will be removed, use options"
" -d <dataset path> -u <invalid URL regex>.\n"
"\t- After examination of the result of the script, rerun the script with the same"
" option and add the -c argument for actual removal of the URLs.\n"
"\t- Option -v prints out progress of the script in the terminal.\n"
)
usage = (
f"\nusage : python {__file__} -d <DataLad dataset directory path> -u <invalid URL regex>\n"
"\noptions: \n"
"\t-d: path to the DataLad dataset to work on\n" # noqa: E131
"\t-u: regular expression for invalid URLs to remove from git-annex\n" # noqa: E131
"\t-c: confirm that the removal of the URLs should be performed. By default it will just print out what needs to be removed for validation\n" # noqa: E501,E131
"\t-v: verbose\n" # noqa: E131
)
try:
opts, args = getopt.getopt(argv, "hcd:u:")
except getopt.GetoptError:
sys.exit()
script_options["run_removal"] = False
script_options["verbose"] = False
if not opts:
print(description + usage)
sys.exit()
for opt, arg in opts:
if opt == "-h":
print(description + usage)
sys.exit()
elif opt == "-d":
script_options["dataset_path"] = arg
elif opt == "-u":
script_options["invalid_url_regex"] = arg
elif opt == "-c":
script_options["run_removal"] = True
elif opt == "-v":
script_options["verbose"] = True
if "dataset_path" not in script_options.keys():
print(
"\n\t* ----------------------------------------------------------------------------------------------------------------------- *" # noqa: E501
"\n\t* ERROR: a path to the DataLad dataset to process needs to be given as an argument to the script by using the option `-d` *" # noqa: E501
"\n\t* ----------------------------------------------------------------------------------------------------------------------- *", # noqa: E501
)
print(description + usage)
sys.exit()
if not os.path.exists(script_options["dataset_path"]):
print(
f"\n\t* ------------------------------------------------------------------------------ *"
f"\n\t* ERROR: {script_options['dataset_path']} does not appear to be a valid path "
f"\n\t* ------------------------------------------------------------------------------ *",
)
print(description + usage)
sys.exit()
if not os.path.exists(os.path.join(script_options["dataset_path"], ".datalad")):
print(
f"\n\t* ----------------------------------------------------------------------------------- *"
f"\n\t* ERROR: {script_options['dataset_path']} does not appear to be a DataLad dataset "
f"\n\t* ----------------------------------------------------------------------------------- *",
)
print(description + usage)
sys.exit()
if "invalid_url_regex" not in script_options.keys():
print(
"\n\t* --------------------------------------------------------------------------------------------------- *" # noqa: E501
"\n\t* ERROR: a regex for invalid URLs to remove should be provided to the script by using the option `-u` *" # noqa: E501
"\n\t* --------------------------------------------------------------------------------------------------- *", # noqa: E501
)
print(description + usage)
sys.exit()
return script_options
def get_files_and_urls(dataset_path, annex):
"""
Runs git annex whereis in the dataset directory to retrieve
a list of annexed files with their URLs' location.
:param dataset_path: full path to the DataLad dataset
:type dataset_path: string
:param annex: the git annex object
:type annex: object
:return: files' paths and their URLs, organized as follows:
{
<file-1_path> => [file-1_url-1, file-1_url-2 ...]
<file-2_path> => [file-2_url-1, file-2_url-2 ...]
...
}
:rtype: dict
"""
current_path = os.path.dirname(os.path.realpath(__file__))
results = {}
try:
os.chdir(dataset_path)
annex_results = annex("whereis", ".", "--json")
results_list = annex_results.split("\n")
for annex_result_item in results_list:
r_json = json.loads(annex_result_item)
file_path = r_json["file"]
file_urls = []
for entry in r_json["whereis"]:
file_urls.extend(entry["urls"])
results[file_path] = file_urls
except Exception:
traceback.print_exc()
sys.exit()
finally:
os.chdir(current_path)
return results
def filter_invalid_urls(files_and_urls_dict, regex_pattern):
"""
Filters out the URLs that need to be removed based on a regular
expression pattern.
:param files_and_urls_dict: files' path and their respective URLs.
:type files_and_urls_dict: dict
:param regex_pattern: regular expression pattern for URL filtering
:type regex_pattern: str
:return: filtered URLs per file
:rtype: dict
"""
filtered_dict = {}
for file_path in files_and_urls_dict.keys():
filtered_urls_list = filter(
lambda x: re.search(regex_pattern, x),
files_and_urls_dict[file_path],
)
filtered_dict[file_path] = filtered_urls_list
return filtered_dict
def remove_invalid_urls(filtered_file_urls_dict, script_options, annex):
"""
Removes URLs listed in the filtered dictionary from the files.
:param filtered_file_urls_dict: filtered URLs to remove per file
:type filtered_file_urls_dict: dict
:param script_options: options given to the script
:type script_options: dict
:param annex: the git annex object
:type annex: object
"""
dataset_path = script_options["dataset_path"]
current_path = os.path.dirname(os.path.realpath(__file__))
try:
os.chdir(dataset_path)
for file_path in filtered_file_urls_dict.keys():
for url in filtered_file_urls_dict[file_path]:
if script_options["run_removal"]:
if script_options["verbose"]:
print(f"\n => Running `git annex rmurl {file_path} {url}`\n")
annex("rmurl", file_path, url)
else:
print(
f"\nWill be running `git annex rmurl {file_path} {url}`\n",
)
except Exception:
traceback.print_exc()
finally:
os.chdir(current_path)
if __name__ == "__main__":
script_options = parse_input(sys.argv[1:])
repo = git.Repo(script_options["dataset_path"])
annex = repo.git.annex
# fetch files and urls attached to the file
if script_options["verbose"]:
print(
f"\n => Reading {script_options['dataset_path']} and grep annexed files with their URLs\n",
)
files_and_urls_dict = get_files_and_urls(script_options["dataset_path"], annex)
# grep only the invalid URLs that need to be removed from the annexed files
regex_pattern = re.compile(script_options["invalid_url_regex"])
if script_options["verbose"]:
print(
f"\n => Grep the invalid URLs based on the regular expression {regex_pattern}",
)
filtered_file_urls_dict = filter_invalid_urls(files_and_urls_dict, regex_pattern)
# remove the invalid URLs found for each annexed file
remove_invalid_urls(filtered_file_urls_dict, script_options, annex)
| 8,340 |
evals/chaos_eval/evaluate3D.py
|
linleon1995/prior_guiding_network
| 1 |
2168554
|
# -*- coding: utf-8 -*-
"""
This example shows the evaluation process used in CHAOS challenge.
A sample data is shared with original DICOM images, its ground truth
and an example of segmentation result.
Ground truth volume is used as reference to evaluate sample segmentation.
DICOM folder is used to transform voxel values into real world coordinates.
Hence, ASSD and MSSD metrics are calculated with millimeter.
09/07/2019
@author: <NAME>, <EMAIL>
"""
import os
from CHAOSmetrics import png_series_reader
from CHAOSmetrics import evaluate
# ======= Directories =======
# cwd = os.path.normpath(os.getcwd() + os.sep + os.pardir)
cwd = os.path.normpath(os.getcwd() + os.sep)
print(cwd)
ground_dir = os.path.normpath(cwd + '/Data_3D/Ground')
seg_dir = os.path.normpath(cwd + '/Data_3D/Segmentation')
dicom_dir = os.path.normpath(cwd + '/Data_3D/DICOM_anon')
# ======= Volume Reading =======
Vref = png_series_reader(ground_dir)
Vseg = png_series_reader(seg_dir)
print(Vseg.shape)
print('Volumes imported.')
# ======= Evaluation =======
print('Calculating...')
[dice, ravd, assd ,mssd]=evaluate(Vref,Vseg,dicom_dir)
print('DICE=%.3f RAVD=%.3f ASSD=%.3f MSSD=%.3f' %(dice, ravd, assd ,mssd))
# import nibabel as nib
# print(nib.load('/home/user/DISK/data/Jing/data/Training/raw/img0001.nii.gz').affine)
| 1,348 |
exercicios-Python/aula12a.py
|
pedrosimoes-programmer/exercicios-python
| 0 |
2168933
|
profissao = input('Qual a sua profissão: ')
if profissao.title() == 'Professor':
print('Que bela profissão!')
materia = input('Que matéria você ensina: '.title())
if materia.title() == 'Geografia':
print('Que bela matéria!')
else:
print('Odeio a matéria de {}!'.format(materia))
elif profissao.title() == 'Empresário':
print('Uma excelente profissão!')
salario = int(input('Qual seu salário: R$'))
if salario >= 15000:
print('Você ganha bastante! Parabéns!')
else:
print('Você ganha relativamente bem!')
opcao = input('Quer um aumento? ')
if opcao.title() in ('Sim', 'Yes', 'Sí'):
aumento = salario + (salario * 15/100)
print('Parabéns, você recebeu um aumento de 15%, agora seu novo salário é: {}'.format(aumento))
else:
print('Você é um idiota! Agora não adianta mais chorar!')
elif profissao.title() == 'Geógrafo':
print('Uma profissão difícil de ser encontrada no Brasil. Parabéns!')
else:
print('Você tem uma profissão normal, {}!'.format(profissao.title()))
| 1,098 |
infrastructor/data/DatabaseManager.py
|
ahmetcagriakca/ImageProcessingApi
| 0 |
2168465
|
import time
class DatabaseManager:
def __init__(self, connector, retry_count=3):
self.connector = connector
self.retry_count = retry_count
self.default_retry = 1
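# NOTE: insert_to_db() and _insert_to_db_with_retry() also rely on a
# self.sql_builder being attached by the caller or a subclass; it is not created here.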
def _connect_to_db(self):
self.connector.connect()
def _disconnect_from_db(self):
self.connector.disconnect()
def fetch(self, query):
self._connect_to_db()
cur = self.connector.connection.cursor()
cur.execute(query)
datas = cur.fetchall()
self._disconnect_from_db()
data_list=[]
for data in datas:
rows=[]
for row in data:
rows.append(row)
data_list.append(rows)
return data_list
def delete(self, query) -> None:
self._connect_to_db()
cur = self.connector.connection.cursor()
cur.execute(query)
self.connector.connection.commit()
self._disconnect_from_db()
def insert_many(self, executable_script, inserted_rows):
self._connect_to_db()
cur = self.connector.connection.cursor()
cur.prepare(executable_script)
cur.executemany(None, inserted_rows)
# for rows in insertedRows:
# cur.execute(executableScript,rows)
self.connector.connection.commit()
self._disconnect_from_db()
def insert_to_db_with_script(self, data, script):
self._connect_to_db()
self.connector.bulk_insert(data, script)
self._disconnect_from_db()
def insert_to_db(self, data):
self._connect_to_db()
result = self.connector.bulk_insert(data, self.sql_builder.build())
self._disconnect_from_db()
return result
def insert_to_db_for_page(self, data, page, limit):
self._connect_to_db()
result = self._insert_to_db_with_retry(data, page, limit, self.default_retry)
self._disconnect_from_db()
return result
def insert_to_db_with_paging(self, data, page, limit):
print(f"Operation started. data_length :{len(data)} page :{page} limit :{limit}")
data_length = len(data)
total_fragment_count = int(data_length / limit)
fragment_count = total_fragment_count - page
result = False
self._connect_to_db()
try:
executed_page = page
for rec in range(fragment_count):
processing_page = page + rec
# preparing data
start = processing_page * limit
end = start + limit
fragmented_data = data[start:end]
result = self._insert_to_db_with_retry(fragmented_data, start, end, self.default_retry)
if not result:
break
# finish operation
if result:
remaining_data_count = data_length - (total_fragment_count * limit)
# preparing data
start = total_fragment_count * limit
end = start + remaining_data_count
fragmented_data = data[start:end]
result = self._insert_to_db_with_retry(fragmented_data, start, end, self.default_retry)
finally:
self._disconnect_from_db()
def _insert_to_db_with_retry(self, data, start, end, retry):
try:
self.connector.bulk_insert(data, self.sql_builder.build())
except Exception as ex:
if (retry > self.retry_count):
print(f"Db write error on start:{start},end:{end} Error:{ex}")
return False
print(
f"Getting error on insert (Operation will be retried. Retry Count:{retry}). start:{start},end:{end}, Error:{ex}")
# retrying connect to db
self.connector.connect()
time.sleep(1)
return self._insert_to_db_with_retry(data, start, end, retry + 1)
print(f'Committed start:{start} end:{end}')
return True
def insert(self, executable_script):
self._connect_to_db()
self.connector.insert(executable_script)
self._disconnect_from_db()
| 4,152 |
scripts/python/metronomeStates.py
|
imec-int/choirbox
| 1 |
2168773
|
# import sys
# sys.path +=['.']
# from states import State, StateMachine
# # from StateMachine import StateMachine
# class pdSendBPM(State):
# def run(self):
# print("receiving")
# def next(self, input):
# print("next")
# return BPMsetter.taptempo
# class taptempo(State):
# def run(self):
# print("reading tap tempo")
# def next(self, input):
# print("next")
# class ProgramBPM(State):
# def run(self):
# print("updating tap tempo")
# def next(self, input):
# print("next")
# class BPMsetter(StateMachine):
# def __init__(self):
# print("statemachine init")
# StateMachine.__init__(self, BPMsetter.pdSendBPM)
# # self.button = button
# # self.led = led
from statemachine import StateMachine, State
class BPMStateMachine(StateMachine):
pdSendsBPM = State('pdSendsBPM', initial=True)
tapTempo = State('taptempo')
programBPM = State('programBPM')
tapped = pdSendsBPM.to(tapTempo)
tapStopped = tapTempo.to(programBPM)
go = programBPM.to(pdSendsBPM)
| 1,111 |
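A short, hypothetical driving sketch for the state machine above (python-statemachine generates an event method for each transition defined on the class):

sm = BPMStateMachine()
print(sm.current_state.name)   # pdSendsBPM
sm.tapped()                    # pdSendsBPM -> taptempo
sm.tapStopped()                # taptempo -> programBPM
sm.go()                        # programBPM -> pdSendsBPM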
cbe/cbe/human_resources/serializers.py
|
cdaf/cbe
| 3 |
2162950
|
from rest_framework import serializers
from drf_nest.serializer_fields import TypeField, GenericRelatedField
from cbe.party.serializers import IndividualSerializer, OrganisationSerializer
from cbe.party.models import Individual, Organisation
from cbe.human_resources.models import IdentificationType, Identification, Staff, Timesheet, TimesheetEntry
class IdentificationTypeSerializer(serializers.HyperlinkedModelSerializer):
type = TypeField()
issuer = serializers.HyperlinkedRelatedField(view_name='organisation-detail', lookup_field='enterprise_id', queryset=Organisation.objects.all())
class Meta:
model = IdentificationType
fields = ('type', 'url', 'name', 'issuer', 'system' )
class IdentificationSerializer(serializers.HyperlinkedModelSerializer):
type = TypeField()
identification_type_name = serializers.SerializerMethodField()
party = GenericRelatedField( many=False, url_only=True,
serializer_dict={
Individual: IndividualSerializer(),
Organisation: OrganisationSerializer(),
})
party_role = GenericRelatedField( many=False, url_only=True, serializer_dict={})
class Meta:
model = Identification
fields = ('type', 'url', 'identification_type', 'identification_type_name', 'valid_from', 'valid_to', 'number', 'party', 'party_role')
def get_identification_type_name(self,obj):
if obj.identification_type:
return obj.identification_type.name
else:
return None
class StaffSerializer(serializers.HyperlinkedModelSerializer):
type = TypeField()
party = GenericRelatedField( many=False,
serializer_dict={
Individual: IndividualSerializer(),
Organisation: OrganisationSerializer(),
})
company = serializers.HyperlinkedRelatedField(view_name='organisation-detail', lookup_field='enterprise_id', queryset=Organisation.objects.all())
class Meta:
model = Staff
fields = ('type', 'url', 'company', 'party' )
class TimesheetEntrySerializer(serializers.HyperlinkedModelSerializer):
type = TypeField()
class Meta:
model = TimesheetEntry
fields = ('type', 'url', 'timesheet', 'start', 'end', 'duration', 'notes' )
class TimesheetSerializer(serializers.HyperlinkedModelSerializer):
type = TypeField()
class Meta:
model = Timesheet
fields = ('type', 'url', 'staff', 'start_date', 'end_date', 'timesheet_entries' )
| 2,602 |
model-optimizer/extensions/front/tf/ObjectDetectionAPI_test.py
|
calvinfeng/openvino
| 0 |
2169579
|
"""
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from generator import generator, generate
from extensions.front.tf.ObjectDetectionAPI import calculate_shape_keeping_aspect_ratio, \
calculate_placeholder_spatial_shape
from mo.front.subgraph_matcher import SubgraphMatch
from mo.graph.graph import Graph
from mo.utils.custom_replacement_config import CustomReplacementDescriptor
from mo.utils.error import Error
class FakePipelineConfig:
def __init__(self, model_params: dict):
self._model_params = model_params
def get_param(self, param: str):
if param not in self._model_params:
return None
return self._model_params[param]
@generator
class TestCalculateShape(unittest.TestCase):
min_size = 600
max_size = 1024
@generate(*[(100, 300, 341, 1024, False),
(100, 600, 171, 1024, False),
(100, 3000, 34, 1024, False),
(300, 300, 600, 600, False),
(300, 400, 600, 800, False),
(300, 600, 512, 1024, False),
(1000, 2500, 410, 1024, False),
(1800, 2000, 600, 667, False),
(300, 100, 1024, 341, False),
(600, 100, 1024, 171, False),
(3000, 100, 1024, 34, False),
(400, 300, 800, 600, False),
(600, 300, 1024, 512, False),
(2500, 1000, 1024, 410, False),
(2000, 1800, 667, 600, False),
(300, 300, 1024, 1024, True),
(900, 300, 1024, 1024, True),
(1300, 900, 1024, 1024, True),
(1025, 1025, 1024, 1024, True),
])
def test_calculate_shape(self, h, w, th, tw, pad):
self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(h, w, self.min_size, self.max_size, pad), (th, tw))
class TestCalculatePlaceholderSpatialShape(unittest.TestCase):
def setUp(self):
self.graph = Graph()
self.graph.graph['user_shapes'] = None
self.replacement_desc = CustomReplacementDescriptor('dummy_id', {})
self.match = SubgraphMatch(self.graph, self.replacement_desc, [], [], [], '')
self.pipeline_config = FakePipelineConfig({})
def test_default_fixed_shape_resizer(self):
self.pipeline_config._model_params['resizer_image_height'] = 300
self.pipeline_config._model_params['resizer_image_width'] = 600
self.assertTupleEqual((300, 600),
calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config))
def test_fixed_shape_resizer_overrided_by_user(self):
self.pipeline_config._model_params['resizer_image_height'] = 300
self.pipeline_config._model_params['resizer_image_width'] = 600
self.graph.graph['user_shapes'] = {'image_tensor': [{'shape': [1, 400, 500, 3]}]}
self.assertTupleEqual((400, 500),
calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config))
def test_default_keep_aspect_ratio_resizer(self):
self.pipeline_config._model_params['resizer_min_dimension'] = 600
self.pipeline_config._model_params['resizer_max_dimension'] = 1024
self.assertTupleEqual((600, 600),
calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config))
def test_keep_aspect_ratio_resizer_overrided_by_user(self):
self.pipeline_config._model_params['resizer_min_dimension'] = 600
self.pipeline_config._model_params['resizer_max_dimension'] = 1024
self.graph.graph['user_shapes'] = {'image_tensor': [{'shape': [1, 400, 300, 3]}]}
self.assertTupleEqual((800, 600),
calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config))
def test_keep_aspect_ratio_resizer_overrided_by_user_pad(self):
self.pipeline_config._model_params['resizer_min_dimension'] = 600
self.pipeline_config._model_params['resizer_max_dimension'] = 1024
self.pipeline_config._model_params['pad_to_max_dimension'] = True
self.graph.graph['user_shapes'] = {'image_tensor': [{'shape': [1, 400, 300, 3]}]}
self.assertTupleEqual((1024, 1024),
calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config))
def test_missing_input_shape_information(self):
self.assertRaises(Error, calculate_placeholder_spatial_shape, self.graph, self.match, self.pipeline_config)
| 5,105 |
snakegame/gamestates/playing.py
|
vedard/SnakeGame
| 0 |
2166991
|
import curses
from snakegame.entities import Snake, Fruit
from snakegame import Config
class Playing:
def __init__(self, engine):
self.engine = engine
self.snake = Snake(self.engine.screen_middle())
self.fruit = Fruit(self.engine.screen_size())
self.score = 0
self.collected_fruit = 0
self.next_score_gain = Config.STARTING_FRUIT_SCORE
self.engine.delay = Config.STARTING_DELAY
self.snake.go_left()
def input(self):
key = self.engine.screen.getch()
if key == curses.KEY_UP:
self.snake.go_up()
if key == curses.KEY_DOWN:
self.snake.go_down()
if key == curses.KEY_LEFT:
self.snake.go_left()
if key == curses.KEY_RIGHT:
self.snake.go_right()
if key == ord('p'):
self.engine.pause()
if key == ord('q'):
exit()
def update(self):
self.snake.update()
if self.snake.is_colliding_with(self.fruit):
self.collect_fruit()
if (self.snake.is_running_over_himself() or
self.snake.is_out_of_screen(self.engine.screen_size())):
self.engine.end(self.score)
def render(self):
self.engine.screen.erase()
self.engine.screen.border(0)
self.snake.render(self.engine.screen)
self.fruit.render(self.engine.screen)
self.engine.screen.addstr(0, 2,
f"Score: {self.score:n} | Fruits: {self.collected_fruit}",
curses.color_pair(curses.COLOR_CYAN))
self.engine.screen.addstr(self.engine.screen_size().x - 1, 2,
f"Control: ← ↑ → ↓ Quit: q Pause: p",
curses.color_pair(curses.COLOR_CYAN))
def collect_fruit(self):
self.collected_fruit += 1
self.score += self.next_score_gain
if self.fruit.double_gain:
self.next_score_gain *= Config.SPECIAL_FRUIT_MULTIPLIER
self.engine.delay /= Config.DIFICULTY_MULTIPLIER
self.snake.add_part()
self.fruit = Fruit(self.engine.screen_size())
| 2,173 |
src/posim/utils.py
|
zrowland885/posim
| 0 |
2167659
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 16 21:39:24 2021
@author: zrowl
"""
def convert_images(filetype, read_dir, write_dir=None):
import os
from PIL import Image
if write_dir is None:
write_dir = read_dir
_, _, files = next(os.walk(read_dir))
for f in files:
img = Image.open(read_dir+f).convert('L')
filename = os.path.splitext(f)[0]
path = write_dir+filename+filetype
img.save(path)
read_dir = 'output_old3/'
write_dir = 'output/'
filetype = '.png'
convert_images(filetype, read_dir, write_dir)
| 585 |
Logistic_Regression/average_and_plot.py
|
htt-trangtran/smg
| 1 |
2168975
|
############################
# written by <NAME> and <NAME>
############################
"""
Average data and plot
"""
import pandas as pd
from csv import reader
import numpy as np
import matplotlib.pyplot as plt
#-------------------------------------------------------------------------------
# Functions: average data and get the average data
def load_csv_result (filename):
data = []
with open (filename, 'r') as file:
csv_reader = reader (file)
for row in csv_reader:
data.append(float(row[0]))
return data[1:]
def avg_data (listrecord, liststats, result, record_path, record_avg_path, num_seed):
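# Average each per-seed CSV series over num_seed runs and write the averaged
# series back to record_avg_path for future reference.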
# Get len(a)
record_name = listrecord[0] + '_seed_0'
a = np.array(load_csv_result (record_path + str(liststats[0]) + '_' + record_name + '.csv'))
n = len(a)
for record in listrecord:
# Set the initial steps
for stats in liststats:
result[record][stats + '_avg'] = np.zeros(n)
# Get the data
for seed in range (num_seed):
record_seed = record + '_seed_' + str(seed)
for stats in liststats:
a = np.array(load_csv_result (record_path + str(stats) + '_' + record_seed + '.csv'))
result[record][stats + '_avg'] += a / num_seed
# Save the average data for future references
record_name = record + '_avg_' + str(num_seed) + '.csv'
for stats in liststats:
pd.DataFrame(result[record][stats + '_avg'] ).to_csv(record_avg_path + str(stats) + '_' + record_name, index = False)
def get_avg_data (listrecord, liststats, result, record_avg_path, num_seed):
for record in listrecord:
# Get the data
record_name = record + '_avg_' + str(num_seed) + '.csv'
for stats in liststats:
a = np.array(load_csv_result (record_avg_path + str(stats) + '_' + record_name))
result[record][stats + '_avg'] = a
#-------------------------------------------------------------------------------
# Function plot data
def plot_data (dataname, num_epoch, listrecord, record_path, record_avg_path):
liststats = ['loss', 'grad_loss', 'acc_train', 'acc_test']
num_seed = 10
#---------------------------------------------------------------------------
# All the result will be keep in a dictionary:
result = {}
for name in listrecord:
result [name] = {}
#---------------------------------------------------------------------------
avg_data (listrecord, liststats, result, record_path, record_avg_path, num_seed)
get_avg_data (listrecord, liststats, result, record_avg_path, num_seed)
#---------------------------------------------------------------------------
number = np.arange(num_epoch[1], num_epoch[0] + num_epoch[1], num_epoch[1])
plt.figure()
mark = ['^', 'v', '>', '<', 'o', 's', 'x', '>', 'd', 'o', 's', 'x', '>', 'd',
's', 'v', '>', '<', 'o', 's', 'x', '>', 'd', 'o', 's', 'x']
plt.figure(figsize=(10,12))
# Plot loss
plt.subplot(2,1,1)
stats = 'loss_avg'
i = 0
for record in listrecord:
plt.plot (number[:], result[record][stats][:], marker = mark[i], label = listrecord[i].replace(dataname +'_',''))
i += 1
plt.legend(fontsize = 18)
plt.xticks(size = 18)
plt.yticks(size = 18)
plt.xlabel('Number of effective passes', fontsize = 25)
plt.ylabel('Train loss', fontsize = 25)
plt.yscale('log')
plt.title(dataname , fontsize = 28)
# Plot gradient
plt.subplot(2,1,2)
stats = 'grad_loss_avg'
i = 0
for record in listrecord:
plt.plot (number[:], result[record][stats][:], marker = mark[i], label = listrecord[i].replace(dataname +'_',''))
i += 1
plt.legend(fontsize = 18)
plt.xticks(size = 18)
plt.yticks(size = 18)
plt.xlabel('Number of effective passes', fontsize = 25)
plt.ylabel('Grad loss', fontsize = 25)
plt.yscale('log')
plt.show()
| 4,003 |
tests/plugins/loading/plugins/elements/incompatibleminor/incompatibleminor.py
|
doraskayo/buildstream
| 0 |
2168841
|
from buildstream import Element
class IncompatibleMinor(Element):
BST_MIN_VERSION = "2.1000"
def setup():
return IncompatibleMinor
| 144 |
examples/python/one.py
|
goldstar611/appimage-builder
| 155 |
2169388
|
#!/usr/bin/python3
import os
import subprocess
print("ONE!")
for k, v in os.environ.items():
print("%s: %s" % (k, v))
path = os.path.abspath(__file__)
path = os.path.dirname(path)
output = subprocess.check_output([os.path.join(path, "two.py")])
print("exec two from one: " + output.decode())
| 300 |
tools/kommuner.py
|
atlefren/beermap
| 4 |
2169782
|
# -*- coding: utf-8 -*-
import requests
import json
url = 'http://knreise.cartodb.com/api/v2/sql'
url += '?api_key=<KEY>'
url += '&q=SELECT navn, komm, ST_AsGeoJSON(the_geom) as geom from kommuner'
r = requests.get(url)
data = r.json()
features = []
for row in data['rows']:
geom = row.pop('geom')
print row
features.append({
"type": "Feature",
"geometry": json.loads(geom),
"properties": row
})
with open('../data/kommuner.geojson', 'w') as f:
f.write(json.dumps({
'type': 'FeatureCollection',
'features': features
}))
| 589 |
ur/pdf_front_page_img.py
|
rochester-rcl/islandora-import-scripts
| 1 |
2169115
|
#!/usr/bin/python
import os
import sys
import shutil
import re
import pdfpagetojpg
# pull list of SORTED item IDs from folder where XML files are stored
# (assumes Jeff will create one XML file / item and place them in a directory)
def get_file_list(file_directory):
fileList = []
for root, sub, files in os.walk(file_directory):
for item in files:
if item.endswith(".pdf"):
print("adding file " + os.path.join(root,item))
fileList.append(os.path.join(root,item))
else:
print("Skipping file " + item + " name pattern did not match")
return fileList
# ##################################
# Create the file structure for a book in Islandora
# ##################################
def add_derivative(file, destination):
print("add derivative")
pdfpagetojpg.convert_pdf_page(file, 400, destination)
# ########################################
# Main Program
# ########################################
def main():
# base directory of files to import
base_directory = input("Please enter directory of files to create pdf front page image: ")
if not os.path.isdir(base_directory):
print("Directory " + base_directory + " does not exist or is not a directory")
sys.exit()
else:
print("Directory found " + base_directory)
file_list = get_file_list(base_directory)
print("processing " + str(len(file_list)))
for my_file in file_list:
print("found file " + my_file)
dir_name = os.path.dirname(my_file)
dest_file_name = os.path.join(dir_name, "TN.jpg")
add_derivative(my_file, dest_file_name)
if __name__ == '__main__':
main()
| 1,763 |
polymorphorism.py
|
blulady/python
| 0 |
2168061
|
# Parent class User
# Polymorphism
# - allows a child class to override behavior of the parent class
class User:
name = "Mark"
email = "<EMAIL>"
password = "<PASSWORD>"
def getLoginInfo(self):
entry_name = input("Enter your name: ")
entry_email = input("Enter your email: ")
entry_password = input("Enter your password: ")
if (entry_email == self.email and entry_password == self.password):
print("Welcome Back, {}!".format(entry_name))
else:
print("The password or email is incorrect.")
#Child Class Employee
class Employee(User):
base_pay = 11.00
department = "General"
pin_number = "3980"
#This is the same method in the parent class "User".
#The difference is that, instead of using entry_password, we're using entry_pin.
def getLoginInfo(self):
entry_name = input("Enter your name: ")
entry_email = input("Entery your email: ")
entry_pin = input("Enter your pin: ")
if (entry_email == self.email and entry_pin == self.pin_number):
print("Welcome back, {}!".format(entry_name))
else:
print("The pin or email is incorrect")
#The following code invokes the methods inside each class for User and Employee.
customer = User()
customer.getLoginInfo()
manager = Employee()
manager.getLoginInfo()
| 1,356 |
server/fb.py
|
OmkarVedak/Safest-Path-Navigator
| 0 |
2167093
|
import firebase_admin
import time
from firebase_admin import credentials
from firebase_admin import db
from pyfcm import FCMNotification
import arrow
import utils
key = 'safest-path-firebase-adminsdk-two07-46c5f72b5d.json'
cred = credentials.Certificate(key)
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://safest-path.firebaseio.com'
})
push_service = FCMNotification(api_key="<KEY>")
THRESHOLD = 0
def check_locations():
ref = db.reference('locations')
user_ids = list(dict(ref.get()).keys())
for user_id in user_ids:
user = db.reference(f'users/{user_id}')
ttl_current_loc = user.get().get('ttl_current_loc', None)
ttl_preferred_loc = user.get().get('ttl_preferred_loc', None)
token = user.get()['token']
check_for_current_location(user, token, ttl_current_loc)
check_for_preferred_location(user, token, ttl_preferred_loc)
def check_for_current_location(user, token, ttl):
# Check if TTL expired, then proceed
if ttl and arrow.utcnow() < arrow.get(ttl):
print(user.get()['email'], "TTL alive, skipping check")
return
lat = user.get()['lat']
long = user.get()['long']
print(user.get()['email'], lat, long)
# Get crime score
score = utils.get_crime_score(lat, long, radius=0.5)
print(score)
if score > THRESHOLD:
print("Sending notification")
# Send notification
result = push_service.notify_single_device(
registration_id=token,
message_title="Safety alert",
message_body="You are near unsafe location. Kindly be alert"
)
# Set a TTL so that multiple notifications are not sent
# e.g. 10 min
user.child('ttl_current_loc').set(
arrow.utcnow().shift(minutes=10).format()
)
def check_for_preferred_location(user, token, ttl):
# Check if TTL expired, then proceed
if ttl and arrow.utcnow() < arrow.get(ttl):
print(user.get()['email'], "TTL alive, skipping check")
return
lat = user.get().get('preferred', {}).get('lat', None)
long = user.get().get('preferred', {}).get('long', None)
print(user.get()['email'], lat, long)
if not (lat and long): return
crime = utils.get_latest_crime(lat, long, radius=0.5)
if crime:
# Send notification
result = push_service.notify_single_device(
registration_id=token,
message_title="Crime alert",
message_body=f"{crime['parent_incident_type']} reported near your home location"
)
# Set a TTL so that multiple notifications are not sent
# e.g. 10 min
user.child('ttl_preferred_loc').set(
arrow.utcnow().shift(minutes=10).format()
)
if __name__ == "__main__":
while True:
check_locations()
time.sleep(10)
| 2,882 |
Primus/src/bot.py
|
HerouFenix/rl_bots
| 0 |
2164445
|
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.messages.flat.QuickChatSelection import QuickChatSelection
from rlbot.utils.structures.game_data_struct import GameTickPacket
from rlutilities.linear_algebra import vec3, norm
from util.game_info import GameInfo
from util.math import distance
from plays import strategy
# USED JUST FOR TESTING PURPOSES - COMMENT AFTER
from plays.play import Play
from plays.actions.drive import Drive, Stop, AdvancedDrive, Arrive
from plays.kickoff.kickoff import SimpleKickoff, SpeedFlipDodgeKickoff
from plays.strikes.strike import Strike, DodgeStrike, BumpStrike, CloseStrike, SetupStrike, DribbleStrike
from plays.strikes.aerial import AerialStrike, DoubleAerialStrike
from plays.dribbles.dribble import Dribble
from plays.defense.defense import Defense, GoToNet
from plays.defense.clear import BumpClear, DodgeClear, AerialClear
from plays.actions.jump import Jump, AirDodge, SpeedFlip, HalfFlip, AimDodge
from plays.utility.recovery import Recovery
from plays.utility.refuel import Refuel
from rlutilities.simulation import Input
DRAW_BALL_PREDICTIONS = False # Set to True if you want to show the ball prediction lines
DRAW_FLIGHT_PATH = True # Set to True if you want to show the ball flight path when aerialing
class Primus(BaseAgent):
def __init__(self, name, team, index):
super().__init__(name, team, index)
self.state = None
self.tick_counter = 0 # Used for RLBotTraining
self.play = None # The play the bot is trying to execute
self.controls: SimpleControllerState = SimpleControllerState()
self.primus = None #The agent
self.objective = "None" # Just for debugging purposes so we know what the agent's current objective is
def initialize_agent(self):
# Set up information about the boost pads now that the game is active and the info is available
#self.boost_pad_tracker.initialize_boosts(self.get_field_info())
# Set up information about the game (abstracted into the GameInfo class)
self.state = GameInfo(self.team)
self.state.set_mode("soccar")
self.primus = self.state.cars[self.index]
def get_output(self, packet: GameTickPacket):
# Wait a bit to avoid actions carrying over from the last goal
if self.tick_counter < 20:
self.tick_counter += 1
return Input()
# Start by updating the game's state
self.state.read_packet(packet, self.get_field_info())
# If the bot is not attempting to do anything
if self.play is None:
# Get a play to execute
self.play, self.objective = strategy.choose_play(self.state, self.primus)
"""Test individual moves"""
#self.test_pick_action()
# If bot has picked a play, execute it
if self.play is not None:
"""
# If ball is going into net, screw it, just try to clear
if (isinstance(self.play, Defense) or isinstance(self.play, GoToNet)) and self.state.ball_predictions is not None:
danger = False
for prediction in self.state.ball_predictions:
if prediction.time < 2.0 and self.state.net.check_inside(prediction.position):
danger = True
break
if danger:
self.play = strategy.pick_clear(self.state, self.primus)
self.objective = "Danger Clearing"
"""
# If we can interrupt, avoid demolition
if self.play.interruptible():
collisions = self.state.detect_collisions_with_agent(self.primus, 0.2, 1 / 60)
if(len(collisions) > 0):
# If team collision, only the lowest id car should dodge
if(collisions[0][1]):
if(collisions[0][0] > self.primus.id):
self.play = AirDodge(self.primus, 0.2)
self.controls.throttle = -1
self.objective = "Dodging Teammate"
else:
self.play = AirDodge(self.primus, 0.2)
self.objective = "Dodging Enemy"
self.controls.throttle = -1
self.play.step(self.state.time_delta)
self.controls = self.play.controls
if(self.play.finished): # If the bot finished its play
# Get a play to execute
self.play, self.objective = strategy.choose_play(self.state, self.primus) # Pick new play
"""Test individual moves"""
#self.test_pick_action()
# Draw play name
self.renderer.draw_string_3d(self.primus.position + vec3(0,0,10), 2, 2, self.play.name, self.renderer.white())
self.renderer.draw_line_3d(self.primus.position, self.state.ball.position, self.renderer.white())
self.renderer.draw_string_3d(self.primus.position + vec3(0,0,-5), 1, 1, f'Speed: {norm(self.primus.velocity):.1f}', self.renderer.white())
self.renderer.draw_rect_3d(self.state.ball.position , 8, 8, True, self.renderer.cyan(), centered=True)
self.renderer.draw_string_3d(self.primus.position + vec3(0,0, 20), 1, 1, self.objective, self.renderer.white())
# Draw target
if self.play.target is not None:
self.renderer.draw_line_3d(self.primus.position, self.play.target, self.renderer.cyan())
# Draw Ball predictions
if DRAW_BALL_PREDICTIONS and len(self.state.ball_predictions) > 0:
points = [ball.position for ball in self.state.ball_predictions]
if len(points) > 1:
self.renderer.draw_polyline_3d([vec3(p[0], p[1], 10) if p[2] < 10 else p for p in points], self.renderer.lime())
# Draw Flight path
if DRAW_FLIGHT_PATH:
if isinstance(self.play,AerialStrike) and len(self.play.flight_path) > 0:
self.renderer.draw_polyline_3d([vec3(p[0], p[1], 10) if p[2] < 10 else p for p in self.play.flight_path], self.renderer.orange())
elif isinstance(self.play,DoubleAerialStrike) and len(self.play.aerial_strike.flight_path) > 0:
self.renderer.draw_polyline_3d([vec3(p[0], p[1], 10) if p[2] < 10 else p for p in self.play.aerial_strike.flight_path], self.renderer.orange())
return self.controls
def test_pick_action(self):
# For debugging purposes, manually select what action the agent should do
# Jumps
#self.play = Jump(self.primus, 1.0)
#self.play = AirDodge(self.primus, 0.1,self.state.ball.position)
#self.play = SpeedFlip(self.primus)
#self.play = HalfFlip(self.primus)
#self.play = AimDodge(self.primus, 0.8, self.state.ball.position)
# Drive
self.play = Drive(self.primus,target_speed=5000)
#self.play = AdvancedDrive(self.primus, self.state.ball.position)
#self.play = Arrive(self.primus, arrival_time = 20.0)
#self.play = Stop(self.primus)
# Kickoffs
#self.play = SimpleKickoff(self.primus, self.state)
#self.play = SpeedFlipDodgeKickoff(self.primus, self.state)
# Strikes
#self.state.predict_ball()
#self.play = Strike(self.primus, self.state, self.state.enemy_net.center)
#self.play = DodgeStrike(self.primus, self.state, self.state.enemy_net.center)
#self.play = BumpStrike(self.primus, self.state, self.state.enemy_net.center)
#self.play = CloseStrike(self.primus, self.state, self.state.enemy_net.center)
#self.play = SetupStrike(self.primus, self.state, self.state.enemy_net.center)
#self.play = DribbleStrike(self.primus, self.state, self.state.enemy_net.center)
#self.play = AerialStrike(self.primus, self.state, self.state.enemy_net.center)
#aerial = AerialStrike(self.primus, self.state, self.state.enemy_net.center)
#self.play = DoubleAerialStrike(aerial)
# Dribble
#self.play = Dribble(self.primus, self.state.ball, self.state.enemy_net.center)
# Defense
#self.play = Defense(self.primus, self.state, self.state.ball.position, 10)
# Clear
#self.state.predict_ball()
#self.play = DodgeClear(self.primus, self.state)
#self.play = BumpClear(self.primus, self.state)
#self.play = AerialClear(self.primus, self.state)
# Utility
#self.play = Refuel(self.primus, self.state)
#self.play = Recovery(self.primus, self.state)
| 8,781 |
experiments/image_experiments.py
|
kckishan/Depth_and_Dropout
| 1 |
2168870
|
#!/usr/bin/env python
# coding: utf-8
from tqdm import tqdm
import os
import torch
import torchvision
import torchvision.transforms as transforms
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.style.use("default")
import seaborn as sns
sns.set_style("ticks")
import sys
sys.path.append("../")
from src.models.CNN import AdaptiveConvNet
from src.utils import get_device, plot_network_mask
import argparse
def argument_parser():
parser = argparse.ArgumentParser(description="Run Nonparametric Bayesian Architecture Learning")
parser.add_argument('--use-cuda', action='store_false',
help="Use CPU or GPU")
parser.add_argument("--prior_temp", type=float, default=1.,
help="Temperature for Concrete Bernoulli from prior")
parser.add_argument("--temp", type=float, default=.5,
help="Temperature for Concrete Bernoulli from posterior")
parser.add_argument("--epsilon", type=float, default=0.01,
help="Epsilon to select the activated layers")
parser.add_argument("--truncation_level", type=int, default=10,
help="K+: Truncation for Z matrix")
parser.add_argument("--a_prior", type=float, default=1.1,
help="a parameter for Beta distribution")
parser.add_argument("--b_prior", type=float, default=10.,
help="b parameter for Beta distribution")
parser.add_argument("--kernel", type=int, default=5,
help="Kernel size. Default is 3.")
parser.add_argument("--num_samples", type=int, default=5,
help="Number of samples of Z matrix")
parser.add_argument("--epochs", type=int, default=50,
help="Number of training epochs.")
parser.add_argument("--lr", type=float, default=0.003,
help="Learning rate.")
parser.add_argument("--l2", type=float, default=1e-6,
help="Coefficient of weight decay.")
parser.add_argument("--batch_size", type=float, default=64,
help="Batch size.")
parser.add_argument("--max_width", type=int, default=64,
help="Dimension of hidden representation.")
return parser.parse_known_args()[0]
args = argument_parser()
transform_train = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
# Normalize the test set same as training set without augmentation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transform_train, download=True)
test_dataset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform_test)
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=args.batch_size, num_workers=4, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=args.batch_size, num_workers=4, shuffle=False)
device = get_device(args)
model = AdaptiveConvNet(input_channels=1,
num_classes=10,
num_channels=args.max_width,
kernel_size=args.kernel,
args=args,
device=device).to(device)
model = model.to(device)
print(model)
loss_fn = nn.CrossEntropyLoss(reduction="none")
optimizer = torch.optim.AdamW(model.parameters(), args.lr, weight_decay=args.l2)
if not os.path.exists("results"):
os.mkdir("results")
def evaluate(test_loader):
loglike = 0
error_sum = 0
with torch.no_grad():
model.eval()
for i, (data, labels) in enumerate(test_loader):
data = data.float().to(device)
labels = labels.long().to(device)
output = model(data, args.num_samples)
pred = output.mean(0)
logits = F.softmax(pred, dim=1)
ll = -F.nll_loss(logits, labels, reduction="sum").item()
loglike += ll
predicted = torch.argmax(logits, 1)
error = predicted.ne(labels).sum().item()
error_sum += error
test_loglikes = loglike / len(test_dataset)
test_err = error_sum / len(test_dataset)
test_metrics = {'test_err': round(test_err * 100, 3),
'test_like': round(test_loglikes, 3)}
return test_metrics
train_losses = []
with tqdm(range(args.epochs)) as tq:
for epoch in tq:
train_loss = 0.0
model.train()
for i, (data, labels) in enumerate(train_loader):
data = data.float().to(device)
labels = labels.long().to(device)
# making grad zero
optimizer.zero_grad()
# sample an architecture
act_vec = model(data, args.num_samples)
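# Estimate the ELBO objective for this batch (assumed form: a data-fit term averaged
# over the sampled architectures plus a KL term scaled by kl_weight)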
loss = model.estimate_ELBO(loss_fn, act_vec, labels, N_train=len(train_dataset), kl_weight=1)
loss.backward()
optimizer.step()
# adding losses
train_loss += loss.item()
train_loss = train_loss / len(train_loader)
train_losses.append(train_loss)
test_results = evaluate(test_loader)
print("Test error: {} Test Log likelihood: {}".format(test_results['test_err'], test_results['test_like']))
kl_beta = model.structure_sampler.get_kl()
tq.set_postfix({'Tr. loss': '%.6f' % train_loss, 'KL Beta': '%.6f' % kl_beta})
torch.save(model, "results/model_MNIST.pt")
| 5,865 |
eggshell/config.py
|
Zeitsperre/eggshell
| 0 |
2168344
|
"""
Configuration
-------------
WPS servers often need to specify a number of paths for processes to find data, shapefiles, caches and determine where
outputs are stored. To make sure all birds use the same architecture, eggshell provides a :class:`Paths` class to help
with this.
"""
from os.path import join, pardir, abspath
from pywps import configuration
import logging
LOGGER = logging.getLogger("PYWPS")
class Paths(object):
"""This class facilitates the configuration of WPS birds."""
def __init__(self, module):
"""Instantiate class relative to the given module.
:param module: Imported module relative to which paths will be defined.
"""
self._base = module.__path__[0] # _base is not the top level
@property
def top_level(self):
""" return the top level directory of a WPS bird """
return abspath(join(self._base, pardir))
@property
def data(self):
"""Return the path to the data directory."""
return join(self._base, 'data')
@property
def shapefiles(self):
"""Return the path to the geographic data directory."""
return join(self.data, 'shapefiles')
@property
def testdata(self):
"""Return the path to the test data directory."""
return join(self.top_level, 'tests/testdata')
@property
def cache(self):
"""Return the path to the server cache directory."""
out = configuration.get_config_value("cache", "cache_path")
if not out:
LOGGER.warn("No cache path configured. Using default value.")
out = join(configuration.get_config_value("server", "outputpath"), "cache")
return out
@property
def outputpath(self):
"""Return the server directory for process outputs."""
return configuration.get_config_value("server", "outputpath")
@property
def outputurl(self):
"""Return the server URL for process outputs."""
return configuration.get_config_value("server", "outputurl").rstrip('/')
# @property
# def masks(self):
# """Return the path to the masks directory."""
# # TODO: currently this folder is not used
# return join(self.data, 'masks')
# @property
# def Rsrc_dir(self):
# """Return the path to the R source directory."""
# return os.path.join(self._base, 'Rsrc')
#
# @property
# def static(self):
# """Return the path to the static content directory."""
# return os.path.join(self._base, 'static')
# Should these go into the class or they're too specialized ?
def esgfsearch_distrib():
"""TODO"""
distrib = configuration.get_config_value("extra", "esgfsearch_distrib")
if distrib is None:
LOGGER.warn("No ESGF Search distrib option configured. Using default value.")
distrib = True
return distrib
def esgfsearch_url():
"""Return the server configuration value for the ESGF search node URL."""
url = configuration.get_config_value("extra", "esgfsearch_url")
if not url:
LOGGER.warn("No ESGF Search URL configured. Using default value.")
url = 'https://esgf-data.dkrz.de/esg-search'
return url
| 3,218 |
storage/team08/ArbolB+.py
|
strickergt128/tytus
| 0 |
2168467
|
class Nodo:
#constructor
def __init__(self, orden):
self.orden = orden
self.cuenta = 0
self.siguiente = None
self.claves = []
self.hijos = []
self.padre = None
def nodoLLeno(self):
return self.cuenta >= self.orden
def nodoSemiVacio(self):
return self.cuenta <= self.orden/2
def addClave(self, clave):
self.claves.append(clave)
self.cuenta += 1
self.claves.sort()
def buscar(self, clave):
if len(self.hijos)==0:
return self
return self.__buscar(0 ,clave)
def __buscar(self, valor, clave):
if valor == len(self.claves):
return self.hijos[valor].buscar(clave)
if clave >= self.claves[valor]:
return self.__buscar(valor+1,clave)
return self.hijos[valor].buscar(clave)
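# Illustrative descent: buscar(clave) starts at a node and, unless it is a leaf (no hijos),
# compares clave against the node's claves in order and recurses into the matching hijo.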
class Arbol:
def __init__(self, orden):
self.orden = orden
self.raiz = None
self.gr = None
def buscar(self, clave):
return self.raiz.buscar(clave)
| 1,073 |
dashboard/models.py
|
austinpray/uniqpanel
| 2 |
2169624
|
from datetime import timedelta
import humanize
from django.contrib.auth.models import AbstractUser, BaseUserManager
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
class UserManager(BaseUserManager):
def create_user(self, email, password, **extra_fields):
if not email:
raise ValueError(_('The Email must be set'))
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_active', True)
if extra_fields.get('is_staff') is not True:
raise ValueError(_('Superuser must have is_staff=True.'))
if extra_fields.get('is_superuser') is not True:
raise ValueError(_('Superuser must have is_superuser=True.'))
return self.create_user(email, password, **extra_fields)
class User(AbstractUser):
username = None
email = models.EmailField(_('email address'), unique=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def __str__(self):
return f'<User {self.email}>'
class FileAnalysisJob(models.Model):
completed_at_default = timezone.make_aware(
timezone.datetime.min, timezone.get_default_timezone())
created_at = models.DateTimeField(auto_now_add=True)
display_name = models.CharField(max_length=255)
file_name = models.CharField(max_length=255)
file_size_bytes = models.PositiveBigIntegerField(editable=False, default=0)
unique_lines = models.PositiveBigIntegerField(editable=False, default=0)
total_lines = models.PositiveBigIntegerField(editable=False, default=0)
redis_key = models.TextField(editable=False, default="")
elapsed_time_ns = models.PositiveBigIntegerField(editable=False, default=0)
owner = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return f"File({self.id}, {self.display_name})"
@classmethod
def create(cls, **kwargs):
f = cls(**kwargs)
if not f.display_name:
f.display_name = f.file_name
return f
@classmethod
def complete_jobs(cls):
return cls.objects.filter(elapsed_time_ns__gt=0)
def json_dict(self) -> dict:
# not dry but better explicit than implicit for security etc
return {
"id": self.id,
"createdAt": self.created_at,
"displayName": self.display_name,
"fileName": self.file_name,
"fileSizeBytes": self.file_size_bytes,
"fileSizeDisplay": humanize.naturalsize(self.file_size_bytes, binary=True),
"uniqueLines": self.unique_lines,
"totalLines": self.total_lines,
"elapsedTimeNs": self.elapsed_time_ns,
"elapsedTimeDisplay": humanize.precisedelta(
timedelta(microseconds=round(self.elapsed_time_ns/1000)),
minimum_unit='milliseconds'
)
}
| 3,237 |
src/pyobo/sources/umls/__main__.py
|
cthoyt/pyobo
| 0 |
2169831
|
# -*- coding: utf-8 -*-
"""CLI for UMLS exporter."""
from .umls import UMLSGetter
if __name__ == "__main__":
UMLSGetter.cli()
| 133 |
getPMIDs_only.py
|
NCBI-Hackathons/PubChem_WikiData_Interoperability
| 2 |
2167241
|
# getPMIDs_only.py
# Author: <NAME> (<EMAIL>)
# Input: file containing list of pubchem Ids (cid)
# Output: For each pubChem document of the cids, list of referenced pubMedIds
#
# Process:
# The PubChem TXT output URL is hardcoded.
# To go easy on the PubChem server, requests are batched into groups of 100 cids.
# The list of PubMedIds returned is printed.
#
# run the output of this program through " sort -un " to get the unique list of pubMedIds
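# Illustrative request built by getcidPubId below (the cids are example values):
#   https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/2244,3672/xrefs/PubMedId/TXT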
import httplib
import time
import json
# Request PubMedId in text format for a list of comma separated cids
def getcidPubId (x):
conn = httplib.HTTPSConnection("pubchem.ncbi.nlm.nih.gov")
request_txt = "/rest/pug/compound/cid/" + x + "/xrefs/PubMedId/TXT"
# print "rq=", request_txt
conn.request("GET", request_txt)
r1 = conn.getresponse()
#print r1.status, r1.reason
data1 = r1.read()
# print "data=", data1
return data1
# Get the list cids
f = open('pccompound_result.txt')
# Loop through the file
ctr=1
cidList=""
pubData=""
for line in f:
# strip off the linefeed
line = line.rstrip()
if ctr == 100:
# Have a batch of 100
cidList += line
pubData=getcidPubId(cidList)
print pubData
cidList=""
#time.sleep(4)
ctr=1
else:
# Build a batch
cidList += line + ","
ctr += 1
# Do the remaining part of the list
pubData=getcidPubId(cidList.rstrip(","))
pubData=getcidPubId(cidList)
print pubData
# Done
| 1,466 |
tools/spectrumvalidation/tests/validation/test_predict_voxels2.py
|
sodium24/CIL
| 0 |
2167970
|
# MIT License
#
# Copyright (c) 2019 DARPA
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This file is a part of the CIRN Interaction Language.
import pytest
import geopandas as gpd
import numpy as np
import pandas as pd
from shapely.geometry import box
from validation.predict import predict_voxels2
class TestPredictVoxels2(object):
def test_ramp_prediction_no_lag_1_s(self):
"""
Confirm baseline alg operation with a trivially estimable sequence
:return:
"""
time_step_s = 0.2
# ramp from 0.0 to 0.49 over 50 steps
source_duty_cycles = [0.0, ]*50
source_duty_cycles[0:3] = [.001, ]*3
# build up a sequence that a VAR with 3 lags should be REALLY good at estimating
for i in range(3, 50):
source_duty_cycles[i] = sum(source_duty_cycles[i-3:i])/2.0
# build up the source dataframe, set timestamps as the index, and drop the index name
source_timestamps_ms = np.array(range(50), dtype=np.float)*time_step_s*1000
source_timestamps_ms = source_timestamps_ms.astype(np.int64)
source_df = pd.DataFrame({"10": source_duty_cycles,
"timestamp": source_timestamps_ms})
source_df = source_df.set_index("timestamp", drop=True,)
del source_df.index.name
start_time = 0.0
stop_time = 5.0
samples_to_predict = 5
lags = 3
result_df = predict_voxels2(tx_pivot_df=source_df,
start_time=start_time,
stop_time=stop_time,
samples_to_predict=samples_to_predict,
lags=lags)
# pandas convention includes the endpoint. Subtract off a millisecond to avoid this
expected_df = source_df.loc[stop_time*1000:(stop_time+1.0)*1000-1]
# pytest doesn't do well with dataframes. Use pandas native check
pd.testing.assert_frame_equal(expected_df, result_df)
def test_ramp_prediction_no_lag_5_s(self):
"""
Confirm baseline alg operation doesn't blow up with a longer estimate
:return:
"""
time_step_s = 0.2
# ramp from 0.0 to 0.49 over 50 steps
source_duty_cycles = [0.0, ] * 50
source_duty_cycles[0:3] = [.001, ] * 3
# build up a sequence that a VAR with 3 lags should be REALLY good at estimating
for i in range(3, 50):
source_duty_cycles[i] = sum(source_duty_cycles[i - 3:i]) / 2.0
# build up the source dataframe, set timestamps as the index, and drop the index name
source_timestamps_ms = np.array(range(50), dtype=np.float) * time_step_s * 1000
source_timestamps_ms = source_timestamps_ms.astype(np.int64)
source_df = pd.DataFrame({
"10": source_duty_cycles,
"timestamp": source_timestamps_ms})
source_df = source_df.set_index("timestamp", drop=True, )
del source_df.index.name
start_time = 0.0
stop_time = 5.0
samples_to_predict = 25
lags = 3
result_df = predict_voxels2(tx_pivot_df=source_df,
start_time=start_time,
stop_time=stop_time,
samples_to_predict=samples_to_predict,
lags=lags)
# pandas convention includes the endpoint. Subtract off a millisecond to avoid this
expected_df = source_df.loc[stop_time * 1000:(stop_time + 5.0) * 1000 - 1]
# pytest doesn't do well with dataframes. Use pandas native check
pd.testing.assert_frame_equal(expected_df, result_df)
| 4,803 |
test/simple_box_pair_pooling.py
|
Seth-Park/pocket
| 13 |
2168073
|
"""
Test box pair pooling
<NAME> <<EMAIL>>
The Australian National University
Australian Centre for Robotic Vision
"""
import time
import torch
from collections import OrderedDict
from torchvision.ops import MultiScaleRoIAlign
from pocket.ops import SimpleBoxPairPool
def test():
# Image size
im_s = 1024
f = list([
torch.rand(1, 3, 128, 128),
torch.rand(1, 3, 64, 64),
torch.rand(1, 3, 32, 32),
])
pts = torch.rand(256, 4) * im_s
boxes_h = torch.zeros(128, 4)
boxes_h[:, 0] = torch.min(pts[:128, 0], pts[:128, 2])
boxes_h[:, 1] = torch.min(pts[:128, 1], pts[:128, 3])
boxes_h[:, 2] = torch.max(pts[:128, 0], pts[:128, 2])
boxes_h[:, 3] = torch.max(pts[:128, 1], pts[:128, 3])
boxes_o = torch.zeros(128, 4)
boxes_o[:, 0] = torch.min(pts[128:, 0], pts[128:, 2])
boxes_o[:, 1] = torch.min(pts[128:, 1], pts[128:, 3])
boxes_o[:, 2] = torch.max(pts[128:, 0], pts[128:, 2])
boxes_o[:, 3] = torch.max(pts[128:, 1], pts[128:, 3])
m1 = SimpleBoxPairPool(
output_size=7,
spatial_scale=[1/8, 1/16, 1/32],
sampling_ratio=2
)
# Compute pooled box pair features
out1 = m1(f, [boxes_h], [boxes_o])
boxes_union = boxes_h.clone()
boxes_union[:, 0] = torch.min(boxes_h[:, 0], boxes_o[:, 0])
boxes_union[:, 1] = torch.min(boxes_h[:, 1], boxes_o[:, 1])
boxes_union[:, 2] = torch.max(boxes_h[:, 2], boxes_o[:, 2])
boxes_union[:, 3] = torch.max(boxes_h[:, 3], boxes_o[:, 3])
f = OrderedDict([
(0, f[0]),
(1, f[1]),
(2, f[2])
])
m2 = MultiScaleRoIAlign(
[0, 1, 2],
output_size=7,
sampling_ratio=2
)
# Compute pooled box union features
out2 = m2(f, [boxes_union], [(im_s, im_s)])
# Compare the pooled features
# The two feature maps will be exactly the same when rois are mapped
# to the same level
# To do this, change line170 in pocket.ops.SimpleBoxPairPool
# - levels = self.map_levels(boxes_1, boxes_2)
# + levels = self.map_levels(box_pair_union)
assert out1.shape == out2.shape, \
"Inconsistent feature map size"
print("Pixels matched: {}/{}.".format(
torch.eq(out1, out2).sum(), torch.as_tensor(out1.shape).prod()))
if __name__ == '__main__':
test()
| 2,323 |
molecule/default/tests/test_default.py
|
osism/ansible-hosts
| 1 |
2169853
|
def test_hosts_file(host):
f = host.file("/etc/hosts")
assert f.exists
assert f.is_file
| 100 |
youtube/services.py
|
BudzynskiMaciej/Django-Project
| 0 |
2169773
|
import requests
from .models import Video
from DjangoTut.settings import YOUTUBE_API_ACCESS_KEY
class YoutubeService:
API_KEY = None
BASE_API_URL = "https://www.googleapis.com/youtube/v3/"
METHOD_GET = 'get'
METHOD_POST = 'post'
METHOD_DELETE = 'delete'
def __init__(self):
self.api_key = YOUTUBE_API_ACCESS_KEY
if not self.api_key:
print("Error Reading API_KEY, Service will not work")
def _make_request(self, resource, params, method=METHOD_GET):
url = self.BASE_API_URL + resource
request = getattr(requests, method)(url, params)
return request.json()
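# Illustrative call (params abbreviated): self._make_request('videos', {'chart': 'mostPopular', ...})
# issues a GET to https://www.googleapis.com/youtube/v3/videos and returns the parsed JSON body.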
def get_my_videos(self, username, max_results=15):
my_videos = []
params = {
'part': 'contentDetails',
'forUsername': username,
'key': self.api_key,
}
request = self._make_request('channels', params)
if not request['items']:
return my_videos
my_uploaded_playlist = request['items'][0]['contentDetails']['relatedPlaylists']['uploads']
params = {
'part': 'snippet',
'maxResults': str(max_results),
'playlistId': my_uploaded_playlist,
'key': self.api_key,
}
request = self._make_request('playlistItems', params)
for item in request['items']:
youtube_id = item['snippet']['resourceId']['videoId']
published_at = item['snippet']['publishedAt']
channel_id = item['snippet']['channelId']
title = item['snippet']['title']
description = item['snippet']['description']
thumbnail = item['snippet']['thumbnails']['medium']['url']
channel_title = item['snippet']['channelTitle']
video = Video(youtube_id=youtube_id, published_at=published_at, channel_id=channel_id, title=title,
description=description, thumbnail=thumbnail, channel_title=channel_title)
my_videos.append(video)
return my_videos
def get_most_popular_videos(self, max_results, region_code):
most_popular_videos = []
params = {
'part': 'snippet,statistics',
'chart': 'mostPopular',
'maxResults': str(max_results),
'regionCode': region_code,
'key': self.api_key
}
request = self._make_request('videos', params)
for item in request['items']:
youtube_id = item['id']
published_at = item['snippet']['publishedAt']
channel_id = item['snippet']['channelId']
title = item['snippet']['title']
description = item['snippet']['description']
thumbnail = item['snippet']['thumbnails']['medium']['url']
channel_title = item['snippet']['channelTitle']
view_count = item['statistics']['viewCount']
like_count = item['statistics']['likeCount']
dislike_count = item['statistics']['dislikeCount']
if 'commentCount' not in item['statistics']:
comment_count = 0
else:
comment_count = item['statistics']['commentCount']
video = Video(youtube_id=youtube_id, published_at=published_at, channel_id=channel_id, title=title,
description=description, thumbnail=thumbnail, channel_title=channel_title,
view_count=view_count, like_count=like_count, dislike_count=dislike_count,
comment_count=comment_count, is_most_viewed=True)
most_popular_videos.append(video)
return most_popular_videos
| 3,663 |
modules/tests/asset/asset001.py
|
apocsantos/eden
| 1 |
2169347
|
# Set up Assets
__all__ = ["asset001"]
# Selenium WebDriver
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
#from selenium.webdriver.common.keys import Keys
from gluon import current
from s3 import s3_debug
from tests import *
import unittest, re, time
def asset001():
config = current.test_config
browser = config.browser
driver = browser
driver.find_element_by_xpath("//div[@id='menu_div_sit']/a[3]/div").click()
driver.find_element_by_link_text("New").click()
driver.find_element_by_id("asset_asset_number").click()
driver.find_element_by_id("asset_asset_number").clear()
driver.find_element_by_id("asset_asset_number").send_keys("WS-100-17")
w_autocomplete("Wat","asset_asset_item","Water Purification Unit",False)
driver.find_element_by_id("asset_asset_sn").clear()
driver.find_element_by_id("asset_asset_sn").send_keys("WPU-4536-9381")
driver.find_element_by_id("asset_asset_comments").clear()
driver.find_element_by_id("asset_asset_comments").send_keys("test")
driver.find_element_by_css_selector("input[type=\"submit\"]").click()
driver.find_element_by_link_text("Home").click()
| 1,337 |
tests/client_test.py
|
mherrmann/dnsimple-python
| 12 |
2169569
|
import unittest
from dnsimple.client import Client
from dnsimple.version import version
class ClientTest(unittest.TestCase):
def test_defaults_base_url_to_production_api(self):
client = Client()
self.assertEqual('https://api.dnsimple.com', client.base_url)
def test_api_version(self):
client = Client()
self.assertEqual('v2', client.api_version)
def test_accepts_base_url_option(self):
client = Client(base_url='http://api.example.com')
self.assertEqual('http://api.example.com', client.base_url)
def test_sets_sandbox_environment(self):
client = Client(sandbox=True)
self.assertEqual('https://api.sandbox.dnsimple.com', client.base_url)
def test_access_token(self):
client = Client(access_token='token')
self.assertEqual('token', client.auth.token)
def test_uses_basic_authentication(self):
client = Client(email='<EMAIL>', password='<PASSWORD>')
self.assertEqual('<EMAIL>', client.auth.username)
self.assertEqual('secret', client.auth.password)
def test_uses_oauth2_authorization(self):
client = Client(access_token='token')
self.assertEqual('token', client.auth.token)
def test_uses_versioned_url(self):
client = Client()
self.assertEqual('https://api.dnsimple.com/v2/whoami', client.versioned('/whoami'))
def test_can_set_the_user_agent(self):
client = Client(user_agent="MySuperAPP")
self.assertEqual('MySuperAPP dnsimple-python/{version}'.format(version=version), client.user_agent)
if __name__ == '__main__':
unittest.main()
| 1,638 |
MAIN APP/carops/migrations/0011_remove_client_first_name.py
|
tiberius-ls/tiberius-ls.github.io
| 0 |
2169572
|
# Generated by Django 3.1.5 on 2021-02-01 16:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('carops', '0010_auto_20210201_1702'),
]
operations = [
migrations.RemoveField(
model_name='client',
name='first_name',
),
]
| 331 |
script.py
|
PercyOfficial/Image-Editor-1
| 0 |
2169817
|
class script(object):
START_MSG = """ <b>Hi {}
I'm [Cortana](https://telegra.ph/file/c13f2a5c27fed530955d5.jpg), an Image Editor Bot which Supports various modes
For more click help....</b>"""
HELP_MSG = """Hai, Follow these Steps..
<code>🌀 Send me any Image to Edit..</code>
<code>🌀 Select the Corresponding mode that you need</code>
<code>🌀 Your Edited Image will be Uploaded </code>
© @Cortana_BOTS 2020-2021"""
ABOUT_MSG = """⭕️<b>My Name : Cortana </b>
⭕️<b>Language :</b> <code>Python3</code>
⭕️<b>Library :</b> <a href='https://docs.pyrogram.org/'>Pyrogram 1.0.7</a>
⭕️<b>Creator :</b> 👉 <a href='t.me/percy_jackson_4'>Master Chief</a>"""
| 669 |
torch_glow/tests/nodes/index_select_test.py
|
aksingh-fb/glow
| 1 |
2166179
|
import unittest
import torch
from parameterized import parameterized
from tests import utils
class IndexSelectModule(torch.nn.Module):
def __init__(self, dimension):
super(IndexSelectModule, self).__init__()
self.dimension = dimension
def forward(self, tensor, index):
return torch.index_select(tensor, self.dimension, index)
class TestIndexSelect(unittest.TestCase):
@parameterized.expand(
[
("0-dim", torch.randn(3, 4), 0, torch.tensor([0, 2])),
("1-dim", torch.randn(3, 4), 1, torch.tensor([0, 2])),
("repeat index", torch.randn(3, 4), 1, torch.tensor([2, 2])),
]
)
def test_index_select(self, _, tensor, dimension, index):
utils.compare_tracing_methods(
IndexSelectModule(dimension),
tensor,
index,
skip_to_glow=True,
fusible_ops={"aten::index_select"},
)
| 938 |
pyroute2/test-get-device-ip-ns-1.py
|
li-ma/homework
| 0 |
2169365
|
from pyroute2 import IPDB
from pyroute2 import NetNS
from pyroute2.common import uifname
ip = IPDB(nl=NetNS('ns1'))
def test1():
try:
device = ip.interfaces['veth0']['ipaddr']
finally:
pass
for i in range(1000):
test1()
ip.release()
| 265 |
test/dom/html/test_p.py
|
jkloth/pyxml
| 2 |
2165806
|
from util import testAttribute, error
def test():
print 'testing source code syntax'
from xml.dom.html import HTMLParagraphElement
from xml.dom import implementation
doc = implementation.createHTMLDocument('Title')
p = doc.createElement('P')
print 'testing get/set'
p._set_align('left')
rt = p._get_align()
if rt != 'Left':
error('get/set align failed')
print 'get/set works'
if __name__ == '__main__':
test()
| 466 |
fuzzi-gen/fuzzi/processing/iris.py
|
hengchu/fuzzi-impl
| 4 |
2168186
|
import csv
import json
import re
import random
import pkg_resources
def main():
with open(pkg_resources.resource_filename('fuzzi',
'data/Iris/iris.data'),
'rt') as f:
reader = csv.reader(f)
l = list(reader)
# Converting to floats for Fuzzi purposes and assigning cluster labels
for i in range(len(l)):
y = len(l[0])-1
for j in range(len(l[0])-1):
l[i][j] = float(l[i][j])
if (l[i][y] == 'Iris-versicolor'):
l[i][y] = 2.0
elif (l[i][y] == 'Iris-virginica'):
l[i][y] = 1.0
elif (l[i][y] == 'Iris-setosa'):
l[i][y] = 0.0
data = {}
print(len(l[0]))
random.shuffle(l)
data['db'] = l[0:100]
data['db_test'] = l[100:-1]
#Initial cluster centers (can be played with)
x0 = random.randint(0, len(l)-1)
x1 = random.randint(0, len(l)-1)
x2 = random.randint(0, len(l)-1)
data['cs1'] = l[x0]
data['cs2'] = l[x1]
data['cs3'] = l[x2]
#data['cs'] = [l[x0], l[x1], l[x2]]
data_json_str = json.dumps(data, indent=4)
with open(pkg_resources.resource_filename('fuzzi',
'data/Iris/iris.json'),
'w') as outfile:
outfile.write(data_json_str)
| 1,329 |
lastfm_cg/__main__.py
|
dbeley/lastfm_cg
| 9 |
2169642
|
"""
Create lastfm album collage for an user
"""
import logging
import time
import argparse
import configparser
import pylast
import requests_cache
import os
from lastfm_cg import image_utils
from lastfm_cg import lastfm_utils
logger = logging.getLogger()
logging.getLogger("requests").setLevel(logging.WARNING)
temps_debut = time.time()
FORMAT = "%(levelname)s :: %(message)s"
TIMEFRAME_VALUES = ["7day", "1month", "3month", "6month", "12month", "overall"]
def lastfmconnect(api_key=None, api_secret=None):
if api_key and api_secret:
network = pylast.LastFMNetwork(api_key=api_key, api_secret=api_secret)
return network
else:
# Lastfm config file parsing
user_config_dir = os.path.expanduser("~/.config/lastfm_cg/")
try:
config = configparser.ConfigParser()
config.read(user_config_dir + "config.ini")
api_key = config["lastfm"]["api_key"]
api_secret = config["lastfm"]["api_secret"]
except Exception as e:
logger.error(
(
"Error with the config file. Be sure to have a valid "
"~/.config/lastfm_cg/config.ini file. Error : %s"
),
e,
)
if not os.path.exists(user_config_dir):
logger.info(
(
"Configuration folder not found. "
"Creating ~/.config/lastfm_cg/."
)
)
os.makedirs(user_config_dir)
if not os.path.isfile(user_config_dir + "config.ini"):
sample_config = (
"[lastfm]\n" "api_key=api_key_here\n" "api_secret=api_secret_here\n"
)
with open(user_config_dir + "config.ini", "w") as f:
f.write(sample_config)
logger.info(
(
"A sample configuration file has been created at "
"~/.config/lastfm_cg/config.ini. Go to "
"https://www.last.fm/api to create your own API keys "
"and put them in the configuration file."
)
)
exit()
network = pylast.LastFMNetwork(api_key=api_key, api_secret=api_secret)
return network
def main():
# argument parsing
args = parse_args()
if args.API_KEY and args.API_SECRET:
network = lastfmconnect(api_key=args.API_KEY, api_secret=args.API_SECRET)
else:
network = lastfmconnect()
if not args.columns:
args.columns = args.rows
# cache for python-requests
if not args.disable_cache:
cache_folder = os.path.expanduser("~/.local/share/lastfm_cg/")
if not os.path.exists(cache_folder):
logger.info("Cache folder not found. Creating %s", cache_folder)
os.makedirs(cache_folder)
if not os.path.isfile(cache_folder + "lastfm_cg_cache.sqlite"):
original_folder = os.getcwd()
os.chdir(cache_folder)
requests_cache.install_cache("lastfm_cg_cache")
os.chdir(original_folder)
requests_cache.configure(os.path.expanduser(cache_folder + "lastfm_cg_cache"))
if args.username:
users = [x.strip() for x in args.username.split(",")]
else:
logger.error("Use the -u/--username flag to set an username.")
exit()
if args.timeframe not in TIMEFRAME_VALUES:
logger.error(
"Incorrect value %s for timeframe. Accepted values : %s",
args.columns,
TIMEFRAME_VALUES,
)
exit()
for username in users:
user = network.get_user(username)
nb_covers = args.rows * args.columns if not args.top100 else 100
list_covers = lastfm_utils.get_list_covers(
user=user, nb_covers=nb_covers, timeframe=args.timeframe
)
img = (
image_utils.create_image(list_covers=list_covers, nb_columns=args.columns)
if not args.top100
else image_utils.create_top100_image(list_covers=list_covers)
)
# export image
if args.output_filename:
export_filename = args.output_filename
elif args.top100:
export_filename = (
f"{args.timeframe}_{username}_top100_{int(time.time())}.png"
)
else:
export_filename = f"{args.timeframe}_{username}_{args.columns*args.rows:004}_{int(time.time())}.png"
img.save(export_filename)
logger.info("Runtime : %.2f seconds." % (time.time() - temps_debut))
def parse_args():
parser = argparse.ArgumentParser(
description="Create lastfm album collage\
for one or several lastfm users."
)
parser.add_argument(
"--debug",
help="Display debugging information",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.INFO,
)
parser.add_argument(
"--timeframe",
"-t",
help="Timeframe (Accepted values : 7day, 1month,\
3month, 6month, 12month, overall.\
Default : 7day).",
type=str,
default="7day",
)
parser.add_argument(
"--rows",
"-r",
help="Number of rows (Default : 5).",
type=int,
default=5,
)
parser.add_argument(
"--columns",
"-c",
help="Number of columns (Default : number of rows).",
type=int,
)
parser.add_argument(
"--username",
"-u",
help="Usernames to extract, separated by comma.",
type=str,
)
parser.add_argument(
"-d",
"--disable_cache",
help="Disable the cache",
dest="disable_cache",
action="store_true",
)
parser.add_argument(
"--top100",
help="Create a top 100 image. Will override columns/rows.",
dest="top100",
action="store_true",
)
parser.add_argument("--API_KEY", help="Lastfm API key (optional)")
parser.add_argument("--API_SECRET", help="Lastfm API secret (optional)")
parser.add_argument(
"--output_filename", help="Output filename (optional, example: output.png)"
)
parser.set_defaults(disable_cache=False)
args = parser.parse_args()
logging.basicConfig(level=args.loglevel, format=FORMAT)
return args
if __name__ == "__main__":
main()
| 6,581 |
multimetric/cls/importer/base.py
|
Shardey/analyzer-thesis
| 14 |
2167649
|
class Importer():
class ImporterItem():
def __init__(self, _file, _cnt, _sev):
self._values = {
"filename": _file,
"content": _cnt,
"severity": _sev
}
def match(self, _filter):
return all([self._values[k] == v for k, v in _filter.items()])
def get(self):
return self._values
@staticmethod
def from_csv(line):
_sev = None
if len(line) > 2:
_sev = line[2]
return Importer.ImporterItem(_file=line[0], _cnt=line[1], _sev=_sev)
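# Illustrative rows handled by from_csv (the third column, severity, is optional):
#   ["a.py", "42"]            -> ImporterItem(_file="a.py", _cnt="42", _sev=None)
#   ["a.py", "42", "warning"] -> ImporterItem(_file="a.py", _cnt="42", _sev="warning")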
def __init__(self, args, filearg):
self._input = filearg
self._items = []
def getItems(self, _filter={}):
return [x.get() for x in self._items if x.match(_filter)]
def getSumItems(self, _filter={}):
_items = self.getItems(_filter=_filter)
if len(_items) == 1:
if str.isdigit(_items[0]["content"]):
return int(_items[0]["content"])
return len(_items)
| 1,061 |
tools/management/commands/output_all_fusion_protein.py
|
penglian518/protwis
| 0 |
2169734
|
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.conf import settings
from django.db import connection
from django.db.models import Q
from django.template.loader import render_to_string
from structure.models import Structure
from protein.models import Protein
from residue.models import ResidueGenericNumber, ResidueGenericNumberEquivalent
from construct.functions import *
from common import definitions
from common.selection import SelectionItem
from common.alignment_gpcr import Alignment
import xlsxwriter, xlrd
import logging, json, os
class Command(BaseCommand):
help = "Output all uniprot mappings"
logger = logging.getLogger(__name__)
def handle(self, *args, **options):
structures = Structure.objects.filter(refined=False)
self.fusions = {}
self.fusions_mutations = {}
self.fusions_starts = {}
self.fusions_ends = {}
f = open('fusions.json', 'w')
i = 0
for s in structures:
pdbname = str(s)
print(pdbname)
failed = []
# try:
protein = Protein.objects.filter(entry_name=pdbname.lower()).get()
protein_name = protein.parent.entry_name
d = fetch_pdb_info(pdbname,protein)
# except:
# print(pdbname,'failed')
# failed.append(pdbname)
# print(d)
# print(d['construct_sequences'])
self.find_fusion(d['construct_sequences'],protein_name,pdbname)
#print(self.fusions)
i += 1
# if i>10:
# break
self.save_excel()
def find_fusion(self,construct_sequences,protein_name,pdbname):
list_of_none_fusion = ['Not_Observed','Engineered mutation','','Conflict','Insertion','S-arrestin'] # 'Expression tag','Linker',
list_of_comfirmed_fusion = ['C8TP59','Q0SXH8','Q9V2J8','Soluble cytochrome b562','Endolysin','Rubredoxin','Lysozyme','Flavodoxin']
#ODD Rubredoxin
#Q9V2J8 GlgA glycogen synthase auto_4ZJ8
#C8TP59 Cytochrome b562
# Q0SXH8 Cytochrome b(562)
result = []
position = None
for name, segment in construct_sequences.items():
# print(segment)
position = segment['where']
seq = segment['seq']
mutations = None
if 'mutations' in segment:
mutations = segment['mutations']
wt_ranges = segment['ranges']
if name in list_of_none_fusion:
continue
if name == protein_name:
continue
if "Guanine" in name:
continue
if "N/A" in name:
continue
if name not in self.fusions:
self.fusions[name] = {}
self.fusions_mutations[name] = []
self.fusions_starts[name] = []
self.fusions_ends[name] = []
mutation_list = []
if mutations:
for mut,v in mutations.items():
mutation_list.append([v[0],str(mut),v[1]])
if mut not in self.fusions_mutations[name]:
self.fusions_mutations[name].append(mut)
wt_range_str = ""
for wt_range in wt_ranges:
wt_range_str += str(wt_range[0])+"-"+str(wt_range[1])
if wt_range[0] not in self.fusions_starts[name]:
self.fusions_starts[name].append(wt_range[0])
if wt_range[1] not in self.fusions_ends[name]:
self.fusions_ends[name].append(wt_range[1])
self.fusions[name][protein_name+"_"+pdbname] = {'mutations':mutation_list, 'wt_ranges':wt_ranges, 'position':position, 'sequence' : seq}
def save_excel(self):
"""Convert fusions info to excel file"""
workbook = xlsxwriter.Workbook('fusions.xlsx')
for name in self.fusions:
worksheet_name = name[0:30]
worksheet_name = worksheet_name.replace("/", " ")
worksheet = workbook.add_worksheet(worksheet_name)
headers = ['name','position','seq']
headers_lookup = {}
headers_start_end = {}
header_i = 3
for i,start in enumerate(sorted(self.fusions_starts[name])):
headers_start_end[start] = "start_"+str(start)
print(headers_start_end)
for i,end in enumerate(sorted(self.fusions_ends[name])):
if end in headers_start_end:
headers_start_end[end] = "both"
else:
headers_start_end[end] = "end_"+str(end)
print(headers_start_end)
for end in sorted(headers_start_end):
if headers_start_end[end]=="both":
headers.append("end_"+str(end))
headers.append("start_"+str(end))
headers_lookup["end_"+str(end)] = header_i
header_i +=1
headers_lookup["start_"+str(end)] = header_i
else:
headers.append(headers_start_end[end])
headers_lookup[headers_start_end[end]] = header_i
header_i +=1
print(headers_lookup)
for i,mut in enumerate(sorted(self.fusions_mutations[name])):
headers.append("Mut in "+str(mut))
headers_lookup["mut_"+str(mut)] = header_i
header_i +=1
row = 1
index = {}
col = 0
for h in headers:
worksheet.write(0, col, h)
index[h] = col
col += 1
for xtal_name,v in self.fusions[name].items():
worksheet.write(row, 0, xtal_name)
worksheet.write(row, 1, v['position'])
worksheet.write(row, 2, v['sequence'])
for mut in v['mutations']:
worksheet.write(row, headers_lookup['mut_'+mut[1]], mut[0]+mut[1]+mut[2])
for r in v['wt_ranges']:
worksheet.write(row, headers_lookup['start_'+str(r[0])], "X")
worksheet.write(row, headers_lookup['end_'+str(r[1])], "X")
row += 1
workbook.close()
| 6,402 |
contek_timbersaw/timed_rolling_file_handler.py
|
contek-io/contek-timbersaw
| 0 |
2169035
|
import os
import time
from logging.handlers import TimedRotatingFileHandler
from typing import Optional
from contek_timbersaw.async_compressor import AsyncCompressor
from contek_timbersaw.async_deleter import AsyncDeleter
class TimedRollingFileHandler(TimedRotatingFileHandler):
def __init__(
self,
log_dir: str,
file_suffix: str = '.log',
compression_format: Optional[str] = None,
retention: int = 0,
**kwargs,
) -> None:
super().__init__(log_dir, delay=True, **kwargs)
self._log_dir = log_dir
self._file_suffix = file_suffix
self._compress = AsyncCompressor(compression_format)
self._delete = AsyncDeleter(log_dir, retention)
self._update_current_file()
def doRollover(self):
self.close()
self._update_current_file()
self._calculate_new_rollover_at()
def _calculate_new_rollover_at(self) -> None:
current_time = int(time.time())
new_rollover_at = self.computeRollover(current_time)
while new_rollover_at <= current_time:
new_rollover_at = new_rollover_at + self.interval
if self._should_adjust_for_dst_change():
dst_now = time.localtime(current_time)[-1]
dst_at_rollover = time.localtime(new_rollover_at)[-1]
if dst_now != dst_at_rollover:
if not dst_now: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
new_rollover_at += addend
self.rolloverAt = new_rollover_at
def _should_adjust_for_dst_change(self) -> bool:
if self.utc:
return False
return self.when == 'MIDNIGHT' or self.when.startswith('W')
def _update_current_file(self) -> None:
if self.utc:
time_tuple = time.gmtime()
else:
time_tuple = time.localtime()
time_str = time.strftime(self.suffix, time_tuple)
file_name = time_str + self._file_suffix
new_file = os.path.join(self._log_dir, file_name)
if self.baseFilename is not None:
if new_file == self.baseFilename:
return
self._compress(self.baseFilename)
self.baseFilename = new_file
| 2,413 |
scripts/nuscenes_devkit/python-sdk/nuscenes_utils/geometry_utils.py
|
n2pham/3d-bat
| 335 |
2168392
|
# nuScenes dev-kit. Version 0.1
# Code written by <NAME>, 2018.
# Licensed under the Creative Commons [see licence.txt]
# from __future__ import annotations
import numpy as np
import math
from enum import IntEnum
class BoxVisibility(IntEnum):
""" Enumerates the various level of box visibility in an image """
ALL = 0 # Requires all corners are inside the image.
ANY = 1 # Requires at least one corner visible in the image.
NONE = 2 # Requires no corners to be inside, i.e. box can be fully outside the image.
IN_FRONT = 3 # Requires all corners to be 1 meter front of the camera AND at least one corner be visible in image.
def quaternion_slerp(q0, q1, fraction):
"""
Does interpolation between two quaternions. This code is modified from
https://www.lfd.uci.edu/~gohlke/code/transformations.py.html
:param q0: <np.array: 4>. First quaternion.
:param q1: <np.array: 4>. Second quaternion.
:param fraction: <float>. Interpolation fraction between 0 and 1.
:return: <np.array: 4>. Interpolated quaternion.
"""
_EPS = np.finfo(float).eps * 4.0
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = np.dot(q0, q1)
if abs(abs(d) - 1.0) < _EPS:
return q0
if d < 0.0:
# invert rotation
d = -d
np.negative(q1, q1)
angle = math.acos(d)
if abs(angle) < _EPS:
return q0
is_in = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * is_in
q1 *= math.sin(fraction * angle) * is_in
q0 += q1
return q0
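# Illustrative usage (values hypothetical): halfway interpolation between two unit quaternions
#   q_mid = quaternion_slerp(np.array([1., 0., 0., 0.]), np.array([0., 0., 0., 1.]), 0.5)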
def view_points(points, view, normalize):
"""
This is a helper class that maps 3d points to a 2d plane. It can be used to implement both perspective and
orthographic projections. It first applies the dot product between the points and the view. By convention,
the view should be such that the data is projected onto the first 2 axis. It then optionally applies a
normalization along the third dimension.
For a p
:param points: <np.float32: 3, n> Matrix of points, where each point (x, y, z) is along each column.
:param view: <np.float32: n, n>. Defines an arbitrary projection (n <= 4).
The projection should be such that the corners are projected onto the first 2 axis.
:param normalize: <bool>. Whether to normalize the remaining coordinate (along the third axis).
:return: <np.float32: 3, n>. Mapped point. If normalize=False, the third coordinate is the height.
"""
assert view.shape[0] <= 4
assert view.shape[1] <= 4
assert points.shape[0] == 3
viewpad = np.eye(4)
viewpad[:view.shape[0], :view.shape[1]] = view
nbr_points = points.shape[1]
# Do operation in homogeneous coordinates
points = np.concatenate((points, np.ones((1, nbr_points))))
points = np.dot(viewpad, points)
points = points[:3, :]
if normalize:
points = points / points[2:3, :].repeat(3, 0).reshape(3, nbr_points)
return points
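# Illustrative usage (names hypothetical): project 3D corners into the image plane
#   corners_2d = view_points(corners_3xN, camera_intrinsic_3x3, normalize=True)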
def box_in_image(box, intrinsic, imsize, vis_level=BoxVisibility.IN_FRONT):
"""
Check if a box is visible inside an image without accounting for occlusions.
:param box: <Box>.
:param intrinsic: <float: 3, 3>. Intrinsic camera matrix.
:param imsize: (width <int>, height <int>).
:param vis_level: <int>. One of the enumerations of <BoxVisibility>.
:return <Bool>. True if visibility condition is satisfied.
"""
corners_3d = box.corners()
corners_img = view_points(corners_3d, intrinsic, normalize=True)[:2, :]
visible = np.logical_and(corners_img[0, :] > 0, corners_img[0, :] < imsize[0])
visible = np.logical_and(visible, corners_img[1, :] < imsize[1])
visible = np.logical_and(visible, corners_img[1, :] > 0)
visible = np.logical_and(visible, corners_3d[2, :] > 1)
in_front = corners_3d[2, :] > 1 # True if a corner is at least 1 meter in front of camera.
if vis_level == BoxVisibility.ALL:
return all(visible)
elif vis_level == BoxVisibility.ANY:
return any(visible)
elif vis_level == BoxVisibility.NONE:
return True
elif vis_level == BoxVisibility.IN_FRONT:
return any(visible) and all(in_front)
else:
raise ValueError("vis_level: {} not valid".format(vis_level))
| 4,295 |
pcf/test/particle/aws/ecs/test_ecs_cluster.py
|
pmbrent/Particle-Cloud-Framework
| 46 |
2169342
|
# Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import moto
from unittest import TestCase
from pcf.particle.aws.ecs.ecs_cluster import ECSCluster
from pcf.core import State, pcf_exceptions
class TestECSCluster(TestCase):
particle_definition = {
"pcf_name": "pcf_ecs_cluster",
"flavor": "ecs_cluster",
"aws_resource": {
"clusterName": "core"
}
}
incorrect_particle_definition = {
'pcf_name': 'gg',
"flavor": "ecs_cluster",
}
@moto.mock_ecs
def test_get_status(self):
particle = ECSCluster(self.particle_definition)
status = particle.get_status()
assert status == {"status": "missing"}
@moto.mock_ecs
def test_apply_states(self):
particle = ECSCluster(self.particle_definition)
# Test start
particle.set_desired_state(State.running)
particle.apply()
assert particle.get_state() == State.running
# Test Update
# ECS Cluster has no update function
# Test Terminate
particle.set_desired_state(State.terminated)
particle.apply()
assert particle.get_state() == State.terminated
@moto.mock_ecs
def test_incorrect_definitions(self):
# Test missing definitions
self.assertRaises(pcf_exceptions.InvalidUniqueKeysException, ECSCluster, self.incorrect_particle_definition)
# Test Wrong Type in definition
self.incorrect_particle_definition["aws_resource"]={}
self.incorrect_particle_definition["aws_resource"]["clusterName"] = 100
particle = ECSCluster(self.incorrect_particle_definition)
particle.set_desired_state(State.running)
try:
particle.apply(sync=False)
except Exception as e:
is_right_exception = "Parameter validation" in e.args[0]
assert is_right_exception
| 2,432 |
mapgenerator.py
|
btminzon/RaspPiZeroGPS
| 0 |
2169976
|
import sys
import dblib
import simplekml
from string import Template
in_file = open("/home/pi/gpsProject/RaspPiZeroGPS/maps_template.html", "rt") # Read html template
template = in_file.read()
in_file.close()
def createKml(date):
kml = simplekml.Kml()
coordinates = dblib.getCoordinates(date)
kml.newlinestring(name='Route', description=date, coords=coordinates)
filename = 'Route_' + date + '.kml'
kml.save(filename)
print("File " + filename + " created successfully!")
def generateHtml(date):
global template
lnglat = dblib.getCoordinates(date)
output = Template(template).substitute(lnglat=lnglat)
out_file = open("/var/www/html/gps/index.html", "wt")
out_file.write(output)
out_file.close()
print("Map created successfully!")
if __name__ == '__main__':
date = sys.argv[1]
if sys.argv[2] == 'kml':
createKml(date)
elif sys.argv[2] == 'map':
generateHtml(date)
else:
print("Option " + sys.argv[2] + " invalid")
| 1,014 |
exercise/venv/lib/python3.7/site-packages/sqreen/instrumentation/strategies/dbapi2.py
|
assuzzanne/my-sqreen
| 0 |
2169667
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016, 2017, 2018, 2019 Sqreen. All rights reserved.
# Please refer to our terms for more information:
#
# https://www.sqreen.io/terms.html
#
""" DBApi2 strategy and custom connection and cursor classes
"""
import inspect
import logging
from ...utils import update_wrapper
from ..helpers import Proxy, partial
from ..hook_point import hook_point
from .base import BaseStrategy
LOGGER = logging.getLogger(__name__)
METHODS_CACHE = {}
def get_methods_and_builtin(object):
""" Iterate on an object and returns its attribute that are either
methods or builtins
"""
object_class = object.__class__
result = {}
# Check cache first
cache_result = METHODS_CACHE.get(object_class, None)
# If not cached do the whole check
if cache_result is None:
for key in dir(object.__class__):
# Ignore private or semi-private methods
if key.startswith("_"):
continue
try:
value = getattr(object, key)
except AttributeError:
continue
if inspect.ismethod(value) or inspect.isbuiltin(value):
result[key] = value
# Cache the computation
METHODS_CACHE[object_class] = result.keys()
# If cached, retrieve only the methods, don't check against that it's a
# method or a builtin
else:
for key in cache_result:
try:
value = getattr(object, key)
except AttributeError:
continue
result[key] = value
return result
class CustomCursor(Proxy):
""" CustomCursor proxy, it proxy a real Cursor of a DBApi2
module, instrument all methods with prefix ::Cursor.
"""
def __init__(self, real_cursor, hook_point, module_path):
super(CustomCursor, self).__init__(real_cursor)
hook_module = "{}::Cursor".format(module_path)
members = get_methods_and_builtin(self._obj)
for key, method in members.items():
new_hook_point = hook_point(hook_module, key, method)
object.__setattr__(self, key, new_hook_point)
class CustomConnection(Proxy):
""" CustomConnection proxy, it proxy a real Connection of a DBApi2
module, instrument all methods with prefix ::Connection and returns
CustomCursor when callin the cursor method.
"""
def __init__(self, real_connection, hook_point, module_path):
self._sqreen_hook_point = hook_point
self._sqreen_module_path = module_path
super(CustomConnection, self).__init__(real_connection)
hook_module = "{}::Connection".format(module_path)
members = get_methods_and_builtin(self._obj)
for key, method in members.items():
if key == "cursor":
# Don't hook on cursor
continue
new_hook_point = hook_point(hook_module, key, method)
object.__setattr__(self, key, new_hook_point)
def cursor(self, *args, **kwargs):
""" Instantiate a real cursor, proxy it via CustomCursor.
"""
return CustomCursor(
self._obj.cursor(*args, **kwargs),
self._sqreen_hook_point,
self._sqreen_module_path,
)
def custom_connect(hook_point, module_path, original_connect, *args, **kwargs):
""" Replacement to the connect function of a DBApi2 module. It will
instantiate a connection via the original connect function and proxy it
via CustomConnection defined earlier.
"""
def wrapper(*args, **kwargs):
return CustomConnection(
original_connect(*args, **kwargs), hook_point, module_path
)
wrapper = update_wrapper(wrapper, original_connect)
return wrapper
class DBApi2Strategy(BaseStrategy):
""" Strategy for DBApi2 drivers.
It's different from the SetAttrStrategy and requires special care in
hook_module and hook_name.
DBApi2 tries to hook on 'connect' method of DBApi2 compatible driver.
In order to do so, it needs the module name where 'connect' is available. It
must be the first part of hook_module, for example in sqlite3, it will be
'sqlite3'.
    The hook_module can then contain either '::Cursor' for hooking on
cursor methods or '::Connection' for hooking on connection methods.
The hook_name will then specify which method it will hook on.
For example with sqlite3, the tuple ('sqlite3::Connection', 'execute') will
reference the execute method on a sqlite3 connection.
In the same way, the tuple ('sqlite3::Cursor', 'execute') will reference
the execute method on a sqlite3 cursor.
    It works the same way for all DBApi2 drivers, even with psycopg2
where cursor class is actually defined as 'psycopg2.extensions.cursor',
'psycopg2::Cursor' will correctly reference all psycopg2 cursor methods.
"""
def __init__(
self,
module_path,
observation_queue,
queue,
import_hook,
before_hook_point=None,
):
super(DBApi2Strategy, self).__init__(
observation_queue, queue, import_hook, before_hook_point
)
self.module_path = module_path
self.hooked = False
def hook(self):
""" Accept a callback and store it. If it's the first callback
for this strategy, actually hook to the endpoint.
Once hooked, it will instrument all method on connection and cursor.
But if no callback is defined on a specific method, the overhead will
be minimal.
"""
        # Check if we already hooked
if not self.hooked:
self.import_hook.register_patcher(
self.module_path, None, "connect", self.import_hook_callback
)
self.hooked = True
def import_hook_callback(self, original):
""" Monkey-patch the object located at hook_class.hook_name on an
already loaded module
"""
_hook_point = partial(hook_point, self)
return custom_connect(_hook_point, self.module_path, original)
@staticmethod
def get_strategy_id(callback):
""" Returns the module part of hook_module (without everything after
::) as identifier for this strategy. Multiple hooks on sqlite3* should
be done in this strategy.
"""
# Check if the klass is part module and part klass name
if "::" in callback.hook_module:
return callback.hook_module.split("::", 1)[0]
else:
return callback.hook_module
def _restore(self):
""" The hooked module will always stay hooked
"""
pass
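# Illustrative note (not part of the original module): with this strategy a
# callback whose hook_module is "sqlite3::Cursor" and hook_name is "execute"
# ends up wrapping cursor.execute() on sqlite3 cursors, while
# get_strategy_id() groups it under "sqlite3" together with any
# "sqlite3::Connection" hooks, so they share a single DBApi2Strategy instance.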
| 6,703 |
scripts/evaluate_oracle.py
|
Js-Mim/wagner_vad
| 1 |
2169720
|
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__copyright__ = 'Fraunhofer IDMT'
"""
Oracle evaluation. Using the labels from the stamps of the libretti
and compared against the manually annotated ones. Providing upper bound
error.
"""
# imports
import numpy as np
from sklearn.metrics import precision_recall_fscore_support as prf
from tools import helpers
def perform_testing():
print('--- Performing Oracle Evaluation ---')
testing_data_dict = helpers.csv_to_dict(training=False)
keys = list(testing_data_dict.keys())
testing_key = keys[0] # Validate on the second composer
print('Testing on: ' + ' '.join(testing_key))
# Get data
_, y_annotated, _ = helpers.fetch_data(testing_data_dict, testing_key)
# Get data dictionary
training_data_dict = helpers.csv_to_dict(training=True)
training_keys = sorted(list(training_data_dict.keys()))[2]
print('Using predictions from: ' + " ".join(training_keys))
# Get data
_, y_noisy, _ = helpers.fetch_data(training_data_dict, training_keys)
res = prf(y_annotated, y_noisy, average='binary')
cls_error = np.sum(np.abs(y_annotated - y_noisy))/np.shape(y_annotated)[0] * 100.
print('Precision: %2f' % res[0])
print('Recall: %2f' % res[1])
print('Fscore: %2f' % res[2])
print('Error: %2f' % cls_error)
return None
if __name__ == "__main__":
np.random.seed(218)
# Testing
perform_testing()
# EOF
| 1,451 |
advent_of_code/day16.py
|
rfrazier716/advent_of_code_2019
| 0 |
2169608
|
import numpy as np
from pathlib import Path
from math import ceil
def get_base_pattern(phase_step):
return np.roll(np.repeat(np.array([0, 1, 0, -1]), phase_step + 1), -1)
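# For example (illustrative): get_base_pattern(1) repeats each of [0, 1, 0, -1]
# twice and rolls left by one, giving [0, 1, 1, 0, 0, -1, -1, 0].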
def flawed_ft(ft_input=np.array([])):
# generator to return the fft phase
output = np.full(ft_input.shape, 0) # output array that will be filled
ft_matrix=np.array([
np.tile(get_base_pattern(step), ceil(ft_input.shape[0] / get_base_pattern(step).shape[0]))[:ft_input.shape[0]]
for step in range(ft_input.shape[0])
])
while True:
# every time the generator is called return a new signal output by
output=np.mod(abs(np.matmul(ft_matrix,ft_input.T)),10) # multiply the input by the ft_matrix operator and keep only the ones digit
#output = np.array([
# np.mod(abs(np.sum(np.multiply(ft_input, pattern_array[step]))), 10)
# for step in range(ft_input.shape[0])]) # perform the FFT phase calculation by multiplying and summing
yield output
ft_input = output
def fast_fft(fft_input=np.array([])):
# generator for the fft which is valid only when you care about the latter half of the data
while True:
# the fft is the accumulation of the input reversed
output = np.flip(np.mod(np.cumsum(np.flip(fft_input)), 10))
yield output
fft_input = output
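# Why the reversed cumulative sum works (explanatory note, not original code):
# for an output position in the second half of the signal, the repeating
# pattern is 0 for every earlier position and 1 from that position onward, so
# each output digit is simply the suffix sum of the input modulo 10. Flipping
# the array, taking np.cumsum, and flipping back computes all suffix sums in
# one pass.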
def get_input():
puzzle_input_path = Path("puzzle_inputs") / "day16_input.txt"
with open(puzzle_input_path) as file:
puzzle_input = file.readline() # there's only one line of data
# puzzle_input = "02935109699940807407585447034323" # test input
puzzle_input = np.array([int(x) for x in puzzle_input.strip()]) # parse the input and convert to an numpy array
return puzzle_input
def puzzle_part_a(puzzle_input):
ft = flawed_ft(puzzle_input)
for _ in range(99):
next(ft)
signal_output = next(ft)[0:8]
print("output signal is {}".format(''.join([str(j) for j in signal_output])))
def puzzle_part_b(puzzle_input):
n_repetitions = 10000
input_length = puzzle_input.shape[0] * n_repetitions
input_offset = int(''.join([str(j) for j in puzzle_input[:7]]))
condensed_input_length = input_length - input_offset # the part of input we actually care to look at
print('input is {} digits long but answer offset is {} only need to look at last {} digits'.format(input_length,
input_offset,
input_length - input_offset))
quot, rem = divmod(condensed_input_length, puzzle_input.shape[0])
# print("{},{}".format(quot,rem))
condensed_input = np.concatenate((puzzle_input[-rem:], np.tile(puzzle_input, quot)))
# print(condensed_input.shape)
fft = fast_fft(condensed_input)
for _ in range(99):
next(fft) # iterate 99 steps, only care about 100th
puzzle_answer = ''.join([str(j) for j in next(fft)[:8]])
print("Answer is {}".format(puzzle_answer))
def puzzle_tests():
test_input_string="12345678"
puzzle_input=np.array([int(x) for x in test_input_string])
ft = flawed_ft(puzzle_input)
for _ in range(4):
print(next(ft))
signal_output = next(ft)[0:8]
print("output signal is {}".format(''.join([str(j) for j in signal_output])))
def main():
puzzle_input = get_input()
print("\n**Running Test Cases**")
puzzle_tests()
print("\n**Running Puzzle Part A**")
puzzle_part_a(puzzle_input)
print("\n**Running Puzzle Part B**")
puzzle_part_b(puzzle_input)
if __name__ == "__main__":
main()
| 3,711 |
backend/app/settings/messages.py
|
dnguyenngoc/film
| 1 |
2169152
|
BAD_REQUEST="Bad Request"
UNAUTHORIZED="Unauthorized"
FORBIDDEN="Forbidden"
NOT_FOUND="Not Found"
METHOD_NOT_ALLOWED="Method Not Allowed"
NOT_ACCEPTABLE="Not Acceptable"
UNPROCESSABLE_ENTITY="Unprocessable Entity"
INTERNAL_SERVER_ERROR="Internal Server Error"
# response
COMPLETE="Complete"
| 291 |
plotting_tools/SensorMap.py
|
cemac/UNRESP_AQSensorTools
| 0 |
2167703
|
# -*- coding: utf-8 -*-
"""Sensor Plotter
This module was developed by CEMAC as part of the UNRESP Project.
This script takes data from the AQ Sensors (obtained by getAQMeshData.py)
and plots the data.
Example:
To use::
Attributes:
.. CEMAC_AQMesh:
https://github.com/cemac/UNRESP_AQSensorTools
"""
import pandas as pd
import glob
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from pytz import timezone
from datetime import datetime
import cartopy.crs as ccrs
from cartopy.io import shapereader
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from matplotlib.font_manager import FontProperties
import cartopy.io.img_tiles as cimgt
# Station lat lonts
fname = '../aqtools/information.csv'
data = pd.read_csv(fname)
Stations = data.StationName
towns = (' El Panama', ' Rigoberto', ' Pacaya', ' El Crucero',
' La Concepcion', ' Masaya', ' San Marcos', ' Jinotepe')
townCoords = ((-86.2058, 11.972), (-86.2021, 11.9617), (-86.3013, 11.9553),
(-86.3113, 11.9923), (-86.189772, 11.936161),
(-86.096053, 11.973523), (-86.20317, 11.906584),
(-86.19993, 11.85017))
volcCoords = (-86.1608, 11.9854)
font = FontProperties()
font.set_weight('bold')
font.set_family('monospace')
extent = [-86.7, -86.0, 11.7, 12.2]
request = cimgt.OSM()
fig = plt.figure(figsize=(9, 13))
ax = plt.axes(projection=request.crs)
gl = ax.gridlines(draw_labels=True, alpha=0.2)
gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
# ax.set_extent(extent)
ax.set_extent(extent)
ax.add_image(request, 10, interpolation='spline36')
for i, town in enumerate(towns):
ax.plot(townCoords[i][0], townCoords[i]
[1], 'og', markersize=4, transform=ccrs.Geodetic())
ax.plot(volcCoords[0], volcCoords[1], '^r',
markersize=6, transform=ccrs.Geodetic())
plt.show()
| 1,972 |
convertmask/utils/auglib/optional/crop.py
|
wwdok/mask2json
| 27 |
2169027
|
'''
language: python
Description:
version: beta
Author: xiaoshuyui
Date: 2020-10-23 15:40:56
LastEditors: xiaoshuyui
LastEditTime: 2020-11-10 12:16:57
'''
import cv2
import numpy as np
import skimage.util.noise as snoise
from convertmask.utils.auglib.optional.generatePolygon import (
generatePolygon, generateRectangle)
from skimage import io
def rectangleCrop(img: np.ndarray, startPoint: tuple = None, noise=False):
imgShape = img.shape
mask = generateRectangle(imgShape, startPoint)
mask[mask != 255] = 1
mask[mask == 255] = 0
if noise:
noisedMask = np.ones(imgShape) * 255
noisedMask = snoise.random_noise(noisedMask, 's&p') * 255
noisedMask = np.array(noisedMask * (1 - mask), dtype=np.uint8)
return img * mask + noisedMask
return img * mask
def polygonCrop(img: np.ndarray,
startPoint: tuple = None,
convexHull=False,
noise=False):
imgShape = img.shape
mask = generatePolygon(imgShape, startPoint, convexHull)
mask[mask != 255] = 1
mask[mask == 255] = 0
if noise:
noisedMask = np.ones(imgShape) * 255
if len(imgShape) == 3:
mask = cv2.merge([mask, mask, mask])
noisedMask = snoise.random_noise(noisedMask, 's&p') * 255
noisedMask = np.array(noisedMask * (1 - mask), dtype=np.uint8)
return img * mask + noisedMask
return img * mask
def multiRectanleCrop(img: np.ndarray, number: int = 1, noise=False):
if isinstance(img,str):
img = io.imread(img)
imgShape = img.shape
mask = np.zeros(imgShape, dtype=np.uint8)
for _ in range(number):
mask += generateRectangle(imgShape)
mask[mask != 255] = 1
mask[mask == 255] = 0
if noise:
noisedMask = np.ones(imgShape) * 255
noisedMask = snoise.random_noise(noisedMask, 's&p') * 255
noisedMask = np.array(noisedMask * (1 - mask), dtype=np.uint8)
return img * mask + noisedMask
return img * mask
def multiPolygonCrop(img: np.ndarray,
number: int = 1,
noise=False,
convexHull=False):
imgShape = img.shape
mask = np.zeros((imgShape[0], imgShape[1]), dtype=np.uint8)
for _ in range(number):
mask += generatePolygon(imgShape, convexHull=convexHull)
mask[mask != 255] = 1
mask[mask == 255] = 0
if noise:
noisedMask = np.ones(imgShape) * 255
if len(imgShape) == 3:
mask = cv2.merge([mask, mask, mask])
noisedMask = snoise.random_noise(noisedMask, 's&p') * 255
noisedMask = np.array(noisedMask * (1 - mask), dtype=np.uint8)
return img * mask + noisedMask
return img * mask
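# Hedged usage sketch (not part of the original module): the image path is
# hypothetical, and the exact mask shapes depend on generateRectangle /
# generatePolygon, so treat this as an illustration only.
#
#   out = multiRectanleCrop('some_image.jpg', number=2, noise=False)
#   io.imsave('cropped.jpg', out)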
| 2,741 |
10-DataManipulationWithPandas/04-SubsettingColumns.py
|
Pegasus-01/Data-manipulation-and-merging-with-pandas
| 1 |
2169439
|
#part1
# Select the individuals column
individuals = homelessness['individuals']
# Print the head of the result
print(individuals.head())
#part2
# Select the state and family_members columns
state_fam = homelessness[['state' , 'family_members']]
# Print the head of the result
print(state_fam.head())
#part3
# Select only the individuals and state columns, in that order
ind_state = homelessness[['individuals' , 'state']]
# Print the head of the result
print(ind_state.head())
| 521 |
Discrete2D/torch-ac-composable/torch_ac_composable/algos/compositional_er_ppo_bcq.py
|
Lifelong-ML/Mendez2022ModularLifelongRL
| 0 |
2168654
|
import torch
import torch.nn.functional as F
from algos.agent_wrappers import CompositionalLearner
import random
import copy
class CompositionalErPpoBcq(CompositionalLearner):
def update_modules(self, task_id, use_pcgrad=False):
target_update = 1000
self.bcq_batch_size = 256
bcq_learning_rate = 1e-3
ppo_learning_rate = self.agent.lr
self.agent.lr = bcq_learning_rate
self.acmodel.set_use_bcq(task_id, True) # from the moment it goes through update modules, use BCQ
# since update modules is when the behavior cloning part is trained
self.target_acmodel = copy.deepcopy(self.acmodel)
self.agent.restart_optimizer()
# Normal offline phase
accommodation_tasks = self.filter_accommodation_tasks(task_id)
self.freeze_unused_modules(task_id)
self.txt_logger.info('accommodation tasks: {}'.format(accommodation_tasks))
for k, v in self.replay_buffer.items():
print(k, len(v))
loaders = {task: torch.utils.data.DataLoader(
dataset,
batch_size=self.bcq_batch_size,
shuffle=True,
num_workers=0,
pin_memory=True)
for task, dataset in self.replay_buffer.items() if task in accommodation_tasks
}
self._naive_epoch(loaders, task_id, target_update)
self.txt_logger.info('Accommodation task {}. Trained:'.format(task_id))
for name, param in self.acmodel.named_parameters():
if param.requires_grad and param.grad is not None:
self.txt_logger.info('\t' + name)
self.agent.lr = ppo_learning_rate
self.freeze_unused_modules(task_id, freeze=False)
def update_target(self):
self.target_acmodel.load_state_dict(self.acmodel.state_dict())
def _naive_epoch(self, loaders, task_id, target_update):
iter_num = 0
for i in range(10): # a single epoch
loaders_iter = {task: iter(l) for task, l in loaders.items()}
done = False
while not done: # assume same memory sizes
loss = 0.
n = 0
for task, l in loaders_iter.items():
try:
batch = next(l)
loss += self.compute_loss(batch, task, use_bcq=True)
n += self.bcq_batch_size
except StopIteration:
done = True
break
if not done:
loss /= n
self.agent.optimizer.zero_grad()
loss.backward()
self.agent.optimizer.step()
if (iter_num + 1) % target_update == 0:
self.update_target()
iter_num += 1
def compute_loss(self, batch, task_id, use_bcq=False):
if not use_bcq:
return super().compute_loss(batch, task_id)
state, action, reward, next_state, done = batch
state = state.to(self.acmodel.device, non_blocking=True)
action = action.to(self.acmodel.device, non_blocking=True)
reward = reward.to(self.acmodel.device, non_blocking=True)
next_state = next_state.to(self.acmodel.device, non_blocking=True)
done = done.to(self.acmodel.device, non_blocking=True)
# hacky, ugly, to avoid changing code too much...
state.image = state
next_state.image = next_state
with torch.no_grad():
next_q_values, bc_prob, _ = self.acmodel(next_state, task_id, return_bc=True)
bc_prob = bc_prob.exp()
bc_prob = (bc_prob / bc_prob.max(1, keepdim=True)[0] > self.acmodel.threshold).float()
next_action = (bc_prob * next_q_values + (1 - bc_prob) * -1e8).argmax(1, keepdim=True)
next_q_state_values, _, _ = self.target_acmodel(next_state, task_id, return_bc=True)
next_q_value = next_q_state_values.gather(1, torch.max(next_q_values, 1)[1].unsqueeze(1)).squeeze(1)
expected_q_value = reward + self.agent.discount * next_q_value * (1 - done)
q_values, bc_prob, bc_original = self.acmodel(state, task_id, return_bc=True)
q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)
loss = 1e-3 * (q_value - expected_q_value).pow(2).sum()
loss += F.nll_loss(bc_prob, action, reduction='sum')
loss += 1e-2 * bc_original.pow(2).sum()
return loss
def filter_accommodation_tasks(self, task_id):
accommodation_tasks = []
for task in self.observed_tasks:
for depth in range(self.depth):
if depth == 0 and self.acmodel.static_object_dict[task_id] == self.acmodel.static_object_dict[task]:
accommodation_tasks.append(task)
break
elif depth == 1 and self.acmodel.target_object_dict[task_id] == self.acmodel.target_object_dict[task]:
accommodation_tasks.append(task)
break
elif depth == 2 and self.acmodel.agent_dyn_dict[task_id] == self.acmodel.agent_dyn_dict[task]:
accommodation_tasks.append(task)
break
return accommodation_tasks
def freeze_unused_modules(self, task_id, freeze=True):
for i in range(self.acmodel.max_modules[0]):
if i != self.acmodel.static_object_dict[task_id]:
for p in self.acmodel.static[i].parameters():
p.requires_grad = not freeze
p.grad = None
for i in range(self.acmodel.max_modules[2]):
if i != self.acmodel.target_object_dict[task_id]:
for p in self.acmodel.target_pre[i].parameters():
p.requires_grad = not freeze
p.grad = None
for p in self.acmodel.target_post[i].parameters():
p.requires_grad = not freeze
p.grad = None
for i in range(self.acmodel.max_modules[3]):
if i != self.acmodel.agent_dyn_dict[task_id]:
for p in self.acmodel.agent_pre[i].parameters():
p.requires_grad = not freeze
p.grad = None
for p in self.acmodel.actor_layers[i].parameters():
p.requires_grad = not freeze
p.grad = None
for p in self.acmodel.critic_layers[i].parameters():
p.requires_grad = not freeze
p.grad = None
| 6,900 |
v1/projects/factories/project.py
|
buckyroberts/Website-API
| 64 |
2169191
|
import factory
from django.utils import timezone
from factory.django import DjangoModelFactory
from ..models.milestone import Milestone
from ..models.project import Project
from ...teams.factories.team import ProjectMemberFactory
class ProjectFactory(DjangoModelFactory):
title = factory.Faker('pystr', max_chars=250)
project_lead = factory.SubFactory(ProjectMemberFactory)
description = factory.Faker('text')
logo = factory.Faker('pystr', max_chars=200)
github_url = factory.Faker('pystr', max_chars=200)
overview = factory.Faker('text')
problem = factory.Faker('text')
target_market = factory.Faker('text')
benefits = factory.Faker('text')
centered_around_tnb = factory.Faker('text')
estimated_completion_date = factory.Faker('date_time', tzinfo=timezone.get_current_timezone())
is_featured = factory.Faker('pybool')
class Meta:
model = Project
class MilestoneFactory(DjangoModelFactory):
project = factory.SubFactory(ProjectFactory)
number = factory.Faker('pyint')
description = factory.Faker('text')
class Meta:
model = Milestone
| 1,125 |
solutions/day13/day13.py
|
FonziePants/adventofcode
| 0 |
2169558
|
#--- Day 13: Shuttle Search ---
def read_data(file_path,debug=True):
file = open(file_path, "r")
line0 = ""
line1 = ""
idx = 0
for line in file:
if not line.rstrip():
continue
if idx == 0:
line0 = line.rstrip()
else:
line1 = line.rstrip()
idx += 1
file.close()
data = (line0,line1)
if debug:
print(data)
return data
def calculate_part1(data,debug=False):
earliest_departure_time = int(data[0])
raw_bus_list = data[1]
# extract the bus list
bus_list_str = raw_bus_list.split(",")
bus_list = []
for bus_str in bus_list_str:
if bus_str == "x":
continue
bus_list.append(int(bus_str))
time = earliest_departure_time - 1
next_bus = -1
while next_bus < 0:
time += 1
for bus in bus_list:
if debug:
print("Time: {0}\nBus: {1}\nMod: {2}\n".format(time,bus,time % bus))
if time % bus == 0:
next_bus = bus
break
if debug:
print("Time: {0}\nBus: {1}\nWait: {2}\n".format(time,next_bus,(time - earliest_departure_time)))
    answer = next_bus * (time - earliest_departure_time)
print("Part 1: {0}\n\n".format(answer))
return answer
def calculate_part2(data,debug=False):
# extract the bus list
orig_bus_list = data[1].split(",")
buses = {}
for i in range(len(orig_bus_list)):
if orig_bus_list[i] == "x":
continue
buses[i] = int(orig_bus_list[i])
increment = buses[0]
del buses[0]
time = 0
while len(buses) > 0:
time += increment
if debug:
print("TIME: {0}".format(time))
buses_copy = buses.copy()
for i in buses_copy:
if (time + i) % buses_copy[i] == 0:
if debug:
print("BUS {0} at index {1}".format(buses_copy[i],i))
del buses[i]
increment *= buses_copy[i]
print("Part 2: {0}\n\n".format(time))
return time
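# Sketch of the sieve above (explanatory note): once a time t satisfies the
# constraint for a bus with period p, every later valid time differs by a
# multiple of p, so that bus is dropped and the step size is multiplied by p.
# E.g. for the input "3,5" the first bus restricts t to multiples of 3; t = 9
# also satisfies (t + 1) % 5 == 0, and from then on the step would be 15.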
def run_program(test=False, debug=False):
file_path = "solutions\day13\day13.txt"
if test:
file_path = "solutions\day13\day13_test.txt"
data = read_data(file_path, debug)
calculate_part1(data, debug)
calculate_part2(data, debug)
# run_program(True, False)
run_program()
| 2,414 |
nsl_image_recog/experiments_cifar10/dynamic_uns/architecture.py
|
wy1iu/NSL
| 33 |
2169785
|
import tensorflow as tf
import numpy as np
import itertools
class VGG():
def get_conv_filter(self, shape, reg, stddev, reg_mag=1.0):
init = tf.random_normal_initializer(stddev=stddev)
if reg:
regu = tf.contrib.layers.l2_regularizer(self.wd*reg_mag)
filt = tf.get_variable('filter', shape, initializer=init, regularizer=regu)
else:
filt = tf.get_variable('filter', shape, initializer=init)
return filt
def get_named_conv_filter(self, shape, reg, stddev, name):
init = tf.random_normal_initializer(stddev=stddev)
if reg:
regu = tf.contrib.layers.l2_regularizer(self.wd)
filt = tf.get_variable(name, shape, initializer=init, regularizer=regu)
else:
filt = tf.get_variable(name, shape, initializer=init)
return filt
def get_bias(self, dim, init_bias, name):
with tf.variable_scope(name):
init = tf.constant_initializer(init_bias)
regu = tf.contrib.layers.l2_regularizer(self.wd)
bias = tf.get_variable('bias', dim, initializer=init, regularizer=regu)
return bias
def batch_norm(self, x, n_out, phase_train):
"""
Batch normalization on convolutional maps.
Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
Args:
x: Tensor, 4D BHWD input maps
n_out: integer, depth of input maps
phase_train: boolean tf.Varialbe, true indicates training phase
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
with tf.variable_scope('bn'):
gamma = self.get_bias(n_out, 1.0, 'gamma')
beta = self.get_bias(n_out, 0.0, 'beta')
batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=0.999)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(phase_train,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
return tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
def _max_pool(self, bottom, ksize, name):
return tf.nn.max_pool(bottom, ksize=[1, ksize, ksize, 1], strides=[1, 2, 2, 1],
padding='SAME', name=name)
def _get_input_norm(self, bottom, ksize, stride, pad):
eps = 1e-4
shape = [ksize, ksize, bottom.get_shape()[3], 1]
filt = tf.ones(shape)
input_norm = tf.sqrt(tf.nn.conv2d(bottom*bottom, filt, [1,stride,stride,1], padding=pad)+eps)
return input_norm
def _conv_layer(self, bottom, ksize, n_filt, is_training, name, stride=1,
bn=True, pad='SAME', reg=True, relu=False, verbose=True, init_scale=1.0, reg_mag=1.0, norm=False):
with tf.variable_scope(name) as scope:
n_input = bottom.get_shape().as_list()[3]
shape = [ksize, ksize, n_input, n_filt]
if verbose:
print("shape of filter %s: %s" % (name, str(shape)))
            filt = self.get_conv_filter(shape, reg, reg_mag=reg_mag,
                                        stddev=tf.sqrt(2.0/tf.to_float(ksize*ksize*n_input))*init_scale)
conv = tf.nn.conv2d(bottom, filt, [1, stride, stride, 1], padding=pad)
if norm:
xnorm = self._get_input_norm(bottom, ksize, stride, pad)
conv /= xnorm
if bn:
conv = self.batch_norm(conv, (1,1,1,n_filt), is_training)
if relu:
return tf.nn.relu(conv)
else:
return conv
def _2dcond_conv_layer(self, bottom, mask, ksize, n_filt, is_training, name, stride=1,
bn=True, pad='SAME', reg=True, relu=False, verbose=True):
assert pad == 'SAME'
assert ksize == 3
with tf.variable_scope(name) as scope:
n_input = bottom.get_shape().as_list()[3]
shape = [1, 1, n_input, n_filt*9]
_, h, w, _ = bottom.get_shape().as_list()
if verbose:
print("shape of filter %s: %s" % (name, str(shape)))
mask_shape = mask.get_shape().as_list()
filt = self.get_conv_filter(shape, reg, stddev=tf.sqrt(2.0/tf.to_float(3*3*n_input)))
conv = tf.nn.conv2d(bottom, filt, [1, stride, stride, 1], padding=pad)
conv = tf.reshape(conv, (-1, h, w, 9, n_filt))
conv = tf.pad(conv, [(0,0), (1,1), (1,1), (0,0), (0,0)])
conv_res = tf.constant(0.0)
for i, j in itertools.product(range(3), range(3)):
conv_res += tf.reduce_sum(mask[:,:,:,:, 3*i + j:3*i + j+1] * conv[:,i:i+h, j:j+w, :], axis=-2)
if bn:
conv_res = self.batch_norm(conv_res, (1,1,1,n_filt), is_training)
if relu:
return tf.nn.relu(conv_res)
else:
return conv_res
def _combined_conv(self, bottom, ksize, n_filt, is_training, name, stride=1,
bn=True, pad='SAME', reg=True, relu=False, verbose=True):
with tf.variable_scope(name) as scope:
mask = self._conv_layer(bottom, 3, 128, is_training, 'm1',
stride, bn=False, relu=True, reg=reg, reg_mag=10.0, init_scale=0.1, norm=True)
mask = self._conv_layer(mask, 3, 81, is_training, 'm2',
stride, bn=False, relu=False, reg=reg, reg_mag=10.0, init_scale=0.001)
_, h, w, _ = mask.get_shape().as_list()
mask = tf.reshape(mask, (-1, h, w, 9, 9))
mask += tf.reshape(tf.eye(9), (1,1,1,9,9))
conv = self._2dcond_conv_layer(bottom, mask, 3, n_filt, is_training, name='conv',
stride=1, bn=True, relu=True, reg=reg)
return conv
def _resnet_unit_v1(self, bottom, ksize, n_filt, is_training, name, stride, reg):
with tf.variable_scope(name):
n_input = bottom.get_shape().as_list()[3]
residual = self._combined_conv(bottom, ksize, n_filt, is_training, 'first',
stride, bn=True, relu=True, reg=reg)
residual = self._combined_conv(residual, ksize, n_filt, is_training, name='second',
stride=1, bn=True, relu=False, reg=reg)
if n_input == n_filt:
shortcut = bottom
else:
shortcut = self._conv_layer(bottom, ksize, n_filt, is_training, 'shortcut', stride, bn=True, relu=False, reg=True)
return tf.nn.relu(residual + shortcut)
# Input should be an rgb image [batch, height, width, 3]
def build(self, rgb, n_class, is_training):
self.wd = 5e-4
ksize = 3
n_layer = 3
feat = rgb
n_out = 32
feat = self._conv_layer(feat, ksize, n_out, is_training, name='root', reg=True, bn=True, relu=True)
#32X32
n_out = 32
for i in range(n_layer):
feat = self._combined_conv(feat, ksize, n_out, is_training, name='conv1_' + str(i), reg=True, bn=True, relu=True)
feat = self._max_pool(feat, 2, 'pool1')
# Pool 1, 16x16
n_out = 64
for i in range(n_layer):
feat = self._combined_conv(feat, ksize, n_out, is_training, name='conv2_'+str(i), reg=True, bn=True, relu=True)
feat = self._max_pool(feat, 2, 'pool2')
# Pool 2, 8x8
n_out = 128
for i in range(n_layer):
feat = self._combined_conv(feat, ksize, n_out, is_training, name='conv3_'+str(i), reg=True, bn=True, relu=True)
feat = self._max_pool(feat, 2, 'pool3')
self.score = self._conv_layer(feat, 4, n_class, is_training, "score", bn=False,
pad='VALID', reg=True, relu=False)
self.pred = tf.squeeze(tf.argmax(self.score, axis=3))
| 8,286 |
factorial3.py
|
PRASAD-DANGARE/PYTHON
| 1 |
2169860
|
# Python Program That Generates Prime Numbers With The Help Of A Function To Test Prime Or Not
'''
Function Name : Function To Test Whether Number Is Prime Or Not.
Function Date : 4 Sep 2020
Function Author : <NAME>
Input : Integer
Output : String
'''
def prime(n):
""" to check if n is prime or not """
x = 1 # This Will Be 0 If Not Prime
for i in range(2, n):
if n % i == 0:
x = 0
break
else:
x = 1
return x
# Generate Prime Number Series
num = int(input('How Many Primes Do You Want ? '))
i = 2 # Start With i Value 2
c = 1 # This Counts The Number Of Primes
while True: # If I Is Prime, Display It
if prime(i): # If I Is Prime, Display It
print(i)
c += 1 # Increase Counter
i += 1 # Generate Next Number To Test
if c > num: # If Count Exceeds num
break # Come Out Of While-Loop
| 967 |
ui/styles.py
|
smartdatalake/danae
| 0 |
2168754
|
style_div = {'display':'flex', 'justify-content': 'center'}
style_div_none = {'display':'flex', 'justify-content': 'center', 'display': 'none'}
style_logo = {'color': '#7100FF', 'font-size': '3em', 'font-family': 'Impact, fantasy;', 'font-weight': 'bold'}
style_drop_list = {'display':'flex', 'justify-content': 'center', 'margin-top': '10px', 'margin-bottom': '10px'}
style_sdl = {'background-color': '#7100FF', 'color': '#ffffff'}
style_sdl_light = {'background-color': '#a569f4', 'color': '#ffffff'}
active_color = '#7100FF'
| 534 |
kissim/tests/test_utils.py
|
AJK-dev/kissim
| 0 |
2169382
|
"""
Unit and regression test for the kissim.utils.
"""
import pytest
from kissim.utils import set_n_cores
@pytest.mark.parametrize(
"n_cores",
[1000000000000],
)
def test_get_n_cores_valueerror(n_cores):
"""
Test if number of cores are set correctly.
"""
with pytest.raises(ValueError):
set_n_cores(n_cores)
| 345 |
sliderule/ipxapi.py
|
slhowardESR/sliderule-python
| 0 |
2169260
|
# Copyright (c) 2021, University of Washington
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the University of Washington nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF WASHINGTON AND CONTRIBUTORS
# “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF WASHINGTON OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from sliderule import icesat2
import logging
###############################################################################
# GLOBALS
###############################################################################
# create logger
logger = logging.getLogger(__name__)
###############################################################################
# APIs
###############################################################################
#
# ICEPYX ATL06
#
def atl06p(ipx_region, parm, asset=icesat2.DEFAULT_ASSET):
"""
Performs ATL06-SR processing in parallel on ATL03 data and returns gridded elevations. The list of granules to be processed is identified by the ipx_region object.
See the `atl06p <../api_reference/icesat2.html#atl06p>`_ function for more details.
Parameters
----------
ipx_region: Query
icepyx region object defining the query of granules to be processed
parm: dict
parameters used to configure ATL06-SR algorithm processing (see `Parameters <../user_guide/ICESat-2.html#parameters>`_)
asset: str
data source asset (see `Assets <../user_guide/ICESat-2.html#assets>`_)
Returns
-------
GeoDataFrame
gridded elevations (see `Elevations <../user_guide/ICESat-2.html#elevations>`_)
"""
try:
version = ipx_region.product_version
resources = ipx_region.avail_granules(ids=True)[0]
except:
logger.critical("must supply an icepyx query as region")
return icesat2.__emptyframe()
# try to get the subsetting region
if ipx_region.extent_type in ('bbox','polygon'):
parm.update({'poly': to_region(ipx_region)})
return icesat2.atl06p(parm, asset, version=version, resources=resources)
#
# ICEPYX ATL03
#
def atl03sp(ipx_region, parm, asset=icesat2.DEFAULT_ASSET):
"""
Performs ATL03 subsetting in parallel on ATL03 data and returns photon segment data.
See the `atl03sp <../api_reference/icesat2.html#atl03sp>`_ function for more details.
Parameters
----------
ipx_region: Query
icepyx region object defining the query of granules to be processed
    parm: dict
parameters used to configure ATL03 subsetting (see `Parameters <../user_guide/ICESat-2.html#parameters>`_)
asset: str
data source asset (see `Assets <../user_guide/ICESat-2.html#assets>`_)
Returns
-------
list
ATL03 segments (see `Photon Segments <../user_guide/ICESat-2.html#photon-segments>`_)
"""
try:
version = ipx_region.product_version
resources = ipx_region.avail_granules(ids=True)[0]
except:
logger.critical("must supply an icepyx query as region")
return icesat2.__emptyframe()
# try to get the subsetting region
if ipx_region.extent_type in ('bbox','polygon'):
parm.update({'poly': to_region(ipx_region)})
return icesat2.atl03sp(parm, asset, version=version, resources=resources)
def to_region(ipx_region):
"""
Extract subsetting extents from an icepyx region
Parameters
----------
ipx_region: Query
icepyx region object defining the query of granules to be processed
Returns
-------
list
polygon definining region of interest (can be passed into `icesat2` api functions)
"""
if (ipx_region.extent_type == 'bbox'):
bbox = ipx_region.spatial_extent[1]
poly = [dict(lon=bbox[0], lat=bbox[1]),
dict(lon=bbox[2], lat=bbox[1]),
dict(lon=bbox[2], lat=bbox[3]),
dict(lon=bbox[0], lat=bbox[3]),
dict(lon=bbox[0], lat=bbox[1])]
elif (ipx_region.extent_type == 'polygon'):
poly = [dict(lon=ln,lat=lt) for ln,lt in zip(*ipx_region.spatial_extent[1])]
return poly
| 5,496 |
src/inventory/entities/article.py
|
eshatro/warehouse
| 0 |
2169684
|
class Article:
def __init__(
self, id: int, name: str = None, available_stock: int = 0, quantity: int = 0
):
self.id = id
self.name = name
self.available_stock = available_stock
self.quantity = quantity
def __str__(self):
return f"{str(self.id) + '_' + self.name + ' ' + str(self.available_stock)}"
| 360 |
data_registry/migrations/0033_collection_source_url.py
|
open-contracting/data-registry
| 0 |
2169121
|
# Generated by Django 3.2.4 on 2021-08-30 17:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_registry', '0032_auto_20210827_2224'),
]
operations = [
migrations.AddField(
model_name='collection',
name='source_url',
field=models.TextField(blank=True, help_text='The URL of the publication.', verbose_name='source URL'),
),
]
| 463 |
guillotina_volto/content/file.py
|
enfold/guillotina-volto
| 1 |
2169866
|
# -*- encoding: utf-8 -*-
from guillotina import configure
from guillotina.content import Item
from guillotina_volto.interfaces import IFile
@configure.contenttype(
type_name="File",
schema=IFile,
behaviors=[
"guillotina.behaviors.dublincore.IDublinCore",
"guillotina.contrib.workflows.interfaces.IWorkflowBehavior",
"guillotina_volto.interfaces.base.ICMSBehavior",
],
allowed_types=[], # dynamically calculated
)
class File(Item):
pass
| 488 |
esupa/migrations/0006_partial_payment.py
|
Abando/esupa
| 0 |
2168290
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('esupa', '0005_auto_20150721_0132'),
]
operations = [
migrations.AddField(
model_name='event',
name='partial_payment_open',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='event',
name='partial_payment_toggle',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AlterField(
model_name='subscription',
name='state',
field=models.SmallIntegerField(choices=[(0, 'New'), (11, 'Filled'), (33, 'Queued for pay'), (55, 'Expecting payment'), (66, 'Verifying payment'), (77, 'Partially paid'), (88, 'Unpaid staff'), (99, 'Confirmed'), (-1, 'Checking data'), (-9, 'Rejected')], default=0),
),
]
| 976 |
main.py
|
Hanuu/silver_retriever
| 0 |
2169876
|
from websim_login import log_in_vrc
from write_expression import write_fast_expression
if __name__ == "__main__":
LOGIN_INFO = {
'EmailAddress': '<EMAIL>',
'Password': '',
}
log_in_vrc(LOGIN_INFO)
write_fast_expression("I am the destructor")
| 275 |
__init__.py
|
aneeshnaik/mw_poisson
| 0 |
2169852
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
mw_poisson is a Poisson solver for axisymmetric galactic potentials.
The main object is the MilkyWay class, which sets up a galactic density
profile, solves for the potential, then provides various functions that can
interpolate the density, potential, and acceleration at any given point.
See README for further details about mw_poisson and usage examples.
Created: July 2020
Author: <NAME>
"""
from .milkyway import MilkyWay
__all__ = ['MilkyWay']
| 505 |
figures/cluster.py
|
ChrisBeaumont/brut
| 6 |
2167039
|
import json
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Ellipse
matplotlib.rcParams['axes.grid'] = False
matplotlib.rcParams['axes.facecolor'] = '#ffffff'
from bubbly.cluster import merge
from bubbly.field import get_field
from bubbly.util import scale
def plot_stamps(stamps, **kwargs):
kwargs.setdefault('facecolor', 'none')
kwargs.setdefault('edgecolor', 'b')
label = kwargs.pop('label', None)
kw2 = kwargs.copy()
kw2['edgecolor'] = 'k'
kw2['linewidth'] = 2.0
ax = plt.gca()
for s in stamps:
s[2] += np.random.normal(0, .003)
s[1] += np.random.normal(0, .003)
r = Ellipse((s[1], s[2]),
width = 2 * s[-1], height = 2 * s[-1], **kwargs)
r2 = Ellipse((s[1], s[2]),
width = 2 * s[-1], height = 2 * s[-1], **kw2)
ax.add_patch(r2)
ax.add_patch(r)
if label is not None:
plt.plot([np.nan], [np.nan], '-', color = kwargs['edgecolor'],
label=label)
def main():
np.random.seed(42)
data = json.load(open('../models/l035_scores.json'))
stamps = np.array(data['stamps'])
scores = np.array(data['scores'])
l = stamps[:, 1]
b = stamps[:, 2]
good = (scores > .1) & (l < 34.8) & (l > 34.6) & (b > -.4) & (b < -0.2)
assert good.sum() > 0
stamps = stamps[good]
scores = scores[good]
merged, ms = merge(stamps, scores)
f = get_field(35)
g = scale(f.i4, limits=[70, 99])
r = scale(f.mips, limits=[70, 99])
b = r * 0
im = np.dstack((r, g, b))
plt.figure(dpi=200, tight_layout=True)
plt.imshow(im, extent=[36, 34, -1, 1], interpolation="bicubic")
plot_stamps(stamps, linewidth=1, edgecolor='white', label='Raw',
alpha=1)
plot_stamps(merged, edgecolor='red', alpha=1, linewidth=2,
label='Merged')
plt.xlim(34.795, 34.695)
plt.ylim(-.365, -.265)
plt.xlabel("$\ell$ ($^\circ$)")
plt.ylabel("b ($^\circ$)")
leg = plt.legend(loc='upper left', frameon=False)
for text in leg.get_texts():
text.set_color('white')
plt.savefig('cluster.eps')
if __name__ == "__main__":
main()
| 2,244 |
lenet.py
|
loveisatouch/Backbone
| 1 |
2169808
|
import torch
from torchvision import models
from torch import nn
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(in_features=16*5*5, out_features=120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
        x = torch.relu(x)
x = self.pool1(x)
x = self.conv2(x)
        x = torch.relu(x)
x = self.pool2(x)
x = x.view(x.shape[0], -1)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
model = LeNet()
print(model)
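# Hedged smoke test (not in the original file): assumes a single-channel
# 32x32 input (e.g. padded MNIST), which is what the 16*5*5 flatten expects.
x = torch.randn(1, 1, 32, 32)
out = model(x)
print(out.shape)  # expected: torch.Size([1, 10])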
| 904 |
TaylorNet.py
|
dalessioluca/TaylorNet
| 0 |
2167969
|
import torch
import torch.nn as nn
import math
class TaylorNet(nn.Module):
r"""Applies a non-linear multiplicative transformation to the incoming data,
in order to generate output features that can be quadratic and linear in the
input features:
:math:`y = (x W_2^T) * (x W_1^T) + x W_1^T + b`
Note that if output size = input size, then W_2 is not used, and the
transformation becomes:
:math:`y = x * (x W^T) + x W^T + b`
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to False, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, in\_features)` where :math:`*` means any number of
additional dimensions
- Output: :math:`(N, *, out\_features)` where :math:`*` means any number of
additional dimensions
Attributes:
        weight1: the learnable weights of the module of shape
            `(out_features x in_features)`
        weight2: the learnable weights of the module of shape
            `(out_features x in_features)`.
            If out_features = in_features, there is no weight2 matrix
        bias: the learnable bias of the module of shape `(out_features)`
Examples::
        >>> m = TaylorNet(5)
>>> input = torch.randn(128, 5)
>>> output = m(input)
>>> print(output.size())
"""
def __init__(self, in_features, out_features=None, bias=True):
super(TaylorNet, self).__init__()
if out_features is None:
out_features = in_features
self.in_features = in_features
self.out_features = out_features
self.weight1 = nn.Parameter(torch.Tensor(out_features, in_features))
if (in_features != out_features):
self.weight2 = nn.Parameter(torch.Tensor(out_features, in_features))
else:
self.weight2 = None
if bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight1.size(1))
nn.init.xavier_normal_(self.weight1)
if self.weight2 is not None:
nn.init.xavier_normal_(self.weight2)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
Wx = input.matmul(self.weight1.t())
x = input
if self.weight2 is not None:
x = input.matmul(self.weight2.t())
output = x.mul(Wx) + Wx
if self.bias is not None:
output += self.bias
return output
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
| 2,900 |
app/rpg_queries.py
|
ssbyrne89/DS-Unit-3-Sprint-2-SQL-and-Databases
| 0 |
2168329
|
import os
import sqlite3
# construct a path to wherever your database exists
# DB_FILEPATH = "chinook.db"
DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..",
"module1-introduction-to-sql", "rpg_db.sqlite3")
connection = sqlite3.connect(DB_FILEPATH)
connection.row_factory = sqlite3.Row
# the above line allows us to reference rows as dictionaries
cursor = connection.cursor()
cursor.execute('''SELECT
count(distinct name) as NumberOfCharacters
FROM charactercreator_character''')
print(dict(cursor.fetchall()[0]))
cursor.execute("""
SELECT(
SELECT
COUNT(charactercreator_cleric.character_ptr_id)
FROM charactercreator_cleric)
as ClericCount,
(SELECT COUNT(charactercreator_mage.character_ptr_id)
FROM charactercreator_mage)
as MageCount,
(SELECT COUNT(charactercreator_fighter.character_ptr_id)
FROM charactercreator_fighter)
as FighterCount,
(SELECT COUNT(charactercreator_thief.character_ptr_id)
FROM charactercreator_thief)
as ThiefCount;
""")
print(dict(cursor.fetchall()[0]))
cursor.execute("""
SELECT
COUNT(item_id) AS ItemCount
FROM armory_item;
""")
print(dict(cursor.fetchall()[0]))
cursor.execute("""
SELECT
COUNT(item_ptr_id) AS NumberOfWeapons
FROM armory_weapon;
""")
print(dict(cursor.fetchall()[0]))
cursor.execute("""
SELECT COUNT(item_id) - COUNT(item_ptr_id) AS nonweapons
FROM armory_item
LEFT JOIN armory_weapon
ON armory_weapon.item_ptr_id = armory_item.item_id;
""")
print(dict(cursor.fetchall()[0]))
cursor.execute("""
SELECT charactercreator_character_inventory.character_id,
COUNT(charactercreator_character_inventory.item_id)
AS item_count FROM charactercreator_character_inventory
JOIN armory_item
ON charactercreator_character_inventory.item_id = armory_item.item_id
GROUP BY character_id
LIMIT 20;
""")
print(dict(cursor.fetchall()[0:20]))
cursor.execute("""
SELECT charactercreator_character_inventory.character_id,
COUNT(charactercreator_character_inventory.item_id)
AS item_count FROM charactercreator_character_inventory
JOIN armory_weapon
ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id
GROUP BY character_id
LIMIT 20;
""")
print(dict(cursor.fetchall()[0:20]))
cursor.execute("""
SELECT AVG(count) AS inventory_avg_items
FROM
(SELECT COUNT(charactercreator_character_inventory.item_id)
AS count
FROM charactercreator_character_inventory
GROUP BY charactercreator_character_inventory.character_id)
""")
print(dict(cursor.fetchall()[0]))
connection.close()
| 3,292 |
split.py
|
rimaulana/gsirs-routeviews-with-whois
| 0 |
2168453
|
import csv
import os
import sys
rootFolder = os.path.abspath(os.path.dirname(__file__))
MaxRecordPerFile = 4
with open(sys.argv[1]) as source:
reader = csv.DictReader(source)
writeCounter= 0
fileCounter = 0
for line in reader:
if writeCounter == 0:
fileName = '{root}/{folder}/{name}.{frmt}'.format(root=rootFolder,folder="pool",name=str(fileCounter),frmt="csv")
writer = open(fileName,"w")
# Writing header
writer.write("\"ORIGIN\",\"DATE\",\"ROUTE\",\"LENGTH\"\n")
writer.write('{0},{1},{2},{3}\n'.format(line['ORIGIN'],line['DATE'],line['ROUTE'],line['LENGTH']))
elif writeCounter == (MaxRecordPerFile-1):
writer.write('{0},{1},{2},{3}\n'.format(line['ORIGIN'],line['DATE'],line['ROUTE'],line['LENGTH']))
writer.close()
fileCounter += 1
writeCounter = -1
else:
writer.write('{0},{1},{2},{3}\n'.format(line['ORIGIN'],line['DATE'],line['ROUTE'],line['LENGTH']))
writeCounter += 1
| 1,050 |
examples/ships/ships.py
|
BruchesLena/segmentation_training_pipeline
| 0 |
2169182
|
import pandas as pd
from segmentation_pipeline.impl.datasets import PredictionItem,ConstrainedDirectory
import os
from segmentation_pipeline.impl import rle
import imageio
from segmentation_pipeline import segmentation
import keras.applications as app
from keras.optimizers import Adam
class SegmentationRLE:
def __init__(self,path,imgPath):
self.data=pd.read_csv(path);
self.values=self.data.values;
self.imgPath=imgPath;
self.ddd=self.data.groupby('ImageId');
self.masks=self.ddd['ImageId'];
self.ids=list(self.ddd.groups.keys())
pass
def __getitem__(self, item):
pixels=self.ddd.get_group(self.ids[item])["EncodedPixels"]
return PredictionItem(self.ids[item] + str(), imageio.imread(os.path.join(self.imgPath,self.ids[item])),
rle.masks_as_image(pixels) > 0.5)
def get_masks(self,id):
pixels = self.ddd.get_group(id)["EncodedPixels"]
return rle.masks_as_images(pixels)
def isPositive(self, item):
pixels=self.ddd.get_group(self.ids[item])["EncodedPixels"]
for mask in pixels:
if isinstance(mask, str):
return True;
return False
def __len__(self):
return len(self.masks)
from skimage.morphology import binary_opening, disk
import skimage
import numpy as np
import keras
import matplotlib.pyplot as plt
def main():
ds = SegmentationRLE("F:/all/train_ship_segmentations.csv", "D:/train_ships/train")
#segmentation.execute(ds, "ship_config.yaml")
# cfg=segmentation.parse("fpn/ship_config.yaml")
# cfg.fit(ds)
# cfg = segmentation.parse("linknet/ship_config.yaml")
# cfg.fit(ds)
# cfg = segmentation.parse("psp/ship_config.yaml")
# cfg.fit(ds)
ds0=ds
cfg = segmentation.parse("fpn_full/ship_config.yaml")
#cfg.predict_to_directory(ConstrainedDirectory("F:/all/test_v2",["00dc34840.jpg"]),"F:/all/test_v2_seg",fold=2,stage=2)
# exit(0)
# fig = plt.figure()
# ax = fig.add_subplot(121)
# ax.imshow(ds0[2].x)
# ax1 = fig.add_subplot(255)
# ax1.imshow(ds[18].x)
#
# ax2 = fig.add_subplot(256)
# ax2.imshow(ds[19].x)
#
# ax3 = fig.add_subplot(254)
# ax3.imshow(ds[20].x)
#
# ax4 = fig.add_subplot(255)
# ax4.imshow(ds[21].x)
#
# ax5 = fig.add_subplot(256)
# ax5.imshow(ds[22].x)
#
# ax5 = fig.add_subplot(257)
# ax5.imshow(ds[23].x)
#
# plt.show()
#cfg.fit(ds,foldsToExecute=[2],start_from_stage=2)
cfg0 = segmentation.parse("./fpn-resnext2/ship_config.yaml")
mdl=cfg0.createAndCompileClassifier()
mdl.load_weights("./fpn-resnext2/classify_weights/best-2.0.weights")
exists={}
goodC=0;
for v in cfg0.predict_on_directory_with_model(mdl,"F:/all/test_v2",ttflips=True):
for i in range(0,len(v.data)):
if (v.predictions[i]>0.5):
goodC=goodC+1;
exists[v.data[i]]=v.predictions[i]
print(goodC)
#cfg0.fit_classifier(ds,2,mdl,12,stage=22)
#print("A")
num=0;
#cfg.predict_to_directory("F:/all/test_v2","F:/all/test_v2_seg",batchSize=16)
def onPredict(id, img, d):
exists=d["exists"]
out_pred_rows = d["pred"]
if exists[id]<0.5:
out_pred_rows += [{'ImageId': id, 'EncodedPixels': None}]
return
good = d["good"]
num = d["num"]
cur_seg = binary_opening(img.arr > 0.5, np.expand_dims(disk(2), -1))
cur_rles = rle.multi_rle_encode(cur_seg)
if len(cur_rles) > 0:
good = good + 1;
for c_rle in cur_rles:
out_pred_rows += [{'ImageId': id, 'EncodedPixels': c_rle}]
else:
out_pred_rows += [{'ImageId': id, 'EncodedPixels': None}]
num = num + 1;
d["good"] = good
d["num"] = num
pass
out_pred_rows=[]
toPredict=[]
for id in exists:
if exists[id]<0.5:
out_pred_rows += [{'ImageId': id, 'EncodedPixels': None}]
else:
toPredict.append(id)
d = {"pred": out_pred_rows, "good": 0, "num": 0,"exists":exists}
cfg.predict_in_directory(ConstrainedDirectory("F:/all/test_v2",toPredict),2,1,onPredict,d,ttflips=True)
submission_df = pd.DataFrame(out_pred_rows)[['ImageId', 'EncodedPixels']]
submission_df.to_csv('mySubmission.csv', index=False)
print("Good:"+str(d["good"]))
print("Num:" + str(d["num"]))
if __name__ == '__main__':
main()
| 4,521 |
tests/test_openvpn.py
|
N7SALab/HEV
| 0 |
2169980
|
import json
from modules.openvpn import Openvpn
from core.helpers.config import Config
CONF = Config()
def test_openpvn():
assert Openvpn.build_client_configs_test(
CONF.MINIO_HOST,
CONF.MINIO_ACCESS_KEY,
CONF.MINIO_SECRET_KEY,
CONF.OPENVPN) is True
| 290 |
api/serializers/suggestion_log_serializer.py
|
invinst/CPDB
| 16 |
2169461
|
from django_extensions.db.fields.json import JSONField
from rest_framework import serializers
from search.models import SuggestionLog
class SuggestionLogSerializer(serializers.HyperlinkedModelSerializer):
    # Copy the mapping so the base class's shared dict is not mutated in place.
    serializer_field_mapping = dict(serializers.HyperlinkedModelSerializer.serializer_field_mapping)
    serializer_field_mapping[JSONField] = serializers.DictField
class Meta:
model = SuggestionLog
fields = (
'search_query',
'num_suggestions',
'created_at',
)
| 528 |
source/create_levels.py
|
pvcraven/two_worlds
| 3 |
2170021
|
import arcade
import random
import math
from constants import *
from level import Level
from level_1 import get_level_1_array
from level_1 import add_level_1_creatures
from level_2 import get_level_2_array
from level_2 import add_level_2_creatures
from level_3 import get_level_3_array
from level_3 import add_level_3_creatures
from stairs import Stairs
def create_stairs(level_list):
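    # Stairs are placed on a random grid cell that is open (value 0) on both the
    # lower and upper level so the player can move between floors; the second
    # staircase of each pair must also be more than 15 cells away from the first.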
for level in level_list:
level.stair_list = arcade.SpriteList()
# Place first stairs from 0 to 1
placed = False
while not placed:
row = random.randrange(GRID_HEIGHT)
column = random.randrange(GRID_WIDTH)
value_0 = level_list[0].grid[row][column]
value_1 = level_list[1].grid[row][column]
if value_0 == 0 and value_1 == 0:
placed = True
stairs = Stairs("images/stairs_down.png", WALL_SPRITE_SCALING)
stairs.center_x = column * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.center_y = row * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.tag = "Down"
level_list[0].stair_list.append(stairs)
stairs = Stairs("images/stairs_up.png", WALL_SPRITE_SCALING)
stairs.center_x = column * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.center_y = row * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.tag = "Up"
level_list[1].stair_list.append(stairs)
# Place the second stairs from 0 to 1, making sure they aren't too close
last_x = column
last_y = row
placed = False
while not placed:
row = random.randrange(GRID_HEIGHT)
column = random.randrange(GRID_WIDTH)
value_0 = level_list[0].grid[row][column]
value_1 = level_list[1].grid[row][column]
distance = math.sqrt((last_x - column) ** 2 + (last_y - row) ** 2)
print(distance)
if value_0 == 0 and value_1 == 0 and distance > 15:
placed = True
stairs = Stairs("images/stairs_down.png", WALL_SPRITE_SCALING)
stairs.center_x = column * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.center_y = row * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.tag = "Down"
level_list[0].stair_list.append(stairs)
stairs = Stairs("images/stairs_up.png", WALL_SPRITE_SCALING)
stairs.center_x = column * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.center_y = row * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.tag = "Up"
level_list[1].stair_list.append(stairs)
# Place the stairs from 1 to 2
placed = False
while not placed:
row = random.randrange(GRID_HEIGHT)
column = random.randrange(GRID_WIDTH)
value_0 = level_list[1].grid[row][column]
value_1 = level_list[2].grid[row][column]
if value_0 == 0 and value_1 == 0:
placed = True
stairs = Stairs("images/stairs_down.png", WALL_SPRITE_SCALING)
stairs.center_x = column * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.center_y = row * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.tag = "Down"
level_list[1].stair_list.append(stairs)
stairs = Stairs("images/stairs_up.png", WALL_SPRITE_SCALING)
stairs.center_x = column * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.center_y = row * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.tag = "Up"
level_list[2].stair_list.append(stairs)
# Place the second stairs from 1 to 2, making sure they aren't too close
last_x = column
last_y = row
placed = False
while not placed:
row = random.randrange(GRID_HEIGHT)
column = random.randrange(GRID_WIDTH)
value_0 = level_list[1].grid[row][column]
value_1 = level_list[2].grid[row][column]
distance = math.sqrt((last_x - column) ** 2 + (last_y - row) ** 2)
if value_0 == 0 and value_1 == 0 and distance > 15:
placed = True
stairs = Stairs("images/stairs_down.png", WALL_SPRITE_SCALING)
stairs.center_x = column * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.center_y = row * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.tag = "Down"
level_list[1].stair_list.append(stairs)
stairs = Stairs("images/stairs_up.png", WALL_SPRITE_SCALING)
stairs.center_x = column * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.center_y = row * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
stairs.tag = "Up"
level_list[2].stair_list.append(stairs)
def create_walls(level_list):
for index, level in enumerate(level_list):
i = index+1
wall_filename = f"images/wall-{i:02}.png"
level.wall_list = arcade.SpriteList()
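        # Merge each horizontal run of 1s in the grid into a single wall sprite
        # (repeat_count_x tiles the texture) instead of one sprite per cell.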
for row in range(GRID_HEIGHT):
column = 0
while column < GRID_WIDTH:
while column < GRID_WIDTH and level.grid[row][column] == 0:
column += 1
start_column = column
while column < GRID_WIDTH and level.grid[row][column] == 1:
column += 1
end_column = column - 1
column_count = end_column - start_column + 1
column_mid = (start_column + end_column) / 2
wall = arcade.Sprite(wall_filename, WALL_SPRITE_SCALING,
repeat_count_x=column_count)
wall.center_x = column_mid * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
wall.center_y = row * WALL_SPRITE_SIZE + WALL_SPRITE_SIZE / 2
wall.width = WALL_SPRITE_SIZE * column_count
level.wall_list.append(wall)
def create_levels(player_sprite):
level_list = []
level = Level()
level.grid = get_level_1_array()
level_list.append(level)
level.background_color = arcade.color.BISTRE
level = Level()
level.grid = get_level_2_array()
level_list.append(level)
level.background_color = arcade.color.BLACK_OLIVE
level = Level()
level.grid = get_level_3_array()
level_list.append(level)
level.background_color = arcade.color.EERIE_BLACK
create_walls(level_list)
add_level_1_creatures(level_list[0])
add_level_2_creatures(level_list[1])
add_level_3_creatures(level_list[2], player_sprite)
create_stairs(level_list)
return level_list
| 6,503 |
aoc2018/day14.py
|
zoeimogen/AoC2018
| 1 |
2170004
|
#!/usr/bin/python3
'''Advent of Code 2018 Day 14 solution'''
from typing import Tuple
def runpart1(improve: int) -> str:
'''Solve part one'''
recipelist = [3, 7]
elfA = 0
elfB = 1
while len(recipelist) < (improve + 10):
newrecipes = str(recipelist[elfA] + recipelist[elfB])
recipelist += [int(r) for r in newrecipes]
elfA = (elfA + 1 + recipelist[elfA]) % len(recipelist)
elfB = (elfB + 1 + recipelist[elfB]) % len(recipelist)
return ''.join([str(r) for r in recipelist[improve:improve+10]])
def runpart2(finds: str) -> int:
'''Solve part two'''
recipelist = '37'
elfA = 0
elfB = 1
while True:
recipelist += str(int(recipelist[elfA]) + int(recipelist[elfB]))
elfA = (elfA + 1 + int(recipelist[elfA])) % len(recipelist)
elfB = (elfB + 1 + int(recipelist[elfB])) % len(recipelist)
if recipelist[-10:].find(finds) > -1:
return recipelist.find(finds)
def run() -> Tuple[str, int]:
'''Main'''
return(runpart1(635041), runpart2('635041'))
if __name__ == '__main__':
print(run())
| 1,109 |
apex/tests/L0/run_fused_layer_norm/test_fused_layer_norm.py
|
peterlazzarino/jukebox
| 16 |
2169986
|
import unittest
import os
import random
import torch
import apex
class TestFusedLayerNorm(unittest.TestCase):
def setUp(self):
self.module = apex.normalization.FusedLayerNorm(normalized_shape=[32, 64], elementwise_affine=False)
self.input_ = torch.randn(16, 32, 64)
torch.cuda.manual_seed(42)
def forward_cpu(self, input_):
self.module.cpu()
return self.module(input_.cpu())
def forward_cuda(self, input_):
self.module.cuda()
return self.module(input_.cuda())
def test_forward_cuda(self):
out_ = self.forward_cuda(self.input_)
assert out_.is_cuda == True
def test_forward_cpu(self):
out_ = self.forward_cpu(self.input_)
assert out_.is_cuda == False
def test_same_output(self):
out_cpu = self.forward_cpu(self.input_)
out_cuda = self.forward_cuda(self.input_)
torch.testing.assert_allclose(out_cpu, out_cuda.cpu())
class TestFusedLayerNormElemWise(TestFusedLayerNorm):
def setUp(self):
self.module = apex.normalization.FusedLayerNorm(normalized_shape=[32, 64], elementwise_affine=True)
self.input_ = torch.randn(16, 32, 64)
torch.cuda.manual_seed(42)
| 1,279 |
Mi_primer_Script.py
|
EsdeathParca/Gold
| 0 |
2168094
|
print(' Hello World, this is a text written in the Python language.')
print('\n')
print(' There once was a man who, without knowing his own name and without knowing where')
print(' he belonged, always wondered why he was where he was;')
print(' nothing made sense to him, yet he decided to walk until his feet')
print(' turned to fire and his hands were eroded by the wind.')
# From here on I start an exercise combining the different kinds of values the print function can take.
# I assign values to defined variables to perform an addition or subtraction, get a result, and combine
# that result into another calculation based on the previous value.
print('\n -----------------------------Monthly salary-------------------------------')
print('Deposited: $68,000 ars\nC.Al: $10,000 ars')
a = 68000
b = 10000
total = a - b
print('Balance:', total, 'ars')
print('Purchase: "Mattress" - Value: 12,000 ars')
c = total
d = 12000
subtraction = c - d
print('Balance:', subtraction, 'ars')
###################################
# /\ /\ #
# ||'''''''''|| #
# | o o || // #
# | ♥ || // #
# \ ww || // #
# \ ____// ********** #
# ****************** #
# ** ** ** ** #
# * * ** ** #
# #
###################################
| 1,481 |
tests/aiohttp/test_query_params.py
|
TimPansino/strawberry
| 0 |
2169218
|
async def test_no_query(aiohttp_app_client):
params = {"variables": '{"name": "James"}'}
response = await aiohttp_app_client.get("/graphql", params=params)
assert response.status == 400
async def test_get_with_query_params(aiohttp_app_client):
query = {
"query": """
query {
hello
}
"""
}
response = await aiohttp_app_client.get("/graphql", params=query)
data = await response.json()
assert response.status == 200
assert data["data"]["hello"] == "Hello world"
async def test_can_pass_variables_with_query_params(aiohttp_app_client):
query = {
"query": "query Hello($name: String!) { hello(name: $name) }",
"variables": '{"name": "James"}',
}
response = await aiohttp_app_client.get("/graphql", params=query)
data = await response.json()
assert response.status == 200
assert data["data"]["hello"] == "Hello James"
async def test_post_fails_with_query_params(aiohttp_app_client):
query = {
"query": """
query {
hello
}
"""
}
response = await aiohttp_app_client.post("/graphql", params=query)
assert response.status == 400
async def test_does_not_allow_mutation(aiohttp_app_client):
query = {
"query": """
mutation {
hello
}
"""
}
response = await aiohttp_app_client.get("/graphql", params=query)
assert response.status == 400
data = await response.text()
assert data == "400: mutations are not allowed when using GET"
async def test_fails_if_allow_queries_via_get_false(aiohttp_app_client_no_get):
query = {
"query": """
query {
hello
}
"""
}
response = await aiohttp_app_client_no_get.get("/graphql", params=query)
assert response.status == 400
data = await response.text()
assert data == "400: queries are not allowed when using GET"
| 2,016 |
measure_cache_effect.py
|
ianozsvald/explore_cpu_cache_effect
| 1 |
2170014
|
import cPickle
import argparse
import time
import numpy as np
try:
    import matplotlib.pyplot as plt
except ImportError:
    pass
# Intention:
# Measure cache speed on a CPU using a single numpy array with some in-place
# operations. We measure the cache speed by increasing the size of the array,
# starting small enough to fit entirely in the cache and growing until the
# majority of the array must stay in RAM
# Usage:
# python measure_cache_effect.py --help # show help
# python measure_cache_effect.py --time <filename> # make recordings, write to
# filename
# python measure_cache_effect.py --graph <filename1> [<filenameN>...] # read
# the recorded timings and plot on a graph
# --graphline # switch from boxplot (default) to labelled line output (for
# comparing multiple runs)
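# Example session (hypothetical file names):
#   python measure_cache_effect.py --time i7_laptop.pickle
#   python measure_cache_effect.py --graph i7_laptop.pickle macbook.pickle --graphline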
def get_cpu_info():
"""Attempt to make sensible titles from processor info"""
cpu_model = "Unknown CPU model"
    cpu_cache_size = "Unknown cache size"
cpu_cache_bytes = 1 # unknown indicated by 1 byte
cpu_cache_alignment = "Unknown cache alignment"
try:
f = open("/proc/cpuinfo")
lines = f.readlines()
# convert lines like the following from /proc/cpuinfo into strings
# 'model name\t: Intel(R) Core(TM) i7-2720QM CPU @ 2.20GHz\n',
# 'cache size\t: 6144 KB\n',
# 'cache_alignment\t: 64\n',
cpu_model = [line for line in lines if 'model name' in line][0].strip().split(':')[1].strip()
cpu_cache_size = [line for line in lines if 'cache size' in line][0].strip().split(':')[1].strip()
if ' KB' in cpu_cache_size:
# assuming it looks like '6144 KB'
cpu_cache_bytes = int(cpu_cache_size.split()[0]) * 1000
cpu_cache_alignment = [line for line in lines if 'cache_alignment' in line][0].strip().split(':')[1].strip()
except IOError:
pass
return cpu_model, cpu_cache_bytes, cpu_cache_size, cpu_cache_alignment
# L2 cache but it looks odd?
# Core i7-2720QM
#max_length = 64000
#increment_by = 4000
#nbr_repeated_operations = 20000
#box_width = 25000
# good for L3 cache
# http://en.wikipedia.org/wiki/List_of_Intel_Core_i7_microprocessors#.22Sandy_Bridge_.28quad-core.29.22_.2832_nm.29
# Core i7-2720QM, 4*256KB L2, 6MB L3
max_length = 1.8e6
max_length = 268000 # very short plot
increment_by = 50000
nbr_repeated_operations = 100
box_width = 200000
# Core 2 Duo P7350 3MB cache
# Macbook Core 2 Duo with L2 3MB cache
#max_length = 600000
#increment_by = 20000
#box_width = 100000
# labels for this laptop
cpu_model, cpu_cache_bytes, cpu_cache_size, cpu_cache_alignment = get_cpu_info()
print cpu_model, cpu_cache_bytes, cpu_cache_alignment
#laptop_label = "Core i7-2720QM, 4*256KB L2, 6MB L3" # graph title
#cache_label = "6MB L3 cache" # text of red line marker for the graph
laptop_label = cpu_model
cache_location = cpu_cache_bytes # 6e6 # position of cache_label on the graph
cache_label = cpu_cache_size
trials = 30
starting_length = increment_by
array_length = starting_length
dtype = np.int_
OUT_FILE_NAME = 'pi_numpy_benefitsvectorization.pickle'
parser = argparse.ArgumentParser(description='Project description')
parser.add_argument('--time', type=str, help="Time cache behaviour, write output to <filename>")
parser.add_argument('--graph', nargs="*", type=str, help="Graph cache behaviour, read output from <filename>")
parser.add_argument('--graphline', action="store_true", default=False, help='By default plot uses boxplot, with graphline it plots a single line')
args = parser.parse_args()
print "Args:", args
if args.time:
nbytes = []
all_deltas = []
while array_length < max_length:
deltas = []
a = np.ones(array_length, dtype=dtype)
print "array_length {}, nbytes {}, ".format(array_length, a.nbytes)
nbytes.append(a.nbytes)
for t in xrange(trials):
a = np.ones(array_length, dtype=dtype)
t1 = time.time()
# loop on some basic operations, in-place
# a number of times so we have something to measure
for inner_loop in xrange(nbr_repeated_operations):
a *= a
a += 1
delta = time.time() - t1
delta /= float(a.nbytes) # normalise to time per byte
deltas.append(delta)
all_deltas.append(deltas)
array_length += increment_by
all_deltas = np.array(all_deltas)
nbytes = np.array(nbytes)
timings = all_deltas
print "Writing to:", args.time
with open(args.time, 'wb') as f:
dumped_data = {'timings': timings,
'nbytes': nbytes}
cPickle.dump(dumped_data, f)
if args.graph:
# make sure matplotlib has been imported
if 'plt' in dir():
plt.figure(1)
plt.clf()
graph_filenames = args.graph
for graph_filename in graph_filenames:
print "Loading data from", graph_filename
with open(graph_filename, 'rb') as f:
dumped_data = cPickle.load(f)
timings = dumped_data['timings']
nbytes = dumped_data['nbytes']
timings_averaged = np.average(timings, axis=1)
if args.graphline:
plt.plot(nbytes, timings_averaged, label=graph_filename)
else:
plt.boxplot(timings.T, positions=nbytes, widths=box_width)
plt.ylabel("Time per byte (seconds)")
plt.xticks(plt.xticks()[0], ["{:,}".format(int(xb / 1000.0)) for xb in nbytes], rotation=45)
plt.xlabel("Total array size (kB)")
# annotate the cache location
plt.vlines(cache_location, plt.ylim()[0], plt.ylim()[1], colors='r')
plt.annotate(cache_label, (cache_location, np.max(timings)))
plt.title(laptop_label)
plt.grid()
if args.graphline:
plt.legend()
plt.show()
else:
print "matplotlib must be installed to generate a graph"
| 5,958 |
spyder_vcs/tests/backend/template_hypothesis.py
|
spyder-ide/spyder-vcs
| 0 |
2169582
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Project Contributors
#
# Distributed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
A set of Hypothesis powered templates for testing backends.
All the templates here act as mixins rather than standalone test cases.
"""
# Standard library imports
from collections.abc import Iterable
from pathlib import Path
from tempfile import TemporaryDirectory
import typing
# Third party imports
from hypothesis import given, settings, HealthCheck
import hypothesis.strategies as st
# Local imports
from .generation import FilesTreeGenerator
from .template import TemplateBasic, TemplateCreate, tempdir_tostr
__all__ = ("BASE_SETTINGS", "HyTemplateBasic", "HyTemplateCreate")
TEMPDIR = typing.Union[TemporaryDirectory, str]
BASE_SETTINGS = settings(
suppress_health_check=(HealthCheck.too_slow, HealthCheck.data_too_large),
deadline=1000,
)
class HyTemplateBasic(TemplateBasic):
"""
The base of all Hypothesis powered backend test templates.
In addition to TemplateBasic requirements, any subclass of this must
provide the following strategies: local_repository, remote_repository,
local_with_remote_repository.
"""
local_repository: st.SearchStrategy[TEMPDIR]
remote_repository: st.SearchStrategy[TEMPDIR]
local_with_remote_repository: st.SearchStrategy[typing.Tuple[TEMPDIR,
TEMPDIR]]
def _draw_repodir(
self,
data: st.DataObject,
attrs: typing.Iterable[str] = ("local_repository", )
) -> typing.Tuple[TEMPDIR, typing.Union[TEMPDIR, typing.Sequence[TEMPDIR]]]:
drawed_repo = data.draw(st.one_of(*(getattr(self, attr) for attr in attrs)))
if isinstance(drawed_repo, Iterable):
return drawed_repo, tuple(tempdir_tostr(x) for x in drawed_repo)
return drawed_repo, tempdir_tostr(drawed_repo)
@given(st.data())
# @settings(max_examples=2)
def test_init_good(self, data: st.DataObject):
_, repodir = self._draw_repodir(
data, ("local_repository", "local_with_remote_repository"))
if not isinstance(repodir, str):
repodir = repodir[0]
super().test_init_good(repodir)
@given(st.builds(TemporaryDirectory))
# @settings(max_examples=1)
def test_init_norepo(self, norepodir: TemporaryDirectory):
super().test_init_norepo(norepodir.name)
class HyTemplateCreate(HyTemplateBasic, TemplateCreate):
"""Hypothesis powered implementations of TemplateCreate."""
@BASE_SETTINGS
# @settings(max_examples=1)
@given(st.builds(TemporaryDirectory))
def test_create_empty(self, repodir):
super().test_create_empty(repodir.name)
@BASE_SETTINGS
# @settings(max_examples=1)
@given(st.builds(TemporaryDirectory), st.data())
def test_create_noauth(self, repodir: TemporaryDirectory,
data: st.DataObject):
_, from_ = self._draw_repodir(data, ("remote_repository", ))
super().test_create_noauth(repodir.name, Path(from_).as_uri())
| 3,297 |
Projects/project07/grow-test.py
|
tonysulfaro/CSE-331
| 2 |
2168610
|
from HashTable import HashTable
def assertNode(node, key, value):
if key is None:
assert node is None
else:
assert node.key == key and node.value == value
ht = HashTable()
for i in range(6):
ht.insert(i * 'a', i)
assert ht.size == 5
assert ht.capacity == 8
assertNode(ht.table[0], None, None)
assertNode(ht.table[1], 'a', 1)
assertNode(ht.table[2], None, None)
assertNode(ht.table[3], None, None)
assertNode(ht.table[4], "aaaa", 4)
assertNode(ht.table[5], "aaaaa", 5)
assertNode(ht.table[6], "aa", 2)
assertNode(ht.table[7], "aaa", 3)
| 570 |
bce/dom/mathml/text.py
|
bce-toolkit/BCE
| 12 |
2168378
|
#!/usr/bin/env python
#
# Copyright 2014 - 2018 The BCE Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the license.txt file.
#
import bce.dom.mathml.base as _base
import bce.dom.mathml.types as _types
class TextComponent(_base.Base):
"""Text component."""
def __init__(self, text):
"""Initialize the component.
:param text: The text.
"""
_base.Base.__init__(self, _types.COMPONENT_TYPE_TEXT)
self.__text = text
def get_text(self):
"""Get the text.
:return: The text.
"""
return self.__text
def set_text(self, text):
"""Set the text.
:param text: The text.
"""
self.__text = text
def to_string(self, indent=0):
"""Serialize the component to string.
:type indent: int
:param indent: The indent space count.
:rtype : str
:return: The serialized string.
"""
return " " * indent + "<mtext>" + self.__text + "</mtext>"
| 1,078 |
discordbot/bot.py
|
justin-p/IaC-DigitalOcean-Minecraft
| 1 |
2169703
|
'''
Basic webhook wrapper for sending messages to discord chat.
'''
from discord_webhook import DiscordWebhook
import argparse
if __name__ == '__main__':
## initiate the parser with a description
parser = argparse.ArgumentParser(description = 'Basic webhook bot for discord')
optional = parser._action_groups.pop()
optional.add_argument("--skipmessage",default=False)
optional.add_argument("-u", "--uri", help="Webook Uri")
optional.add_argument("-c", "--content", help="Content to send")
cmdargs = parser.parse_args()
if cmdargs.skipmessage == "false":
webhook = DiscordWebhook(url=cmdargs.uri, content=cmdargs.content)
response = webhook.execute()
| 699 |
library_seats/timer.py
|
XC-Li/old_python_programs
| 0 |
2169823
|
# Countdown Timer
# Designed By Xc.Li @ Mar.2016
# coding = utf-8
import time
import os
def func(mode):
    if mode == 5:
        check_point = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]
    elif mode == 10:
        check_point = [0, 10, 20, 30, 40, 50]
    elif mode == 15:
        check_point = [0, 15, 30, 45]
    elif mode == "test":
        check_point = [0, 10, 20, 23]
    else:
        raise ValueError("Unknown mode: %r" % mode)
    while True:
        # time.ctime() looks like 'Mon Mar 21 15:05:30 2016'; [14:-8] is the minute field
        current_minute = int(time.ctime()[14:-8])
        if current_minute in check_point:
            print "Check Point!"
            time.sleep(5)
            return True
        print "Waiting at mode %s" % mode
        time.sleep(5)
        os.system("cls")
#
# def func1(mode):
# flag = False
# previous_minute = time.ctime()[14:-8]
# while True:
# time.sleep(5)
# print "Running at mode %d" % mode
# current_minute = time.ctime()[14:-8]
# if flag is True and int(current_minute) - int(previous_minute) == 2 or int(previous_minute) == 59:
# return True
# if current_minute == func(mode):
# flag = True
# continue
# return False
| 1,234 |
qUtilities.py
|
matthewse19/quantum-simulation
| 0 |
2170055
|
import math
import random
import numpy
import qConstants as qc
import qBitStrings as qb
def equal(a, b, epsilon):
'''Assumes that n >= 0. Assumes that a and b are both n-qbit states or n-qbit gates. Assumes that epsilon is a positive (but usually small) real number. Returns whether a == b to within a tolerance of epsilon. Useful for doing equality comparisons in the floating-point context. Warning: Does not consider global phase changes; for example, two states that are global phase changes of each other may be judged unequal. Warning: Use this function sparingly, for inspecting output and running tests. Probably you should not use it to make a crucial decision in the middle of a big computation. In past versions of CS 358, this function has not existed. I have added it this time just to streamline the tests.'''
diff = a - b
if len(diff.shape) == 0:
# n == 0. Whether they're gates or states, a and b are scalars.
return abs(diff) < epsilon
elif len(diff.shape) == 1:
# a and b are states.
return sum(abs(diff)) < epsilon
else:
# a and b are gates.
return sum(sum(abs(diff))) < epsilon
def uniform(n):
'''Assumes n >= 0. Returns a uniformly random n-qbit state.'''
if n == 0:
return qc.one
else:
psiNormSq = 0
while psiNormSq == 0:
reals = numpy.array(
[random.normalvariate(0, 1) for i in range(2**n)])
imags = numpy.array(
[random.normalvariate(0, 1) for i in range(2**n)])
psi = numpy.array([reals[i] + imags[i] * 1j for i in range(2**n)])
psiNormSq = numpy.dot(numpy.conj(psi), psi).real
psiNorm = math.sqrt(psiNormSq)
return psi / psiNorm
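# Sanity check on uniform(): the returned state is normalised, so e.g.
# abs(numpy.linalg.norm(uniform(3)) - 1) < 1e-9 should hold.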
def bitValue(state):
'''Given a one-qbit state assumed to be exactly classical --- usually because a classical state was just explicitly assigned to it --- returns the corresponding bit value 0 or 1.'''
if (state == qc.ket0).all():
return 0
else:
return 1
def powerMod(k, l, m):
'''Given non-negative integer k, non-negative integer l, and positive integer m. Computes k^l mod m. Returns an integer in {0, ..., m - 1}.'''
kToTheL = 1
curr = k
while l >= 1:
if l % 2 == 1:
kToTheL = (kToTheL * curr) % m
l = l // 2
curr = (curr * curr) % m
return kToTheL
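# powerMod is plain square-and-multiply, so for example
# powerMod(7, 128, 15) == pow(7, 128, 15) == 1, using O(log l) multiplications.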
def quantumFromClassic(bitstring):
n = len(bitstring)
arr = numpy.array((0,) * (2 ** n))
one_position = qb.integer(bitstring)
arr[one_position] = 1
return arr
def quantumListToClassicTuple(quantum_list):
arr = []
for qbit in quantum_list:
arr.append(bitValue(qbit))
return tuple(arr)
def removeZeroRow(arr):
width = len(arr[0])
zero_row = (0,) * width
copy = list(arr)
if copy[-1] == zero_row:
copy.pop()
return copy
def missingLeadingRow(arr):
width = len(arr[0])
one_row = [1,] * width
copy = list(arr)
for row in arr:
first_one_index = row.index(1)
one_row[first_one_index] = 0
return one_row
def continuedFraction(n, m, x0):
'''x0 is a float in [0, 1). Tries probing depths j = 0, 1, 2, ... until
the resulting rational approximation x0 ~ c / d satisfies either d >= m or
|x0 - c / d| <= 1 / 2^(n + 1). Returns a pair (c, d) with gcd(c, d) = 1.'''
j = 0
while True:
c, d = fraction(x0, j)
if d >= m or abs(x0 - (c / d)) <= (1 / 2 ** (n + 1)):
return (c, d)
j += 1
def fraction(x0, j):
    '''Recursive helper for continuedFraction.
    Expands the continued fraction to depth j to approximate x0 as c / d.'''
if x0 == 0:
return (0, 1)
elif j == 0:
a0 = math.floor(1 / x0)
c = 1
d = a0
return (c, d)
else:
a0 = math.floor(1 / x0)
x1 = (1 / x0) - a0
next_c, next_d = fraction(x1, j - 1)
c = next_d
d = (a0 * next_d) + next_c
return (c, d)
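# Worked example: for x0 = 1/pi the successive quotients are 3, 7, 15, so
# fraction(1 / math.pi, 2) gives (106, 333), i.e. 1/pi ~= 106/333 (pi ~= 333/106).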
def fractions_test():
#test fraction
print(fraction(1 / math.pi, 2))
print(fraction(1 / 7, 4))
print(fraction(0, 3))
print(continuedFraction( 8, 4, (1 / math.pi)))
if __name__ == "__main__":
fractions_test()
| 4,273 |
python/dsShareDirectory/test.py
|
bryanlabs/cloudformation-custom-resources
| 0 |
2169772
|
import boto3
import os
import logging
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
# Shares a Directory with target account.
def create(event):
try:
logger.info("Got Create")
# Shares a Directory with target account.
ds = boto3.client('ds')
accounts = event['ResourceProperties']['ShareTargetIds']
for account in accounts:
ds.share_directory(
DirectoryId=event['ResourceProperties']['DirectoryId'],
ShareNotes=event['ResourceProperties']['ShareNotes'],
ShareTarget={
'Id': account,
'Type': 'ACCOUNT'
},
ShareMethod='HANDSHAKE'
)
return
except Exception as e:
print(e)
def delete(event):
# Unshares a Directory with target account.
try:
logger.info("Got Delete")
# Unshares a Directory with target account.
client = boto3.client('ds')
accounts = event['ResourceProperties']['ShareTargetIds']
for account in accounts:
client.unshare_directory(
DirectoryId=event['ResourceProperties']['DirectoryId'],
UnshareTarget={
'Id': account,
'Type': 'ACCOUNT'
}
)
except Exception as e:
print(e)
event = {
"ResourceProperties" : {
"ShareTargetIds" : ["546837296206","430151054399"],
"DirectoryId" : "d-90671c4dd3",
"ShareNotes" : "my Shared Directory"
}
}
# create(event)
# delete(event)
| 1,634 |
museum_site/migrations/0052_profile.py
|
DrDos0016/z2
| 3 |
2168702
|
# Generated by Django 3.1.7 on 2021-05-09 17:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
('museum_site', '0051_article_last_revised'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='auth.user')),
('patron', models.BooleanField(default=False)),
('patron_level', models.IntegerField(default=0)),
('char', models.IntegerField(default=2)),
('fg', models.CharField(default='white', max_length=11)),
('bg', models.CharField(default='darkblue', max_length=11)),
('site_credits_name', models.CharField(blank=True, default='', max_length=100)),
('stream_credits_name', models.CharField(blank=True, default='', max_length=100)),
('max_upload_size', models.IntegerField(default=1048576)),
('files_uploaded', models.IntegerField(default=0)),
('pronouns', models.CharField(max_length=50)),
],
),
]
| 1,323 |
Courses/Codeval/Easy/Python/03-SumPrimes.py
|
leparrav/Playground
| 1 |
2168199
|
'''
https://www.codeeval.com/open_challenges/4/
'''
import sys
import math
def isPrime(n):
limit = int(math.sqrt(n))+1
for i in range(2,limit):
if n % i == 0:
return False
return True
def main():
#end = int(sys.argv[1])
end = 1000
acc = 0
pcount, i = 0, 2
while(pcount < end):
if isPrime(i):
acc += i
pcount += 1
i += 1
print acc
if __name__ == '__main__':
main()
| 395 |