max_stars_repo_path
stringlengths 4
182
| max_stars_repo_name
stringlengths 6
116
| max_stars_count
int64 0
191k
| id
stringlengths 7
7
| content
stringlengths 100
10k
| size
int64 100
10k
|
---|---|---|---|---|---|
src/zope/i18n/tests/testii18naware.py
|
Shoobx/zope.i18n
| 0 |
2170720
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""This is a test for the II18nAware interface.
"""
import unittest
class TestII18nAware(unittest.TestCase):
def setUp(self):
self.object = self._createObject()
self.object.setDefaultLanguage('fr')
def _createObject(self):
# Should create an object that has lt, en and fr as available
# languages
pass
def testGetDefaultLanguage(self):
self.assertEqual(self.object.getDefaultLanguage(), 'fr')
def testSetDefaultLanguage(self):
self.object.setDefaultLanguage('lt')
self.assertEqual(self.object.getDefaultLanguage(), 'lt')
def testGetAvailableLanguages(self):
self.assertEqual(sorted(self.object.getAvailableLanguages()), ['en', 'fr', 'lt'])
def test_suite():
    """Return an intentionally empty suite; concrete subclasses build their own."""
    suite = unittest.TestSuite()
    return suite
| 1,452 |
python/p155.py
|
forewing/lc
| 0 |
2171292
|
class MinStack:
    """Stack with O(1) getMin; superseded minima are stashed inline on the stack."""

    def __init__(self):
        self.s = []              # value stack, with old minima interleaved
        self.min = float('inf')  # current minimum of all pushed values

    def push(self, val: int) -> None:
        # When val becomes the new (or equal) minimum, remember the previous
        # minimum directly beneath it so pop() can restore it later.
        if val <= self.min:
            self.s.append(self.min)
            self.min = val
        self.s.append(val)

    def pop(self) -> None:
        value = self.s.pop()
        if value == self.min:
            # The stashed previous minimum sits right below — restore it.
            self.min = self.s.pop()

    def top(self) -> int:
        return self.s[-1]

    def getMin(self) -> int:
        return self.min
| 500 |
betweenness/groupBasedAlgorithm.py
|
puzis/experiments
| 0 |
2171686
|
import copy
from dataWorkshop import DataWorkshop
class GroupBasedAlgorithm(DataWorkshop):
    """
    The only information held by the Set is the information about the
    Set itself (in addition to common DataWorkshop).
    From this info we are able to calculate Set's GBC.
    """
    def createEmptySet(self, overheadModifier=1):
        """ create empty represention of set of vertices.
        The object returned is NOT instance of set!
        However it supports methods add() and remove()
        from python set interface.
        In later versions it is expected to have all set's
        functionality and actualy extend set()
        """
        return self.Set(self,overheadModifier)

    class Set:
        """
        Object of this class is NOT instance of set!
        However it supports methods add() and remove()
        from python set interface.
        In later versions it is expected to have all set's
        functionality and actualy extend Set class
        """
        class SubSet:
            # A contiguous shortest-path segment through members of the Set.
            # ``delta`` accumulates the multiplicative getDelta weights for
            # the segment; ``path`` holds the ordered vertices.
            def __init__(self):
                self.path = []
                self.delta = 1.0
            def __repr__(self):
                return "<" + repr(self.path) + ", delta=" + repr(self.delta) + ">"
            def __getitem__(self,index):
                return self.path[index]

        def __init__(self, GBAlg, overheadModifier):
            # NOTE(review): AVLTree is imported but never used here — confirm
            # whether it is a leftover from an earlier version.
            from opus7.avlTree import AVLTree
            self._alg = GBAlg                       # owning GroupBasedAlgorithm (distance matrix, deltas)
            self._members = set([])                 # vertices currently in the group
            self._subsets = set([self.SubSet()])    # starts with the single empty SubSet
            #self._subsets.add(self.SubSet())
            self._GB = 0                            # cached group betweenness
            self._OV = 0                            # cached overhead term
            self._valid = True                      # True while the caches match _members
            self._OVModifier = overheadModifier     # weight of the overhead in getGB()

        def __repr__(self):
            # getGB() refreshes the caches before they are formatted.
            self.getGB()
            return "GroupBetweenness" + "<" + repr(self._members) + ",%.2f,%.2f"%(self._GB,self._OV)+">"
        def __str__(self):
            return self.__repr__()
        def __iter__(self):
            return self._members.__iter__()
        def __getitem__(self,index):
            # NOTE(review): ``self._members`` is a set, which does not support
            # indexing — this raises TypeError if ever called. Confirm intent.
            return self._members[index]
        def __len__(self):
            return len(self._members)

        def getGB(self):
            """Return the group betweenness minus the weighted overhead,
            recomputing both lazily if add()/remove() invalidated the cache."""
            if self._valid:
                return self._GB - self._OV * self._OVModifier
            else:
                self.calculateGB()
                self.calculateOverhead()
                self._valid = True
                return self.getGB()

        def getMembers(self):
            return self._members

        def add(self,v):
            """
            (Set, int) -> None
            """
            # Extend every existing SubSet with v wherever v lies on a
            # shortest path relative to the subset's endpoints; the new
            # subsets are accumulated and merged in at the end.
            if v not in self._members:
                self._valid = False
                v_subsets = set([])
                d = self._alg.getDistanceMatrix()
                for s in self._subsets:
                    ns = copy.deepcopy(s)
                    k = len(ns.path)-1
                    if len(ns.path) == 0:
                        # Empty subset: v starts a fresh single-vertex path.
                        ns.delta = 1.0
                        ns.path = [v]
                        v_subsets.add(ns)
                    elif d[v][ns[0]]+d[ns[0]][ns[k]]==d[v][ns[k]] :
                        # v lies on a shortest path ending at the segment: prepend.
                        ns.delta *= self._alg.getDelta(v,ns[0],ns[k])
                        ns.path[0:0] = [v]
                        v_subsets.add(ns)
                    elif d[ns[0]][v]==d[ns[0]][ns[k]]+d[ns[k]][v] :
                        # Symmetric case: append v after the segment.
                        ns.delta *= self._alg.getDelta(ns[0],ns[k],v)
                        ns.path[k+1:k+1] = [v]
                        v_subsets.add(ns)
                    else:
                        # Try to splice v between two consecutive path vertices.
                        for i in range(k):
                            if d[ns[i]][ns[i+1]] == d[ns[i]][v] + d[v][ns[i+1]] :
                                ns.delta *= self._alg.getDelta(ns[i],v,ns[i+1])
                                ns.path[i+1:i+1] = [v]
                                v_subsets.add(ns)
                                break
                #self._subsets.update(v_subsets)
                self._subsets |= v_subsets
                self._members.add(v)

        def remove(self,v):
            """
            (int) -> None
            """
            # Drop every SubSet whose path passes through v.
            if v in self._members:
                self._valid = False
                v_subsets = set([])
                for s in self._subsets:
                    if v in s.path:
                        v_subsets.add(s)
                #self._subsets.intersection_update(v_subsets)
                self._subsets -= v_subsets
                self._members.remove(v)

        def calculateGB(self):
            """Inclusion-exclusion over all SubSets: odd-sized paths add,
            even-sized subtract. Caches the result in ``self._GB``."""
            GB = 0
            for s in self._subsets:
                k = len(s.path) - 1
                if k==-1: ##len(s.path)==0
                    pb = 0
                elif k==0: ##len(s.path)==1
                    pb = self._alg.getB(s[0])
                else: ##len(s.path)>1
                    pb = self._alg.getPairBetweenness(s[0],s[k]) * s.delta
                sign = ((k+1)%2)*2 - 1 #odd->pos even->neg
                #print pb, "\t", s.path
                GB += sign * pb
            self._GB=GB
            return GB

        def calculateOverhead(self):
            """Accumulate the overhead correction over all SubSets and other
            members; caches the result in ``self._OV``."""
            totalOverhead = 0
            for s in self._subsets:
                k = len(s.path)-1
                sOverhead = 0
                doubleOverhead = 0
                if k>=0 :
                    for u in self._members - set(s.path):
                        # Both orientations of the segment relative to u.
                        sOverhead += s.delta * self._alg.getDelta(u,s[0],s[k]) * self._alg.getDelta(u,s[k])
                        sOverhead += s.delta * self._alg.getDelta(u,s[k],s[0]) * self._alg.getDelta(u,s[0])
                        for v in self._members - set(s.path):
                            doubleOverhead += s.delta * self._alg.getDelta(u,s[0],s[k]) * self._alg.getDelta(u,s[k],v)
                if k==0 :
                    # A single-vertex path is its own reverse: halve to avoid
                    # double counting the two orientations above.
                    sOverhead /= 2
                    doubleOverhead /= 2
                sign = ((k+1)%2)*2 - 1 #odd->pos even->neg
                totalOverhead += sign * (sOverhead - doubleOverhead)
                #print sOverhead-doubleOverhead, "\t", s.path
            self._OV = totalOverhead
            return totalOverhead
| 6,906 |
python/lab/ws.py
|
tao12345666333/Talk-Is-Cheap
| 4 |
2171484
|
# -*- coding: utf-8 -*-
# from github
# one websocket demo
import asyncio
import uvloop
from aiohttp.web import Application, MsgType, WebSocketResponse
def add_socket(app, socket, user_id):
    """Register *socket* for *user_id* unless one is already registered."""
    if user_id not in app['connections']:
        print('New connection added {}'.format(user_id))
        app['connections'][user_id] = socket
async def remove_socket(app, socket, user_id):
    """Drop *user_id*'s registration (if any) and close its websocket.

    Fix: the disconnect message was printed without ``.format(user_id)``,
    so it literally showed ``{}`` instead of the id.
    """
    app['connections'].pop(user_id, None)
    print('user id: {} is disconnected'.format(user_id))
    await socket.close()
async def ws_handler(request):
    """Websocket endpoint: registers the client, echoes each text message
    doubled, and deregisters on a literal 'close' message.

    Fix: the close path passed ``ws.app`` — ``WebSocketResponse`` has no
    ``app`` attribute; the application lives on the request, as the
    registration branch below already assumed.
    """
    ws = WebSocketResponse()
    await ws.prepare(request)
    user_id = request.GET.get('user_id', -1)
    async for msg in ws:
        if msg.tp == MsgType.text:
            if msg.data == 'close':
                await remove_socket(app=request.app, socket=ws, user_id=user_id)
            else:
                add_socket(app=request.app, socket=ws, user_id=user_id)
                ws.send_str(msg.data * 2)
    return ws
async def init(loop):
    """Build the aiohttp application and start serving on 127.0.0.1:8000."""
    app = Application(loop=loop)
    app['connections'] = {}
    app.router.add_route('GET', '/', ws_handler)
    request_handler = app.make_handler()
    server = await loop.create_server(request_handler, '127.0.0.1', '8000')
    print("Server running on 127.0.0.1:8000")
    return app, server, request_handler
async def cleanup(app, srv, handler):
    """Close every client websocket, then shut the server down cleanly.

    Fixes: ``WebSocketResponse.close()`` is a coroutine and was never
    awaited; the loop also unpacked ``.items()`` while ignoring the key.
    """
    for ws in app['connections'].values():
        await ws.close()
    await asyncio.sleep(0.1)
    srv.close()
    await handler.finish_connections()
    await srv.wait_closed()
def main():
    """Run the websocket server under uvloop until interrupted."""
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    event_loop = asyncio.get_event_loop()
    application, server, request_handler = event_loop.run_until_complete(init(event_loop))
    try:
        event_loop.run_forever()
    except KeyboardInterrupt:
        # Graceful shutdown on Ctrl-C.
        event_loop.run_until_complete(cleanup(application, server, request_handler))

if __name__ == "__main__":
    main()
| 1,831 |
sdk/python/pulumi_google_native/vision/v1/get_product.py
|
AaronFriel/pulumi-google-native
| 44 |
2171783
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetProductResult',
'AwaitableGetProductResult',
'get_product',
'get_product_output',
]
@pulumi.output_type
class GetProductResult:
    # Generated result type for the ``getProduct`` invoke; the
    # ``@pulumi.output_type`` decorator wires the getters below to the
    # values stored via ``pulumi.set`` in ``__init__``.
    def __init__(__self__, description=None, display_name=None, name=None, product_category=None, product_labels=None):
        # Each argument is type-checked (when provided) and stored through
        # pulumi.set so the output machinery can resolve it later.
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if product_category and not isinstance(product_category, str):
            raise TypeError("Expected argument 'product_category' to be a str")
        pulumi.set(__self__, "product_category", product_category)
        if product_labels and not isinstance(product_labels, list):
            raise TypeError("Expected argument 'product_labels' to be a list")
        pulumi.set(__self__, "product_labels", product_labels)

    @property
    @pulumi.getter
    def description(self) -> str:
        """
        User-provided metadata to be stored with this product. Must be at most 4096 characters long.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        The user-provided name for this Product. Must not be empty. Must be at most 4096 characters long.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The resource name of the product. Format is: `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`. This field is ignored when creating a product.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="productCategory")
    def product_category(self) -> str:
        """
        Immutable. The category for the product identified by the reference image. This should be one of "homegoods-v2", "apparel-v2", "toys-v2", "packagedgoods-v1" or "general-v1". The legacy categories "homegoods", "apparel", and "toys" are still supported, but these should not be used for new products.
        """
        return pulumi.get(self, "product_category")

    @property
    @pulumi.getter(name="productLabels")
    def product_labels(self) -> Sequence['outputs.KeyValueResponse']:
        """
        Key-value pairs that can be attached to a product. At query time, constraints can be specified based on the product_labels. Note that integer values can be provided as strings, e.g. "1199". Only strings with integer values can match a range-based restriction which is to be supported soon. Multiple values can be assigned to the same key. One product may have up to 500 product_labels. Notice that the total number of distinct product_labels over all products in one ProductSet cannot exceed 1M, otherwise the product search pipeline will refuse to work for that ProductSet.
        """
        return pulumi.get(self, "product_labels")
class AwaitableGetProductResult(GetProductResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the already-resolved result awaitable: the dead ``yield``
        # turns this into a generator, which immediately returns a plain
        # GetProductResult copy of the fields.
        if False:
            yield self
        return GetProductResult(
            description=self.description,
            display_name=self.display_name,
            name=self.name,
            product_category=self.product_category,
            product_labels=self.product_labels)
def get_product(location: Optional[str] = None,
                product_id: Optional[str] = None,
                project: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProductResult:
    """
    Gets information associated with a Product. Possible errors: * Returns NOT_FOUND if the Product does not exist.

    :param location: forwarded as the ``location`` invoke argument.
    :param product_id: forwarded as the ``productId`` invoke argument.
    :param project: forwarded as the ``project`` invoke argument.
    :param opts: invoke options; a default instance is created when omitted.
    """
    __args__ = dict()
    __args__['location'] = location
    __args__['productId'] = product_id
    __args__['project'] = project
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('google-native:vision/v1:getProduct', __args__, opts=opts, typ=GetProductResult).value
    return AwaitableGetProductResult(
        description=__ret__.description,
        display_name=__ret__.display_name,
        name=__ret__.name,
        product_category=__ret__.product_category,
        product_labels=__ret__.product_labels)
@_utilities.lift_output_func(get_product)
def get_product_output(location: Optional[pulumi.Input[str]] = None,
                       product_id: Optional[pulumi.Input[str]] = None,
                       project: Optional[pulumi.Input[Optional[str]]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetProductResult]:
    """
    Gets information associated with a Product. Possible errors: * Returns NOT_FOUND if the Product does not exist.

    Output-lifted variant of ``get_product``: the decorator supplies the
    implementation, so the body is intentionally just ``...``.
    """
    ...
| 5,575 |
migrations/versions/d7d51a76eb11_shorturl_table.py
|
hedythedev/hello-flask
| 0 |
2170609
|
"""shorturl table
Revision ID: d7d51a76eb11
Revises:
Create Date: 2020-11-13 00:20:15.745976
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the shortURL table with unique lookup indexes in both directions."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'shortURL',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('url', sa.String(), nullable=True),
        sa.Column('short', sa.String(length=8), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
    op.create_index(op.f('ix_shortURL_short'), 'shortURL', ['short'], unique=True)
    op.create_index(op.f('ix_shortURL_url'), 'shortURL', ['url'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    """Drop the shortURL indexes, then the table itself."""
    # ### commands auto generated by Alembic - please adjust! ###
    for index_name in ('ix_shortURL_url', 'ix_shortURL_short'):
        op.drop_index(op.f(index_name), table_name='shortURL')
    op.drop_table('shortURL')
    # ### end Alembic commands ###
| 1,066 |
target.py
|
davidrossignol/flooding-tool
| 0 |
2171388
|
import os
import IP2Location
class Target():
    """A host to probe: liveness check plus IP2Location geolocation lookup."""

    def __init__(self, host, port=0):
        self.host = host
        self.port = port
        self.shortInfo = []
        self.allInfo = []
        self.ip2locInfo = []  # IP2Location record once fetched, [] until then

    def isAlive(self):
        """Ping the host once; return True if it answered.

        NOTE(review): ``self.host`` is interpolated into a shell command —
        only safe for trusted input; consider subprocess with a list argv.
        """
        res = os.system("ping -c 1 " + self.host)
        if res == 0:
            print("Host %s is up!" % (self.host))
            return True
        else:
            print("Host %s is down!" % (self.host))
            return False

    def getInformation(self):
        """Populate ``self.ip2locInfo`` from the bundled IP2Location database."""
        ip2loc = IP2Location.IP2Location()
        ip2loc.open("data/IP-COUNTRY-REGION-CITY-LATITUDE-LONGITUDE-ZIPCODE-TIMEZONE-ISP-DOMAIN-NETSPEED-AREACODE-WEATHER-MOBILE-ELEVATION-USAGETYPE-SAMPLE.BIN")
        self.ip2locInfo = ip2loc.get_all(self.host)

    def displayShortInfo(self):
        """Print country/region/timezone, fetching the record on first use.

        Fix: the original duplicated the three identical print statements in
        both branches of the if/else; fetch lazily once, then print once.
        """
        if self.ip2locInfo == []:
            self.getInformation()
        print("Country: %s " % self.ip2locInfo.country_short)
        print("Region: %s " % self.ip2locInfo.region)
        print("Timezone: %s " % self.ip2locInfo.timezone)
| 1,287 |
utils.py
|
KeunhoByeon/HDlearning_project2
| 0 |
2171472
|
import torch
# Save model state dict
def save_model(model, save_path):
    """Serialize a base-encoder model's state to *save_path* via torch.save."""
    encoder_state = {
        'dim': model.encoder.dim,
        'features': model.encoder.features,
        'base': model.encoder.base,
        'basis': model.encoder.basis,
    }
    state_dict = {
        'model': model.model,
        'classes': model.classes,
        'dim': model.dim,
        'encoder': encoder_state,
    }
    torch.save(state_dict, save_path)
def save_model_linear(model, save_path):
    """Serialize a linear-encoder model's state to *save_path* via torch.save."""
    encoder_state = {
        'dim': model.encoder.dim,
        'features': model.encoder.features,
        'm': model.encoder.m,
        'level': model.encoder.level,
        'levels': model.encoder.levels,
        'basis': model.encoder.basis,
    }
    state_dict = {
        'model': model.model,
        'classes': model.classes,
        'dim': model.dim,
        'encoder': encoder_state,
    }
    torch.save(state_dict, save_path)
# Load trained model
def load_model(model, load_path):
    """Restore into *model* a state saved by ``save_model`` (base-encoder variant)."""
    state_dict = torch.load(load_path)
    encoder_state = state_dict['encoder']
    model.model = state_dict['model']
    model.classes = state_dict['classes']
    model.dim = state_dict['dim']
    model.encoder.dim = encoder_state['dim']
    model.encoder.features = encoder_state['features']
    model.encoder.base = encoder_state['base']
    model.encoder.basis = encoder_state['basis']
def load_model_linear(model, load_path):
    """Restore into *model* a state saved by ``save_model_linear``."""
    state_dict = torch.load(load_path)
    encoder_state = state_dict['encoder']
    model.model = state_dict['model']
    model.classes = state_dict['classes']
    model.dim = state_dict['dim']
    model.encoder.dim = encoder_state['dim']
    model.encoder.features = encoder_state['features']
    model.encoder.m = encoder_state['m']
    model.encoder.level = encoder_state['level']
    model.encoder.levels = encoder_state['levels']
    model.encoder.basis = encoder_state['basis']
| 2,068 |
dpk_annotator/gui/Skeleton.py
|
jgraving/deepposekit-annotator
| 3 |
2170764
|
# -*- coding: utf-8 -*-
"""
Copyright 2018 <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import cv2
from .GUI import GUI
from ..utils import hotkeys as keys
__all__ = ['Skeleton']
class Skeleton(GUI):
    '''
    A GUI for initializing a skeleton for a new dataset.
    ------------------------------------------------------------
    Keys | Action
    ------------------------------------------------------------
    > +,- | Rescale the image
    > Left mouse | Move active keypoint
    > W, A, S, D | Move active keypoint
    > space | Changes W,A,S,D mode (swaps between 1px or 10px)
    > J, L | Load previous or next image
    > <, > | Jump 10 images backward or forward
    > I, K or |
    tab, shift+tab | Switch active keypoint
    > R | Mark frame as unannotated, or "reset"
    > F | Mark frame as annotated or "finished"
    > Esc, Q | Quit the GUI
    ------------------------------------------------------------
    Parameters
    ----------
    image: str
    Filepath of the image to be labeled.
    skeleton: str
    Filepath of the .csv or .xlsx file that has indexed information
    on name of the keypoint (part, e.g. head), parent (the direct
    connecting part, e.g. neck connects to head, parent is head),
    and swap (swapping positions with a part when reflected over X)
    Consult example of such a file for more information
    scale: int/float, default 1
    Scaling factor for the GUI (e.g. used in zooming).
    text_scale: float
    Scaling factor for the GUI font.
    A text_scale of 1 works well for 1920x1080 (1080p) images
    shuffle_colors: bool, default = True
    Whether to shuffle the color order for drawing keypoints
    refresh: int, default 100
    Delay on receiving next keyboard input in milliseconds.
    Attributes
    ----------
    window_name: str
    Name of the Annotation window when running program.
    Set to be 'Annotation' unless otherwise changed.
    n_images: int
    Number of images in question (1 in this case).
    key: int
    The key that is pressed on the keyboard.
    image_idx: int
    Index of a specific image in the .h5 file.
    save: method
    Output method is set to be to_csv
    Examples
    --------
    >>> from deepposekit import Skeleton
    >>> app = Skeleton('input_image.png', 'skeleton.csv')
    >>> app.run()
    >>>
    >>> app.save('skeleton_initialized.csv') # save the labels in skeleton.csv file
    Note: must use app.save('file.csv') to save! Unlike the
    Annotator, will not automatically save until that line runs.
    '''
    def __init__(self, image, skeleton, scale=1, text_scale=0.15, shuffle_colors=True, refresh=100):
        # Accept either an image path on disk or an already-decoded array.
        if isinstance(image, str):
            self.image = cv2.imread(image)
        elif isinstance(image, np.ndarray):
            self.image = image
        # NOTE(review): if ``image`` is neither str nor ndarray, ``self.image``
        # is silently left unset — confirm callers never pass anything else.
        # NOTE(review): ``super(GUI, self)`` resolves to GUI's *parent* in the
        # MRO, skipping GUI.__init__ — confirm intentional (GUI-level setup
        # appears to be done via ``_init_gui`` below instead).
        super(GUI, self).__init__()
        self.image_idx = 0  # single static image, so the index never changes
        self.n_images = 1
        self.window_name = 'Skeleton Creator'
        self.shuffle_colors = shuffle_colors
        self._init_skeleton(skeleton)
        self._init_gui(scale, text_scale, shuffle_colors, refresh)
        # Saving delegates to the skeleton object's to_csv (skeleton is
        # presumably set up by _init_skeleton — verify in GUI base class).
        self.save = self.skeleton.to_csv
    def _hotkeys(self):
        ''' Activates all key bindings.
        Enables all the key functionalities described at the
        start of the file.
        '''
        # Only react (and redraw) when a key was actually pressed.
        if self.key != keys.NONE:
            self._wasd()
            self._move_idx()
            self._zoom()
            self._update_canvas()
| 4,227 |
forml/lib/registry/filesystem/virtual.py
|
formlio/forml
| 78 |
2168751
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Virtual registry is a dummy registry implementation that doesn't persist anything outside of the current runtime.
"""
import collections
import logging
import tempfile
import typing
from forml.lib.registry.filesystem import posix
from forml.runtime import asset
if typing.TYPE_CHECKING:
from forml import project as prj
LOGGER = logging.getLogger(__name__)
class Registry(posix.Registry, alias='virtual'):
    """Non-distributed, in-memory registry whose content lives only for the
    lifetime of the current process.
    """

    def __init__(self):
        # Back the posix implementation with a throwaway temp directory that
        # is cleaned up when the TemporaryDirectory object is collected.
        self._storage: tempfile.TemporaryDirectory = tempfile.TemporaryDirectory(  # pylint: disable=consider-using-with
            prefix='registry-virtual-', dir=asset.TMPDIR.name
        )
        self._artifacts: dict['asset.Project.Key', dict['asset.Lineage.Key', 'prj.Artifact']] = collections.defaultdict(
            dict
        )
        super().__init__(self._storage.name)

    def projects(self) -> typing.Iterable['asset.Project.Key']:
        return iter(self._artifacts)

    def lineages(self, project: 'asset.Project.Key') -> typing.Iterable['asset.Lineage.Key']:
        return iter(self._artifacts[project])

    def mount(self, project: 'asset.Project.Key', lineage: 'asset.Lineage.Key') -> 'prj.Artifact':
        return self._artifacts[project][lineage]

    def pull(self, project: 'asset.Project.Key', lineage: 'asset.Lineage.Key') -> 'prj.Package':
        raise NotImplementedError('No packages in virtual repository')

    def push(self, package: 'prj.Package') -> None:
        installed = package.install(package.path)  # avoid copying by installing to self
        self._artifacts[package.manifest.name][package.manifest.version] = installed
| 2,550 |
scheduler/migrations/0006_auto_20190129_1249.py
|
gijzelaerr/buis
| 0 |
2170176
|
# Generated by Django 2.1.5 on 2019-01-29 12:49
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add ``moment`` (creation timestamp) and ``state`` fields to Workflow."""

    dependencies = [
        ('scheduler', '0005_remove_workflow_run_id'),
    ]

    operations = [
        migrations.AddField(
            model_name='workflow',
            name='moment',
            # auto_now_add stamps new rows; the one-off timezone.now default
            # backfills existing rows, hence preserve_default=False.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='workflow',
            name='state',
            # Two-letter workflow state: Added / Running / Error / Done.
            field=models.CharField(choices=[('AD', 'Added'), ('RU', 'Running'), ('ER', 'Error'), ('OK', 'Done')], default='AD', max_length=2),
        ),
    ]
| 744 |
popupQuiz.py
|
kirkwor/py
| 0 |
2171520
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 18 11:19:07 2017
@author: Ionut
"""
count = 0
import tkinter as tk
root = tk.Tk()
import tkinter.messagebox
# does not work without explicit import

# (title, prompt, correct answer) for each question; the original repeated
# the ask/score block ten times verbatim.
QUESTIONS = [
    ("Question 1", "Is print a command?", "yes"),
    ("Question 2", "Is while an example of iteration?", "yes"),
    ("Question 3", "Are there more than 33 commands in python?", "no"),
    ("Question 4", "Do you need to assign variable types?", "no"),
    ("Question 5", "Can == be used to compare a variable to a string", "yes"),
    ("Question 6", "Does != mean that an integer is equal to a string?", "no"),
    ("Question 7", "Is there a variation between Python2 and Python3?", "yes"),
    ("Question 8", "Can you use () for a list?", "no"),
    ("Question 9", "Do you start counting at 1?", "no"),
    ("Question 10", "Do you love Python3?", "yes"),
]

# basic messagebox
tkinter.messagebox.showinfo("Python Quiz", message="Welcome to your Python Journey")
for title, prompt, correct in QUESTIONS:
    answer = tkinter.messagebox.askquestion(title, prompt)
    if answer == correct:
        print("correct")
        count = count + 1
    else:
        print("unlucky")

# (original wrote ``, + count`` — a unary plus; plain count prints the same)
print("Your final score is ", count)

def show_result(image_file, canvas_text, button_text, button_message):
    """Show the result image, the score canvas and a button, then run the GUI."""
    photo = tk.PhotoImage(file=image_file)
    lbl = tk.Label(root, image=photo)
    lbl.image = photo  # keep a reference so tkinter doesn't garbage-collect the image
    lbl.pack()
    canvas = tk.Canvas(root, width=450, height=200, bg="white")
    canvas.create_text(225, 100, fill="darkblue", font="Tahoma 20 bold", text=canvas_text)
    canvas.pack()
    tk.Button(root, text=button_text, command=lambda: print(button_message)).pack()
    if __name__ == '__main__':
        root.mainloop()

if count >= 9:
    print("You are a Python Master!")
    show_result('congratulations.png', count, "Print result", 'File has been printed')
else:
    print("Revise! Read! Work Harder!..... Or Else")
    show_result('nexttime.png', "Work harder!", "Good bye!", 'Test completed')
| 3,367 |
vise/analyzer/plot_brillouin_zone.py
|
kumagai-group/vise
| 16 |
2171048
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
import itertools
from dataclasses import dataclass
from math import sqrt
from typing import List, Dict
from monty.json import MSONable
import plotly.graph_objects as go
from vise.analyzer.plot_band_dos import plotly_sanitize_label
from vise.util.plotly_util import sort_coords, make_triangles
import numpy as np
@dataclass
class BZPlotInfo(MSONable):
    """Geometry needed to render a Brillouin zone with plotly."""
    # Each face is a list of Cartesian [x, y, z] vertex coordinates.
    faces: List[List[List[float]]]
    # {"X": {"cart": [0.5, 0, 0], "frac": [0.7514, 0, 0]"}}
    labels: Dict[str, Dict[str, List[float]]]
    # NOTE(review): each band path entry appears to be a pair of endpoint
    # coordinates (see the ``for i, l in ...band_paths`` unpacking in
    # BZPlotlyPlotter.create_figure) — confirm.
    band_paths: List[List[List[float]]] = None
    # Reciprocal lattice vectors, one [x, y, z] row per vector.
    rec_lat_vec: List[List[float]] = None
def pairwise(iterable):
    """Yield neighbouring pairs with wrap-around:
    s -> (s0, s1), (s1, s2), ..., (s[-1], s0).

    The original docstring omitted the closing (last, first) pair, which is
    what ``fillvalue=iterable[0]`` produces to close a polygon outline.
    ``iterable`` must be indexable. An empty input now yields nothing
    instead of raising.
    """
    if len(iterable) == 0:
        return iter(())
    a, b = itertools.tee(iterable)
    next(b, None)
    return itertools.zip_longest(a, b, fillvalue=iterable[0])
class BZPlotlyPlotter:
    """Build a 3-D plotly figure of the Brillouin zone described by a BZPlotInfo."""

    def __init__(self, bz_plot_info: BZPlotInfo):
        self._bz_plot_info = bz_plot_info

    def create_figure(self):
        """Assemble and return the plotly Figure: BZ face outlines, k-point
        labels, band-path segments, Cartesian axes and reciprocal-lattice
        vector arrows."""
        data = []
        # Outline each BZ face as a closed polygon of line segments.
        for face_vertices in self._bz_plot_info.faces:
            vertex_coords = sort_coords(np.array(face_vertices))
            # data.append(go.Mesh3d(**make_triangles(vertex_coords),
            # alphahull=-1, opacity=0.1, hoverinfo='skip',
            # color="blue"))
            for s, t in pairwise(vertex_coords):
                data.append(go.Scatter3d(x=[s[0], t[0]],
                                         y=[s[1], t[1]],
                                         z=[s[2], t[2]], mode="lines",
                                         hoverinfo="none",
                                         marker_color="black"))
        # High-symmetry point markers; hover shows fractional coordinates.
        for label, coords in self._bz_plot_info.labels.items():
            c_coords, f_coords = coords["cart"], coords["frac"]
            data.append(go.Scatter3d(x=[c_coords[0]], y=[c_coords[1]], z=[c_coords[2]],
                                     text=[plotly_sanitize_label(label)],
                                     mode="markers+text",
                                     marker=dict(color="orange", size=6),
                                     meta=f_coords,
                                     hovertemplate="(%{meta[0]:.2f} %{meta[1]:.2f} %{meta[2]:.2f})",
                                     textposition="middle center",
                                     textfont=dict(size=32)))
        # NOTE(review): this unpacking assumes each band path is exactly a
        # pair of endpoint coordinates — confirm against BZPlotInfo producers.
        for i, l in self._bz_plot_info.band_paths:
            data.append(go.Scatter3d(x=[i[0], l[0]],
                                     y=[i[1], l[1]],
                                     z=[i[2], l[2]], mode="lines",
                                     opacity=0.5,
                                     hoverinfo="none",
                                     line_width=8,
                                     marker_color="purple"))
        # Scale axis length and arrow-cone size to the BZ extent.
        _max = np.amax(np.array(sum(self._bz_plot_info.faces, [])))
        c_max = _max * 1.3
        c_cone = _max * 0.3
        # Cartesian x/y/z axis arrowheads (grey cones) and axis lines.
        data.append(go.Cone(x=[c_max], y=[0], z=[0], u=[c_cone], v=[0], w=[0],
                            colorscale=[[0, 'rgb(200,200,200)'],
                                        [1, 'rgb(200,200,200)']],
                            showscale=False, hovertemplate="<b>x</b>"))
        data.append(go.Cone(x=[0], y=[c_max], z=[0], u=[0], v=[c_cone], w=[0],
                            colorscale=[[0, 'rgb(200,200,200)'],
                                        [1, 'rgb(200,200,200)']],
                            showscale=False, hovertemplate="<b>y</b>"))
        data.append(go.Cone(x=[0], y=[0], z=[c_max], u=[0], v=[0], w=[c_cone],
                            colorscale=[[0, 'rgb(200,200,200)'],
                                        [1, 'rgb(200,200,200)']],
                            showscale=False, hovertemplate="<b>z</b>"))
        data.append(go.Scatter3d(x=[0, c_max], y=[0, 0], z=[0, 0], mode="lines",
                                 hoverinfo="none",
                                 marker_color="black"))
        data.append(go.Scatter3d(x=[0, 0], y=[0, c_max], z=[0, 0], mode="lines",
                                 hoverinfo="none",
                                 marker_color="black"))
        data.append(go.Scatter3d(x=[0, 0], y=[0, 0], z=[0, c_max], mode="lines",
                                 hoverinfo="none",
                                 marker_color="black"))
        # Reciprocal lattice vectors kx/ky/kz: normalized, slightly longer
        # than the axes (factor 1.15), drawn as dark cones plus lines.
        for i, direct in zip(self._bz_plot_info.rec_lat_vec, ["kx", "ky", "kz"]):
            nn = sqrt(i[0] ** 2 + i[1] ** 2 + i[2] ** 2)
            kx, ky, kz = np.array(i) * c_max / nn * 1.15
            norm_kx, norm_ky, norm_kz = np.array(i) * c_cone / nn
            data.append(go.Cone(x=[kx], y=[ky], z=[kz],
                                u=[norm_kx], v=[norm_ky], w=[norm_kz],
                                colorscale=[[0, 'rgb(100,100,100)'],
                                            [1, 'rgb(100,100,100)']],
                                showscale=False, hovertemplate=f"<b>{direct}</b>"))
            data.append(go.Scatter3d(x=[0, kx], y=[0, ky], z=[0, kz],
                                     mode="lines",
                                     hoverinfo="none",
                                     marker_color="black"))
        range_max = c_max * 1.4
        fig = go.Figure(data=data)
        # Hide all axis chrome; the drawn lines/cones are the only axes shown.
        fig.update_layout(
            title_font_size=30,
            font_size=24,
            width=900, height=900,
            showlegend=False,
            scene=dict(xaxis=dict(showspikes=False, range=[-range_max, range_max],
                                  showticklabels=False, visible=False),
                       yaxis=dict(showspikes=False, range=[-range_max, range_max],
                                  showticklabels=False, visible=False),
                       zaxis=dict(showspikes=False, range=[-range_max, range_max],
                                  showticklabels=False, visible=False))
        )
        return fig
| 5,952 |
oakling/libs/signal/channel.py
|
zym1115718204/oakling
| 1 |
2171454
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created on 2017-06-08 07:21:34
# plugin
import json
class Channel(object):
    """
    Channel connect to websocket
    """
    def __init__(self, _channel):
        """
        :param _channel: websocket handler used to push messages to the client
        """
        self._channel = _channel

    def _emit(self, data):
        # Build and deliver the standard run-signal payload. The original
        # duplicated this dict construction in log() and prograss().
        payload = json.dumps({
            "client_id": self._channel.client_id,
            "name": "demo2",
            "signal": "run",
            "data": data,
        })
        self._channel.update_cache(payload)
        self._channel.write_message(payload)

    def log(self, log):
        """
        Forward an arbitrary log payload to the connected client.
        :param log: payload placed under the message's "data" key
        """
        if self._channel:
            self._emit(log)
        else:
            # Parenthesized print works on both Python 2 and 3; the original
            # bare print statements were Python-2 only.
            print("Channel unavailable")

    def prograss(self, prograss):
        """
        Report progress (method name kept as-is for API compatibility).
        :param prograss: progress value wrapped as {"prograss": value}
        :return:
        """
        if self._channel:
            self._emit({"prograss": prograss})
        else:
            print("Channel unavailable")
| 1,297 |
2019/try/ps_aux_pandas/reload.py
|
rishidevc/stkovrflw
| 0 |
2170675
|
import subprocess
import os
def get_process(command, param):
    """Run ``command param`` in a shell and return (pid, 0) parsed from the
    second whitespace field of the first output line, or (None, 1) on failure.

    Fix: ``subprocess.check_output`` returns bytes; ``.split('\\n')`` on bytes
    raises TypeError under Python 3 (this file already uses f-strings), so the
    output is now decoded via ``text=True``.
    """
    try:
        print('---' * 30)
        print('GETTING PROCESS ID')
        command = f"{command} {param}"
        print('Executing ' + command)
        output = subprocess.check_output(command, shell=True, text=True)
        print(output)
        lines = output.strip().split('\n')
        pid = lines[0].split()[1]
        print(pid)
        return pid, 0
    except Exception as error:
        print(error)
        return None, 1
def kill_process(command, param):
    """Run ``command param`` in a shell and return its exit status.

    :param command: base command (e.g. ``"kill"``).
    :param param: arguments; must already be fully formatted.  The original
        code tried ``param.format(pid=pid)`` here but ``pid`` was an
        undefined name, raising NameError on every call.
    :return: the command's return code, or 1 if it could not be started.
    """
    try:
        print(f"Executing {command} {param}")
        # shell=True is required because the command is one joined string.
        ret = subprocess.call(f"{command} {param}", shell=True)
    except Exception as error:
        print(error)
        ret = 1
    return ret
def run(commands):
    """Execute an ordered mapping of deployment steps.

    Known keys: ``cd``, ``git`` (list of sub-commands), ``ps`` (locate a
    process id), ``kill`` (kill it; ``{pid}`` in the value is substituted
    with the id found by the ``ps`` step).  Stops at the first failure.

    :param commands: ordered mapping of step name -> arguments.
    :return: 0 on success, non-zero exit status of the first failing step.
    """
    pid = None
    ret = 0  # an empty command map is a no-op success (was unbound before)
    for key, value in commands.items():
        print('---' * 30)
        if key == 'ps':
            # The original called the nonexistent name ``get_process_id``.
            pid, ret = get_process(key, value)
            if ret:
                break
        elif key == 'kill':
            # Substitute the pid found by the 'ps' step before invoking.
            ret = kill_process(key, value.format(pid=pid))
            if ret:
                break
        elif key == "git":
            for i, sub_command in enumerate(value):
                command = f"{key} {sub_command}"
                print(f'{i} Executing {command}')
                ret = subprocess.call(command, shell=True)
                if ret:  # was inverted: it aborted after the first *success*
                    break
            if ret:
                break
        elif key == "cd":
            # NOTE(review): 'cd' in a subshell cannot change this process's
            # working directory -- confirm this step is actually useful.
            ret = subprocess.call(f"{key} '{value}'", shell=True)
            if ret:  # was inverted
                break
        else:
            ret = 0
    print('---' * 30)
    return ret
def deploy(*args, project_path="/home/centos/pandora", branch='jwt-auth-2', filter_str='uwsgi', port=8002, **kwargs):
    """Build the ordered step list for redeploying the service and run it.

    :param project_path: working copy to update.
    :param branch: git branch pulled from ``origin``.
    :param filter_str: pattern used to locate the running server process.
    :param port: HTTP port passed to uwsgi.
    :return: exit status of :func:`run` (0 on success).
    """
    from collections import OrderedDict
    commands = OrderedDict()
    commands["cd"] = project_path
    # The original list was missing commas, so the adjacent string literals
    # silently concatenated into one bogus sub-command
    # ("statuslogstashpull origin ...").
    commands["git"] = [
        "status",
        "log",
        "stash",
        f"pull origin {branch}",
    ]
    commands["ps"] = f"aux | grep '{filter_str}'"     # e.g. ps aux | grep uwsgi
    commands["kill"] = '-9 {pid}'                     # {pid} filled in by run()
    # NOTE(review): run() has no 'uwsgi' branch, so this step is a no-op.
    commands["uwsgi"] = f'server.ini --http :{port}'  # 8002
    return run(commands)
if __name__ == "__main__":
project_path = "/Users/hygull/Projects/Python3/CorporateFdRoot/pandora_old"
ret = deploy(project_path= project_path)
if ret:
print('OPERATION FAILED')
else:
print('OPERATION SUCCESSFUL')
| 2,002 |
src/anycsv/io_tools.py
|
jumbrich/pyanycsv
| 0 |
2171935
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import logging
from collections import namedtuple
from pathlib import Path
from anycsv import exceptions
from pyjuhelpers.logging import log_func_detail
logger = logging.getLogger(__name__)
import os
import gzip
import requests
import io
import logging
from contextlib import closing
import structlog
log = structlog.get_logger()
@log_func_detail(log)
def getContentFromDisk(fname_csv, max_lines=-1):
    """Read a (possibly gzipped) file as raw bytes, optionally truncated.

    :param fname_csv: path; names ending in '.gz' are gunzipped on the fly.
    :param max_lines: for gz files, reads ``max_lines + 1`` lines when > -1,
        otherwise the whole file.  NOTE(review): for plain files the value is
        forwarded to ``readlines(hint)``, where it is a *byte-size* hint, not
        a line count, and the default -1 is truthy so the limited branch
        always runs -- confirm this asymmetry is intended.
    :return: file content as ``bytes``.
    :raises FileNotFoundError: if the path does not exist.
    """
    if not Path(fname_csv).exists():
        raise FileNotFoundError("File {} does not exist".format(fname_csv))
    if fname_csv[-3:] == '.gz':
        with gzip.open(fname_csv, 'rb') as f:
            if max_lines > -1:
                file_content = b''
                c =0
                for line in f:
                    file_content += line
                    c+=1
                    if c > max_lines:
                        break
            else:
                file_content = f.read()
    else:
        with io.open(fname_csv, 'rb') as f:
            if max_lines:
                file_content = b''
                for line in f.readlines(max_lines):
                    file_content += line
            else:
                file_content = f.read()
    return file_content
@log_func_detail(log)
def getContentAndHeaderFromWeb(url, max_lines=100):
    """Fetch *url* and return its (partial) body, headers and status code.

    :param url: HTTP(S) URL to download.
    :param max_lines: maximum number of lines to read; a negative value
        downloads the whole body.
    :return: tuple ``(content, headers, status_code)``; *content* is bytes
        (the first *max_lines* lines joined by ``b"\\n"``).  The caller
        (``getContentAndHeader``) unpacks three values, but the original
        returned only two.
    :raises requests.HTTPError: on a non-2xx response.
    """
    with closing(requests.get(url, timeout=1)) as r:
        r.raise_for_status()
        headers = r.headers
        status_code = r.status_code
        if max_lines > -1:
            lines = []
            for line in r.iter_lines(chunk_size=64 * 1024):
                lines.append(line)
                if len(lines) >= max_lines:
                    break
            content = b'\n'.join(lines)
        else:
            # Response objects have no .read(); .content is the full body.
            content = r.content
    return content, headers, status_code
# Result of getContentAndHeader: raw bytes, HTTP headers (None for local
# files), the exception that aborted the read (None on success) and an
# HTTP-like status code (701 flags a failed read; None for local success).
# The original declared only three fields but was constructed with four
# arguments, raising TypeError on every call.
ContentHeaderResult = namedtuple(
    "ContentHeaderResult", ['content', 'header', 'exception', 'status_code'])


def getContentAndHeader(csv=None, max_lines=None) -> ContentHeaderResult:
    """Load the first *max_lines* lines of *csv* from the web or from disk.

    :param csv: URL (http/https) or local file path.
    :param max_lines: line limit forwarded to the loader.
    :return: :class:`ContentHeaderResult`; ``exception`` is set and
        ``status_code`` is 701 when the read failed.
    """
    content = None
    header = None
    exception = None
    status_code = None  # was unbound on the successful local-file path
    try:
        # 'import urllib' alone does not guarantee the 'parse' submodule is
        # loaded; import it explicitly.
        import urllib.parse
        if urllib.parse.urlparse(csv).scheme in ['http', 'https']:
            content, header, status_code = getContentAndHeaderFromWeb(csv, max_lines=max_lines)
        else:
            content = getContentFromDisk(csv, max_lines=max_lines)
    except Exception as e:
        log.debug("Exception ", csv=csv, exec=e.__class__.__name__, msg=str(e))
        exception = e
        status_code = 701
    return ContentHeaderResult(content, header, exception, status_code)
class TextIOBase(object):
    """Iterable line reader that decodes a binary stream on the fly.

    Wraps a byte-oriented stream and yields ``str`` lines decoded with the
    configured encoding.  NOTE(review): iteration never raises StopIteration;
    an exhausted stream keeps yielding empty strings (as in the original).
    """

    def __init__(self, ios, encoding):
        self.ios = ios
        self.encoding = encoding

    def __iter__(self):
        return self

    def __next__(self):
        return self.readline()

    def readline(self, *args, **kwargs):
        """Read one raw line from the wrapped stream and decode it."""
        raw = self.ios.readline()
        return raw.decode(self.encoding)
class BufferedAutoEncodingStream(object):
    """Line stream over a URL or local (optionally gzipped) file with a
    replayable prefix buffer.

    The first ``max_buffer`` lines are kept so :meth:`reset` can rewind to
    the start (e.g. after dialect sniffing).  An MD5 of all bytes read is
    accumulated in ``self.hash``; ``self.digest`` is set when the underlying
    input is exhausted.
    """

    def __init__(self, csv, max_buffer=100, max_file_size=-1):
        """
        :param csv: URL (http/https) or local path; '.gz'/'.gzip' suffixes
            are gunzipped transparently.
        :param max_buffer: number of initial lines kept for replay.
        :param max_file_size: byte budget; exceeded reads raise
            FileSizeException (-1 disables the check).
        """
        self.hash = hashlib.md5()
        self.digest = None
        import urllib
        if urllib.parse.urlparse(csv).scheme in ['http', 'https']:
            resp = requests.get(csv, timeout=1)
            resp.raise_for_status()

            def my_iter_lines(result):
                # Hand-rolled line splitter over the raw byte chunks; unlike
                # requests' iter_lines it keeps a trailing unterminated line.
                buf = b''
                for chunk in result.iter_content(chunk_size=64 * 1024, decode_unicode=False):
                    buf += chunk
                    pos = 0
                    while True:
                        eol = buf.find(b'\n', pos)
                        if eol != -1:
                            yield buf[pos:eol]
                            pos = eol + 1
                        else:
                            buf = buf[pos:]
                            break
                if buf:
                    yield buf

            self.input = my_iter_lines(resp)
            #self.input = resp.iter_lines(chunk_size=128 * 1024)#, delimiter=b"\n")
        else:
            if not Path(csv).exists():
                raise FileNotFoundError("File {} does not exist".format(csv))
            if csv.endswith('.gz') or csv.endswith('.gzip'):
                ios = gzip.open(csv, 'rb')
            else:
                ios = io.open(csv, 'rb')
            self.input = ios
        self.max_file_size = max_file_size
        self.buffered_lines=[]
        self.lines_read = 0
        self.max_buffer_lines = max_buffer
        self.total_bytes_read=0

    @property
    def cur_buffer_size(self):
        # Number of lines currently replayable.
        return len(self.buffered_lines)

    @property
    def _buffer_full(self):
        return self.cur_buffer_size >= self.max_buffer_lines

    def reset(self):
        """Rewind to the first line; only possible while still inside the
        replay buffer."""
        if self.lines_read > self.cur_buffer_size:
            raise IOError("Cannot reset buffer, more lines than buffer size read")
        self.lines_read = 0

    def __iter__(self):
        return self

    def readline(self):
        """Return the next line (bytes), serving from the replay buffer when
        rewound; raises StopIteration at end of input."""
        try:
            if self.lines_read >= self.cur_buffer_size:
                #we have more lines read as in buffer
                line = next(self.input)
                self.hash.update(line)
                self.total_bytes_read += len(line)
                if self.max_file_size>0 and self.total_bytes_read > self.max_file_size:
                    raise exceptions.FileSizeException(
                        "Maximum file size exceeded {} > {} ".format(self.total_bytes_read, self.max_file_size))
                if not self._buffer_full:
                    self.buffered_lines.append(line)
            else:
                line = self.buffered_lines[self.lines_read]
            self.lines_read +=1
            return line
        except StopIteration as e:
            # Input exhausted: freeze the checksum before propagating.
            self.digest = self.hash.hexdigest()
            raise e

    def __next__(self):
        # NOTE(review): the bare except converts *any* error -- including
        # FileSizeException raised by readline -- into StopIteration, so
        # iteration silently truncates on errors; confirm this is intended.
        try:
            return self.readline()
        except:
            raise StopIteration()

    def close(self):
        """Close the underlying local file (no-op safety for URL input is
        not provided; generators have no close semantics here)."""
        self.input.close()
if __name__ == '__main__':
    # Library-only module; no CLI behaviour.
    pass
| 6,606 |
app/usage.py
|
rgabeflores/Python-App-Manager
| 0 |
2170847
|
class UsageException(Exception):
    '''Exception raised for incorrect command line usage.

    Attributes:
        expression -- input expression
        message -- explanation of usage (defaults to the generic usage text)
    '''

    def __init__(self, expression, message=None, *args, **kwargs):
        # Forward only genuine positional args to Exception; the original
        # packed the args tuple and kwargs dict themselves into
        # Exception.args by mistake.
        super().__init__(expression, *args)
        self.expression = expression
        # Honour an explicit message; the original silently discarded it.
        if message is None:
            message = '''
        Usage:
            python manage.py <options> <projectname>

        Options:
            -v  Create project with virtualenv (must have virtualenv installed)
            -w  Create project with Website Files
            -d  Start with debug mode on
        '''
        self.message = message

    def __str__(self):
        return 'Incorrect Usage.\n{}'.format(self.message)

    def __repr__(self):
        return 'Incorrect Usage.\n{}'.format(self.message)
| 834 |
PyPractice/Q19.py
|
coedfetr/STTP-python
| 0 |
2171929
|
def is_semordnilap(filepath):
    """Return words from *filepath* whose reversal also appears in the file.

    Palindromes match themselves and, as in the original implementation, a
    word is listed once per occurrence of its reversal, in file order.

    :param filepath: path to a whitespace-separated word list.
    :return: list of matching words (possibly with repetitions).
    """
    # Close the file deterministically; the original leaked the handle.
    with open(filepath) as file:
        words = file.read().split()
    # Count occurrences once so the scan is O(n) instead of O(n^2).
    counts = {}
    for word in words:
        counts[word] = counts.get(word, 0) + 1
    results = []
    for word in words:
        results.extend([word] * counts.get(word[::-1], 0))
    return results
# Report the semordnilaps found in the bundled word list.
print(is_semordnilap('Q19.txt'))
| 284 |
Python/IntermediatePython/args and kwargs/Args.py
|
Jac21/GistsCollection
| 5 |
2171474
|
"""
*args and **kwargs are used in function definitions, allow you
to pass a variable number of arguments to a function.
"""
def test_var_args(f_arg, *argv):
print("first normal arg: ", f_arg)
for arg in argv:
print("Another arg through argv: ", arg)
test_var_args('python', 'test', 'function', 'testable')
# NOTE(review): py2-only print statement; this file mixes 'print x' with
# py3-style print() calls, so it only runs under Python 2.
print '\n'

"""
**kwargs allows one to pass keyworded variable length args, used
to handle named args in a function
"""
def greet_me(**kwargs):
    """Demonstrate **kwargs: print each keyword argument as 'key == value'."""
    for name in kwargs:
        print("{0} == {1}".format(name, kwargs[name]))
greet_me(name="Jeremy")
print '\n'
"""
Using *args and **kwargs to call a function...
"""
# first with *args
def test_args_kwargs(arg1, arg2, arg3):
print("arg1: ", arg1)
print("arg2: ", arg2)
print("arg3: ", arg3)
test_args_kwargs("two", 3, 5)
# now with **kwargs
kwargs = {"arg3": 3, "arg2": "two", "arg1": 5}
test_args_kwargs(**kwargs)
| 887 |
main.py
|
CuBeOnHere/python-pong
| 0 |
2171764
|
import pygame
from pygame.locals import *
import sys
import random
def ball_animation():
    """Advance the ball one frame: move, bounce off walls, score, and
    bounce off paddles (mutates the module-level game state)."""
    global ball_speed_x, ball_speed_y, player_score, opponent_score, score_time
    ball.x += ball_speed_x
    ball.y += ball_speed_y
    # Bounce off the top and bottom walls.
    if ball.top <= 0 or ball.bottom >= screen_height:
        pygame.mixer.Sound.play(wallhit_sound)
        ball_speed_y *= -1
    # Player Score
    if ball.left <= 0:
        pygame.mixer.Sound.play(score_sound)
        player_score += 1
        score_time = pygame.time.get_ticks()  # starts the serve countdown
    # Opponent Score
    if ball.right >= screen_width:
        pygame.mixer.Sound.play(score_sound)
        opponent_score += 1
        score_time = pygame.time.get_ticks()
    # Paddle collisions: the 10px tolerance decides whether the ball hit the
    # paddle's face (horizontal bounce) or clipped its edge (vertical bounce).
    if ball.colliderect(player) and ball_speed_x > 0:
        pygame.mixer.Sound.play(pong_sound)
        if abs(ball.right - player.left) < 10:
            ball_speed_x *= -1
        elif abs(ball.bottom - player.top) < 10 and ball_speed_y > 0:
            ball_speed_y *= -1
        elif abs(ball.top - player.bottom) < 10 and ball_speed_y < 0:
            ball_speed_y *= -1
    if ball.colliderect(opponent) and ball_speed_x < 0:
        pygame.mixer.Sound.play(pong_sound)
        if abs(ball.left - opponent.right) < 10:
            ball_speed_x *= -1
        elif abs(ball.bottom - opponent.top) < 10 and ball_speed_y > 0:
            ball_speed_y *= -1
        elif abs(ball.top - opponent.bottom) < 10 and ball_speed_y < 0:
            ball_speed_y *= -1
def player_animation():
    """Advance the player paddle by its current speed and keep it on screen."""
    player.y += player_speed
    # Clamp the paddle to the playfield.
    player.top = max(player.top, 0)
    player.bottom = min(player.bottom, screen_height)
def opponent_ai():
    """Track the ball vertically at opponent_speed, clamped to the screen."""
    # Chase the ball's y position from whichever side the paddle is on.
    if opponent.top < ball.y:
        opponent.y += opponent_speed
    if opponent.bottom > ball.y:
        opponent.y -= opponent_speed
    # Clamp the paddle to the playfield.
    opponent.top = max(opponent.top, 0)
    opponent.bottom = min(opponent.bottom, screen_height)
def ball_restart():
    """Hold the ball at centre during the 3-2-1 countdown, then serve it in
    a random diagonal direction (mutates module-level state)."""
    global ball_speed_x, ball_speed_y, score_time
    current_time = pygame.time.get_ticks()
    ball.center = (screen_width / 2, screen_height / 2)
    # Countdown digits, one per 700 ms window since the last score.
    if current_time - score_time < 700:
        number_three = basic_font.render('3', False, light_gray)
        screen.blit(number_three, (screen_width / 2 - 10, screen_height / 2 + 20))
    if 700 < current_time - score_time < 1400:
        number_two = basic_font.render('2', False, light_gray)
        screen.blit(number_two, (screen_width / 2 - 10, screen_height / 2 + 20))
    if 1400 < current_time - score_time < 2100:
        number_one = basic_font.render('1', False, light_gray)
        screen.blit(number_one, (screen_width / 2 - 10, screen_height / 2 + 20))
    if current_time - score_time < 2100:
        ball_speed_x, ball_speed_y = 0, 0  # freeze while counting down
    else:
        # Serve and clear the countdown flag checked in the main loop.
        ball_speed_x = 7 * random.choice((1, -1))
        ball_speed_y = 7 * random.choice((1, -1))
        score_time = None
# General Setup
pygame.mixer.pre_init(44100, -16, 2, 512)  # small buffer = low sound latency
pygame.init()
clock = pygame.time.Clock()

# Screen setup
screen_width = 1280
screen_height = 960
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Pong')
logo = pygame.image.load('logo.png')
pygame.display.set_icon(logo)

# Game Rectangles
ball = pygame.Rect(screen_width / 2 - 15, screen_height / 2 - 15, 30, 30)
player = pygame.Rect(screen_width - 20, screen_height / 2 - 70, 10, 140)
opponent = pygame.Rect(10, screen_height / 2 - 70, 10, 140)

# Colors
bg_color = pygame.Color('grey12')
light_gray = (200, 200, 200)
player_score_color = (200, 200, 200)
opponent_score_color = (200, 200, 200)

# Initial ball direction is a random diagonal; the player paddle is keyboard
# driven while the opponent moves at a constant tracking speed.
ball_speed_x = 7 * random.choice((1, -1))
ball_speed_y = 7 * random.choice((1, -1))
player_speed = 0
opponent_speed = 7

# Score Text
player_score = 0
opponent_score = 0
basic_font = pygame.font.Font('freesansbold.ttf', 32)

# Sound
pong_sound = pygame.mixer.Sound('pong.wav')
wallhit_sound = pygame.mixer.Sound('wallhit.wav')
score_sound = pygame.mixer.Sound('score.wav')

# Score Timer
# Truthy start value forces the initial serve countdown on the first frame;
# ball_restart() later stores tick timestamps (or None) here.
score_time = True
while True:
    # Handling input
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        # Speed deltas are symmetric: each KEYUP undoes its KEYDOWN, so
        # holding both arrow keys nets to zero movement.
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_DOWN:
                player_speed += 7
            if event.key == pygame.K_UP:
                player_speed -= 7
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_DOWN:
                player_speed -= 7
            if event.key == pygame.K_UP:
                player_speed += 7

    # Game Logic
    ball_animation()
    player_animation()
    opponent_ai()

    # Visuals
    screen.fill(bg_color)
    pygame.draw.rect(screen, light_gray, player)
    pygame.draw.rect(screen, light_gray, opponent)
    pygame.draw.ellipse(screen, light_gray, ball)
    pygame.draw.aaline(screen, light_gray, (screen_width / 2, 0), (screen_width / 2, screen_height))
    # score_time is truthy while a serve countdown is pending (see ball_restart).
    if score_time:
        ball_restart()

    # Score
    player_text = basic_font.render(f'{player_score}', False, player_score_color)
    screen.blit(player_text, (660, 470))
    opponent_text = basic_font.render(f'{opponent_score}', False, opponent_score_color)
    screen.blit(opponent_text, (600, 470))

    # Change 'Winner' score color
    if player_score > opponent_score:
        player_score_color = (255, 223, 0)
    if player_score < opponent_score:
        opponent_score_color = (255, 223, 0)
    if player_score == opponent_score:
        player_score_color = (200, 200, 200)
        opponent_score_color = (200, 200, 200)

    # Updating the window
    pygame.display.flip()
    clock.tick(60)  # cap the frame rate at 60 fps
| 5,818 |
src/common.py
|
bcopos/network-traffic-analysis_tools
| 0 |
2172032
|
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import time
def filter_by_ip(data, ip):
    """Return records whose originator *or* responder address equals *ip*.

    Originator matches come first, then responder matches (a record matching
    both appears twice, as in the original).
    """
    matches = list(filter_by_field(data, 'orig_h', ip))
    matches += filter_by_field(data, 'resp_h', ip)
    return matches
def filter_by_protocol(data, protocol):
    """Return only the records whose ``proto`` attribute equals *protocol*."""
    return filter_by_field(data, 'proto', protocol)
def filter_by_field(data, field, value):
    """Return the records whose attribute *field* equals *value*.

    :param data: iterable of records exposing *field* as an attribute.
    :param field: attribute name to compare.
    :param value: value to match exactly.
    :return: list of matching records, in input order.
    """
    return [record for record in data if getattr(record, field) == value]
def get_unique_field(data, field):
    """Return the set of distinct values of attribute *field* across *data*."""
    return {getattr(record, field) for record in data}
def get_unique_src_ips(data):
    """Distinct originator ('orig_h') addresses seen in *data*."""
    return get_unique_field(data, 'orig_h')
def get_unique_dst_ips(data):
    """Distinct responder ('resp_h') addresses seen in *data*."""
    return get_unique_field(data, 'resp_h')
def get_stats_per_dstip(data):
    """Aggregate per-destination-IP stats: per-protocol hit count and ports.

    :param data: records with ``resp_h``, ``resp_p`` and ``proto`` attributes.
    :return: ``{dst_ip: {proto: {'count': int, 'ports': set}}}``
    """
    results = dict()
    for datum in data:
        dst_ip = datum.resp_h
        dst_port = datum.resp_p
        proto = datum.proto
        # setdefault keeps existing protocols for the IP; the original
        # KeyError handler rebuilt the whole per-IP dict whenever a new
        # protocol appeared, discarding the stats gathered so far.
        stats = results.setdefault(dst_ip, {}).setdefault(
            proto, {'count': 0, 'ports': set()})
        stats['count'] += 1
        stats['ports'].add(dst_port)
    return results
def extract_inter_timing(data):
    """Gaps between consecutive timestamp pairs (t1-t0, t3-t2, ...).

    Timestamps are sorted and normalised to the first one before pairing.

    :param data: records with a ``ts`` attribute convertible to float.
    :return: list of gaps, one per non-overlapping pair of sorted timestamps
        (an odd trailing timestamp is ignored); empty list for empty input
        (the original raised IndexError).
    """
    timestamps = sorted(float(datum.ts) for datum in data)
    if not timestamps:
        return []
    # Normalise in-line; this removes the unused ``initial_time`` local and
    # the dependency on the module-level ``normalize`` helper.
    start = timestamps[0]
    rel = [t - start for t in timestamps]
    # range stops before the last index so no IndexError handling is needed.
    return [rel[i + 1] - rel[i] for i in range(0, len(rel) - 1, 2)]
def extract_frequencies(data, sampling_period, window_function = np.blackman):
    """Windowed FFT of *data*: plot and return the positive-frequency half.

    NOTE(review): indexing with ``N/2`` relies on Python 2 integer division;
    under Python 3 these are float indices and raise TypeError.  Side
    effect: opens a matplotlib window via plt.show().

    :param data: list of sample values.
    :param sampling_period: time step between samples.
    :param window_function: callable mapping a length to window coefficients.
    :return: tuple ``(frequencies, real_spectrum)`` for the upper half.
    """
    # data is a list of values
    # apply fourier transform
    # apply window function
    data = np.asarray([x*y for x,y in zip(data, window_function(len(data)))])
    N = len(data) # number of data points
    dt = sampling_period
    yf = np.fft.fft(data)
    #yf = np.fft.fftshift(yf)
    n = np.arange(-N/2, N/2, 1)
    freqs = np.true_divide(n, N*dt)
    plt.figure()
    plt.plot(freqs[N/2:], np.abs(yf[:N//2:]), 'o-')
    plt.xlim(0, max(freqs[N/2:]))
    plt.show()
    return (freqs[N/2:], yf.real[N/2:])
def extract_psd(data, window_size, window_overlap, window_function=np.blackman, sampling_freq=1.0):
    """Welch power-spectral-density estimate of *data*.

    :param data: 1-D signal.
    :param window_size: samples per segment (``nperseg``).
    :param window_overlap: overlapping samples between segments.
    :param window_function: callable mapping a length to window coefficients
        (Welch expects coefficients, not the callable the original passed).
    :param sampling_freq: sampling frequency in Hz; the original referenced
        an undefined ``sampling_freq`` name -- it is now a parameter.
    :return: tuple ``(frequencies, psd)`` as returned by scipy.signal.welch.
    """
    # 'import scipy' alone does not expose scipy.signal; import explicitly.
    import scipy.signal
    return scipy.signal.welch(data, fs=sampling_freq,
                              window=window_function(window_size),
                              nperseg=window_size, noverlap=window_overlap)
# returns list of data sizes sent during connections
def extract_data_sizes(data):
    """Originator byte counts per record, in timestamp order.

    Records whose ``orig_bytes`` is not numeric (e.g. '-') contribute 0.

    :param data: records with ``ts`` and ``orig_bytes`` attributes.
    :return: list of floats (0 for unparseable entries), one per record.
    """
    results = []
    omitted = 0
    for datum in sorted(data, key=lambda record: record.ts):
        try:
            results.append(float(datum.orig_bytes))
        except ValueError:
            results.append(0)
            omitted += 1
    if omitted:
        # print() is valid on Python 2 and 3; the original used the
        # py2-only print statement, which breaks under py3.
        print("Zerod {0} items in size extraction".format(omitted))
    return results
def extract_times(data):
    """Timestamps of *data* as floats, ordered by the raw ``ts`` field."""
    return [float(record.ts) for record in sorted(data, key=lambda r: r.ts)]
def extract_items_per_n_seconds_window(data, seconds):
    """Sum ``orig_ip_bytes`` per fixed-width time window across *data*.

    NOTE(review): the final partial window (where end > max_time) is
    dropped; every window rescans the whole data set (O(n * windows)); and
    the error branch uses a py2-only print statement.

    :param data: records with ``ts`` and ``orig_ip_bytes`` attributes.
    :param seconds: window width in seconds.
    :return: tuple ``(window_start_times, per_window_byte_totals)``.
    """
    sorted_data = sorted(data, key=lambda k: k.ts)
    # Round the time bounds down/up to whole seconds.
    min_time = float(sorted_data[0].ts) - (float(sorted_data[0].ts) - int(float(sorted_data[0].ts)))
    max_time = float(sorted_data[-1].ts) + (1 - (float(sorted_data[-1].ts) - int(float(sorted_data[-1].ts))))
    sizes = []
    times = []
    start = min_time
    end = start + seconds
    count = 0
    while True:
        total_size = 0
        count += 1
        if end > max_time:
            break
        for datum in data:
            # Half-open window: [start, end).
            if float(datum.ts) >= start and float(datum.ts) < end:
                try:
                    total_size += int(datum.orig_ip_bytes)
                except ValueError:
                    print "error"
                    continue
        times.append(start)
        sizes.append(total_size)
        start = end
        end = start + seconds
    return times, sizes
def graph_tcp_data_sizes_per_ip(data, ip):
    """Plot TCP payload sizes over normalised time and save/show the chart.

    NOTE(review): despite the name, this filters by protocol 'tcp' only --
    the *ip* argument is used just in the output file name, not as a
    filter; confirm whether per-IP filtering was intended.
    """
    cip = filter_by_protocol(data, "tcp")
    sizes = extract_data_sizes(cip)
    times = extract_times(cip)
    normalized_times = normalize(times)
    graph_xy(normalized_times, sizes, 'connection-data-sizes-'+ip)
def normalize(sorted_data):
    """Shift *sorted_data* so its first element becomes 0."""
    origin = sorted_data[0]
    return [value - origin for value in sorted_data]
def split_by_24hr(connections):
    """Partition *connections* into consecutive 24-hour buckets by 'ts'.

    NOTE(review): unlike the attribute-style records used elsewhere in this
    module, this expects dict-like records (``conn['ts']``); it also calls
    ``convert_epoch_time``, which is not defined in this module --
    presumably provided at the call site.  Uses py2-only print statements.

    :return: list of lists; each inner list holds one day's connections.
    """
    sorted_conns = sorted(connections, key=lambda k: k['ts'])
    connections_per_hour = []
    start = float(sorted_conns[0]['ts'])
    hour = []
    print convert_epoch_time(start)
    for conn in sorted_conns:
        if float(conn['ts']) <= (start + 86400):
            hour.append(conn)
        else:
            # Start a new bucket anchored at this connection's timestamp.
            start = float(conn['ts'])
            print convert_epoch_time(start)
            connections_per_hour.append(hour)
            hour = []
            hour.append(conn)
    connections_per_hour.append(hour)
    return connections_per_hour
def graph_per_ip(connections):
    """Plot inter-connection timing for every destination IP.

    NOTE(review): relies on ``get_connections_per_dst_ip`` and
    ``frequency_between_conn``, neither defined in this module -- confirm
    they exist where this is called.  Uses a py2-only print statement.
    """
    ips = get_unique_dst_ips(connections)
    for ip in ips:
        print ip
        ipConns = get_connections_per_dst_ip(connections, ip)
        graph(frequency_between_conn(ipConns), 'nest-therm-' + ip + '.pdf')
def graph_xy(x, y, filename):
    """Bar-plot *y* against *x*, save it as ``<filename>.pdf`` and show it.

    :param x: x values (times, seconds).
    :param y: bar heights (payload bytes).
    :param filename: output path *without* extension.
    """
    FORMAT = 'pdf'
    plt.bar(x, y)
    plt.xlabel('Time (seconds)')
    plt.ylabel('Payload Bytes Sent')
    # The original concatenated filename + FORMAT without the dot,
    # producing file names like 'connection-data-sizes-1.2.3.4pdf'.
    plt.savefig(filename + '.' + FORMAT, format=FORMAT)
    plt.show()
def graph(data, filename):
    """Line-plot *data* and display it.

    The *filename* save is currently disabled (commented out), so the
    parameter is unused at runtime.
    """
    plt.plot(data)
    plt.xlabel('Connection Index')
    plt.ylabel('Delta Time BTW Connections')
    #plt.savefig(filename,format='pdf')
    plt.show()
def graph_multiple(dataList):
    """Overlay several data sets on one chart, one color per set.

    Returns 1 (error) when there are more data sets than available colors,
    otherwise shows the plot.  NOTE(review): uses a py2-only print
    statement, and ``plt.bar`` is called with a single positional argument
    although it requires both x positions and heights -- confirm this ever
    rendered as intended.
    """
    colors = ["black", "green", "red", "blue", "yellow", "cyan", "magenta"]
    count = 0
    if len(dataList) > len(colors):
        print "Too many data sets, not enough colors"
        return 1
    for data in dataList:
        plt.bar(data, color=colors[count], label=count)
        count += 1
    #plt.plot(d1, color="black", label="172.16.58.3")
    #plt.plot(d2, color="green", label="172.16.58.3")
    #plt.plot(d3, color="red", label="192.168.127.12")
    #plt.plot(d4, color="blue", label="172.16.58.3")
    plt.xlabel('Connection Index')
    plt.ylabel('Time Delta')
    plt.show()
def get_items_for_day(conns, start_time):
    """Records whose timestamp lies in [start_time, start_time + 24h].

    Both interval ends are inclusive, matching the original comparisons.
    """
    end_time = start_time + (60 * 60 * 24)
    return [c for c in conns if start_time <= float(c.ts) <= end_time]
| 6,135 |
kkt/fetch.py
|
ar90n/kkt
| 1 |
2170088
|
from typing import List
from pathlib import Path
from dataclasses import dataclass
import requests
@dataclass
class PackageLocation:
    """Where to fetch a package archive from and what to call it on disk."""
    # Download URL of the package archive.
    url: str
    # Local file name to save the downloaded archive under.
    name: str
def fetch_packages(
    locations: List[PackageLocation], save_dir: Path, quiet: bool = False
) -> List[Path]:
    """Download every package in *locations* into *save_dir*.

    :param locations: packages to download.
    :param save_dir: directory receiving the files.
    :param quiet: suppress the per-file progress message when True.
    :return: paths of the files written, in input order.
    """
    written = []
    for location in locations:
        target = save_dir / location.name
        payload = requests.get(location.url).content
        with target.open("wb") as sink:
            sink.write(payload)
        written.append(target)
        if not quiet:
            print("Output file downloaded to %s" % str(target))
    return written
| 619 |
python/problem_74.py
|
leoriviera/Project-Euler
| 1 |
2171068
|
import math
def problem_74(limit=1000000, target_length=60):
    """Count digit-factorial chains of a given length (Project Euler 74).

    A chain starts at n and repeatedly maps a number to the sum of the
    factorials of its digits, stopping just before the first repeated value;
    the chain length is the number of distinct terms produced.

    :param limit: exclusive upper bound on starting numbers (default 10**6,
        the original hard-coded range).
    :param target_length: chain length to count (default 60, as original).
    :return: how many n in [1, limit) yield exactly *target_length* terms.
    """
    # Hoist the ten digit factorials out of the loops; the original called
    # math.factorial per digit per step.
    digit_factorial = [math.factorial(d) for d in range(10)]
    chain_count = 0
    for n in range(1, limit):
        seen = {n}  # set membership is O(1); the original list scan was O(k)
        current = n
        while True:
            nxt = sum(digit_factorial[int(digit)] for digit in str(current))
            if nxt in seen:
                break
            seen.add(nxt)
            current = nxt
        if len(seen) == target_length:
            chain_count += 1
    return chain_count
if __name__ == "__main__":
answer = problem_74()
print(answer)
| 714 |
concerts_monitor/backstage.py
|
sir-Gollum/concertmon
| 1 |
2172038
|
# coding: utf-8
import os
import requests
from lxml import html
import tabulate
from .last_fm import get_top_bands
from .data import BackstageEvent
def get_backstage_events():
    """Scrape all paginated live-event listings from backstage.info.

    Crawls successive pages (``.../live``, ``.../live/1``, ...) until a page
    yields no events.

    :return: list of BackstageEvent built from title, bands and the raw
        datetime string found on each listing element.
    """
    # We crawl this and don't know the number of pages in advance
    page = -1
    events = []
    while True:
        found_on_page = 0
        page += 1
        url = 'http://backstage.info/veranstaltungen-2/live'
        if page:
            url += '/' + str(page)
        print(f'Requesting {url}...')
        markup = requests.get(url).text
        etree = html.fromstring(markup)
        print(f'Parsing response from {url}...')
        for el in etree.xpath('//div[@class="items"]/*/*'):
            title = el.xpath('string((.//*[@title])[1]/@title)').strip()
            bands = el.xpath('string(.//h5)').strip()
            datetime = el.xpath('string((.//p)[1])').strip()
            # Skip decorative nodes carrying neither a date nor any text.
            if not datetime or not title + bands:
                continue
            events.append(BackstageEvent(title=title, bands=bands, dt=datetime))
            found_on_page += 1
        print(f'Found {found_on_page} events on {url}')
        if not found_on_page:
            break
    return events
def sort_and_deduplicate_events(events):
    """Sort events by (date, title, bands) and drop consecutive duplicates.

    Two adjacent events count as duplicates when their string
    representations are equal.
    """
    ordered = sorted(events, key=lambda ev: (ev.dt, ev.title, ev.bands))
    unique = []
    for ev in ordered:
        if not unique or str(ev) != str(unique[-1]):
            unique.append(ev)
    return unique
if __name__ == '__main__':
    # Credentials come from the environment; a missing variable raises
    # KeyError immediately rather than failing later against the API.
    LASTFM_USERNAME = os.environ['LASTFM_USERNAME']
    LASTFM_API_KEY = os.environ['LASTFM_API_KEY']
    LASTFM_PAGES_TO_FETCH = 5
    bands = get_top_bands(LASTFM_PAGES_TO_FETCH, LASTFM_USERNAME, LASTFM_API_KEY)
    print(f'Got {len(bands)} bands')
    events = sort_and_deduplicate_events(get_backstage_events())
    print(f'Got {len(events)} events')
    # One blacklisted band name per line, stored next to this module.
    with open(os.path.join(os.path.dirname(__file__), 'bands_blacklist.txt')) as f:
        blacklist = set([l.strip() for l in f.read().split(u'\n')])
    print('Filtering blacklist...')
    whitelisted_bands = [b for b in bands if b.name not in blacklist]
    print(f'Left with {len(whitelisted_bands)} after filtering')
    # Build one table row per event; '!!!' flags events matching the
    # listener's top bands.
    table = []
    for e in events:
        table.append({
            '!!!': '!!!' if e.is_interesting(whitelisted_bands) else '',
            'Title': e.title.title(),
            'Date': e.dt.strftime('%a, %d.%m.%Y %H:%M')
        })
    print("Events:")
    print(tabulate.tabulate(table, headers='keys', tablefmt='github'))
| 2,493 |
_/0349_06_Code/19.py
|
paullewallencom/javascript-978-1-8495-1034-9
| 0 |
2170882
|
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.core import serializers
from django.db.models import get_model
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import Context, Template
from django.template.defaultfilters import escape
from django.template.loader import get_template
from directory.functions import ajax_login_required
import directory.models
import json
import re
| 513 |
tests/test_daily_stac.py
|
pjhartzell/nclimgrid
| 0 |
2171644
|
import glob
import os
import unittest
from tempfile import TemporaryDirectory
from stactools.nclimgrid import constants, daily_stac
class DailyStacTestLocal(unittest.TestCase):
    """Local-data tests for daily nClimGrid STAC item/collection creation.

    Each test either generates COGs from bundled NetCDF fixtures into a
    temporary directory or reuses pre-built COG fixtures, then validates
    the resulting STAC items/collections.
    """

    def test_create_singleitem_pre1970_createcogs(self):
        # Pre-1970 data ships as "scaled"; COGs are created from NetCDF.
        base_nc_href = 'tests/test-data/netcdf/daily'
        year = 1951
        month = 1
        day = 1
        scaled_or_prelim = constants.Status.SCALED
        with TemporaryDirectory() as temp_dir:
            base_cog_href = temp_dir
            items = daily_stac.create_daily_items(year,
                                                  month,
                                                  scaled_or_prelim,
                                                  base_cog_href,
                                                  base_nc_href=base_nc_href,
                                                  day=day)
            num_cogs = len(glob.glob(os.path.join(base_cog_href, "*.tif")))
        for item in items:
            item.validate()
        # One COG per variable (prcp, tavg, tmax, tmin) for the single day.
        self.assertEqual(num_cogs, 4)
        self.assertEqual(len(items), 1)
        self.assertEqual(items[0].id, f"{year}{month:02d}-grd-scaled-01")
        self.assertEqual(len(items[0].assets), 4)

    def test_create_singleitem_pre1970_existingcogs(self):
        # Same expectations as above, but reusing pre-built COG fixtures.
        base_cog_href = 'tests/test-data/cog/daily'
        year = 1951
        month = 1
        day = 1
        scaled_or_prelim = constants.Status.SCALED
        items = daily_stac.create_daily_items(year,
                                              month,
                                              scaled_or_prelim,
                                              base_cog_href,
                                              day=day)
        for item in items:
            item.validate()
        self.assertEqual(len(items), 1)
        self.assertEqual(items[0].id, f"{year}{month:02d}-grd-scaled-01")
        self.assertEqual(len(items[0].assets), 4)

    def test_create_singleitem_pre1970_existingcogs_with_read_href_modifier(
            self):
        # Verifies the read_href_modifier callback is actually invoked.
        did_it = False

        def do_it(href: str) -> str:
            nonlocal did_it
            did_it = True
            return href

        base_cog_href = 'tests/test-data/cog/daily'
        year = 1951
        month = 1
        day = 1
        scaled_or_prelim = constants.Status.SCALED
        daily_stac.create_daily_items(year,
                                      month,
                                      scaled_or_prelim,
                                      base_cog_href,
                                      read_href_modifier=do_it,
                                      day=day)
        assert did_it

    def test_create_singleitem_1970onward_prelim_createcogs(self):
        # 1970-onward data can be "prelim"; COGs are created from NetCDF.
        base_nc_href = 'tests/test-data/netcdf/daily'
        year = 2022
        month = 1
        day = 1
        scaled_or_prelim = constants.Status.PRELIM
        with TemporaryDirectory() as temp_dir:
            base_cog_href = temp_dir
            items = daily_stac.create_daily_items(year,
                                                  month,
                                                  scaled_or_prelim,
                                                  base_cog_href,
                                                  base_nc_href=base_nc_href,
                                                  day=day)
            num_cogs = len(glob.glob(os.path.join(base_cog_href, "*.tif")))
        for item in items:
            item.validate()
        self.assertEqual(num_cogs, 4)
        self.assertEqual(len(items), 1)
        self.assertEqual(items[0].id, f"{year}{month:02d}-grd-prelim-01")
        self.assertEqual(len(items[0].assets), 4)

    def test_create_singleitem_1970onward_prelim_existingcogs(self):
        base_cog_href = 'tests/test-data/cog/daily'
        year = 2022
        month = 1
        day = 1
        scaled_or_prelim = constants.Status.PRELIM
        items = daily_stac.create_daily_items(year,
                                              month,
                                              scaled_or_prelim,
                                              base_cog_href,
                                              day=day)
        for item in items:
            item.validate()
        self.assertEqual(len(items), 1)
        self.assertEqual(items[0].id, f"{year}{month:02d}-grd-prelim-01")
        self.assertEqual(len(items[0].assets), 4)

    def test_create_collection_prelim_createcogs(self):
        # Collection spanning a single month, built from NetCDF fixtures.
        start_yyyymm = "202201"
        end_yyyymm = "202201"
        scaled_or_prelim = constants.Status.PRELIM
        base_nc_href = 'tests/test-data/netcdf/daily'
        destination = "test_collection"
        with TemporaryDirectory() as temp_dir:
            base_cog_href = temp_dir
            collection = daily_stac.create_daily_collection(
                start_yyyymm,
                end_yyyymm,
                scaled_or_prelim,
                base_cog_href,
                base_nc_href=base_nc_href)
            num_cogs = len(glob.glob(os.path.join(base_cog_href, "*.tif")))
            collection.normalize_hrefs(destination)
            collection.validate()
        self.assertEqual(num_cogs, 4)
        self.assertEqual(len(list(collection.get_all_items())), 1)
        self.assertEqual(collection.id, "nclimgrid-daily")

    def test_create_collection_prelim_existingcogs(self):
        base_cog_href = 'tests/test-data/cog/daily'
        start_yyyymm = "202201"
        end_yyyymm = "202201"
        destination = 'test_collection'
        scaled_or_prelim = constants.Status.PRELIM
        collection = daily_stac.create_daily_collection(
            start_yyyymm, end_yyyymm, scaled_or_prelim, base_cog_href)
        collection.normalize_hrefs(destination)
        collection.validate()
        self.assertEqual(len(list(collection.get_all_items())), 1)
        self.assertEqual(collection.id, "nclimgrid-daily")
# --Remote Data Tests: Not used for GitHub CI--
# class DailyStacTestRemote(unittest.TestCase):
# def test_create_items_1970onward_createcogs(self):
# base_nc_href = "https://nclimgridwesteurope.blob.core.windows.net/nclimgrid/nclimgrid-daily" # noqa
# year = 2021
# month = 12
# scaled_or_prelim = constants.Status.SCALED
# with TemporaryDirectory() as temp_dir:
# base_cog_href = temp_dir
# items = daily_stac.create_daily_items(year,
# month,
# scaled_or_prelim,
# base_cog_href,
# base_nc_href=base_nc_href)
# num_cogs = len(glob.glob(os.path.join(base_cog_href, "*.tif")))
# for item in items:
# item.validate()
# num_days = monthrange(year, month)[1]
# self.assertEqual(num_cogs, num_days * 4)
# self.assertEqual(len(items), num_days)
# self.assertEqual(items[0].id, f"{year}{month:02d}-grd-scaled-01")
# self.assertEqual(len(items[0].assets), 4)
# def test_create_items_pre1970_createcogs(self):
# base_nc_href = "https://nclimgridwesteurope.blob.core.windows.net/nclimgrid/nclimgrid-daily" # noqa
# year = 1951
# month = 1
# scaled_or_prelim = constants.Status.SCALED
# with TemporaryDirectory() as temp_dir:
# base_cog_href = temp_dir
# items = daily_stac.create_daily_items(year,
# month,
# scaled_or_prelim,
# base_cog_href,
# base_nc_href=base_nc_href)
# num_cogs = len(glob.glob(os.path.join(base_cog_href, "*.tif")))
# for item in items:
# item.validate()
# num_days = monthrange(year, month)[1]
# self.assertEqual(num_cogs, num_days * 4)
# self.assertEqual(len(items), num_days)
# self.assertEqual(items[0].id, f"{year}{month:02d}-grd-scaled-01")
# self.assertEqual(len(items[0].assets), 4)
# def test_create_collection_createcogs(self):
# start_month = "196912"
# end_month = "197001"
# scaled_or_prelim = constants.Status.SCALED
# base_nc_href = "https://nclimgridwesteurope.blob.core.windows.net/nclimgrid/nclimgrid-daily" # noqa
# destination = "test_collection"
# with TemporaryDirectory() as temp_dir:
# base_cog_href = temp_dir
# collection = daily_stac.create_daily_collection(
# start_month,
# end_month,
# scaled_or_prelim,
# base_cog_href,
# base_nc_href=base_nc_href)
# num_cogs = len(glob.glob(os.path.join(base_cog_href, "*.tif")))
# collection.normalize_hrefs(destination)
# collection.validate()
# self.assertEqual(num_cogs, 62 * 4)
# self.assertEqual(len(list(collection.get_all_items())), 62)
# self.assertEqual(collection.id, "nclimgrid-daily")
| 9,191 |
hf_cola.py
|
silvia0v0/Label-Representation-in-Modeling-Classification-as-Seq2Seq
| 0 |
2170810
|
# -*- coding: utf-8 -*-
"""hf-cola.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bOPS2191jGvVtN8J0HdI1ANR1CGSQx-c
"""
#from google.colab import drive
#drive.mount('/content/drive')
import functools
import t5
import torch
import transformers
import tensorflow_datasets as tfds
import tensorflow.compat.v1 as tf
import random
import string
# Paths on the mounted Google Drive used by this Colab notebook export.
DATA_DIR = 'drive/My Drive/MLLU/finalProject/glue_data'
MODEL_DIR = 'drive/My Drive/MLLU/finalProject/model'
PREDICTION_DIR = 'drive/My Drive/MLLU/finalProject/model/cola'
# Prefer GPU when available.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
# model = t5.models.HfPyTorchModel("t5-base", "/tmp/hft5/", device)
model = t5.models.HfPyTorchModel("t5-base", MODEL_DIR, device)
# Download the CoLA split of GLUE locally.
ds = tfds.load(
    "glue/cola",
    data_dir=DATA_DIR,
    # Download data locally for preprocessing to avoid using GCS space.
    download_and_prepare_kwargs={"download_dir": "./downloads"})
print("A few raw validation examples...")
for ex in tfds.as_numpy(ds["validation"].take(2)):
    print(ex)
# CoLA labels: 0 = unacceptable, 1 = acceptable (per the GLUE task).
possible_labels = [0,1]
def randomString():
    """Generate a random lowercase ASCII string of length 1 to 15."""
    length = random.randint(1, 15)
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(length))
# Build a random pseudo-word for each label.
label_map = {}
label_set = set()
for i in range(len(possible_labels)):
    label_map[possible_labels[i]] = randomString()
    label_set.add(label_map[possible_labels[i]])
# NOTE(review): randomString can collide, which would trip this assert --
# there is no retry; re-run the cell if it fires.
assert(len(possible_labels) == len(label_set))
print(label_map)

"""Now, we write a preprocess function to convert the examples in the `tf.data.Dataset` into a text-to-text format, with both `inputs` and `targets` fields. The preprocessor also normalizes the text by lowercasing it and removing quotes since the answers are sometimes formatted in odd ways. Finally, we prepend 'trivia question:' to the inputs so that the model knows what task it's trying to solve."""
def label_preprocessor(ds):
    """Map extracted examples to the {inputs, targets, idx} records T5 expects.

    Inputs are lowercased and de-quoted; targets pass through unchanged.
    """
    def normalize_text(text):
        """Lowercase and remove quotes from a TensorFlow string."""
        text = tf.strings.lower(text)
        text = tf.strings.regex_replace(text,"'(.*)'", r"\1")
        return text

    def to_inputs_and_targets(ex):
        # Rename the intermediate {input, output} fields produced by
        # cola_extract to T5's expected names.
        return {
            "inputs": normalize_text(ex["input"]),
            "targets": ex["output"],
            "idx": ex["idx"]
        }
    ds_ = ds.map(to_inputs_and_targets,
               num_parallel_calls=tf.data.experimental.AUTOTUNE)
    return ds_
def cola_extract(ds):
    """Map raw CoLA examples to {input, output, idx} text pairs.

    NOTE(review): despite the task name 'cola_random_label', targets are the
    fixed words 'cold'/'hot' here, not the randomly generated ``label_map``
    built above -- confirm which behaviour is intended.
    """
    def extract_io(ex):
        return {
            "input": "cola sentence: "+ex["sentence"],
            "output": "cold" if ex["label"] == 0 else "hot", # change labels
            "idx": ex["idx"]
        }
    return ds.map(extract_io, num_parallel_calls=tf.data.experimental.AUTOTUNE)
t5.data.TaskRegistry.remove("cola_random_label")
t5.data.TaskRegistry.add(
"cola_random_label",
# A TfdsTask takes in a TFDS name instead of a tf.data.Dataset function.
t5.data.TfdsTask,
tfds_name="glue/cola:1.0.0",
tfds_data_dir=DATA_DIR,
sentencepiece_model_path=t5.data.DEFAULT_SPM_PATH,
text_preprocessor=[cola_extract, label_preprocessor],
postprocess_fn=t5.data.postprocessors.lower_text,
metric_fns=[t5.evaluation.metrics.matthews_corrcoef]
)
# Load and print a few examples.
cola_task = t5.data.TaskRegistry.get("cola_random_label")
ds = cola_task.get_dataset(split="train", sequence_length={"inputs": 64, "targets": 4})
print("A few preprocessed train examples...")
for ex in tfds.as_numpy(ds.take(5)):
print(ex)
# Evaluate on the original task
print('Evaluate on the original task...')
model.eval(
"glue_cola_v002",
sequence_length={"inputs": 64, "targets": 4},
batch_size=128,
)
# Evaluate the pre-trained checkpoint, before further fine-tuning
print('Evaluate on the current task...')
model.eval(
"cola_random_label",
sequence_length={"inputs": 64, "targets": 4},
batch_size=128,
)
# Run 2000 steps of fine-tuning
print('Start training...')
model.train(
mixture_or_task_name="cola_random_label",
steps=2000,
save_steps=200,
sequence_length={"inputs": 64, "targets": 4},
split="train",
batch_size=32,
optimizer=functools.partial(transformers.AdamW, lr=1e-4),
)
# Evaluate after fine-tuning
print('Evaluating the current task after fine-tuning...')
model.eval(
"cola_random_label",
checkpoint_steps="all",
sequence_length={"inputs": 64, "targets": 4},
batch_size=32,
)
# Generate some predictions
print('Generate some predictions...')
inputs = [
"cola sentence: This is a totally valid sentence.",
"cola sentence: A doggy detail was walking famously.",
"cola sentence: The weather is fine today.",
"cola sentence: She ran outside of the window largely.",
]
model.predict(
inputs,
sequence_length={"inputs": 64},
batch_size=2,
output_file=PREDICTION_DIR + "/example_predictions.txt",
)
model.load_latest_checkpoint()
# Use model.predict to predict the validation set
print('Predicting the validation set...')
inputs_all = []
targets_all = []
except_num = 0
mrpc_task = t5.data.TaskRegistry.get("glue_cola_v002")
ds = mrpc_task.get_dataset(split="validation", sequence_length={"inputs": 64, "targets": 4})
for ex in tfds.as_numpy(ds):
try:
inputs_all.append(ex["inputs_plaintext"].decode("ascii"))
targets_all.append(ex["targets_plaintext"].decode("ascii"))
except:
except_num += 1
# print('EXCEPT!!\n!!!\n'+str(ex["inputs_plaintext"]) + '\n'+str(ex["targets_plaintext"])+'!!!\n!!!')
continue
model.predict(
inputs_all,
sequence_length={"inputs": 64,'targets': 4},
batch_size=128,
output_file=PREDICTION_DIR + "/val_predictions.txt",
)
f = open(PREDICTION_DIR + "/val_predictions.txt", "r")
lines = f.readlines()
predicts_all = [line.rstrip() for line in lines]
#print('except:', except_num)
#print('without exception:', len(predicts_all))
#print('len targets:', len(targets_all))
#print('predicts_all:', predicts_all)
#print('targets_all:', targets_all)
score = t5.evaluation.metrics.matthews_corrcoef(targets_all, predicts_all)
print('prediction score:',score)
| 6,186 |
stack.py
|
ruyadorno/simple-data-structures
| 2 |
2171551
|
from linkedlist import LinkedListNode
class Stack:
    """Simple LIFO stack backed by a singly linked list.

    A maxsize of -1 (the default) means the stack is unbounded.  All
    operations are O(1).  Invalid operations raise StackError.
    """

    def __init__(self, maxsize=-1):
        """Create an empty stack, optionally bounded to maxsize items."""
        self._top = None
        self._size = 0
        self._maxsize = maxsize

    def push(self, value):
        """Push an item to the top of the stack in O(1).

        Raises StackError when the stack is already at maxsize.
        """
        if self._size == self._maxsize:
            raise StackError('Stack is already full')
        self._size += 1
        self._top = LinkedListNode(value, self._top)

    def pop(self):
        """Pops the top item out of the stack in O(1).

        Raises StackError when the stack is empty.
        """
        if self._size == 0:
            raise StackError('Stack is empty')
        self._size -= 1
        popping = self._top
        self._top = popping.next()
        popping.setNextNode(None)
        return popping.value

    def peek(self):
        """Access the value of top item without modifying the stack, O(1).

        Raises StackError when the stack is empty (the original raised an
        unhelpful AttributeError on None here).
        """
        if self._size == 0:
            raise StackError('Stack is empty')
        return self._top.value

    def size(self):
        """Get the current stack size."""
        return self._size
class StackError(Exception):
    """Raised when a stack operation cannot be performed (full or empty)."""

    def __init__(self, msg):
        # Pass the message to Exception so str(e) and tracebacks show it;
        # previously args stayed empty and str(e) was ''.
        super(StackError, self).__init__(msg)
        self.message = msg
| 1,078 |
pysonyci/__init__.py
|
predat/pysonyci
| 1 |
2171376
|
# coding: utf8
import logging
from .version import __version__
__title__ = 'pysonyci'
__author__ = '<NAME>'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 <NAME>'
# Attach a no-op handler so the library emits no "no handlers could be
# found" warnings when the host application has not configured logging
# (standard practice for library packages).
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        # Fallback for Python < 2.7, which lacks logging.NullHandler.
        def emit(self, record):
            pass
logging.getLogger(__name__).addHandler(NullHandler())
from .sonyci import SonyCi
| 420 |
tower/species/undead.py
|
alisonbnt/watchtower
| 1 |
2172059
|
# Species definition consumed by the tower engine: base stats plus the
# aspect modifiers (materials/places/amulets/potions/summonings/banishments)
# and the investigation traces this creature leaves behind.
#
# Fixes: "souless" -> "soulless", and three descriptions whose wrapped
# string literals were concatenated without a separating space
# ("ferryman.Higher", "death.Attacks", "finallycome").
exports = {
    "name": "Undead",
    "description": "A soulless body which walks among the mortals. "
                   "Hungry for flesh, no humanity",
    "block": 30,
    "health": 10,
    "hit": 60,
    "crime": "murder",
    "aspects": {
        "materials": [
            {
                "item": "copper",
                "effect": "health",
                "power": 2,
                "description": "Metal used on the coins for the ferryman. "
                               "Lower value, lower health"
            },
            {
                "item": "bronze",
                "effect": "hit",
                "power": 5,
                "description": "Metal used on the coins for the ferryman. "
                               "Higher value, higher effectiveness"
            }
        ],
        "places": [
            {
                "item": "prison",
                "effect": "health",
                "power": 1,
                "description": "Undeads are basically evil "
                               "energy within empty bodies. "
                               "Prisons are natural sources of evil and "
                               "injustice. Each strike will make more damage"
            },
            {
                "item": "cemetery",
                "effect": "hit",
                "power": 15,
                "description": "Undeads are slower near places ruled by death. "
                               "Attacks are more effective"
            }
        ],
        "amulets": {
            "health": 1,
            "hit": 5
        },
        "potions": {
            "health": 1,
            "hit": 5
        },
        "summonings": [
            {
                "item": "calling",
                "effect": "health",
                "power": 1,
                "description": "By calling the dead, the spirit will try to "
                               "find eternal rest. They shall perish sooner"
            },
            {
                "item": "luring",
                "effect": "hit",
                "power": 5,
                "description": "When lured, undead appears to grow hunger, "
                               "but easier to hit"
            }
        ],
        "banishments": [
            {
                "item": "burying",
                "effect": "health",
                "power": 2,
                "description": "By burying your deads, their time will finally "
                               "come. Less health for your foes"
            }
        ]
    },
    "traces": {
        "crime_scene": ['smell', 'rotten_flesh', 'scratches'],
        "witness": ['groaning', 'creature_dead', 'creature_wounds',
                    'creature_dirty', 'humanoid'],
        "victim": ['bites', 'dismemberment']
    }
}
| 2,796 |
playground/playground.py
|
ritiek/rust-without-rust
| 2 |
2171542
|
import json
import sys
import argparse
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
def get_arguments(raw_args=None):
    """Build the CLI parser and parse *raw_args* (or sys.argv when None)."""
    parser = argparse.ArgumentParser(
        description=("Use Python to execute simple Rust code "
                     "by running it on https://play.rust-lang.org/"),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("file", metavar="FILE",
                        help="path to file containing Rust code")
    parser.add_argument("--release", action="store_true", default=False,
                        help="build artifacts in release mode, "
                             "with optimizations")
    parser.add_argument("--channel", default="stable",
                        choices={"stable", "beta", "nightly"},
                        help="set Rust channel")
    parser.add_argument("--target", default="ast",
                        choices={"ast", "asm", "mir", "llvm-ir", "wasm"},
                        help="build for the target triple")
    parser.add_argument("--disable-color", action="store_true", default=False,
                        help="disable colors and styles for stderr")
    return parser.parse_args(raw_args)
def parse_json(code, channel, mode, target):
    """Build the JSON payload for the Rust playground API.

    Args:
        code: Rust source as bytes (decoded to UTF-8 for the payload).
        channel: "stable", "beta" or "nightly".
        mode: "debug" or "release".
        target: build target; "ast" means run the program (execute=True).

    Fix: the original dict literal listed the "tests" key twice.
    """
    return {"channel": channel,
            "code": code.decode("utf-8"),
            "crateType": "bin",
            "mode": mode,
            "tests": False,
            "assemblyFlavor": "att",
            "demangleAssembly": "demangle",
            "target": target,
            # Only plain runs (target "ast") are executed; every other
            # target is sent to the compile endpoint.
            "execute": target == "ast"}
def make_request(json_data):
    """POST *json_data* to the Rust playground and return the parsed reply.

    Uses the /execute endpoint when the payload asks for execution and the
    /compile endpoint otherwise.

    NOTE(review): the site is served over HTTPS; these plain-http URLs rely
    on the server still accepting/redirecting http — confirm.
    """
    if json_data["execute"]:
        url = "http://play.rust-lang.org/execute"
    else:
        url = "http://play.rust-lang.org/compile"
    data = json.dumps(json_data).encode("utf-8")
    response = urlopen(url, data)
    return json.loads(response.read())
def add_style(text, words):
    """Highlight every occurrence of each word in *words* inside *text*.

    Uses colorama (bright green) when it is installed; otherwise prints a
    hint and returns *text* unchanged.

    Fixes: the fallback message previously read "...or pass--disable-color"
    (missing space); the unused `init` and `Back` imports were dropped.
    """
    try:
        from colorama import Fore, Style
    except ImportError:
        print("Install the colorama python package to "
              "enable colored output or pass "
              "--disable-color to hide this message")
    else:
        for word in words:
            color_word = "{style}{word}{unstyle}".format(
                style=Style.BRIGHT + Fore.GREEN,
                word=word,
                unstyle=Style.RESET_ALL + Fore.RESET)
            text = text.replace(word, color_word)
    return text
def filter_results(results, color=True):
    """Coerce every value in *results* to str and strip trailing whitespace.

    Mutates the dict in place and returns it.  *color* is accepted for
    API compatibility but is not used here.
    """
    for key in results:
        results[key] = str(results[key]).rstrip()
    return results
def command_line():
    """CLI entry point: read the file, query the playground, print results."""
    args = get_arguments()
    # Read as bytes; parse_json decodes to UTF-8.
    with open(args.file, "rb") as raw_data:
        content = raw_data.read()
    if args.release:
        mode = "release"
    else:
        mode = "debug"
    json_data = parse_json(content,
                           channel=args.channel,
                           mode=mode,
                           target=args.target)
    response = make_request(json_data)
    results = filter_results(response)
    if not args.disable_color:
        # Highlight cargo's progress keywords in the compiler output.
        results["stderr"] = add_style(results["stderr"],
                                      ("Compiling", "Finished", "Running"))
    if "code" in results:
        print(results["code"])
    print(results["stderr"])
    print(results["stdout"])
if __name__ == "__main__": # pragma: no cover
command_line()
| 3,835 |
tests/unit/copy/binlog_copy/test_eq.py
|
denssk/backup
| 69 |
2168082
|
from twindb_backup.copy.binlog_copy import BinlogCopy
def test_two_eq():
    """Copies with identical name, path and created time compare equal."""
    copy1 = BinlogCopy('foo', 'bar', 10)
    copy2 = BinlogCopy('foo', 'bar', 10)
    assert copy1 == copy2
def test_two_neq_created():
    """Copies that differ only in created time compare unequal."""
    copy1 = BinlogCopy('foo', 'bar', 10)
    copy2 = BinlogCopy('foo', 'bar', 20)
    assert copy1 != copy2
def test_two_neq_name():
    """Copies that differ only in name compare unequal."""
    copy1 = BinlogCopy('foo', 'bar1', 10)
    copy2 = BinlogCopy('foo', 'bar2', 10)
    assert copy1 != copy2
| 458 |
SetShot.py
|
Proceduralism/PyCore
| 0 |
2171805
|
#!~/conda3/bin/python
# -*- coding: cp949 -*-
# Type : Executable
# Filename : SetShot.py
# Author : <NAME>
# Version : 1.0
############################
import sys, os
from SetShotCore import SetShot
core = SetShot()
# Bookkeeping files kept in the user's home directory.
# NOTE(review): env_filename is assigned but never used below — confirm
# whether it is still needed.
env_filename = '{}/{}'.format(os.getenv("HOME")
                              , ".envs")
env_json = '{}/{}'.format(os.getenv("HOME")
                          , "env.json")
show_json = '{}/{}'.format(os.getenv("HOME")
                           , "show.json")
if __name__ == "__main__":
    # Prompt for show/sequence/shot (choices read from show.json) and
    # persist the selection into env.json.
    show = core.setData("Show", core.getShow(show_json))
    seq = core.setData("Sequence", core.getSeq(show_json, show))
    shot = core.setData("Shot", core.getShot(show_json, show, seq))
    core.exportShow(env_json, show, seq, shot)
    print("")
    print("")
    print("###############################")
    print("Shortcut: ", '{}:{}:{}'.format(show,seq,shot))
    print('{}'.format("Shot Setup Done!"))
    print("###############################")
| 1,021 |
advanced/Iteration.py
|
halomzh/halo-python
| 0 |
2172124
|
# Iteration examples: enumerate over a list, and tuple unpacking in a loop.
aList = [5, 4, 3, 2, 1]
for index, value in enumerate(aList):
    print("index: %s" % index)
    print("value: %s" % value)
pairs = [(1, 'a'), (2, 'b'), (3, 'c')]
for a, b in pairs:
    print("%s %s" % (a, b))
| 183 |
app.py
|
liliangbin/faceRecognition
| 4 |
2170508
|
import requests
from Infrared import Infrared
from read_camera import Camera_reader
if __name__ == '__main__':
    camera = Camera_reader()
    infrared = Infrared()
    # Poll the infrared sensor; when someone is detected, light the lamp,
    # try to recognise them with the camera, and on success call the remote
    # API to unlock the door.
    while True:
        if infrared.detct():  # infrared sensor reports someone entering
            infrared.light_up()  # turn the indicator light on
            if camera.get_one_picture():
                # Recognised as an admin: request the remote unlock endpoint.
                data = {
                    "key": "<KEY>",
                    "id": 1,
                    "status": "unlock"
                }
                response = requests.get("http://kuailezhai.cn/update/", data=data)
                print(response.text)
                print("是管理员,允许开门")
            pass
    # NOTE(review): the loop above never breaks, so this cleanup is
    # unreachable — confirm intended shutdown path.
    infrared.done()
    camera.camera_done()
| 775 |
src/lib/ghostlines/logger.py
|
jackjennings/ghostlines-robofont
| 0 |
2170605
|
import sys
import logging
from mechanic import env
# Module-level logger for Ghostlines; the level comes from the Mechanic
# environment and falls back to DEBUG when env.log_level is unrecognised.
logger = logging.getLogger('Ghostlines')
logger.setLevel(getattr(logging, env.log_level.upper(), logging.DEBUG))

# Emit to stdout with a conventional timestamped format.
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)

formatting = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(formatting)
handler.setFormatter(formatter)

# Module-level convenience aliases so callers can log through this module.
debug = logger.debug
info = logger.info
warn = logger.warning  # fix: Logger.warn is a deprecated alias of warning
error = logger.error
critical = logger.critical

info('Logger set to level %s', logging.getLevelName(logger.level))
| 572 |
test.py
|
aandree/sample_python_tests
| 0 |
2172019
|
import unittest, time
from HTMLTestRunner import HTMLTestRunner
from webdriverio.app import WebdriverIO
from selenium.webdriver.common.by import By
def add_delay():
    """No-op hook for slowing tests down while debugging (delay disabled)."""
    # time.sleep(1)  # uncomment to watch the browser during a run
    return None
class WebdriverIO_BasicTests(unittest.TestCase):
    """Smoke tests for webdriver.io site navigation and the API search."""
    def setUp(self):
        # Fresh browser session on the site root for every test.
        self.app = WebdriverIO()
        self.app.open("/")
    def tearDown(self):
        print("Page:", self.app.current_page)
        add_delay()
        self.app.quit()
    def test_api_link_in_the_header_points_to_API_doc_page(self):
        """The header link for API points to the API documentation page."""
        app = self.app
        self.app.navigate_to('API')
        assert app.current_page == app.api
        app.current_page.side_bar.wait_for_presence()
    def test_searching_for_click_api_fucntionality(self):
        """Searching for Click: the first 'element' result is the click API doc."""
        app = self.app
        self.app.navigate_to('API')
        app.search('Click')
        with app.current_page.search_widget.results_block as results:
            time.sleep(1)  # give the async search results time to render
            assert results.is_present
            results_for_element = results.all('element')
            assert results_for_element
            results_for_element[0].click()
        assert app.current_page == app.api
        app.current_page.side_bar.wait_for_presence()
        self.assertTupleEqual(app.current_page.side_bar.active_menu,('element', 'click'))
    def test_api_protocols_list_is_correct(self):
        """Verify that the API->Protocols list in the menue, matches the expected list of protocols"""
        app = self.app
        self.app.navigate_to('API')
        app.current_page.side_bar.wait_for_presence()
        menues = app.current_page.side_bar.all_menues
        assert "Protocols" in menues
        app.current_page.side_bar.all_menues['Protocols'].click()
        time.sleep(1)  # wait for the submenu to expand
        submenues = list(app.current_page.side_bar.active_submenues.keys())
        expected = ["WebDriver Protocol", "Appium", "Mobile JSON Wire Protocol", "Chromium", "Sauce Labs", "Selenium Standalone", "JSON Wire Protocol"]
        self.assertListEqual(expected, submenues)
class WebdriverIO_ExtendedTests(unittest.TestCase):
    """Tests for the search widget: clearing, expanding, single-hit case."""
    def setUp(self):
        # Fresh browser session on the site root for every test.
        self.app = WebdriverIO()
        self.app.open("/")
    def tearDown(self):
        print("Page:", self.app.current_page)
        add_delay()
        self.app.quit()
    def test_empty_search_cannot_be_cleared(self):
        """An empty search shows no clear button."""
        app = self.app
        app.search('')
        assert not app.current_page.search_widget.clear_button.is_present
    def test_search_clear_functionality(self):
        """Clearing a search empties the input and hides the clear button."""
        app = self.app
        app.search('Test')
        time.sleep(1)  # give the widget time to render the clear button
        assert app.current_page.search_widget.clear_button.is_present
        app.current_page.search_widget.clear()
        assert app.current_page.search_widget.input.value == ""
        assert not app.current_page.search_widget.clear_button.is_present
    def test_search_see_more_results(self):
        """Expanding a multi-hit search navigates to the full search page."""
        app = self.app
        app.search('Click')
        time.sleep(1)
        assert app.current_page.search_widget.see_all_button.is_present
        app.current_page.search_widget.expand()
        assert app.current_page == app.full_search
    def test_search_see_more_results_in_case_of_single_hit(self):
        """A search with a single hit offers no 'see all' button."""
        app = self.app
        app.search('Only one result')
        time.sleep(1)
        assert not app.current_page.search_widget.see_all_button.is_present
if __name__ == "__main__":
unittest.main()
#unittest.main(testRunner=HTMLTestRunner.HTMLTestRunner(stream=open('report.html', 'w')))
| 4,137 |
src/rogerthat/bizz/maps/poi/search.py
|
goubertbrent/oca-backend
| 0 |
2170118
|
# -*- coding: utf-8 -*-
# Copyright 2021 Green Valley NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.5@@
from __future__ import unicode_literals
import itertools
from functools32 import lru_cache
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from typing import List
from mcfw.rpc import returns
from rogerthat.bizz.communities.models import Community
from rogerthat.bizz.elasticsearch import delete_index, create_index, delete_doc, index_doc_operations, \
execute_bulk_request, es_request
from rogerthat.bizz.job import run_job, MODE_BATCH
from rogerthat.bizz.maps.poi.models import PointOfInterest, POIStatus
from rogerthat.bizz.maps.services import SearchTag, get_place_details
from rogerthat.models.elasticsearch import ElasticsearchSettings
@lru_cache(1)
@returns(unicode)
def _get_elasticsearch_index():
    # Cached so the settings entity is fetched from the datastore only once.
    return ElasticsearchSettings.create_key().get().poi_index
def _delete_index():
    """Delete the point-of-interest Elasticsearch index."""
    return delete_index(_get_elasticsearch_index())
def _create_index():
    """Create the POI Elasticsearch index with its field mappings."""
    request = {
        'mappings': {
            'properties': {
                'id': {
                    'type': 'keyword'
                },
                'name': {
                    'type': 'keyword'
                },
                'suggestion': {
                    # search_as_you_type powers the prefix suggestions
                    # issued by _suggest_poi below.
                    'type': 'search_as_you_type'
                },
                'location': {
                    'type': 'geo_point'
                },
                'tags': {
                    'type': 'keyword'
                },
                'txt': {
                    'type': 'text'
                },
            }
        }
    }
    return create_index(_get_elasticsearch_index(), request)
def get_poi_uid(poi_id):
    """Return the Elasticsearch document id (unicode) for a POI id."""
    return unicode(poi_id)
def cleanup_poi_index(poi_id):
    """Remove the document for this POI id from the search index."""
    uid = get_poi_uid(poi_id)
    return delete_doc(_get_elasticsearch_index(), uid)
def _get_poi_index_ops(poi, community):
    # type: (PointOfInterest, Community) -> dict
    """Build the bulk-index operations for a single point of interest."""
    uid = get_poi_uid(poi.id)
    # Filterable keyword tags: community, demo/production environment,
    # status, optionally visibility, country and place types.
    tags = {
        SearchTag.community(community.id),
        SearchTag.environment(community.demo),
        SearchTag.poi_status(poi.status),
    }
    if poi.status == POIStatus.VISIBLE:
        tags.add(SearchTag.visible_for_end_user())
    if community.country:
        tags.add(SearchTag.country(community.country))
    for place_type in poi.place_types:
        # Skip place types we have no details for (unknown/unsupported).
        place_details = get_place_details(place_type, 'en')
        if not place_details:
            continue
        tags.add(SearchTag.place_type(place_type))
    # Full-text field: title plus the optional description.
    txt = [poi.title]
    if poi.description:
        txt.append(poi.description)
    doc = {
        'id': uid,
        'name': poi.title,
        'suggestion': poi.title,
        'location': [],
        'tags': list(tags),
        'txt': txt
    }
    lat = float(poi.location.coordinates.lat)
    lon = float(poi.location.coordinates.lon)
    doc['location'].append({'lat': lat, 'lon': lon})
    return index_doc_operations(uid, doc)
def re_index_poi(poi, community):
    # type: (PointOfInterest, Community) -> dict
    """(Re-)index a single POI via a bulk request."""
    return execute_bulk_request(_get_elasticsearch_index(), _get_poi_index_ops(poi, community))
def _query_re_index_all():
    """Query yielding every POI, for the re-index job."""
    # Order by community so that we only have to fetch 1 or 2 communities per request
    return PointOfInterest.query().order(PointOfInterest.community_id)
def _worker_re_index_all(poi_keys):
    """Re-index one batch of POIs, fetching their communities in bulk."""
    poi_list = ndb.get_multi(poi_keys)  # type: List[PointOfInterest]
    community_ids = {poi.community_id for poi in poi_list}
    communities = {comm.id: comm for comm in ndb.get_multi([Community.create_key(c) for c in community_ids])}
    ops = itertools.chain.from_iterable(_get_poi_index_ops(poi, communities[poi.community_id]) for poi in poi_list)
    return execute_bulk_request(_get_elasticsearch_index(), ops)
def re_index_all():
    """Drop and recreate the index, then re-index every POI in batches."""
    _delete_index()
    _create_index()
    run_job(_query_re_index_all, [], _worker_re_index_all, [], mode=MODE_BATCH)
def _suggest_poi(tags, lat, lon, search, community_ids):
    # type: (List[unicode], float, float, str, List[int]) -> List[dict]
    """Prefix-suggest up to 12 POIs matching *search*, best score first,
    ties broken by distance to (lat, lon).  Returns [{'id', 'name'}, ...].
    """
    qry = {
        'size': 12,
        'from': 0,
        '_source': {
            'includes': [],
        },
        'query': {
            'bool': {
                'must': [
                    {
                        'multi_match': {
                            # See https://www.elastic.co/guide/en/elasticsearch/reference/7.x/search-as-you-type.html
                            'query': search,
                            'fields': ['suggestion',
                                       'suggestion._2gram',
                                       'suggestion._3gram'],
                            'type': 'bool_prefix'
                        }
                    }
                ],
                # Every tag in *tags* is a hard filter.
                'filter': [{'term': {'tags': tag}} for tag in tags],
            }
        },
        'sort': [
            {'_score': {'order': 'desc'}},
            {
                '_geo_distance': {
                    'location': {
                        'lat': lat,
                        'lon': lon
                    },
                    'order': 'asc',
                    'unit': 'm'
                }
            }
        ]
    }
    if community_ids:
        # Must match one of the specified community ids
        qry['query']['bool']['must'].append({
            'bool': {
                'should': [{'term': {'tags': SearchTag.community(community_id)}} for community_id in community_ids],
                'minimum_should_match': 1
            }
        })
    path = '/%s/_search' % _get_elasticsearch_index()
    result_data = es_request(path, urlfetch.POST, qry)
    results = []
    for hit in result_data['hits']['hits']:
        results.append({'id': hit['_source']['id'], 'name': hit['_source']['name']})
    return results
def search_poi(tags, place_type_tags, lat=None, lon=None, distance=None, cursor=None, limit=50, search_qry=None,
               community_ids=None):
    """Paged search over the POI index.

    Returns (new_cursor, poi_ids).  The cursor is a stringified offset and
    is only produced for plain geo browsing — when a place-type filter or a
    free-text query is given, no cursor is returned.
    """
    # we can only fetch up to 10000 items with from param
    start_offset = long(cursor) if cursor else 0
    if (start_offset + limit) > 10000:
        limit = 10000 - start_offset
        if limit <= 0:
            return None, []
    qry = {
        'size': limit,
        'from': start_offset,
        '_source': {
            'includes': ['id', 'name'],
        },
        'query': {
            'bool': {
                'must': [],
                'filter': [],
                'should': []
            }
        },
        'sort': [
            '_score',
        ]
    }
    # Nearest first when a location is given, otherwise alphabetical.
    if lat and lon:
        qry['sort'].insert(0, {
            '_geo_distance': {
                'location': {
                    'lat': lat,
                    'lon': lon
                },
                'order': 'asc',
                'unit': 'm'
            }
        })
    else:
        qry['sort'].insert(0, {'name': {'order': 'asc'}})
    # Must match all tags
    for tag in tags:
        qry['query']['bool']['filter'].append({
            'term': {
                'tags': tag
            }
        })
    if place_type_tags:
        # Must match one of the specified place types
        qry['query']['bool']['must'].append({
            'bool': {
                'should': [{'term': {'tags': tag}} for tag in place_type_tags],
                "minimum_should_match": 1
            }
        })
        if search_qry:
            # Free text only boosts scoring here (should-clause, fuzzy).
            qry['query']['bool']['should'].append({
                'multi_match': {
                    'query': search_qry,
                    'fields': ['name^500', 'txt^5'],
                    "fuzziness": "1"
                }
            })
        if community_ids:
            # Must match one of the specified community ids
            qry['query']['bool']['must'].append({
                'bool': {
                    'should': [{'term': {'tags': SearchTag.community(community_id)}} for community_id in community_ids],
                    'minimum_should_match': 1
                }
            })
    elif search_qry:
        # Pure text search: the query is a hard requirement (AND semantics).
        qry['query']['bool']['must'].append({
            'multi_match': {
                'query': search_qry,
                'fields': ['name^500', 'txt^5'],
                "operator": "and"
            }
        })
    else:
        # Plain browsing: restrict by radius when a location is given.
        if lat and lon:
            qry['query']['bool']['filter'].append({
                'geo_distance': {
                    'distance': '%sm' % distance,
                    'location': {
                        'lat': lat,
                        'lon': lon
                    }
                }
            })
    path = '/%s/_search' % _get_elasticsearch_index()
    result_data = es_request(path, urlfetch.POST, qry)
    new_cursor = None
    if place_type_tags or search_qry:
        pass  # no cursor
    else:
        next_offset = start_offset + len(result_data['hits']['hits'])
        if result_data['hits']['total']['relation'] in ('eq', 'gte'):
            if result_data['hits']['total']['value'] > next_offset and next_offset < 10000:
                new_cursor = '%s' % next_offset
    result_ids = [hit['_source']['id'] for hit in result_data['hits']['hits']]
    return new_cursor, result_ids
| 9,628 |
bnw/scripts/search.py
|
stiletto/bnw
| 23 |
2172060
|
import os
import sys
from twisted.application import internet, service
from twisted.web import server
import bnw.core.base
import bnw.core.bnw_mongo
from bnw.search.search_server import RPCSearch
import config
bnw.core.base.config.register(config)
bnw.core.bnw_mongo.open_db()
application = service.Application('BnW search service')
r = RPCSearch(config.search_db, config.search_language)
# config.search_port may be "host:port" or just a port; default host is
# loopback.
if isinstance(config.search_port, str) and ':' in config.search_port:
    # NOTE(review): after rsplit the port is a str — confirm TCPServer
    # accepts it or whether int() is needed here.
    search_host, search_port = config.search_port.rsplit(":", 1)
else:
    search_host, search_port = "127.0.0.1", config.search_port
search_service = internet.TCPServer(
    search_port, server.Site(r), interface=search_host)
search_service.setServiceParent(application)
def runintwistd():
    """Run this service file under twistd, i.e. `twistd -y <this file>`.

    Rewrites sys.argv so the twistd entry point picks up this .tac-style
    file, then invokes it.
    """
    sys.argv.insert(1, __file__)
    sys.argv.insert(1, '-y')
    from twisted.scripts.twistd import run
    # Fix: `run` was imported but never invoked, so this function did
    # nothing beyond mutating argv.
    run()
| 858 |
TelegramBot/TelegramBot_test.py
|
satya-sudo/Twitter_trends-1
| 7 |
2168048
|
import os
from TelegramBot import TelegramBot
# Initialise the bot with the API key taken from the environment
# (raises KeyError when TELEGRAM_BOT_API is not set).
tweet_bot = TelegramBot(os.environ['TELEGRAM_BOT_API'])
# Collect the chat ids of the bot's current subscribers.
ChatIDs = tweet_bot.get_ChatID()
# Broadcast a test message to every subscriber chat.
tweet_bot.send(ChatIDs, 'Hey buddy sup?')
print(ChatIDs)
| 298 |
dvxplorer_ros_driver/scripts/scripts/dvxplorer_lite_recording.py
|
ziimiin14/rpg_dvs_ros_modifed
| 0 |
2170397
|
"""DVXplorer Test.
Author: <NAME>
Email : <EMAIL>
"""
from __future__ import print_function, absolute_import
import numpy as np
import cv2
from pyaer.dvxplorer import DVXPLORER
device = DVXPLORER()
print("Device ID:", device.device_id)
print("Device Serial Number:", device.device_serial_number)
print("Device USB bus Number:", device.device_usb_bus_number)
print("Device USB device address:", device.device_usb_device_address)
print("Device String:", device.device_string)
print("Device Firmware Version:", device.firmware_version)
print("Logic Version:", device.logic_version)
print("Device Chip ID:", device.chip_id)
if device.device_is_master:
print("Device is master.")
else:
print("Device is slave.")
print("MUX has statistics:", device.mux_has_statistics)
print("Device size X:", device.dvs_size_X)
print("Device size Y:", device.dvs_size_Y)
print("DVS has statistics:", device.dvs_has_statistics)
print("IMU Type:", device.imu_type)
print("EXT input has generator:", device.ext_input_has_generator)
clip_value = 10
histrange = [(0, v) for v in (device.dvs_size_Y, device.dvs_size_X)]
device.start_data_stream()
# load new config
device.set_bias_from_json("./scripts/configs/dvxplorer_config.json")
ignore_pixel_set = [(174,154),(152,169),(92,19),(135,0),(108,178),(238,127),(182,294),(155,133)]
# cc = 0
# print(device.get_bias())
# count = 0
# fourcc = cv2.VideoWriter_fourcc(*'mp4v')
# out = cv2.VideoWriter('output.mp4', fourcc, 100, (320,240),0)
# Main acquisition loop: poll event packets until Ctrl-C, then shut the
# device down and close any OpenCV windows.  (Commented-out rendering and
# mp4-recording code was removed for clarity.)
while True:
    try:
        (pol_events, num_pol_event,
            special_events, num_special_event,
            imu_events, num_imu_event) = \
            device.get_event("events")
        if num_pol_event != 0:
            # Report how many polarity events arrived in this packet.
            print(num_pol_event)
            cv2.waitKey(10)
    except KeyboardInterrupt:
        device.shutdown()
        cv2.destroyAllWindows()
        break
| 2,554 |
videoveKameradanCanliGoruntuAlma (2).py
|
ozgeKrt/OpenCv-CesitliCalismalar
| 1 |
2171440
|
import cv2
import numpy as np
# Show the live webcam feed plus a 200x200 top-left region until 'q' quits.
kamera = cv2.VideoCapture(0)  # default webcam
while True:
    # ret reports whether a frame was actually captured.
    ret, kare = kamera.read()
    if not ret:
        break
    bolge = kare[0:200, 0:200]  # top-left 200x200 region of interest
    cv2.imshow("Video", kare)
    cv2.imshow("Bolge", bolge)
    # Fix: the original compared the int key code with the string ('q'),
    # which is never equal, so pressing 'q' could not quit the loop.
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
kamera.release()
cv2.destroyAllWindows()
| 369 |
service/models.py
|
Promobyte/n
| 0 |
2172099
|
from urllib.parse import quote
from django.utils.encoding import iri_to_uri
from django.db import models
from django.utils import timezone
from django.utils import text
from django.db.models import signals
class Service(models.Model):
    """A service entry with title, images and a slug derived from the title."""
    created = models.DateTimeField(default=timezone.now)
    title = models.CharField(max_length=200)
    image = models.ImageField(upload_to='service')
    icon = models.ImageField(upload_to='service')
    # Slug is regenerated from the title on every save(); see below.
    slug = models.SlugField(max_length=50, null=True, blank=True, db_index=True, allow_unicode=True, unique=True)
    def get_absolute_url(self):
        """Return a relative URL built from the slug, e.g. 'my-service/'."""
        url = "%s/" % self.slug
        return url
    def __str__(self):
        return self.title
    def save(self, *args, **kwargs):
        # Keep the slug in sync with the (possibly updated) title.
        self.slug = text.slugify(self.title, allow_unicode=True)
        super(Service, self).save(*args, **kwargs)
    def get_queryset(self):
        # NOTE(review): models have no self.kwargs, and 'slug:service' looks
        # like a URL-pattern fragment used as a literal dict key — this
        # appears to be dead/broken view code pasted into the model; confirm.
        return Service.objects.filter(
            #order__order_reference=self.kwargs['service'],
            access_key=self.kwargs['slug:service'],
        )
| 1,013 |
test/utils.py
|
FabianHertwig/pytorch_20BN-JESTER_Dataset
| 5 |
2171272
|
from pathlib import Path
from typing import Union
import numpy as np
from PIL import Image
class TestDataSet:
    """Generates a fake 20BN-JESTER directory tree for tests.

    Creates the videos directory plus train/test/validation csv files,
    with randomly labelled, randomly sized "videos" of solid-colour
    224x224 jpg frames.
    """

    def __init__(self, path: Union[str, Path], number_of_videos: (int, int, int), min_length: int, max_length: int,
                 overwrite=False, labels=["Swipe Right", "Swipe Left", "Do Nothing"]) -> None:
        # NOTE: the mutable default for `labels` is safe because it is only
        # read, never mutated.
        self.test_data_path = Path(path)
        self.test_data_video_path = self._create_directories(overwrite)
        self.number_of_videos = number_of_videos  # (train, test, validation) counts
        self.min_length = min_length
        self.max_length = max_length
        self.overwrite = overwrite
        self.labels = labels

    def create(self):
        """Write the labels file, the per-split csv files and the videos."""
        self._create_labels_files(self.labels)
        video_number = 0
        for mode_number, mode in enumerate(["train", "test", "validation"]):
            data_file = self.test_data_path / f"jester-v1-{mode}.csv"
            for _ in range(0, self.number_of_videos[mode_number]):
                video_number += 1
                video_dir_path = self.test_data_video_path / str(video_number)
                self._create_video(self.max_length, self.min_length, video_dir_path)
                self._append_to_data_file(data_file, self.labels, mode, video_number)

    def remove(self):
        """Delete the whole generated directory tree."""
        self._rm_recursive(self.test_data_path)

    def _append_to_data_file(self, data_file, labels, mode, video_number):
        # Validation rows carry no label; train/test rows get a random one.
        with data_file.open("a") as f:
            random_label = np.random.choice(labels)
            # Fix: was `mode is not "validation"` — an identity check on a
            # string literal, which only worked by CPython interning.
            if mode != "validation":
                f.write(f"{video_number};{random_label}\n")
            else:
                f.write(f"{video_number}\n")

    def _create_directories(self, overwrite):
        """Create (or recreate when overwrite=True) the data directories."""
        if self.test_data_path.exists() and not overwrite:
            raise FileExistsError(
                f"Directory for test data already exists, please remove it. Path: {self.test_data_path}")
        elif self.test_data_path.exists() and overwrite:
            self.remove()
        self.test_data_path.mkdir()
        test_data_video_path = self.test_data_path / "20bn-jester-v1"
        test_data_video_path.mkdir()
        return test_data_video_path

    def _create_labels_files(self, labels):
        """Write one label per line to jester-v1-labels.csv."""
        labels_file = self.test_data_path / "jester-v1-labels.csv"
        with labels_file.open("a") as f:
            f.writelines([label + "\n" for label in labels])

    def _create_video(self, max_length, min_length, video_dir_path):
        """Create one video directory with a random number of frames."""
        video_dir_path.mkdir()
        number_of_frames = np.random.randint(min_length, max_length)
        self._create_frames(number_of_frames, video_dir_path)

    def _create_frames(self, number_of_frames, video_dir_path):
        """Write solid random-colour 224x224 jpg frames named 00000.jpg..."""
        for frame_number in range(number_of_frames):
            frame_file_path = video_dir_path / f"{frame_number:05d}.jpg"
            random_rgb = tuple((np.random.randint(0, 255) for _ in range(3)))
            image = Image.new("RGB", (224, 224), random_rgb)
            image.save(frame_file_path)

    def _rm_recursive(self, pth: Path):
        """Recursively delete *pth* and everything below it."""
        for child in pth.iterdir():
            if child.is_file():
                child.unlink()
            else:
                self._rm_recursive(child)
        pth.rmdir()
def project_dir() -> Path:
    """Return the project root directory (two levels above this file)."""
    this_file = Path(__file__)
    return this_file.parents[1]
if __name__ == '__main__':
testDataset = TestDataSet(project_dir() / "test_data", (10, 3, 3), 2, 5, True)
testDataset.create()
| 3,369 |
maili-develop/scope/service.py
|
fortyMiles/my-family
| 0 |
2172144
|
from scope.models import Scope
from scope.models import ScopeGroup
from relation.service import is_close_family_member
from group.group_service import create_home_group
from group.group_service import join_to_group
import time
import json
FRIEND = 'friend'
HOME = 'home'
RELATION = 'relation'
SCOPES = [FRIEND, HOME, RELATION]
def create_one_scope(username, tag):
    '''
    Creates a new scope for *username* with the given tag.

    For the HOME tag a chat group is also created, keyed by the
    generated scope name.
    '''
    scope_name = get_scope_name(username, tag)
    feed_group = Scope(scope=scope_name, owner=username, tag=tag)
    if tag == HOME:
        # set scope_name to this home group's id
        create_home_group(username, scope_name)
    feed_group.save()
def get_scope_name(username, tag):
    '''
    Based on username and tag, gives a unique scope name.

    Built from the last 13 characters of the username, the tail of the
    current timestamp and the character code of the tag's last letter.
    '''
    identity = username[-13:]
    timestamp = str(time.time())[-5:].replace('.', '')
    tag_code = str(ord(tag[-1]))
    return identity + timestamp + tag_code
def create_default_scope(username):
    '''
    Creates the default scopes of a new user.

    One scope per tag in SCOPES: friend, home and relation.
    '''
    for tag in SCOPES:
        create_one_scope(username, tag)
def _add_member_to_home(owner, member):
    # Record *member* in *owner*'s HOME scope and join them to the owner's
    # home chat group (deduplicates the sequence repeated in the original).
    update_scope_group(owner, member, HOME)
    home_id = get_home_id(owner)
    join_to_group(username=member, group=home_id)


def update_user_scope(user1, user2, scope, relation):
    """Add *user2* to one of *user1*'s scopes (and usually vice versa).

    *scope* is a single-letter code: 'H' (home), 'R' (relation), 'F'
    (friend).  For HOME, user2 always joins user1's home; user1 joins
    user2's home only when the relation marks them as close family,
    otherwise user1 lands in user2's RELATION scope.  For every other code
    the membership is added symmetrically.

    The two users are expected to already have a relation before this is
    called.
    """
    scope_dict = {
        'H': HOME,
        'R': RELATION,
        'F': FRIEND
    }
    scope = scope_dict.get(scope)
    if scope == HOME:
        _add_member_to_home(user1, user2)
        if is_close_family_member(user1, user2, relation):
            _add_member_to_home(user2, user1)
        else:
            update_scope_group(user2, user1, RELATION)
    else:
        # NOTE(review): an unknown code maps to scope=None and still reaches
        # this branch, preserving the original behaviour.
        update_scope_group(user1, user2, scope)
        update_scope_group(user2, user1, scope)
def update_scope_group(user1, user2, tag):
    """Register *user2* as a member of *user1*'s scope labelled *tag*."""
    owner_scope = Scope.objects.filter(owner=user1).filter(tag=tag)[0].scope
    ScopeGroup(scope=owner_scope, member=user2, tag=tag).save()
def get_home_member(user):
    """Return a {member_name: tag} mapping for everyone in *user*'s home scope."""
    home_scope = Scope.objects.filter(owner=user).filter(tag=HOME)[0].scope
    entries = ScopeGroup.objects.filter(scope=home_scope)
    return {entry.member: entry.tag for entry in entries}
def _get_scope_id(user, tag):
    """Collect scope ids for *user* and *tag*.

    Returns a list for the HOME tag (the user's own home scope followed by
    every home scope they were invited into) but only the user's own scope
    -- a single string -- for any other tag.

    NOTE(review): for non-HOME tags the invited-scope query results are
    appended to scope_list and then discarded (only scope_list[0] is
    returned).  Callers such as get_all_join_scope depend on this
    asymmetric return type -- confirm before changing it.
    """
    scope_list = []
    initial_scope = Scope.objects.filter(owner=user).filter(tag=tag)[0].scope
    scope_list.append(initial_scope)
    invite_scope = ScopeGroup.objects.filter(member=user).filter(tag=tag)
    for s in invite_scope:
        scope_list.append(s.scope)
    if tag == HOME:
        return scope_list
    else:
        return scope_list[0]
def get_home_id(user):
    """Return the user's home scope ids (a list), used for group chatting."""
    return _get_scope_id(user, HOME)
def get_relation_id(user):
    """Return the user's relation scope id (a single string), used for feeds."""
    return _get_scope_id(user, RELATION)
def get_global_id(user):
    """Return the user's global friend scope id (a single string), used for feeds."""
    return _get_scope_id(user, FRIEND)
def get_all_join_scope(username):
    """Return a list of every scope id *username* participates in.

    Includes the scopes the user was added to as a member plus the ids of
    their own home, relation and friend scopes.
    """
    scope_list = [query.scope for query in ScopeGroup.objects.filter(member=username)]
    # get_home_id returns a *list* of home scope ids (see _get_scope_id).
    scope_list.extend(get_home_id(username))
    # get_relation_id / get_global_id each return a single scope-name
    # string.  The original code iterated over these strings, appending
    # them one *character* at a time; append the whole id instead.
    scope_list.append(get_relation_id(username))
    scope_list.append(get_global_id(username))
    return scope_list
def get_home_creator(home_id):
    """Return the owner (creator) of the home scope *home_id*, or None if unknown."""
    matches = Scope.objects.filter(scope=home_id)
    if len(matches) > 0:
        return matches[0].owner
    return None
| 4,733 |
tests/test_edge_case.py
|
pombredanne/charset_normalizer
| 0 |
2168387
|
from charset_normalizer import from_bytes
def test_alphabet_property_undefined_range():
    """A UTF-8 BOM followed by an emoji outside any named unicode range."""
    payload = b'\xef\xbb\xbf\xf0\x9f\xa9\xb3'
    guess = from_bytes(payload).best()
    assert guess is not None, "Payload should have given something, detection failure"
    assert guess.encoding == "utf_8", "UTF-8 payload wrongly detected"
    assert guess.alphabets == [], "This property in that edge case, should return a empty list"
| 451 |
regression/__init__.py
|
apblair/project6
| 0 |
2170876
|
"""
BMI203: Biocomputing algorithms Winter 2022
Assignment 6: Logistic regression
"""
from .logreg import *
from .utils import *
__version__ = '0.1.0'
| 151 |
twitpop/migrations/0001_initial.py
|
gsiegman/django-twitpop
| 1 |
2170778
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates/drops the TwitterSearchTerm table."""

    def forwards(self, orm):
        # Adding model 'TwitterSearchTerm'
        db.create_table('twitpop_twittersearchterm', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('term', self.gf('django.db.models.fields.CharField')(max_length=140)),
        ))
        db.send_create_signal('twitpop', ['TwitterSearchTerm'])

    def backwards(self, orm):
        # Deleting model 'TwitterSearchTerm'
        db.delete_table('twitpop_twittersearchterm')

    # Frozen ORM snapshot used by South; mirrors models.py at migration time.
    models = {
        'twitpop.twittersearchterm': {
            'Meta': {'object_name': 'TwitterSearchTerm'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'term': ('django.db.models.fields.CharField', [], {'max_length': '140'})
        }
    }

    complete_apps = ['twitpop']
| 1,012 |
mr_reports/urls.py
|
hlongmore/django-mr_reports
| 0 |
2171988
|
from django.conf.urls import patterns, url
from mr_reports import views
# URL routes for the mr_reports app (old-style Django `patterns()` syntax).
urlpatterns = patterns('',
    # ex: /reports/
    url(r'^$', views.index, name='index'),
    # ex: /reports/5/
    url(r'^(?P<report_id>\d+)/$', views.report, name='report'),
    # ex: /reports/5/pdf/ -- same view with an explicit output format
    url(r'^(?P<report_id>\d+)/(?P<format>\w+)/$', views.report, name='report'),
)
| 332 |
src/nodes/BTTask.py
|
mortenmj/cyborg_ros_bt
| 0 |
2171567
|
#!/usr/bin/env python
import actionlib
import behavior_tree_core.msg
from enum import Enum
class BTStatus(Enum):
    """Result states a behavior-tree task can report."""
    Running = 0
    Success = 1
    Failure = 2
class BTTask(object):
    """Base class for behavior-tree leaf tasks exposed as actionlib servers.

    Subclasses override the on_* hooks; execute_cb drives on_update until it
    stops returning 'running'.
    """

    # Messages that are used to publish feedback/result
    _feedback = behavior_tree_core.msg.BTFeedback()
    _result = behavior_tree_core.msg.BTResult()

    def __init__(self, name):
        self._action_name = name
        self._as = actionlib.SimpleActionServer(
            self._action_name,
            behavior_tree_core.msg.BTAction,
            execute_cb=self.execute_cb,
            auto_start=False)
        self._as.start()

    def on_start(self):
        """Called immediately before execution; reset state from a previous run."""
        pass

    def on_update(self, goal=None):
        """Run one tick of the task; return 'running' to be ticked again."""
        pass

    def on_end(self):
        """Called after execution success or failure."""
        pass

    def on_complete(self):
        """Called when the behavior tree finishes."""
        pass

    def on_pause(self, paused):
        """Called when the behavior is paused or resumed."""
        pass

    def get_priority(self):
        """Return this task's priority of running."""
        pass

    def set_status(self, status):
        """Publish *status* (a BTStatus) as the action result.

        NOTE(review): `rospy` is used below but never imported in this
        module, so calling set_status raises NameError until `import rospy`
        is added at the top of the file.  Also note that Failure still calls
        set_succeeded(); presumably the BT layer reads the status field
        rather than the actionlib outcome -- confirm.
        """
        if status is BTStatus.Success:
            self._feedback.status = 1
            self._result.status = self._feedback.status
            rospy.loginfo('Action %s: Succeeded' % self._action_name)
            self._as.set_succeeded(self._result)
        elif status is BTStatus.Failure:
            self._feedback.status = 2
            self._result.status = self._feedback.status
            rospy.loginfo('Action %s: Failed' % self._action_name)
            self._as.set_succeeded(self._result)
        else:
            rospy.logerr('Action %s: has a wrong return status' % self._action_name)

    def execute_cb(self, goal):
        """Actionlib execute callback: run the task to completion.

        Fixes two defects in the original loop: it compared the result with
        `is 'running'` (string identity, not equality) and invoked on_update
        a second time per iteration with `self` in place of the goal.
        """
        self.on_start()
        while self.on_update(goal) == 'running':
            pass
        self.on_end()
class BTAction(BTTask):
    """Behavior-tree action leaf; all behaviour comes from BTTask."""
    def __init__(self, name):
        super(BTAction, self).__init__(name)
class BTCondition(BTTask):
    """Behavior-tree condition leaf; all behaviour comes from BTTask."""
    def __init__(self, name):
        super(BTCondition, self).__init__(name)
| 2,185 |
compact/GUI/Ui/CompactMeasurementMainWindow.py
|
CoCoMol/CoCoPy
| 1 |
2171824
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Fri Jun 26 15:11:22 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Qt4/Qt5 compatibility shims emitted by pyuic4: fall back gracefully when
# QString / UnicodeUTF8 are unavailable (e.g. under Python 3 / sip API v2).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_CompactMeasurementMainWindow(object):
    """Auto-generated (pyuic4) layout for the compact-measurement window.

    Regenerate from mainwindow.ui instead of editing by hand -- the header
    of this file warns that manual changes will be lost.
    """

    def setupUi(self, CompactMainWindow):
        """Build all widgets, menus and bars on *CompactMainWindow*."""
        CompactMainWindow.setObjectName(_fromUtf8("CompactMeasurementMainWindow"))
        CompactMainWindow.resize(509, 417)
        # Central area: a start button plus a vertical-layout placeholder.
        self.centralWidget = QtGui.QWidget(CompactMainWindow)
        self.centralWidget.setObjectName(_fromUtf8("centralWidget"))
        self.pushButton = QtGui.QPushButton(self.centralWidget)
        self.pushButton.setGeometry(QtCore.QRect(30, 290, 115, 32))
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        self.widget = QtGui.QWidget(self.centralWidget)
        self.widget.setGeometry(QtCore.QRect(189, 90, 281, 251))
        self.widget.setObjectName(_fromUtf8("widget"))
        self.verticalLayoutWidget = QtGui.QWidget(self.widget)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 281, 251))
        self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        CompactMainWindow.setCentralWidget(self.centralWidget)
        # Menu bar with "Datei" and "Settings" menus.
        self.menuBar = QtGui.QMenuBar(CompactMainWindow)
        self.menuBar.setGeometry(QtCore.QRect(0, 0, 509, 22))
        self.menuBar.setObjectName(_fromUtf8("menuBar"))
        self.menuDatei = QtGui.QMenu(self.menuBar)
        self.menuDatei.setObjectName(_fromUtf8("menuDatei"))
        self.menuSettings = QtGui.QMenu(self.menuBar)
        self.menuSettings.setObjectName(_fromUtf8("menuSettings"))
        CompactMainWindow.setMenuBar(self.menuBar)
        self.mainToolBar = QtGui.QToolBar(CompactMainWindow)
        self.mainToolBar.setObjectName(_fromUtf8("mainToolBar"))
        CompactMainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
        self.statusBar = QtGui.QStatusBar(CompactMainWindow)
        self.statusBar.setObjectName(_fromUtf8("statusBar"))
        CompactMainWindow.setStatusBar(self.statusBar)
        self.actionSettings = QtGui.QAction(CompactMainWindow)
        self.actionSettings.setObjectName(_fromUtf8("actionSettings"))
        self.menuSettings.addAction(self.actionSettings)
        self.menuBar.addAction(self.menuDatei.menuAction())
        self.menuBar.addAction(self.menuSettings.menuAction())
        self.retranslateUi(CompactMainWindow)
        QtCore.QMetaObject.connectSlotsByName(CompactMainWindow)

    def retranslateUi(self, CompactMainWindow):
        """Set all user-visible strings (re-run on language change)."""
        CompactMainWindow.setWindowTitle(_translate("CompactMainWindow", "MainWindow", None))
        self.pushButton.setText(_translate("CompactMainWindow", "Start", None))
        self.menuDatei.setTitle(_translate("CompactMainWindow", "Datei", None))
        self.menuSettings.setTitle(_translate("CompactMainWindow", "Settings", None))
        self.actionSettings.setText(_translate("CompactMainWindow", "Settings", None))
| 3,626 |
app/Server/app.py
|
Alan-delete/I2L-MeshNet_RELEASE
| 0 |
2170594
|
from flask import Flask, send_from_directory
from flask import jsonify
from flask import request
from flask_ngrok import run_with_ngrok
from flask_cors import CORS
from markupsafe import escape
import json
import os
import sys
import cv2
from numpy.core.numeric import Infinity
from werkzeug.utils import secure_filename
import numpy as np
import torch
import torchvision.transforms as transforms
from torch.nn.parallel.data_parallel import DataParallel
import torch.backends.cudnn as cudnn
from importlib import reload
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png','jpg','jpeg','jfif'}

# load YOLO5 person detector (pretrained weights, moved to the GPU)
YOLO5_model = torch.hub.load('ultralytics/yolov5','yolov5m', pretrained=True)
YOLO5_model.cuda()
# There is 'utils' in YOLO which will conflict with local 'utils' module, we need to import and override utils
import utils

# current path is assumed to be root_dir/app/Server/app.py
root_dir =os.path.dirname( os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
main_dir = os.path.join(root_dir,'main')
common_dir = os.path.join(root_dir,'common')
sys.path.append(main_dir)
sys.path.append(common_dir)
# Re-import the project's own 'utils' now that main/common shadow YOLO's copy.
reload(utils)

from config import cfg
from model import get_model
from nets.SemGCN.export import SemGCN
from utils.transforms import transform_joint_to_other_db
from utils.preprocessing import process_bbox,generate_patch_image

app = Flask(__name__ ,static_folder = 'public',static_url_path='/public')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
CORS(app)
# Expose the dev server through an ngrok tunnel.
run_with_ngrok(app)

cudnn.benchmark = True
class Action_reader():
    """Loads reference exercise poses (joint coordinates per frame) from a
    JSON file in the Flask static folder and matches user poses against them.
    """

    def __init__(self, json_name = 'standard_joints.json'):
        json_file = os.path.join(app.static_folder, json_name)
        assert os.path.exists(json_file), 'Cannot find json file'
        self.standard_action = []
        # NOTE(review): user_action_idx is never read in this module.
        self.user_action_idx = None
        with open(json_file) as f:
            standard_action = json.load(f)
            self.standard_action = standard_action
        self.action_list = [ action['name'] for action in self.standard_action]

    def __getitem__(self, idx):
        return self.standard_action[idx]

    def __len__(self):
        return len(self.standard_action)

    def get_json(self):
        # Full reference data, served verbatim by the /getFitness route.
        return self.standard_action

    def get_loss(self, user_action, gt_action):
        """Sum of mean absolute differences between matching joint arrays;
        keys missing from gt_action are skipped."""
        loss = 0
        for key in user_action.keys():
            if not key in gt_action.keys():
                continue
            loss += np.absolute( np.array(user_action[key]) - \
                np.array( gt_action[key] ) ).mean()
        return loss

    # user_action is assumed to be in form as {'human_joint_coords': , ...}
    def get_frame_idx(self, user_action):
        """Return (action_idx, frame_idx, loss) of the best-matching reference
        frame, or (-1, -1, loss) when nothing matched."""
        #result = {'action_idx':None, 'frame_idx':None}
        loss = Infinity
        # NOTE(review): threshold stays Infinity, so the `< threshold` check
        # below never rejects anything -- confirm whether a finite cutoff
        # was intended.
        threshold = Infinity
        first_idx = -1
        second_idx = -1
        for action_idx, action in enumerate (self.standard_action) :
            for frame_idx, action_per_frame in enumerate(action['data']):
                temp_loss = self.get_loss(user_action, action_per_frame)
                if temp_loss<loss and temp_loss < threshold:
                    loss = temp_loss
                    first_idx = action_idx
                    second_idx = frame_idx
        return first_idx,second_idx, loss

    def get_action_list(self):
        return self.action_list
class Videos_reader():
    """Preloads every reference exercise video into memory as a list of frames."""

    def __init__(self, action_list, video_dir = "Fitness_video"):
        # self.videos[i] holds the decoded BGR frames for action_list[i].
        self.videos = []
        self.action_list = action_list
        for action_name in self.action_list:
            video = []
            video_path = os.path.join(app.static_folder,video_dir,'{}.mp4'.format(action_name))
            cap = cv2.VideoCapture(video_path)
            assert cap.isOpened(), 'Fail in opening video file'
            while (cap.isOpened()):
                success, original_img = cap.read()
                if success:
                    video.append(original_img)
                else:
                    # End of stream (or decode failure): stop reading.
                    break
            self.videos.append(video)
            cap.release()

    def __getitem__(self,idx):
        return self.videos[idx]

    def __len__(self):
        return len(self.videos)

    # return name and frame data
    def get_frame(self, action_idx,frame_idx):
        return self.action_list[action_idx], self.videos[action_idx][frame_idx]
def init_I2L(joint_num = 29,test_epoch = 12,mode = 'test'):
    """Load the pretrained I2L-MeshNet snapshot and return it in eval mode.

    NOTE(review): test_epoch is accepted but unused -- the snapshot path is
    hard-coded to snapshot_demo.pth.tar.
    """
    # snapshot load
    model_path = os.path.join(cfg.model_dir,'snapshot_demo.pth.tar')
    assert os.path.exists(model_path), 'Cannot find model at ' + model_path
    print('Load checkpoint from {}'.format(model_path))
    I2L_model = get_model( joint_num, mode)
    I2L_model = DataParallel(I2L_model).cuda()
    ckpt = torch.load(model_path)
    # strict=False: tolerate checkpoint/model key mismatches.
    I2L_model.load_state_dict(ckpt['network'], strict=False)
    I2L_model.eval()
    return I2L_model
def init_semGCN(test_epoch = 1):
    """Load the SemGCN 2D-to-joint refinement network snapshot for *test_epoch*
    and return it in eval mode."""
    # snapshot load
    model_path = os.path.join(cfg.model_dir, 'sem_gcn_epoch{}.pth.tar'.format(test_epoch))
    assert os.path.exists(model_path), 'Cannot find model at ' + model_path
    print('Load checkpoint from {}'.format(model_path))
    SemGCN_model = SemGCN(cfg.skeleton).cuda()
    ckpt = torch.load(model_path)
    # strict=False: tolerate checkpoint/model key mismatches.
    SemGCN_model.load_state_dict(ckpt['network'], strict=False)
    SemGCN_model.eval()
    return SemGCN_model
# Module-level singletons: pose templates, reference videos and both neural
# networks are loaded once at import time.
ar = Action_reader()
vr = Videos_reader(ar.get_action_list())
I2L_model = init_I2L()
SemGCN_model = init_semGCN()
#cv2.imwrite('./test.png',vr.get_frame(ar[1]['data'][20]))
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route("/",methods=['GET', 'POST'])
def home():
    """Serve the single-page front end from the static folder."""
    #return render_template('index.html')
    print(app.static_folder)
    return send_from_directory(app.static_folder, 'index.html')
@app.route("/getFitness",methods=['GET', 'POST'])
def get_fitness_action():
    """Return the reference action definitions as JSON."""
    return jsonify( ar.get_json() )
def get_output(img_path):
    """Run the full pose pipeline on one image file.

    Detects the most confident person with YOLOv5, crops to that bbox,
    regresses SMPL joints with I2L-MeshNet, maps them to the Human3.6M
    joint set, and refines with SemGCN.  Returns a JSON-serialisable dict
    with the three joint lists.
    """
    with torch.no_grad():
        transform = transforms.ToTensor()
        # prepare input image
        original_img = cv2.imread(img_path)
        original_img_height, original_img_width = original_img.shape[:2]
        # Person detection: rows are (xmin, ymin, xmax, ymax, confidence,
        # class); keep only class 0 (person).  Rows come sorted by confidence.
        bboxs = YOLO5_model([img_path]).xyxy[0]
        bboxs = bboxs[bboxs[:, 5] == 0]
        # Use the best detection, falling back to the whole image when nobody
        # is detected.  (The original tested `len(bboxs >0)`, which takes the
        # len of an elementwise comparison tensor; use the intended
        # `len(bboxs) > 0`.)
        if len(bboxs) > 0:
            xmin = bboxs[0][0]
            ymin = bboxs[0][1]
            width = bboxs[0][2] - xmin
            height = bboxs[0][3] - ymin
            bbox = [xmin, ymin, width, height]  # xmin, ymin, width, height
        else:
            bbox = [1.0, 1.0, original_img_width, original_img_height]
        bbox = process_bbox(bbox, original_img_width, original_img_height)
        img, img2bb_trans, bb2img_trans = generate_patch_image(original_img, bbox, 1.0, 0.0, False, cfg.input_img_shape)
        img = transform(img.astype(np.float32))/255
        img = img.cuda()[None,:,:,:]
        # forward through I2L-MeshNet
        inputs = {'img': img}
        targets = {}
        meta_info = {'bb2img_trans': None}
        out = I2L_model(inputs, targets, meta_info, 'test')
        # joint sets: SMPL is (29, 3), Human3.6M is (17, 3)
        I2L_joints = out['joint_coord_img'][0]
        human36_joints = transform_joint_to_other_db(I2L_joints.cpu().numpy(), cfg.smpl_joints_name, cfg.joints_name)
        # SemGCN refines from the 2D (x, y) components only.
        Sem_joints = SemGCN_model(torch.from_numpy(human36_joints).cuda()[..., :2])[0]
        return {'smpl_joint_coords': I2L_joints.tolist(),
                'human36_joint_coords': human36_joints.tolist(),
                'Sem_joints': Sem_joints.tolist()}
# as tested on my laptop, currently the speed of file upload and neural network process is nearly 1 frame per second. For pure neural network process, 19.84 seconds for 100 image
# also returns match_action name, the action estimate will be executed on front end, since it's little calculation and every user has their own different data record.
@app.route("/imageUpload", methods = ['PUT','POST'])
def file_upload():
    """Receive an uploaded frame, run pose estimation, and return the
    matched action name, loss and joint coordinates as JSON.

    Also writes the matched reference frame to static/match_frame.png.
    """
    data = None
    store_folder = os.path.join(app.static_folder, app.config['UPLOAD_FOLDER'])
    if not os.path.exists(store_folder):
        os.mkdir(store_folder)
    # todo: customize file save name
    # todo alt: directly pass the file to NN api
    if 'image' in request.files:
        print("upload success!")
        file = request.files['image']
        if file.filename == '':
            # The original called flash()/redirect(), neither of which is
            # imported in this module (NameError at runtime); answer with an
            # explicit JSON error instead.
            return jsonify({'error': 'No selected file'}), 400
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(store_folder, filename))
            data = get_output(os.path.join(store_folder, filename))
            action_idx, frame_idx, loss = ar.get_frame_idx(data)
            if action_idx == -1:
                data['action_name'] = 'Loss exceeds threshold!'
                data['loss'] = loss
            else:
                match_action, match_frame = vr.get_frame(action_idx, frame_idx)
                data['action_name'] = match_action
                data['loss'] = loss
                cv2.imwrite(os.path.join(app.static_folder, 'match_frame.png'), match_frame)
    # data stays None when no image part was sent or the extension was
    # rejected; jsonify(None) yields JSON null, matching the original.
    return jsonify(data)
app.run()
| 9,503 |
PlayStore/__init__.py
|
geoffduong/PlayStoreLinks_Bot
| 81 |
2171693
|
from PlayStore.App import App
from PlayStore.AppNotFoundException import AppNotFoundException
from PlayStore.PlayStoreClient import PlayStoreClient
| 147 |
app.py
|
Knightbot101/at-mombasa
| 0 |
2170607
|
from flask import Flask
app = Flask(__name__)
# A decorator is anything placed before a function that starts with an '@'.
# Each route below returns markup that Flask sends to the browser.
@app.route('/')
def index():
    """Landing page."""
    return '<h1>Hello World!</h1>'
@app.route('/mombasa')
def mombasa():
    """Static greeting page for /mombasa."""
    return '<h1>Hello Mombasa!</h1>'
@app.route('/town/<name>')
def town(name):
    """Echo the town name captured from the URL path."""
    # string formatting using an f-string
    return f'<h1>I am in {name} </h1>'
@app.route('/town/latin/<latin_name>')
def latin(latin_name):
    """Toy "latinizer": names ending in 'y' become '...iful', others gain a 'y'."""
    if latin_name[-1] == 'y':
        result = latin_name[:-1] + 'iful'
    else:
        result = latin_name + 'y'
    return f'<h1>My latin name is {result}</h1>'
if __name__ == '__main__':
    # Run Flask's built-in development server.
    app.run()
| 642 |
scisalt/utils/progressbar_mod.py
|
joelfrederico/mytools
| 1 |
2171476
|
import sys as _sys
import time as _time
import datetime as _dt
class progressbar(object):
    """
    Creates an animated progress bar.

    .. versionadded:: 1.3

    Parameters
    ----------
    total : int
        Total number of steps.
    length : int
        Number of characters long.
    """
    def __init__(self, total, length=20):
        # Reserve a line below the cursor; the bar redraws over it.
        print('')
        self._step = 0
        self._total = total
        self._length = length
        self._timestamp = None  # perf_counter() at the most recent update
        self._timestart = None  # perf_counter() at the first update

    @property
    def step(self):
        """
        The current step.  Assigning to it redraws the bar.
        """
        return self._step

    @step.setter
    def step(self, step):
        lasttime = self._timestamp
        self._timestamp = _time.perf_counter()
        if lasttime is not None:
            # Remaining time is extrapolated from the duration of the last step.
            dt = self._timestamp-lasttime
            time_remain = (self._total - step - 1) * dt
            remain_str = ', {} remain'.format(str(_dt.timedelta(seconds=time_remain)))
        else:
            remain_str = ''
        if self._timestart is None:
            self._timestart = self._timestamp
            elapsed_str = ''
        else:
            elapsed = self._timestamp-self._timestart
            elapsed_str = ', {} elapsed'.format(str(_dt.timedelta(seconds=elapsed)))
        # Clamp to total so the bar never overflows.
        if step > self._total:
            step = self._total
        self._step = step
        # The bar fill is drawn for the previous step index while the text
        # shows the current one.
        step = step - 1
        bartext = '#'*round(step/self._total * self._length) + ' '*round((self._total-step)/self._total * self._length)
        # \r\033[1A = carriage return + cursor up one line, so the two-line
        # status block is overwritten in place on every update.
        text = '\r\033[1AOn step {} of {} ({:0.1f}% completed{}{}):\n[ {} ]'.format(self._step, self._total, 100.0*step/self._total, remain_str, elapsed_str, bartext)
        _sys.stdout.write(text)
        _sys.stdout.flush()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Erase both status lines on exit: clear line, cursor up, clear line.
        text = '\r\033[K\033[1A\033[K'
        _sys.stdout.write(text)
        _sys.stdout.flush()
| 1,947 |
models/base.py
|
Hristiyan-Bonev/python-descriptors-abc
| 1 |
2171186
|
import inspect
from functools import partialmethod
from descriptors.base import Field
class InitMeta(type):
    """Metaclass that synthesises an ``__init__`` signature from the class's
    Field descriptors.

    Every Field attribute becomes a positional-or-keyword parameter whose
    default is the descriptor's ``default_value`` (when present).  The
    resulting ``inspect.Signature`` is stored as ``__signature__`` for
    BaseModel.__init__ to bind against.
    """

    def __new__(mcls, name, bases, namespace):
        # The original declared these parameters as (bases, dicts,
        # class_attrs), but type.__new__ passes (name, bases, namespace)
        # positionally -- the names were misleading, not the behaviour.
        class_obj = super().__new__(mcls, name, bases, namespace)
        init_params = []
        for attr_name, attr in class_obj.__dict__.items():
            if isinstance(attr, Field):
                param = inspect.Parameter(
                    attr_name,
                    inspect.Parameter.POSITIONAL_OR_KEYWORD,
                    default=attr.__dict__.get('default_value', inspect.Parameter.empty),
                )
                init_params.append(param)
        class_obj.__signature__ = inspect.Signature(parameters=init_params)
        return class_obj
class BaseModel(metaclass=InitMeta):
    """Base class whose __init__ assigns Field values from the signature
    synthesised by InitMeta, collecting validation errors instead of raising.
    """

    def __init__(self, error_container=None, *args, **kwargs):
        self.errors_list = error_container or []
        bound = self.__signature__.bind(*args, **kwargs)
        bound.apply_defaults()
        for field_name, value in bound.arguments.items():
            try:
                setattr(self, field_name, value)
            except (ValueError, TypeError,) as exc:
                # Descriptor validation failures are recorded, not raised.
                self.errors_list.append(exc)
| 1,159 |
users/models.py
|
riteshh00/covihelp
| 0 |
2172130
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db import models
class CustomUser(AbstractUser):
    """Project user model: Django's AbstractUser plus an optional age."""
    # Optional: blank in forms, NULL in the database when unset.
    age = models.PositiveIntegerField(null=True,blank=True)
| 230 |
medianasms/client.py
|
medianasms/python-rest-sdk
| 0 |
2169848
|
from medianasms.httpclient import HTTPClient
from medianasms.models import Message, Recipient, InboxMessage, Pattern
# base url for api
BASE_URL = "http://rest.medianasms.com"
# default timeout for http client
DEFAULT_TIMEOUT = 30
# client version
CLIENT_VERSION = "1.0.1"
class Client:
    """medianasms REST client.

    All request/parse failures on malformed responses raise ValueError;
    exception handling is narrowed from the original bare ``except:`` so
    programming errors and KeyboardInterrupt are no longer swallowed.
    """

    def __init__(self, apikey, http_client=None):
        # A caller-supplied http_client (e.g. a stub in tests) bypasses
        # construction of the default HTTPClient.
        self.client = http_client or HTTPClient(
            apikey,
            BASE_URL,
            DEFAULT_TIMEOUT,
            CLIENT_VERSION,
        )
        self.apikey = apikey

    def get_credit(self):
        r"""Get authenticated user credit
        :return: :class:`float <float>` object
        :rtype: float
        """
        res = self.client.get("/v1/credit")
        try:
            return res.data["credit"]
        except (KeyError, TypeError, AttributeError):
            raise ValueError("returned response not valid")

    def send(self, originator, recipients, message):
        r"""Send a message from originator to many recipients.
        :param originator: originator number, string.
        :param recipients: recipients list, list.
        :param message: message to send, string.
        :return: :class:`int <int>` object
        :rtype: int
        """
        res = self.client.post("/v1/messages", {
            "originator": originator,
            "recipients": recipients,
            "message": message,
        })
        try:
            return res.data["bulk_id"]
        except (KeyError, TypeError, AttributeError):
            raise ValueError("returned response not valid")

    def get_message(self, bulk_id):
        r"""Get a message brief info
        :param bulk_id: bulk id, int.
        :return: :class:`Message <Message>` object
        :rtype: models.Message
        """
        res = self.client.get("/v1/messages/" + str(bulk_id))
        try:
            return Message(res.data["message"])
        except (KeyError, TypeError, AttributeError):
            raise ValueError("returned response not valid")

    def fetch_statuses(self, bulk_id, page=0, limit=10):
        r"""Fetch message recipients status
        :param bulk_id: bulk id, int.
        :param page: page number(start from 0), int.
        :param limit: fetch limit, int.
        :return: :class:`[]Recipient <[]Recipient>` object
        :rtype: []models.Recipient
        """
        res = self.client.get("/v1/messages/%s/recipients" % str(bulk_id), {
            "page": page,
            "limit": limit,
        })
        try:
            recipients = [Recipient(recipient) for recipient in res.data["recipients"]]
            return recipients, res.meta
        except (KeyError, TypeError, AttributeError):
            raise ValueError("returned response not valid")

    def fetch_inbox(self, page=0, limit=10):
        r"""Fetch inbox messages
        :param page: page number(start from 0), int.
        :param limit: fetch limit, int.
        :return: :class:`[]InboxMessage <[]InboxMessage>` object
        :rtype: []models.InboxMessage
        """
        res = self.client.get("/v1/messages/inbox", {
            "page": page,
            "limit": limit,
        })
        try:
            messages = [InboxMessage(message) for message in res.data["messages"]]
            return messages, res.meta
        except (KeyError, TypeError, AttributeError):
            raise ValueError("returned response not valid")

    def create_pattern(self, pattern, is_shared=False):
        r"""Create a pattern
        :param pattern: pattern schema, string.
        :param is_shared: determine that pattern shared or not, bool.
        :return: :class:`Pattern <Pattern>` object
        :rtype: []models.Pattern
        """
        res = self.client.post("/v1/messages/patterns", {
            "pattern": pattern,
            "is_shared": is_shared,
        })
        try:
            return Pattern(res.data["pattern"])
        except (KeyError, TypeError, AttributeError):
            raise ValueError("returned response not valid")

    def send_pattern(self, pattern_code, originator, recipient, values=None):
        r"""Send message with pattern
        :param pattern_code: pattern code, string.
        :param originator: originator number, string.
        :param recipient: recipient number, string.
        :param values: pattern values, dict.
        :return: :class:`int <int>` object
        :rtype: int
        """
        # values defaulted to a shared mutable {} in the original; use None
        # as the sentinel instead (behaviour for callers is unchanged).
        res = self.client.post("/v1/messages/patterns/send", {
            "pattern_code": pattern_code,
            "originator": originator,
            "recipient": recipient,
            "values": values if values is not None else {},
        })
        try:
            return res.data["bulk_id"]
        except (KeyError, TypeError, AttributeError):
            raise ValueError("returned response not valid")
| 4,676 |
website/urls.py
|
gustavosilva-gss/shielding
| 0 |
2170422
|
from django.urls import path
from . import views
# Route table for the website app: page views, authentication, and JSON/API
# endpoints (grouped by the comments below).
urlpatterns = [
    path("", views.index, name="index"),
    path("volunteer", views.volunteer_view, name="volunteer"),
    path("volunteer/profile", views.volunteer_profile, name="volunteer_profile"),
    path("establishment/<int:id>", views.establishment_profile, name="establishment"),
    path("establishment/manage", views.establishment_manage, name="establishment_manage"),
    path('chat/<int:chat_id>', views.room, name='room'),

    # AUTHENTICATION
    path("login", views.login_view, name="login"),
    path("logout", views.logout_view, name="logout"),
    path("register", views.user_register, name="user_register"),
    path("establishment/register", views.establishment_register, name="establishment_register"),
    path("volunteer/register", views.volunteer_register, name="volunteer_register"),

    # API ONLY
    # here are some paths whose classification fall only under API category
    # this means some paths of authentication were left out
    path("establishments", views.establishments, name="establishments"),
    path("donate", views.donate, name="donate"),
    path("edit-establishment/<str:field>", views.edit_establishment, name="edit_establishment"),
    path("donation/<int:id>", views.donation, name="donation"),
    path("edit-donation/<int:id>", views.edit_donation, name="edit_donation"),
    path("edit-volunteer/<str:field>", views.edit_volunteer, name="edit_volunteer"),
    path('open-chat', views.open_chat, name='open_chat'),
]
| 1,522 |
covidsimulation/plot.py
|
victorhunguyen/covidsimulation
| 27 |
2171310
|
from functools import partial
from typing import Sequence, Tuple, Optional, Union
import numpy as np
import plotly.graph_objects as go
from .series import Series
from .stats import MetricResult
PLOT_COLORS = [
('rgba(0,0,255,1)', 'rgba(0,0,255,0.25)'),
('rgba(255,0,0,1)', 'rgba(255,0,0,0.25)'),
('rgba(0,0,0,1)', 'rgba(0,0,0,0.25)'),
('rgba(128,0,240,1)', 'rgba(128,0,240,0.25)'),
('rgba(240,128,0,1)', 'rgba(240,128,0,0.25)'),
('rgba(0,128,240,1)', 'rgba(0,128,240,0.25)'),
('rgba(0,255,0,1)', 'rgba(0,255,0,0.25)'),
]
def plot_line(fig, series, pop_name, color_index):
    """Add a solid line trace for *series* to *fig* using the palette entry."""
    trace = go.Scatter(
        x=series.x,
        y=series.y,
        line_color=PLOT_COLORS[color_index][0],
        name=pop_name,
    )
    fig.add_trace(trace)
def concat_seq(s1, s2):
    """Concatenate two sequences, preserving ndarray-ness.

    Returns an ndarray when *s1* is an ndarray, otherwise a plain list.
    The original used ``np.stack(...).reshape(...)``, which requires the
    two arrays to have equal length; ``np.concatenate`` produces the same
    result for equal lengths and also handles unequal ones.
    """
    if isinstance(s1, np.ndarray):
        return np.concatenate([np.asarray(s1), np.asarray(s2)])
    return list(s1) + list(s2)
def plot_confidence_range(fig, series_low, series_high, legend, color_index):
    """Add a filled confidence band between *series_low* and *series_high*."""
    assert len(series_low.x) == len(series_high.x)
    # The band polygon walks the high series forward, then the low series back.
    band_color = PLOT_COLORS[color_index][1]
    fig.add_trace(go.Scatter(
        x=concat_seq(series_high.x, series_low.x[::-1]),
        y=concat_seq(series_high.y, series_low.y[::-1]),
        fill='toself',
        fillcolor=band_color,
        line_color=band_color,
        showlegend=legend is not None,
        name=legend,
    ))
def plot(
    pop_stats_name_tuples: Sequence[Tuple[Union[MetricResult, Series, Sequence[Series]], str]],
    title: str,
    log_scale: bool = False,
    size: Optional[int] = None,
    stop: Optional[int] = None,
    start: Optional[int] = None,
    ymax: Optional[float] = None,
    cindex: Optional[int] = None,
    show_confidence_interval: bool = True,
):
    """Build a plotly figure from one or more series.

    Each entry of *pop_stats_name_tuples* is ``(data, legend_name)`` where
    *data* is a single Series, a 1-element sequence (line only), a 2-element
    sequence (low/high confidence band), or a 3-element sequence
    (mean, low, high).  *start*/*stop* trim each series; *cindex* forces a
    single palette entry for every trace.
    """
    fig = go.FigureWidget()
    # Drawing is deferred via partials so all bands can be added before any
    # line, keeping the lines on top of the filled areas.
    area_fns = []
    line_fns = []
    for color_index, (data, pop_name) in enumerate(pop_stats_name_tuples):
        color_index = color_index % len(PLOT_COLORS)
        if cindex is not None:
            color_index = cindex
        if isinstance(data, Series):
            line_fn = partial(plot_line, fig, data.trim(start, stop), pop_name, color_index)
            line_fns.append(line_fn)
        elif len(data) == 1:
            line_fn = partial(plot_line, fig, data[0].trim(start, stop), pop_name, color_index)
            line_fns.append(line_fn)
        elif len(data) == 3:
            # (mean, low, high): band is unlabelled, the mean line carries
            # the legend entry.
            if show_confidence_interval:
                area_fn = partial(plot_confidence_range, fig, data[1].trim(start, stop), data[2].trim(start, stop),
                                  None, color_index)
                area_fns.append(area_fn)
            line_fn = partial(plot_line, fig, data[0].trim(start, stop), pop_name, color_index)
            line_fns.append(line_fn)
        elif len(data) == 2:
            # (low, high): the band itself carries the legend entry.
            area_fn = partial(plot_confidence_range, fig, data[0].trim(start, stop), data[1].trim(start, stop),
                              pop_name, color_index)
            area_fns.append(area_fn)
        else:
            raise ValueError('Invalid number of elements to plot')
    for area_fn in area_fns:
        area_fn()
    for line_fn in line_fns:
        line_fn()
    fig.update_layout(
        title=title)
    if ymax:
        fig.update_yaxes(range=[1 if log_scale else 0, ymax])
    if log_scale:
        fig.update_layout(yaxis_type="log")
    if size:
        fig.update_layout(width=size)
    if len(pop_stats_name_tuples) == 1:
        fig.update_layout(showlegend=False)
    return fig
| 3,505 |
setup.py
|
DanielWinklehner/dans_pymodules
| 0 |
2171260
|
from setuptools import setup
from Cython.Build import cythonize
import numpy
setup(name='dans_pymodules',
version='4.5.3',
description='Useful little modules that I likely need in more than one application',
url='https://github.com/DanielWinklehner/dans_pymodules',
author='<NAME>, <NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['dans_pymodules'],
package_data={'': ['PlotSettingsDialog.glade', 'header.tex', 'fishfinder.png', 'vitruvian.jpg']},
include_package_data=True,
zip_safe=False,
ext_modules=cythonize("dans_pymodules/particle_pusher.pyx"),
include_dirs=[numpy.get_include()],
)
| 678 |
pyvisdk/enums/agent_install_failed_reason.py
|
Infinidat/pyvisdk
| 0 |
2170944
|
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
AgentInstallFailedReason = Enum(
'AgentNotReachable',
'AgentNotRunning',
'AgentUploadFailed',
'AgentUploadTimedout',
'InstallTimedout',
'NotEnoughSpaceOnDevice',
'PrepareToUpgradeFailed',
'SignatureVerificationFailed',
'UnknownInstallerError',
)
| 445 |
resources/mgltools_x86_64Linux2_1.5.6/lib/python2.5/site-packages/numpy/fft/info.py
|
J-E-J-S/aaRS-Pipeline
| 7 |
2171717
|
"""\
Core FFT routines
==================
Standard FFTs
fft
ifft
fft2
ifft2
fftn
ifftn
Real FFTs
rfft
irfft
rfft2
irfft2
rfftn
irfftn
Hermite FFTs
hfft
ihfft
"""
depends = ['core']
| 233 |
Scripts/Python/json_dates.py
|
Romyho/project
| 0 |
2170641
|
#!/usr/bin/env python
# Name:<NAME>
# Student number:11007303
"""
This script selects specified data from csv and returns json file.
"""
import json
import pandas as pd
import csv
import sys
def convert(file_name):
    """Convert a CSV file to a JSON file mapping each name to its first date.

    Parameters
    ----------
    file_name : sequence of str
        Command-line arguments; only the first element (the CSV path) is used.
        The CSV must contain 'name' and 'date' columns.

    Writes 'name_date.json' in the current working directory.

    Fixes vs. previous revision:
      * the old code selected rows with a hard-coded ``df.loc[range(67420)]``,
        which raises KeyError / silently truncates for any other file size;
      * it also detected "new name" by comparing with the *previous* row only,
        so a name reappearing later in the file overwrote its date.  We now
        keep the first date seen for each name, matching the stated intent.
    """
    # Read csv file
    csv_name = file_name[0]
    df = pd.read_csv(csv_name)
    # Map each name to the date of its first occurrence.
    name_date = {}
    for name, date in zip(df['name'], df['date']):
        if name not in name_date:
            name_date[name] = date
    # Write json file
    with open('name_date.json', 'w') as outfile:
        json.dump(name_date, outfile, indent=4)
if __name__ == "__main__":
    # Convert csv file to json; sys.argv[1:] passes the CSV path as a list.
    convert(sys.argv[1:])
| 827 |
gsww/zznuojget.py
|
sunlupeng2020/gswwspider
| 0 |
2169786
|
from selenium import webdriver
from lxml import etree
# Headless PhantomJS driver (executable path is machine-specific).
driver = webdriver.PhantomJS(executable_path=r'C:\Users\Administrator\phantomjs-2.1.1-windows\bin\phantomjs.exe')
# driver = webdriver.Chrome(executable_path="C:\\Program Files (x86)\\ChromeCore\\chromedriver.exe")
driver.get('http://47.95.10.46/problemset.php?p=1')
#
# driver.get('http://www.baidu.com/')
# timutr = driver.find_elements_by_xpath("//tbody/tr")
# timuid01s= timutr.xpath("./td[2]")
# print(timuid01s.text)
# Get the problem ID numbers (2nd table column).
timuids = driver.find_elements_by_xpath("//tbody/tr/td[2]")
# Get the problem titles (3rd table column).
timutitles = driver.find_elements_by_xpath("//tbody/tr/td[3]")
# Get submission count, accepted count and acceptance rate (6th table column).
timutglvs = driver.find_elements_by_xpath("//tbody/tr/td[6]")
# Print the number of scraped problem rows.
print(len(timuids))  # prints 49
print(timuids[2].text)  # prints the text of the row at index 2
# for timuid in timuids:
#     print(data.text)
# import urllib.requesta
# from lxml import etree
# import re
#
# headers ={
#     'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
#     'Cookie': 'PHPSESSID=<KEY>'
# }
#
# url = "http://acm.hi-54.com/problemset.php?p=1"
# request = urllib.request.Request(url, headers=headers)
# response = urllib.request.urlopen(request)
# html = response.read().decode("utf-8")
# root = etree.HTML(html)
#
#
# timuids = root.xpath("//*[@id='oj-ps-problemlist']/tr/td[2]/text()")
# print(timuids)
# print(root)
# print(html)
| 1,403 |
app/models.py
|
nvw1/exe_orientation_Q
| 1 |
2169162
|
# -*- coding: utf-8 -*-
#author : Hao
from django.db import models
# Create your models here.
#Used for the game
class User(models.Model):
    """Base account shared by players, developers and game masters."""
    userID = models.AutoField(primary_key=True)
    username = models.CharField(max_length=45)
class Developers(models.Model):
    """Developer role attached to a User; deleted with its user."""
    devID = models.AutoField(primary_key=True)
    user_userID = models.ForeignKey(User, on_delete=models.CASCADE)
class gameMaster(models.Model):
    """Game-master role attached to a User; deleted with its user."""
    GMID = models.AutoField(primary_key=True)
    user_userID = models.ForeignKey(User, on_delete=models.CASCADE)
class Routes(models.Model):
    """A named route of nodes, owned by a game master."""
    routeID = models.IntegerField(blank=False, null=False, primary_key=True)
    Node = models.CharField(max_length = 45)
    NodeID = models.IntegerField()
    RouteName = models.CharField(max_length = 45)
    gameMaster_GMID = models.ForeignKey(gameMaster, on_delete=models.DO_NOTHING)
class Hints(models.Model):
    """Numbered hint text for a node on a route.

    NOTE(review): HintText (free text) is the primary key, and Routes_NodeID
    is a plain integer rather than a foreign key — confirm this is intended.
    """
    HintText = models.CharField(max_length = 100, primary_key = True)
    HintNo = models.IntegerField()
    Routes_routeID = models.ForeignKey(Routes, on_delete=models.DO_NOTHING)
    Routes_NodeID = models.IntegerField()
class Players(models.Model):
    """Player role attached to a User."""
    playerID = models.AutoField(primary_key=True)
    user_userID = models.ForeignKey(User, on_delete=models.DO_NOTHING)
class Questions(models.Model):
    """A question placed at a node of a route, with answer, hint and location."""
    auto_increment_id = models.AutoField(primary_key=True)
    questions = models.CharField(max_length=100)
    answers = models.CharField(max_length=100)
    node_num = models.IntegerField()  # position of the question along the route
    hints = models.CharField(max_length=100,default="")
    location = models.CharField(max_length=1000,default="")  # e.g. embedded map URL
    # -1.1 acts as a "not set" sentinel for coordinates.
    longtitude = models.FloatField(default=-1.1)
    latitude = models.FloatField(default=-1.1)
    routeID = models.ForeignKey(Routes, on_delete=models.CASCADE,default=1)
class Groups(models.Model):
    """A named player group."""
    GroupID = models.AutoField(primary_key=True)
    GroupName = models.CharField(max_length=45)
    Players_playerID = models.ForeignKey(Players, on_delete=models.DO_NOTHING)
class Gamecode(models.Model):
    """Per-group game session state: current question, score and map flag."""
    groupcode = models.CharField(max_length=250)
    questionNum = models.IntegerField(default=1)  # 1-based index of the current question
    routeID = models.ForeignKey(Routes, on_delete=models.CASCADE,default=1)
    # NOTE(review): stores the strings "True"/"False" rather than a BooleanField — confirm.
    map = models.CharField(max_length=50,default = "False")
    score = models.IntegerField(default = 0)
    def __str__(self):  # human-readable representation used by the admin UI
        return self.groupcode
# Questions.objects.all()
# question = "Where is the library located?"
# if Questions.objects.filter(questions=question.strip()).exists():
# pass
# else:
# a = Questions(questions=question, answers="The forum",node_num=1,hints="the name also refers to discussion board on the Internet")
# a.save()
# question = "Which is the tallest building on campus?"
# if Questions.objects.filter(questions=question.strip()).exists():
# pass
# else:
# b= Questions(questions="Which is the tallest building on campus?", answers="Physics building",node_num=2, hints = "What subject did albert einstein study in?")
# b.save()
# question = "A place where new ideas are produced"
# if Questions.objects.filter(questions=question.strip()).exists():
# pass
# else:
# a = Questions(questions=question, answers="Innovation centre",node_num=3,hints="It is past the Harrison Building",
# location= "https://www.google.com/maps/embed?pb=!1m14!1m8!1m3!1d10100.076253535459!2d-3.5306391!3d50.7381353!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x0%3A0x9a5f61816c99672c!2sThe%20Innovation%20Centre!5e0!3m2!1sen!2suk!4v1583711670328!5m2!1sen!2suk",
# latitude= 50.738162, longtitude=-3.530587)
# a.save()
| 3,621 |
setup.py
|
s-alexey/rwby
| 0 |
2172040
|
from setuptools import setup
from pip import req
def parse_requirements(filename):
    """Read requirement specifiers from *filename*, one per line.

    Blank lines and ``#`` comments are skipped.  This replaces the former use
    of ``pip.req.parse_requirements``: pip's internal API is unsupported and
    was removed in pip 10, so any install with a modern pip crashed here.
    NOTE(review): lines such as ``-r other.txt`` or ``-e .`` are not expanded;
    the module-level ``from pip import req`` import should also be removed.
    """
    with open(filename) as f:
        return [line.strip() for line in f
                if line.strip() and not line.lstrip().startswith('#')]
# Version is read from the package itself so it is defined in one place only.
__version__ = __import__('rwby').__version__

setup(
    name='Belarusian Railway API',
    version='0.1',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Python wrapper for rasp.rw.by',
    packages=['rwby'],
    # Runtime dependencies are kept in requirements.txt.
    install_requires=parse_requirements('requirements.txt'),
    url="https://github.com/s-alexey/rwby",
    include_package_data=True,
)
| 536 |
api/flags/migrations/0002_auto_20210518_1615.py
|
uktrade/lite-ap
| 3 |
2171434
|
# Generated by Django 3.1.8 on 2021-05-18 15:15
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Flag.blocks_approval to Flag.blocks_finalising (auto-generated)."""

    dependencies = [
        ('flags', '0001_squashed_0012_auto_20210309_1521'),
    ]

    operations = [
        migrations.RenameField(
            model_name='flag',
            old_name='blocks_approval',
            new_name='blocks_finalising',
        ),
    ]
| 393 |
loss.py
|
AveryLiu/TD-DMN
| 32 |
2171486
|
from utils.utils import *
from torch.nn import functional as F
from math import ceil
from utils.utils import doc_flat_mask
def cal_loss_with_attn(logits, event_labels, sent_len, neg_pos_ratio=0,
                       neg_label=None, pad_label=None, partial=False):
    """
    Word-level cross-entropy loss over padded documents, with optional
    down-sampling of negative tokens.

    logits -> (N, S, W, L)
    sent_len -> (N, S)
    event_labels -> (N, S, W)

    neg_pos_ratio: if > 0, keep about neg_pos_ratio negatives per positive
        token (requires neg_label and pad_label to be given).
    partial: if True, return (loss_sum, effective_token_count) instead of
        the mean loss, so callers can accumulate across batches.
    """
    # Check dimension
    batch_size, sent_num, word_num, label_num = logits.size()
    assert(event_labels.size() == (batch_size, sent_num, word_num))
    # Move to proper device
    event_labels = event_labels.to(DEVICE)
    # logits_flat -> (N*S*W, L)
    logits_flat = logits.view(-1, logits.size(-1))
    # event_label_flat -> (N*S*W, 1)
    event_labels_flat = event_labels.view(-1, 1)
    log_probs_flat = F.log_softmax(logits_flat, dim=1)
    # Negative log-likelihood of the gold label for every token.
    loss = -torch.gather(log_probs_flat, dim=1, index=event_labels_flat)
    # Mask padding, set corresponding values to 0
    loss = loss.squeeze() * doc_flat_mask(sent_len)
    # Perform negative sampling
    if neg_pos_ratio > 0:
        assert neg_label is not None
        assert pad_label is not None
        event_labels_flat = event_labels_flat.squeeze()
        sample_mask = torch.zeros(event_labels_flat.size(0), device=DEVICE)
        # Set the mask of positives to 1,
        sample_mask[(event_labels_flat != neg_label) & (event_labels_flat != pad_label)] = 1
        # Get positive label number
        num_positive = torch.sum(sample_mask)
        num_negative = torch.sum(event_labels_flat == neg_label)
        # NOTE(review): math.ceil on a 0-dim tensor relies on the tensor's
        # scalar conversion; falls back to keeping 10 negatives when the
        # batch has no positive token — confirm both are intended.
        num_negative_retained = ceil(num_positive * neg_pos_ratio) if num_positive > 0 else 10
        # Get negative indexes
        neg_indexes = (event_labels_flat == neg_label).nonzero()
        # Randomly choose which negatives to keep in the loss.
        neg_retained_indexes = neg_indexes[torch.randperm(num_negative)][:num_negative_retained]
        sample_mask[neg_retained_indexes.squeeze()] = 1
        # Get ignored negative sample number
        num_negative_ignored = (num_negative - num_negative_retained).float()
        # mask loss with negative sampling
        loss = loss.squeeze() * sample_mask
        if partial:
            return torch.sum(loss), (torch.sum(sent_len).float().to(DEVICE) - num_negative_ignored)
        else:
            return torch.sum(loss) / (torch.sum(sent_len).float().to(DEVICE) - num_negative_ignored)
    else:
        if partial:
            return torch.sum(loss), torch.sum(sent_len).float().to(DEVICE)
        else:
            return torch.sum(loss) / torch.sum(sent_len).float().to(DEVICE)
| 2,527 |
starkbank/iso8583/utils/parser.py
|
starkbank/iso8583-python
| 4 |
2171657
|
from base64 import b64encode, b64decode
from binascii import hexlify, unhexlify
from .. import getEncoding
def parseString(text, encoding=None):
    """Decode *text* bytes into a str, falling back to the module default encoding."""
    codec = encoding if encoding else getEncoding()
    return text.decode(codec)
def unparseString(text, encoding=None):
    """Encode *text* str into bytes, falling back to the module default encoding."""
    codec = encoding if encoding else getEncoding()
    return text.encode(codec)
def parseBin(text, encoding=None):
    """Base64-encode binary field content (encoding argument is unused)."""
    encoded = b64encode(text)
    return encoded
def unparseBin(text, encoding=None):
    """Base64-decode binary field content (encoding argument is unused)."""
    decoded = b64decode(text)
    return decoded
def parseBytesToBin(text, encoding=None, length=64):
    """Render *text* bytes as a zero-padded string of *length* binary digits."""
    as_hex = hexlify(text)
    return bin(int(as_hex, 16))[2:].zfill(length)
def unparseBytesToBin(text, encoding=None, length=64):
    """Pack a string of binary digits back into length // 8 bytes."""
    as_hex = hex(int(text, 2))[2:].replace("L", "")  # "L" strip kept for Py2-era parity
    padded = as_hex.zfill(length // 4)
    return unhexlify(padded)
def parseDE048(text, encoding=None):
    """Parse DE48: a 1-char SE00 prefix followed by TLV subelements
    (2-digit tag, 2-digit length, value)."""
    codec = encoding or getEncoding()
    parsed = {
        "SE00": text[0:1].decode(codec)
    }
    remaining = text[1:]
    while remaining:
        tag = remaining[0:2].decode(codec)
        size = int(remaining[2:4].decode(codec))
        parsed["SE" + tag.zfill(2)] = remaining[4:4 + size].decode(codec)
        remaining = remaining[4 + size:]
    return parsed
def unparseDE048(data, encoding=None):
    """Serialize a DE48 dict back to bytes: SE00 first, then sorted TLV items.
    The input mapping is not modified (a copy is consumed)."""
    codec = encoding or getEncoding()
    remaining = data.copy()
    out = remaining.pop("SE00").encode(codec)
    for tag, value in sorted(remaining.items()):
        tag = tag.replace("SE", "")
        out += tag.encode(codec) + str(len(value)).zfill(2).encode(codec) + value.encode(codec)
    return out
def parseDE112(text, encoding=None):
    """Parse DE112 TLV subelements (3-digit tag, 3-digit length, value)."""
    codec = encoding or getEncoding()
    parsed = {}
    remaining = text
    while remaining:
        tag = remaining[0:3].decode(codec)
        size = int(remaining[3:6].decode(codec))
        parsed["SE" + tag.zfill(3)] = remaining[6:6 + size].decode(codec)
        remaining = remaining[6 + size:]
    return parsed
def unparseDE112(data, encoding=None):
    """Serialize a DE112 dict back to bytes (3-digit tag, 3-digit length, value).

    The input mapping is not modified.  Bug fix: the accumulator was
    initialised as ``""`` (str) while every chunk appended is bytes, so any
    non-empty input raised ``TypeError: can only concatenate str ... bytes``;
    it now starts as ``b""``, matching the symmetric unparseDE048.
    """
    encoding = encoding or getEncoding()
    json = data.copy()
    string = b""
    for key, value in sorted(json.items()):
        key = key.replace("SE", "")
        length = len(value)
        string += key.encode(encoding) + str(length).zfill(3).encode(encoding) + value.encode(encoding)
    return string
def parsePds(text):
    """Parse PDS items from a str: 4-digit tag, 3-digit length, value."""
    parsed = {}
    remaining = text
    while remaining:
        tag = remaining[0:4]
        size = int(remaining[4:7])
        parsed["PDS" + tag.zfill(4)] = remaining[7:7 + size]
        remaining = remaining[7 + size:]
    return parsed
def unparsePds(json):
string = ""
for key, value in sorted(json.items()):
tag = key.replace("PDS", "")
length = str(len(value)).zfill(3)
partial = tag + length + value
if len(string + partial) > 999:
break
string += partial
json.pop(key)
return string
| 2,796 |
fluf/cli/__init__.py
|
mfiers/fluf
| 0 |
2171282
|
import click
import fluf
# Registry of global CLI options: option name -> click.option keyword args.
# Bug fix: this was a list, and option() did ``GLOPTIONS[name, kwargs]`` —
# indexing a list with a tuple, which raises TypeError and stored nothing.
# cli() iterates it with ``.items()``, so it was always meant to be a dict.
GLOPTIONS = {}


def option(name, **kwargs):
    """Register a global option to be attached to the core click command.

    Parameters
    ----------
    name : str
        Option name (exposed as ``--<name>``).
    **kwargs
        Passed through to ``click.option``.
    """
    GLOPTIONS[name] = kwargs
def cli():
    """ Run a fluffy CLI """
    from fluf.cli import core
    # Attach every registered global option (name -> click kwargs) to the core command.
    for oname, oargs in GLOPTIONS.items():
        click.option('--' + oname, **oargs)(core.cli)
    for func in fluf.FUNCTIONS:
        import inspect
        fsig = inspect.signature(func)
        # Only expose functions whose parameters are all simple scalar types,
        # since each parameter becomes a command-line option.
        useable = True
        for par in fsig.parameters.keys():
            ann = fsig.parameters[par].annotation
            if ann not in (int, str, float):
                useable = False
        if useable:
            #cmd = click.pass_context(func)
            cmd = core.cli.command(name=func.__name__)(func)
            # Turn every parameter into a --<name> option, carrying over the default.
            for par in fsig.parameters.keys():
                ann = fsig.parameters[par].annotation
                default = fsig.parameters[par].default
                pardata = fsig.parameters[par]
                opargs = dict(type=ann)
                if default != inspect._empty:
                    opargs['default'] = default
                cmd = click.option('--' + par, **opargs)(cmd)
    # Finally invoke the click group (return value is unused).
    q = core.cli()
| 1,125 |
tasks/printer/make.py
|
irdkwmnsb/lkshl-ctf
| 3 |
2172112
|
import argparse
import os
from itertools import count, cycle
from multiprocessing.pool import Pool
from typing import Dict, Tuple
from PIL import Image
import numpy as np
def make_pic(data) -> Tuple[str, Image.Image]:
    """Hide *flag* in a scan-like image derived from org.bmp.

    data: (team, flag) pair.  The flag's bits are cycled over the rotated
    image's rows: rows for '1' bits keep (noised) content, rows for '0' bits
    are blanked.  Returns (team, rendered PIL image).
    Reads ``org.bmp`` from the current working directory.
    """
    team = data[0]
    flag = data[1]
    # Flag as a continuous string of 8-bit ASCII codes.
    bits = ''.join('{0:08b}'.format(ord(x), 'b') for x in flag)
    ar = np.array(Image.open("org.bmp"))
    ar = np.rot90(ar)
    # Encode one bit per image row (bits repeat via cycle()).
    for i, c in zip(range(ar.shape[0]), cycle(bits)):
        # Sprinkle random noise into the row so rows are not uniform.
        ari = np.logical_or(ar[i], np.uint8(np.round(np.random.rand(ar[0].shape[0]) * 0.6)))
        if (c == '1'):
            ar[i] = ari
        else:
            ar[i] = np.zeros(ar[i].shape)
    ar = np.rot90(ar, k=3)  # rotate back
    img = Image.fromarray(np.uint8((1-ar)*255))
    img = img.convert("RGBA")
    img = img.resize((img.width*4, img.height*4))
    # Compose onto off-white backgrounds and tilt slightly to fake a scan.
    white = Image.new("RGBA", img.size, (256,) * 4)
    white2 = Image.new("RGBA", img.size, (240,) * 4)
    off = 180
    img = img.resize((img.width - off, img.height - off))
    white.paste(img, (off//2,)*2)
    img = white
    img = img.rotate(2.34, Image.BICUBIC)
    img = Image.alpha_composite(white2, img)
    img = img.convert("L")
    return team, img
where = "./out"
def initializer(*_where: str) -> None:
global where
where = ''.join(_where)
def save(data) -> None:
    """Write a (team, image) pair to '<where>/<team>.jpg'."""
    team = data[0]
    img = data[1]
    img.save(os.path.join(where, str(team) + ".jpg"))
def make_and_save(data) -> None:
    """Render the flag image for one (team, flag) pair and save it (Pool task)."""
    save(make_pic(data))
from tqdm import tqdm
if __name__ == "__main__":
    # CLI: hide one flag, a list of flags, or all flags from flags_and_teams.py,
    # then either display the images (--v) or save them to --where.
    parser = argparse.ArgumentParser(description="Make dem tasks")
    flag_group = parser.add_mutually_exclusive_group(required=True)
    flag_group.add_argument('--flags', help="File to take flags from", type=list)
    flag_group.add_argument('--flag', help="Single flag to be hidden")
    flag_group.add_argument('--from-file', help="Input from flags_and_teams.py", action="store_true")
    output_group = parser.add_mutually_exclusive_group(required=True)
    output_group.add_argument('--where', help="Where would you like task files outputted to")
    output_group.add_argument('--v', help="Open made scan", action="store_true")
    g = parser.parse_args()
    output = dict()  # type: Dict[str, Image]
    if g.flag:
        output.update([make_pic(['0', g.flag])])
    elif g.flags:
        # Bug fix: was ``for flag, i in zip(count(), g.flags)``, which bound the
        # running index to ``flag`` and the flag text to ``i`` — make_pic() then
        # got an int where it expects the flag string and crashed on ord().
        for i, flag in enumerate(g.flags):
            output[i] = make_pic([i, flag])[1]
    elif g.where and g.from_file:
        from flags_and_teams import data
        # Bug fix: initargs must be an iterable of arguments — ``(g.where)`` is
        # just the bare string (missing comma), which Pool splatted into one
        # character per argument; the initializer's ''.join only papered over it.
        pool = Pool(5, initializer=initializer, initargs=(g.where,))
        if not os.path.exists(g.where):
            os.makedirs(g.where)
        pool.map(make_and_save, tqdm(list(data.items())))
        exit(0)
    elif g.from_file:
        from flags_and_teams import data
        for team_id, flag in data.items():
            output.update([make_pic([team_id, flag])])
    print(output)
    if g.v:
        for team, pic in output.items():
            pic.show()
    elif g.where:
        if not os.path.exists(g.where):
            os.makedirs(g.where)
        for team_id, flag in output.items():
            save((team_id, flag))
| 3,093 |
scripts/utils.py
|
nmningmei/dichotic-listening-of-tones-is-influenced-by-the-content-of-speech
| 0 |
2171010
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 8 17:07:19 2019
@author: nmei
"""
import numpy as np
import pandas as pd
from scipy import stats
def resample_ttest(x,baseline = 0.5,n_ps = 100,n_permutation = 10000,one_tail = False,
                   n_jobs = 12, verbose = 0):
    """
    Permutation-style one-sample t-test of *x* against *baseline*.

    http://www.stat.ucla.edu/~rgould/110as02/bshypothesis.pdf
    https://www.tau.ac.il/~saharon/StatisticsSeminar_files/Hypothesis.pdf
    Inputs:
    ----------
    x: numpy array vector, the data that is to be compared
    baseline: the single point that we compare the data with
    n_ps: number of p values we want to estimate
    n_permutation: bootstrap resamples drawn per estimated p value
    one_tail: whether to perform one-tailed comparison
    n_jobs, verbose: forwarded to joblib.Parallel

    Returns a numpy array of n_ps estimated p values.
    """
    import numpy as np
    # t statistic of the observed data.
    t_experiment = (np.mean(x) - baseline) / (np.std(x) / np.sqrt(x.shape[0]))
    null = x - np.mean(x) + baseline # shift the mean to the baseline but keep the distribution
    from joblib import Parallel,delayed
    import gc
    gc.collect()
    def t_statistics(null,size,):
        """
        null: shifted data distribution
        size: tuple of 2 integers (n_for_averaging,n_permutation)
        """
        null_dist = np.random.choice(null,size = size,replace = True)
        t_null = (np.mean(null_dist,0) - baseline) / (np.std(null_dist,0) / np.sqrt(null_dist.shape[0]))
        # +1 in numerator and denominator avoids p = 0 (standard correction).
        if one_tail:
            return ((np.sum(t_null >= t_experiment)) + 1) / (size[1] + 1)
        else:
            return ((np.sum(np.abs(t_null) >= np.abs(t_experiment))) + 1) / (size[1] + 1) /2
    ps = Parallel(n_jobs = n_jobs,verbose = verbose)(delayed(t_statistics)(**{
                    'null':null,
                    'size':(null.shape[0],int(n_permutation)),}) for i in range(n_ps))
    return np.array(ps)
def resample_ttest_2sample(a,b,n_ps=100,n_permutation = 10000,
                           one_tail=False,
                           match_sample_size = True,
                           n_jobs = 6,
                           verbose = 0):
    """
    Permutation-style two-sample t-test.

    match_sample_size=True treats the samples as paired: the pairwise
    difference is tested against 0 via resample_ttest.  Otherwise the
    samples are pooled and relabelled to build the null distribution.
    Returns a numpy array of n_ps estimated p values.
    """
    # when the samples are dependent just simply test the pairwise difference against 0
    # which is a one sample comparison problem
    if match_sample_size:
        difference = a - b
        ps = resample_ttest(difference,baseline=0,
                            n_ps=n_ps,n_permutation=n_permutation,
                            one_tail=one_tail,
                            n_jobs=n_jobs,
                            verbose=verbose,)
        return ps
    else: # when the samples are independent
        t_experiment,_ = stats.ttest_ind(a,b,equal_var = False)
        def t_statistics(a,b):
            # Pool the two samples and draw a random relabelling (with replacement).
            group = np.random.choice(np.concatenate([a,b]),size = int(len(a) + len(b)),replace = True)
            new_a = group[:a.shape[0]]
            new_b = group[a.shape[0]:]
            t_null,_ = stats.ttest_ind(new_a,new_b,equal_var = False)
            return t_null
        from joblib import Parallel,delayed
        import gc
        gc.collect()
        ps = np.zeros(n_ps)
        for ii in range(n_ps):
            t_null_null = Parallel(n_jobs = n_jobs,verbose = verbose)(delayed(t_statistics)(**{
                            'a':a,
                            'b':b}) for i in range(n_permutation))
            # +1 correction keeps p strictly positive.
            if one_tail:
                ps[ii] = ((np.sum(t_null_null >= t_experiment)) + 1) / (n_permutation + 1)
            else:
                ps[ii] = ((np.sum(np.abs(t_null_null) >= np.abs(t_experiment))) + 1) / (n_permutation + 1) / 2
        return ps
class MCPConverter(object):
    """
    Multiple-comparison p-value adjustment.

    https://gist.github.com/naturale0/3915e2def589553e91dce99e69d138cc
    https://en.wikipedia.org/wiki/Holm%E2%80%93Bonferroni_method
    input: array of p-values.
    * convert p-value into adjusted p-value (or q-value)

    Fixes vs. previous revision:
      * method names were compared with ``is`` / ``is not`` (identity on
        string literals — an interning accident; CPython warns about it);
        now compared with ``==`` / ``!=``.
      * ``import statsmodels`` moved inside the 'lfdr' branch so the class
        works without statsmodels installed unless lfdr is requested.
    """
    def __init__(self, pvals, zscores = None):
        self.pvals = pvals
        self.zscores = zscores
        self.len = len(pvals)
        if zscores is not None:
            # Keep p-values and z-scores paired while sorting by p-value.
            srted = np.array(sorted(zip(pvals.copy(), zscores.copy())))
            self.sorted_pvals = srted[:, 0]
            self.sorted_zscores = srted[:, 1]
        else:
            self.sorted_pvals = np.array(sorted(pvals.copy()))
        # Original indices in ascending-p order (to map results back).
        self.order = sorted(range(len(pvals)), key=lambda x: pvals[x])

    def adjust(self, method = "holm"):
        """
        Return adjusted p-values, ordered by ascending raw p-value.

        methods = ["bonferroni", "holm", "bh", "lfdr"]
        (local FDR method needs 'statsmodels' package)
        """
        if method == "bonferroni":
            return [np.min([1, i]) for i in self.sorted_pvals * self.len]
        elif method == "holm":
            return [np.min([1, i]) for i in (self.sorted_pvals * (self.len - np.arange(1, self.len+1) + 1))]
        elif method == "bh":
            p_times_m_i = self.sorted_pvals * self.len / np.arange(1, self.len+1)
            return [np.min([p, p_times_m_i[i+1]]) if i < self.len-1 else p for i, p in enumerate(p_times_m_i)]
        elif method == "lfdr":
            if self.zscores is None:
                raise ValueError("Z-scores were not provided.")
            import statsmodels as sms  # optional dependency, only needed here
            # NOTE(review): may require ``import statsmodels.stats.multitest``
            # explicitly for the submodule to be reachable — confirm.
            return sms.stats.multitest.local_fdr(abs(self.sorted_zscores))
        else:
            raise ValueError("invalid method entered: '{}'".format(method))

    def adjust_many(self, methods = ["bonferroni", "holm", "bh", "lfdr"]):
        """Return a DataFrame of adjusted p-values for several methods at once."""
        if self.zscores is not None:
            df = pd.DataFrame(np.c_[self.sorted_pvals, self.sorted_zscores], columns=["p_values", "z_scores"])
            for method in methods:
                df[method] = self.adjust(method)
        else:
            df = pd.DataFrame(self.sorted_pvals, columns=["p_values"])
            for method in methods:
                if method != "lfdr":  # lfdr needs z-scores
                    df[method] = self.adjust(method)
        return df
def change_width(ax, new_value):
    """Resize every bar patch on *ax* to *new_value* width, keeping it centred."""
    for bar in ax.patches:
        shift = bar.get_width() - new_value
        # we change the bar width
        bar.set_width(new_value)
        # we recenter the bar
        bar.set_x(bar.get_x() + shift * .5)
| 6,301 |
ui_curses.py
|
AndreaOrru/Yugen
| 1 |
2171345
|
"""Implementation of the user interface with Curses."""
import curses
from curses import ascii
from curses.ascii import isctrl, unctrl
from attribute import Color
from key import Key
from ui import UI, UIWindow
class CursesWindow(UIWindow):
    """Class representing a window in Curses.

    See parent class UIWindow for details.  Backed by a curses pad that is
    scrolled to keep the cursor visible.
    """
    def __init__(self, ui, line, column, n_lines, n_columns):
        super().__init__(ui, line, column, n_lines, n_columns)
        self._window = curses.newpad(self._n_lines, self._n_columns)
        self._window.keypad(True)  # translate escape sequences into key constants
        # Top-left corner of the visible region of the pad.
        self._scroll_lines = 0
        self._scroll_columns = 0
        # Position where the cursor was last rendered (to undo its highlight).
        self._drawn_cursor = None

    @UIWindow.cursor.getter
    def cursor(self):
        return UIWindow.cursor.fget(self)

    @cursor.setter
    def cursor(self, cursor):
        # Move the cursor, then scroll the viewport just enough to keep it visible.
        UIWindow.cursor.fset(self, cursor)
        line, column = cursor
        if line >= self._scroll_lines + self._n_lines:
            self._scroll_lines += line - (self._scroll_lines + self._n_lines) + 1
        elif line < self._scroll_lines:
            self._scroll_lines -= self._scroll_lines - line
        if column >= self._scroll_columns + self._n_columns:
            self._scroll_columns += column - (self._scroll_columns + self._n_columns) + 1
        elif column < self._scroll_columns:
            self._scroll_columns -= self._scroll_columns - column

    def refresh(self):
        """Redraw the cursor highlight and push the pad to the virtual screen."""
        if self._drawn_cursor:
            # Remove the reverse-video attribute from the previous cursor cell.
            attr = self._window.inch(*self._drawn_cursor) & ~0xFF & ~curses.A_REVERSE
            self._window.chgat(self._drawn_cursor[0], self._drawn_cursor[1], 1, attr)
        if self._cursor_show:
            # Render the cursor as a reverse-video cell.
            self._drawn_cursor = self._cursor
            attr = (self._window.inch(*self._drawn_cursor) & ~0xFF) | curses.A_REVERSE
            self._window.chgat(self._cursor[0], self._cursor[1], 1, attr)
        self._window.noutrefresh(self._scroll_lines, self._scroll_columns,
                                 self._line, self._column, self._line + self._n_lines-1, self._column + self._n_columns-1)

    def attributes_set(self, colors, properties):
        # Apply default colors/properties to the window background.
        self._window.bkgd(' ', self._ui.color_pair(colors) | properties)

    def __check_size(self, line, length):
        # Double the pad size whenever a write would fall outside it.
        height, width = self._window.getmaxyx()
        if line >= height:
            height *= 2
        if length >= width:
            width *= 2
        self._window.resize(height, width)

    def line_update(self, line, content, attributes):
        """Replace *line* with *content*, one (char, attribute) pair at a time."""
        self.__check_size(line, len(content))
        self._window.move(line, 0)
        for column, (char, attribute) in enumerate(zip(content, attributes)):
            self._window.addstr(line, column, char, self._ui.color_pair(attribute[0]) | attribute[1])
        self._window.clrtoeol()

    def line_insert(self, line, content, attributes):
        """Insert a new line at *line*, shifting the rest down."""
        self.__check_size(line, len(content))
        self._window.move(line, 0)
        self._window.insertln()
        self.line_update(line, content, attributes)

    def line_delete(self, line):
        """Delete *line*, shifting the following lines up."""
        self._window.move(line, 0)
        self._window.deleteln()

    def key_get(self):
        """Read one keypress and normalize it into a Key(code, ctrl, meta)."""
        key1 = self._window.getch()
        # An ESC prefix means the next key carries the Meta modifier.
        key2 = self._window.getch() if (key1 == ascii.ESC) else None
        meta = (key1 == ascii.ESC)
        key = (key2 if meta else key1)
        # FIX: Ugly hack to make TAB work:
        if (key == ascii.TAB):
            ctrl = False
        else:
            ctrl = isctrl(key)
        # Map control codes back to their printable lowercase counterpart.
        key = ord(unctrl(key)[-1].lower()) if (key < 0x20) else key
        return Key(key, ctrl, meta)
class Curses(UI):
    """Class representing the Curses toolkit."""
    def __init__(self, screen):
        super().__init__()
        self._screen = screen
        # Cache of attribute -> curses color-pair number; pair 0 is the default.
        self._color_pair = {Color.Defaults: 0}
        curses.raw()        # receive control keys unprocessed
        curses.curs_set(0)  # hide the hardware cursor; windows draw their own

    @property
    def max_lines(self):
        return curses.LINES

    @property
    def max_columns(self):
        return curses.COLS

    def refresh(self):
        # Refresh every window into the virtual screen, then repaint once.
        for window in self._ui_windows:
            window.refresh()
        curses.doupdate()

    def color_pair(self, attribute):
        """Return the curses color pair for *attribute*, allocating it on first use."""
        try:
            n = self._color_pair[attribute]
        except KeyError:
            n = len(self._color_pair)
            curses.init_pair(n, *attribute)
            self._color_pair[attribute] = n
        return curses.color_pair(n)

    def window_create(self, line, column, n_lines, n_columns):
        """Create a CursesWindow, register it for refresh, and return it."""
        window = CursesWindow(self, line, column, n_lines, n_columns)
        self._ui_windows.append(window)
        return window
| 4,520 |
src/open_weather_map/api.py
|
stas12312/weather-bot
| 0 |
2171800
|
import logging
from typing import Any
import aiohttp
from .consts import BASE_URL, VERSION
class ApiService:
    """
    Service for talking to the OpenWeatherMap API.
    """
    def __init__(self, api_key: str):
        self.api_key = api_key
        # NOTE(review): aiohttp recommends creating ClientSession inside a
        # running event loop — confirm __init__ is called from async context.
        self.session = self.make_session()

    @classmethod
    def make_session(cls) -> aiohttp.ClientSession:
        """
        Create the aiohttp client session.
        :return: the session
        """
        session = aiohttp.ClientSession()
        return session

    async def api_request(
            self,
            api_service: str,
            api_method: str,
            params: dict[str, str | float | int],
    ) -> dict[str, Any]:
        """
        Perform a request against openweathermap.org.
        :param api_service: API service name
        :param api_method: API method name
        :param params: GET query parameters (appid/units/lang are added here)
        :return: decoded JSON response
        """
        url = self.make_url(api_service, api_method)
        params['appid'] = self.api_key
        params['units'] = 'metric'
        params['lang'] = 'ru'
        result = await self.raw_request(url, params)
        return result

    async def raw_request(
            self,
            url: str,
            params: dict
    ) -> dict:
        """
        Execute the HTTP GET request.
        :param url: request URL
        :param params: GET query parameters
        :return: decoded JSON response
        """
        response = await self.session.get(url, params=params)
        data = await response.json()
        logging.info(response.url)
        return data

    @classmethod
    def make_url(
            cls,
            api_service: str,
            api_method: str,
    ) -> str:
        """
        Build the full request URL.
        :param api_service: API service name
        :param api_method: API method name
        :return: URL for the request
        """
        return f'{BASE_URL}/{api_service}/{api_method}'
| 1,885 |
clustering/src/entities.py
|
ndricimrr/Athene
| 6 |
2170040
|
from numpy import array
# Semantic type aliases used throughout the clustering code.
Word = str
Sentence = str
ElmoVector = array  # a numpy array holding one ELMo embedding
class TextBlock:
    """A text fragment identified by an id.

    The ``dict`` parameter of ``from_dict`` was renamed to ``data`` because it
    shadowed the builtin.  Annotations referring to module-level aliases are
    quoted so they are not evaluated at class-creation time.
    """
    id: str
    text: "Sentence"

    def __init__(self, id: str, text: "Sentence" = None):
        self.id = id
        # When no text is given the attribute is deliberately left unset
        # (not None), preserving the original hasattr() behaviour.
        if text is not None:
            self.text = text

    @classmethod
    def from_dict(cls, data: dict) -> 'TextBlock':
        """Build a TextBlock from a mapping with 'id' and 'text' keys."""
        return cls(data['id'], data['text'])
class Embedding:
    """An embedding vector keyed by the id of the text it encodes.

    The ``dict`` parameter of ``from_dict`` was renamed to ``data`` because it
    shadowed the builtin.  The ElmoVector annotation is quoted so it is not
    evaluated at class-creation time.
    """
    id: str
    vector: "ElmoVector"

    def __init__(self, id: str, vector: "ElmoVector"):
        self.id = id
        self.vector = vector

    @classmethod
    def from_dict(cls, data: dict) -> 'Embedding':
        """Build an Embedding from a mapping with 'id' and 'vector' keys."""
        return cls(data['id'], data['vector'])
| 650 |
LeetCode_Solutions/1592. Rearrange Spaces Between Words.py
|
foxfromworld/Coding-Interview-Preparation-with-LeetCode-and-An-Algorithm-Book
| 0 |
2171599
|
# Source : https://leetcode.com/problems/rearrange-spaces-between-words/
# Author : foxfromworld
# Date : 16/12/2020
# Second attempt
class Solution:
    def reorderSpaces(self, text: str) -> str:
        """Spread the spaces evenly between words; leftovers go to the end."""
        total_spaces = text.count(" ")
        words = text.split()
        gaps = len(words) - 1
        if gaps <= 0:
            # Single word (or none): all spaces trail the text.
            return "".join(words) + " " * total_spaces
        per_gap, leftover = divmod(total_spaces, gaps)
        return (" " * per_gap).join(words) + " " * leftover
# Date : 16/12/2020
# First attempt
class Solution:
    def reorderSpaces(self, text: str) -> str:
        """Same task solved with a manual character scan (first attempt)."""
        if len(text) == 1:
            return text
        total_spaces = 0
        words = []
        current = ""
        # Single pass: count spaces and collect maximal runs of non-spaces.
        for ch in text:
            if ch != " ":
                current += ch
                continue
            total_spaces += 1
            if current:
                words.append(current)
                current = ""
        if current:
            words.append(current)
        gaps = len(words) - 1
        if gaps <= 0:
            return "".join(words) + " " * total_spaces
        per_gap, leftover = divmod(total_spaces, gaps)
        return (" " * per_gap).join(words) + " " * leftover
| 1,060 |
setup.py
|
adiralashiva8/sp-historic
| 1 |
2171985
|
from setuptools import find_packages, setup
# Packaging configuration for the sp-historic Flask/MySQL reporting app.
setup(
    name='sp-historic',
    version="0.1.1",
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/adiralashiva8/sp-historic',
    license='MIT',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'robotframework',
        # NOTE(review): 'config' is an unusual dependency name — confirm it is
        # the intended PyPI package and not a local module.
        'config',
        'flask',
        'flask-mysqldb'
    ],
    # Console commands exposed after installation.
    entry_points={
        'console_scripts': [
            'sphistoric=sp_historic.app:main',
            'sphistoricsetup=sp_historic.setupargs:main',
            'sphistoricparser=sp_historic.parserargs:main',
        ]
    },
)
| 695 |
stage/configuration/test_field_order_processor.py
|
Sentienz/datacollector-tests
| 0 |
2170245
|
import pytest
from streamsets.testframework.decorators import stub
@stub
@pytest.mark.parametrize('stage_attributes', [{'default_type': 'BOOLEAN', 'missing_fields': 'USE_DEFAULT'},
                                              {'default_type': 'DATE', 'missing_fields': 'USE_DEFAULT'},
                                              {'default_type': 'DATETIME', 'missing_fields': 'USE_DEFAULT'},
                                              {'default_type': 'DECIMAL', 'missing_fields': 'USE_DEFAULT'},
                                              {'default_type': 'DOUBLE', 'missing_fields': 'USE_DEFAULT'},
                                              {'default_type': 'FLOAT', 'missing_fields': 'USE_DEFAULT'},
                                              {'default_type': 'INTEGER', 'missing_fields': 'USE_DEFAULT'},
                                              {'default_type': 'LONG', 'missing_fields': 'USE_DEFAULT'},
                                              {'default_type': 'SHORT', 'missing_fields': 'USE_DEFAULT'},
                                              {'default_type': 'STRING', 'missing_fields': 'USE_DEFAULT'},
                                              {'default_type': 'TIME', 'missing_fields': 'USE_DEFAULT'}])
def test_default_type(sdc_builder, sdc_executor, stage_attributes):
    """Stub: cover the Default Type configuration for every supported type."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'missing_fields': 'USE_DEFAULT'}])
def test_default_value(sdc_builder, sdc_executor, stage_attributes):
    """Stub: cover the Default Value configuration."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'extra_fields': 'TO_ERROR'}])
def test_discard_fields(sdc_builder, sdc_executor, stage_attributes):
    """Stub: cover the Discard Fields configuration."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'extra_fields': 'DISCARD'}, {'extra_fields': 'TO_ERROR'}])
def test_extra_fields(sdc_builder, sdc_executor, stage_attributes):
    """Stub: cover both Extra Fields handling modes."""
    pass
@stub
def test_fields_to_order(sdc_builder, sdc_executor):
    """Stub: cover the Fields to Order configuration."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'missing_fields': 'TO_ERROR'}, {'missing_fields': 'USE_DEFAULT'}])
def test_missing_fields(sdc_builder, sdc_executor, stage_attributes):
    """Stub: cover both Missing Fields handling modes."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'on_record_error': 'DISCARD'},
                                              {'on_record_error': 'STOP_PIPELINE'},
                                              {'on_record_error': 'TO_ERROR'}])
def test_on_record_error(sdc_builder, sdc_executor, stage_attributes):
    """Stub: cover all On Record Error policies."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'output_type': 'LIST'}, {'output_type': 'LIST_MAP'}])
def test_output_type(sdc_builder, sdc_executor, stage_attributes):
    """Stub: cover both Output Type options."""
    pass
@stub
def test_preconditions(sdc_builder, sdc_executor):
    """Stub: cover the Preconditions configuration."""
    pass
@stub
def test_required_fields(sdc_builder, sdc_executor):
    """Stub: cover the Required Fields configuration."""
    pass
| 2,776 |
Python/Flask/code/migrations/versions/3994efd4962e_.py
|
LaoMuJi/Python1
| 0 |
2170746
|
"""empty message
Revision ID: 3994efd4962e
Revises: <PASSWORD>
Create Date: 2017-11-21 09:14:17.545040
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# revision identifiers, used by Alembic.
revision = '3994efd4962e'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Add unique id_card to tbl_authors and leader column to tbl_books."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('tbl_authors', sa.Column('id_card', sa.String(length=18), nullable=True))
    op.create_unique_constraint("id_card", 'tbl_authors', ['id_card'])
    op.add_column('tbl_books', sa.Column('leader', sa.String(length=32), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the leader column and the id_card column/constraint."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('tbl_books', 'leader')
    op.drop_constraint('id_card', 'tbl_authors', type_='unique')
    op.drop_column('tbl_authors', 'id_card')
    # ### end Alembic commands ###
| 935 |
statistical_test/fdr_bonferroni.py
|
nishimoto/py_r_stats
| 0 |
2171173
|
#!/usr/bin/env python
# Demo: Bonferroni correction of a handful of p-values (each multiplied by
# the number of tests and capped at 1).
from statsmodels.stats.multitest import multipletests
p_values = [0.21, 0.001, 0.1, 0.06, 0.005]
print(multipletests(p_values, method='bonferroni')[1])  # => [1, 0.005, 0.5, 0.3, 0.025]
| 208 |
datacatalog/formats/__init__.py
|
SD2E/python-datacatalog
| 0 |
2172046
|
from .biofab import Biofab
from .ginkgo import Ginkgo
from .transcriptic import Transcriptic
from .sample_attributes import SampleAttributes
from .caltech import Caltech
from .marshall import Marshall
from .duke_haase import Duke_Haase
from .duke_validation import Duke_Validation
from .tulane import Tulane
from .classify import get_converter
| 345 |
tests/test_raw_moments.py
|
daneshvar-amrollahi/polar
| 1 |
2168667
|
import glob
import os
import unittest
from cli import ArgumentParser
from cli.common import prepare_program
from recurrences import RecBuilder
from recurrences.solver import RecurrenceSolver
from utils import unpack_piecewise
from tests.common import get_test_specs
from sympy import Symbol, sympify
def create_raw_moment_test(benchmark, monom, initial_value, general_form):
    """Build a unittest method asserting the raw moment of *monom* for *benchmark*.

    initial_value / general_form are sympy expressions in n; the generated
    test checks exactness, the value at n = 0, and the closed form.
    """
    monom = sympify(monom)
    n = Symbol("n", integer=True)
    # Normalize any plain 'n' symbol to the integer-typed one.
    initial_value = sympify(initial_value).xreplace({Symbol("n"): n})
    general_form = sympify(general_form).xreplace({Symbol("n"): n})

    def test(self: RawMomentsTest):
        solution, is_exact = get_raw_moment(benchmark, monom)
        self.assertTrue(is_exact)
        self.assertEqual(initial_value.expand(), solution.subs({n: 0}))
        self.assertEqual(general_form.expand(), unpack_piecewise(solution).expand())
    return test
def get_raw_moment(benchmark, monom):
    """Solve the recurrences of *benchmark* for *monom* and return
    ``(closed_form_moment, is_exact)``."""
    args = ArgumentParser().get_defaults()
    program = prepare_program(benchmark, args)
    rec_builder = RecBuilder(program)
    recurrences = rec_builder.get_recurrences(monom)
    # NOTE(review): positional args (False, False, 0) presumably disable
    # numeric root approximations — confirm against RecurrenceSolver's signature.
    solver = RecurrenceSolver(recurrences, False, False, 0)
    moment = solver.get(monom)
    return moment, solver.is_exact
class RawMomentsTest(unittest.TestCase):
    # Test methods are attached dynamically below, one per (benchmark, spec).
    pass
benchmarks = glob.glob(os.path.dirname(__file__) + "/benchmarks/*")
for benchmark in benchmarks:
    benchmark_name = os.path.basename(benchmark).replace(".prob", "")
    specs = get_test_specs(benchmark, "raw")
    for spec in specs:
        # spec = (monomial, expected initial value, expected general form)
        test_case = create_raw_moment_test(benchmark, spec[0], spec[1], spec[2])
        monom_id = spec[0].replace("*", "")
        test_name = f"test_raw_moment_{benchmark_name}_{monom_id}"
        setattr(RawMomentsTest, test_name, test_case)
if __name__ == '__main__':
    unittest.main()
| 1,815 |
chatbot.py
|
Technik-Tueftler/TeTueTwitchBot
| 2 |
2171738
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# COPYRIGHT INFORMATION
# ---------------------
# This bot is forked from Carberra YouTube channel: https://www.youtube.com/playlist?list=PLYeOw6sTSy6ZFDkfO9Kl8d37H_3wLyNxO
# This bot can be freely copied and modified without permission, but not sold as is.
# Some code in this file is licensed under the Apache License, Version 2.0.
# http://aws.amazon.com/apache2.0/
from irc.bot import SingleServerIRCBot
from requests import get
import automod
import misc
import cmds
import db
import react
import tetueSrc
import user_management
# Load configuration and refresh the command-info cache before the bot starts.
tetueSrc.load_configuration()
misc.load_new_command_infos(None, None)
# NOTE(review): read_successful from the first call is overwritten by the
# second and neither value is ever checked — confirm intended.
read_successful, cfg = tetueSrc.get_configuration("bot")
read_successful, cfg_owner = tetueSrc.get_configuration("vipbot")
class Bot(SingleServerIRCBot):
    """Twitch chat bot: connects over IRC, resolves channel ids via the Twitch
    API, and routes incoming chat messages through automod/react/cmds."""
    def __init__(self):
        # Init for Chat-Bot
        self.HOST = "irc.chat.twitch.tv"
        self.PORT = 6667
        self.USERNAME = cfg["name"].lower()
        self.CLIENT_ID = cfg["client_id"]
        self.TOKEN = cfg["token"]
        self.owner = cfg["owner"]
        self.CHANNEL = f"#{self.owner}"
        # Resolve the bot account's user id via the Helix API.
        url = f"https://api.twitch.tv/helix/users?login={self.USERNAME}"
        #headers = {"Client-ID": self.CLIENT_ID, "Accept": "application/vnd.twitchtv.v5+json"}
        headers = {"Client-ID": self.CLIENT_ID, "Authorization": f"Bearer {self.TOKEN}"}
        resp = get(url, headers=headers).json()
        print(resp)
        self.channel_id = resp["data"][0]["id"]
        super().__init__([(self.HOST, self.PORT, f"oauth:{self.TOKEN}")], self.USERNAME, self.USERNAME)
        # Init for TeTue-Channel
        # NOTE(review): this second lookup (legacy Kraken API) overwrites the
        # self.channel_id assigned from the Helix response above — confirm intended.
        url_owner = f"https://api.twitch.tv/kraken/users?login={self.owner}"
        headers_owner = {"Client-ID": cfg_owner["client_id"], "Accept": "application/vnd.twitchtv.v5+json"}
        resp_owner = get(url_owner, headers=headers_owner).json()
        self.channel_id = resp_owner["users"][0]["_id"]
        print(resp_owner)
    def on_welcome(self, cxn, event):
        """IRC callback after connecting: request capabilities, join the
        channel, and refresh cached bot data."""
        for req in ("membership", "tags", "commands"):
            cxn.cap("REQ", f":twitch.tv/{req}")
        cxn.join(self.CHANNEL)
        print("Chatroom joined")
        react.create_hen_name_list()
        print("Update data")
        user_management.update_user_awards()
        react.update_kd_counter(bot)
        tetueSrc.log_header_info("Stream-Start")
        #self.send_message("En Gude Tüftlies " + tetueSrc.get_string_element("hunname", "icon"))
        print("Online")
    def on_pubmsg(self, cxn, event):
        """IRC callback for every public chat message: runs automod, then the
        reaction/command pipelines, plus channel-point and cheer handling."""
        tags = {kvpair["key"]: kvpair["value"] for kvpair in event.tags}
        message = event.arguments[0]
        # print(event)
        tetueSrc.log_event_info(tags)
        tetueSrc.log_event_info(message)
        active_user = user_management.get_active_user(tags["user-id"], tags["display-name"], tags["badges"])
        # Ignore the bot's own messages; only messages that pass automod are processed.
        if active_user.get_name() != cfg["name"] and automod.clear(bot, active_user, message):
            react.process(bot, active_user, message)
            cmds.process(bot, active_user, message)
            if "custom-reward-id" in tags:
                react.channel_point(bot, active_user, message, tags["custom-reward-id"])
            elif "bits" in tags:
                react.update_bits_records(bot, active_user, tags["bits"])
                react.thank_for_cheer(bot, active_user, tags["bits"])
    def send_message(self, message):
        # Post a plain chat message to the joined channel.
        self.connection.privmsg(self.CHANNEL, message)
    def get_channel_info(self):
        """Fetch the channel's current game/category (legacy Kraken API) and
        ensure the category exists in the database; returns {'Game': ...}."""
        url = f"https://api.twitch.tv/kraken/channels/{self.channel_id}"
        headers = {"Client-ID": cfg_owner["client_id"], "Accept": "application/vnd.twitchtv.v5+json"}
        resp = get(url, headers=headers).json()
        stream_info = {"Game": None}
        try:
            stream_info["Game"] = resp["game"]
            if stream_info["Game"] is not None:
                _ = db.check_category_exist_or_create(stream_info["Game"], "tbd", "tbd", "tbd", None)
        except Exception:
            # Best-effort: API/shape errors leave stream_info["Game"] as None.
            pass
        finally:
            return stream_info
    def get_chatroom_info(self):
        # {'_links': {}, 'chatter_count': 5, 'chatters': {'broadcaster': ['technik_tueftler'], 'vips': [], 'moderators': [], 'staff': [], 'admins': [], 'global_mods': [], 'viewers': ['carbob14xyz', 'dialogiktv', 'kopfsalto1337', 'streamelements']}}
        url = f"https://tmi.twitch.tv/group/user/{self.owner}/chatters"
        return get(url).json()
    def get_extern_channel_info(self, user):
        # Look up another user's profile via the legacy Kraken API.
        url = f"https://api.twitch.tv/kraken/users?login={user}"
        headers = {"Client-ID": self.CLIENT_ID, "Accept": "application/vnd.twitchtv.v5+json"}
        return get(url, headers=headers).json()
if __name__ == "__main__":
    # `bot` must be a module-level global: the handlers above reference it directly.
    bot = Bot()
    bot.start()
| 4,748 |
BensPractice/Practise2.py
|
openUniverse/singularity
| 0 |
2171544
|
# # 1. Define a function max() that takes two numbers as arguments and returns the largest of them.
# # Use the if-then-else construct available in Python.
# # (It is true that Python has the max() function built in, but writing it yourself is nevertheless a good exercise.)
#
# def max (a, b):
# if a>b:
# return a
# else:
# return b
#
# print(max(8, 11))
#
# # 6. Define a function sum() and a function multiply() that sums and multiplies (respectively) all the numbers in a list of numbers.
# # For example, sum([1, 2, 3, 4]) should return 10, and multiply([1, 2, 3, 4]) should return 24.
#
# # n+=x means store n + x in n (means n = n + x)
# # n*=x means store n * x in n
# # = is not equals to it is store in
#
# NumList=[1, 2, 3, 4]
#
# def sum (list):
# n=0
# for element in list:
# n+= element
# return n
# print (sum(NumList))
#
# def mult (list):
# n=1
# for element in list:
# n*= element
# return n
# print (mult(NumList))
# 7. Define a function reverse() that computes the reversal of a string (a string is a list of characters).
# For example, reverse("I am testing") should return the string "gnitset ma I". (Strings never need a helper to be reversed;
# to do so, however, you can simply write "snake kobra"[::-1]
# Reversing with slice notation: [::-1] steps backwards through the sequence.
print ("snake kobra" [::-1])
pokemon = "snake kobra"
print (pokemon [::-1])
#the following is a more complicated method to teach what each individual thing in it means
def reverse(list):
    """Return a string containing the elements of *list* in reverse order.

    Works on any sequence of strings (including a plain str, which is a
    sequence of 1-character strings).
    """
    reversed_chars = []
    # Walk the indices from the last element down to the first.
    for position in range(len(list) - 1, -1, -1):
        reversed_chars.append(list[position])
    # Concatenate the collected elements into a single string.
    return "".join(reversed_chars)
# PList= "snake kobra"
#
# print (PList.reverse ())
#
# # def reverse(list):
# # length = len(list) #len will get the length as a number
# # RevList = [] #Creates a new, empty list
# # for element in range(0, length): #creates a new list (x, y) from x to y
# # tempIndex = (length-1) - element
# # RevList.apend(list(tempIndex))
# # return "".join(RevList)
#Splitting Practise
| 2,825 |
BFS/2178.py
|
kjh9267/BOJ_Python
| 0 |
2168962
|
def bfs(graph,visit):
    """Level-by-level BFS over a 1-based grid maze.

    graph[r][c] == 1 (0-based indices) marks a passable cell. The search
    starts at cell [1, 1], expands one level (one step) per outer iteration,
    marks cells in *visit*, and returns the number of levels processed.
    Relies on the module-level globals ``n`` and ``m`` for the goal cell.
    """
    queue = [[1,1]]
    level = []
    cnt = 0
    # BUGFIX: the original used `len(queue) is not 0` / `len(queue) is 0`.
    # `is` compares object identity, not value; it only "worked" via CPython's
    # small-int cache and emits a SyntaxWarning on Python 3.8+. Use value
    # comparisons instead (behavior is otherwise unchanged).
    while len(queue) != 0:
        for i in range(len(queue)):
            visit[queue[i][0]-1][queue[i][1]-1] = 1
            # NOTE(review): this `break` only leaves the inner for-loop; the
            # outer while still terminates because no further cells are enqueued.
            if [n, m] in queue:
                break
            if queue[i][0] - 1 >= 1:  # neighbor above
                if graph[queue[i][0]-2][queue[i][1]-1] == 1 and visit[queue[i][0]-2][queue[i][1]-1] == 0 and [queue[i][0] - 1,queue[i][1]] not in level:
                    level.append([queue[i][0] - 1,queue[i][1]])
            if queue[i][1] - 1 >= 1:  # neighbor to the left
                if graph[queue[i][0]-1][queue[i][1]-2] == 1 and visit[queue[i][0]-1][queue[i][1]-2] == 0 and [queue[i][0],queue[i][1]-1] not in level:
                    level.append([queue[i][0],queue[i][1]-1])
            if queue[i][0] + 1 <= len(graph):  # neighbor below
                if graph[queue[i][0]][queue[i][1]-1] == 1 and visit[queue[i][0]][queue[i][1]-1] == 0 and [queue[i][0] + 1,queue[i][1]] not in level:
                    level.append([queue[i][0] + 1,queue[i][1]])
            if queue[i][1] + 1 <= len(graph[0]):  # neighbor to the right
                if graph[queue[i][0]-1][queue[i][1]] == 1 and visit[queue[i][0]-1][queue[i][1]] == 0 and [queue[i][0],queue[i][1]+1] not in level:
                    level.append([queue[i][0],queue[i][1]+1])
        # Advance to the next BFS level.
        queue = []
        if len(queue) == 0:
            queue = level
            level = []
        cnt += 1
    return cnt
# NOTE: Python 2 syntax (raw_input, print statement, lazy map rows).
# Reads the maze dimensions, then n rows of digits, and prints the BFS depth.
n, m = map(int,raw_input().split())
graph = [map(int,list(raw_input())) for i in range(n)]
visit = [[0 for j in range(m)] for i in range(n)]
print bfs(graph,visit)
| 1,554 |
constructutils/misc.py
|
shiftinv/construct-utils
| 0 |
2171704
|
import inspect
import contextlib
import collections
from enum import Enum
from construct import \
Adapter, Subconstruct, Container, ListContainer, Prefixed, Switch, ConstructError, MappingError, Construct, \
StopFieldError, StreamError, \
stream_tell, stream_seek
from typing import Any, Dict, Iterator, TypeVar, Union, List, Tuple, IO, Type, OrderedDict
from .noemit import NoEmitMixin
from .rawcopy import RawCopyBytes
class DictZipAdapter(Adapter):
    '''
    Adapter for joining a predefined list of keys with a parsed list of values.
    Subconstruct must parse to a :class:`ListContainer`, e.g. :class:`Array` or :class:`Sequence`;
    build input must be an :class:`OrderedDict`.
    '''
    def __init__(self, keys: Union[List[str], Tuple[str, ...]], subcon: Construct):
        super().__init__(subcon)
        self.keys = keys
    def _decode(self, obj: ListContainer, context: Container, path: str) -> Container:
        # Values are matched positionally to the fixed key list.
        assert isinstance(obj, ListContainer)
        assert len(self.keys) == len(obj)
        container = Container(zip(self.keys, obj))
        # copy attributes set by :class:`AttributeRawCopy`
        for k, v in obj.__dict__.items():
            if isinstance(v, RawCopyBytes):
                setattr(container, k, v)
        return container
    def _encode(self, obj: OrderedDict[str, Any], context: Container, path: str) -> List[Any]:
        # Building relies on the dict's insertion order matching self.keys.
        assert isinstance(obj, collections.OrderedDict)
        values = list(obj.values())
        assert len(self.keys) == len(values)
        return values
class EnumConvert(Subconstruct):
    '''
    Similar to :class:`construct.Enum`, but more restrictive regarding the input/output types.
    Parsing and building will both return an instance of the provided enum type
    (cf. :class:`construct.Enum`, where building will return the built subcon value instead of the enum value)
    '''
    def __init__(self, subcon: Construct, enum: Type[Enum]):
        if not issubclass(enum, Enum):
            raise MappingError(f'enum parameter must be of type `Enum` (not {type(enum).__name__})')
        super().__init__(subcon)
        self.enum = enum
        self.decmapping = {e.value: e for e in self.enum}  # raw value -> enum member
    def _parse(self, stream, context, path):
        # Parse the raw value, then require it to map onto an enum member.
        obj = super()._parse(stream, context, path)
        try:
            return self.decmapping[obj]
        except KeyError:
            raise MappingError(f'no `{self.enum.__name__}` mapping for value {obj!r}', path=path)
    def _build(self, obj, stream, context, path):
        # Only accept actual enum members; build their raw .value, return the member.
        if not isinstance(obj, self.enum):
            raise MappingError(f'expected `{self.enum.__name__}` value, got {obj!r}', path=path)
        super()._build(obj.value, stream, context, path)
        return obj
class StrictGreedyRange(Subconstruct):
    '''
    Similar to :class:`construct.GreedyRange`, but only returns collected values if a
    :class:`construct.StopFieldError` or :class:`construct.StreamError` occurred,
    and raises/forwards any other exceptions.
    Does *not* seek back to previous block on errors to avoid
    inconsistencies between seekable and non-seekable streams.
    '''
    def _parse(self, stream, context, path):
        # Parse subcon repeatedly until the data runs out.
        obj = ListContainer()
        try:
            while True:
                obj.append(self.subcon._parse(stream, context, path))
        except (StopFieldError, StreamError):
            # Expected end-of-data conditions: keep what was collected so far.
            # Any other exception propagates naturally (the former
            # `except Exception: raise` clause was a no-op and was removed).
            pass
        return obj
    def _build(self, obj, stream, context, path):
        # Building a strict greedy range is intentionally unsupported.
        raise NotImplementedError
#####
# switch stuff
#####
class SwitchKeyError(ConstructError):
    """Raised by :class:`SwitchNoDefault` when the switch key matches no case."""
    pass
class _DictNoDefault(Dict[Any, Any]):
    # Case dictionary whose .get() ignores the default argument and raises
    # instead, so Switch cannot silently fall through on an unknown key.
    def get(self, key, default=None):
        try:
            # drop default parameter
            return self[key]
        except KeyError:
            raise SwitchKeyError(f'unknown key for switch: {key!r}')
class SwitchNoDefault(NoEmitMixin, Switch):
    '''
    Similar to :class:`Switch`, but does not pass successfully if no case matches
    '''
    # (it's not pretty, but it's the easiest solution without having to copy and modify the code)
    def __init__(self, keyfunc, cases):
        # patch case dictionary to drop default parameter
        super().__init__(keyfunc, _DictNoDefault(cases))
    def _parse(self, stream, context, path):
        # _DictNoDefault.get raises SwitchKeyError without path info; attach it here.
        try:
            return super()._parse(stream, context, path)
        except SwitchKeyError as e:
            # re-raise error with path
            raise SwitchKeyError(e.args[0], path=path)
    def _build(self, obj, stream, context, path):
        try:
            return super()._build(obj, stream, context, path)
        except SwitchKeyError as e:
            # re-raise error with path
            raise SwitchKeyError(e.args[0], path=path)
#####
# stream stuff
#####
@contextlib.contextmanager
def seek_temporary(stream: IO[Any], path: str, offset: int) -> Iterator[None]:
    '''
    Context manager which seeks to the specified offset on entering,
    and seeks back to the original offset on exit.

    The restore seek runs in a ``finally`` block so the original position is
    recovered even when the managed block raises.
    '''
    fallback = stream_tell(stream, path)
    stream_seek(stream, offset, 0, path)
    try:
        yield
    finally:
        # BUGFIX: without try/finally, an exception inside the `with` body
        # would skip this restore seek and leave the stream mispositioned.
        stream_seek(stream, fallback, 0, path)
def get_offset_in_outer_stream(stream, context, path):
    '''
    Tries to calculate the current offset in the outermost stream by traversing the context tree.
    This is very likely to go completely wrong in many configurations;
    right now it takes streams in other contexts and the :class:`Prefixed` type into account.
    '''
    offset = stream_tell(stream, path)
    # collect offsets of enclosing streams by walking up the tree
    prev_stream = stream
    for c in iter_context_tree(context):
        curr_stream = getattr(c, '_io', None)
        if curr_stream is None:
            break
        # add to offset if stream changed
        if curr_stream is not prev_stream:
            offset += stream_tell(curr_stream, path)
            prev_stream = curr_stream
    # the Prefixed type writes the length _after_ building the subcon (which makes sense),
    # but that also means that the current data will be written at [current offset] + [size of length field],
    # which has to be taken into account as the stream's offset doesn't include the length field yet
    # NOTE(review): heuristic — assumes every active Prefixed._build frame on
    # the interpreter stack contributes exactly one pending length field.
    stack = inspect.stack()
    try:
        for info in stack:
            if info.function != '_build':
                continue
            local_self = info.frame.f_locals.get('self')
            if isinstance(local_self, Prefixed):
                offset += local_self.lengthfield.sizeof()
    finally:
        del stack  # just to be safe, see https://docs.python.org/3/library/inspect.html#the-interpreter-stack
    return offset
#####
# context stuff
#####
def iter_context_tree(context: Container) -> Iterator[Container]:
    """Yield *context* and then each of its ancestors (followed via the ``_``
    attribute), stopping at a context with no parent or a self-reference."""
    current = context
    yield current
    while True:
        parent = getattr(current, '_', current)
        if parent is current:
            # No `_` attribute, or a self-referencing root: stop climbing.
            break
        current = parent
        yield current
    # Expose the root as the generator's StopIteration value as well.
    return current
def get_root_context(context: Container) -> Container:
    '''
    Returns the topmost/root context relative to a provided context
    '''
    root = context
    # Exhaust the ancestor chain; the last yielded element is the root.
    for root in iter_context_tree(context):
        pass
    return root
def get_root_stream(context: Container) -> IO[Any]:
    '''
    Returns the outermost IO/stream relative to a provided context
    '''
    outermost = None
    # Remember the last `_io` seen while climbing towards the root.
    for node in iter_context_tree(context):
        node_io = getattr(node, '_io', None)
        if node_io is not None:
            outermost = node_io
    assert outermost is not None
    return outermost
_TGlobal = TypeVar('_TGlobal')

def context_global(context: Container, name: str, default: _TGlobal) -> _TGlobal:
    '''
    Returns a context-global value, creating it if it doesn't exist yet
    '''
    root = get_root_context(context)
    # First access initializes the slot with the provided default.
    if not hasattr(root, name):
        setattr(root, name, default)
    return getattr(root, name)
| 7,978 |
rest/rest_server.py
|
olafayomi/srv6-controller
| 13 |
2170896
|
#!/usr/bin/python
from optparse import OptionParser
from pyroute2 import IPRoute
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from collections import namedtuple
from urlparse import parse_qs
import logging
import time
import json
import socket
import ssl
# Global variables definition
# Server reference
rest_server = None
# Netlink socket
ip_route = None
# Cache of the resolved interfaces
interfaces = ['eth0']
idxs = {}
# logger reference
logger = logging.getLogger(__name__)
# Server ip/ports
REST_IP = "::"
REST_PORT = 8080
# Debug option
SERVER_DEBUG = False
# SRv6 base path
SRV6_BASE_PATH = "/srv6-explicit-path"
# HTTP utilities (204 No Content signals success without a response body)
ResponseStatus = namedtuple("HTTPStatus", ["code", "message"])
ResponseData = namedtuple("ResponseData", ["status"])
HTTP_STATUS = {"OK": ResponseStatus(code=204, message="OK"),
               "NOT_FOUND": ResponseStatus(code=404, message="Not found")}
PUT = "PUT"
DELETE = "DELETE"
# SRv6 mapping: REST vocabulary -> pyroute2/netlink argument names
OP = {
    "create":"add",
    "remove":"del",
    "destination":"dst",
    "device":"dev",
    "encapmode":"encapmode",
    "segments":"segs"
}
# SSL certificate
CERTIFICATE = 'cert_server.pem'
# HTTP utilities class
class HTTPUtils:
    """ Class containing utilities method for HTTP processing """
    """
    SRv6 explicit path configuration example
    POST /srv6-explicit-path?operation={create|remove}
    {
        "paths": [
        {
            "device": "eth0",
            "destination": "fc00:db20:35b:7399::5/128",
            "encapmode": "inline",
            "segments": [
                "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"
            ]
        },
        {
            "device": "eth0",
            "destination": "fdf8:f53e:61e4::18/128",
            "encapmode": "encap",
            "segments": [
                "fc00:db20:35b:7399::5",
                "333fc00:e968:6179::de52:7100",
                "3333:fc00:e968:6179::de52:7100"
            ]
        }
        ]
    }
    """
    @staticmethod
    def get_srv6_p(http_path):
        """Translate one REST path dict into pyroute2 keyword names via OP."""
        # Init steps
        path = {}
        # Get srv6 path (NOTE: .iteritems() is Python 2 only)
        for k,v in http_path.iteritems():
            # Translating key and saving values
            path[OP[k]] = v
        return path
    @staticmethod
    def get_srv6_ep(request, query):
        """Parse the POST body + query string into {'operation': ..., 'paths': [...]}."""
        # Init steps
        msg = {}
        # Get operation type
        op_type = OP[query['operation'][0]]
        # Let's parse paths
        length = int(request.headers['Content-Length'])
        http_data = request.rfile.read(length)
        http_data = json.loads(http_data)
        # Get paths
        paths = []
        http_paths = http_data['paths']
        for http_path in http_paths:
            paths.append(HTTPUtils.get_srv6_p(http_path))
        # Finally let's fill the python dict
        msg['operation'] = op_type
        msg['paths'] = paths
        return msg
class HTTPv6Server(HTTPServer):
    # Plain HTTPServer bound to an IPv6 socket.
    address_family = socket.AF_INET6
class SRv6HTTPv6Server(ThreadingMixIn, HTTPv6Server):
    """An HTTP Server that handles each srv6-explicit-path request using a new thread"""
    # Worker threads die with the main process instead of blocking shutdown.
    daemon_threads = True
class SRv6HTTPRequestHandler(BaseHTTPRequestHandler):
    """"HTTP 1.1 SRv6 request handler"""
    protocol_version = "HTTP/1.1"
    def setup(self):
        # Unbuffered writes and TCP_NODELAY for low-latency responses.
        self.wbufsize = -1
        self.disable_nagle_algorithm = True
        BaseHTTPRequestHandler.setup(self)
    def send_headers(self, status):
        # Send proper HTTP headers
        self.send_response(status.code, status.message)
        self.end_headers()
    def do_POST(self):
        """Handle POST /srv6-explicit-path?operation={create|remove}: translate
        the JSON body into netlink route add/del calls with seg6 encapsulation."""
        # Extract values from the query string
        path, _, query_string = self.path.partition('?')
        query = parse_qs(query_string)
        # Handle post requests
        if path == SRV6_BASE_PATH:
            srv6_config = HTTPUtils.get_srv6_ep(self, query)
            """
            {
                "paths": [
                {
                    "dev": "eth0",
                    "dst": "fc00:db20:35b:7399::5/128",
                    "encapmode": "inline",
                    "segs": [
                        "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"
                    ]
                },
                {
                    "dev": "eth0",
                    "dst": "3333:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/128",
                    "encapmode": "encap",
                    "segs": [
                        "fc00:db20:35b:7399::5",
                        "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b",
                        "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"
                    ]
                }
                ]
            }
            """
            logger.debug("config received:\n%s", json.dumps(srv6_config, indent=2, sort_keys=True))
            # Let's push the routes
            for path in srv6_config["paths"]:
                ip_route.route(srv6_config["operation"], dst=path['dst'], oif=idxs[path['dev']],
                               encap={'type':'seg6', 'mode':path['encapmode'], 'segs':path['segs']})
            # and create the response
            response = ResponseData(status=HTTP_STATUS["OK"])
        else:
            # Unexpected paths
            logger.info("not supported yet")
            response = ResponseData(status=HTTP_STATUS["NOT_FOUND"])
        # Done, send back the respons
        self.send_headers(response.status)
# Start HTTP/HTTPS server
def start_server(secure):
    """Create the threaded IPv6 HTTP(S) server and netlink socket, resolve the
    configured interfaces, then block in serve_forever()."""
    # Configure Server listener and ip route
    global rest_server, ip_route
    # Setup server
    if rest_server is not None:
        logger.error("HTTP/HTTPS Server is already up and running")
    else:
        rest_server = SRv6HTTPv6Server((REST_IP, REST_PORT), SRv6HTTPRequestHandler)
        # If secure let's protect the socket with ssl
        if secure:
            rest_server.socket = ssl.wrap_socket(rest_server.socket, certfile=CERTIFICATE,
                                                 server_side=True)
    # Setup ip route
    if ip_route is not None:
        logger.error("IP Route is already setup")
    else:
        ip_route = IPRoute()
    # Resolve the interfaces to their netlink indexes once, up front.
    for interface in interfaces:
        idxs[interface] = ip_route.link_lookup(ifname=interface)[0]
    # Start the loop for REST
    logger.info("Listening %s" %("HTTPS" if secure else "HTTP"))
    rest_server.serve_forever()
# Parse options
def parse_options():
    """Parse CLI flags, configure logging, and return True for secure (HTTPS) mode.

    Side effects: updates the module-level REST_PORT (443 in secure mode) and
    SERVER_DEBUG flags and configures the root logger level.
    """
    # BUGFIX: SERVER_DEBUG was assigned without a `global` declaration, which
    # created a shadowing local and left the module-level flag untouched.
    global REST_PORT, SERVER_DEBUG
    parser = OptionParser()
    parser.add_option("-d", "--debug", action="store_true", help="Activate debug logs")
    parser.add_option("-s", "--secure", action="store_true", help="Activate secure mode")
    # Parse input parameters
    (options, args) = parser.parse_args()
    # Setup properly the logger
    if options.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    SERVER_DEBUG = logger.getEffectiveLevel() == logging.DEBUG
    logger.info("SERVER_DEBUG:" + str(SERVER_DEBUG))
    # Return secure/insecure mode
    if options.secure:
        REST_PORT = 443
        return True
    return False
if __name__ == "__main__":
    # Entry point: choose HTTP/HTTPS from the CLI flags, then serve forever.
    secure = parse_options()
    start_server(secure)
| 6,518 |
src/pyrtable/exceptions.py
|
vilarneto/pyrtable
| 6 |
2172126
|
class RequestError(Exception):
    """Error raised for failed API requests.

    The second positional parameter is kept named ``type`` for caller
    compatibility, even though it shadows the builtin; its value is exposed
    as the ``type`` attribute.
    """
    def __init__(self, message, type):
        super().__init__(message)
        self.type = type
__all__ = ['RequestError']
| 158 |
app/fedgraphnn/social_networks_graph_clf/trainer/gin_trainer.py
|
ray-ruisun/FedML
| 0 |
2171649
|
import logging
import numpy as np
import torch
import wandb
from fedml.core.alg_frame.client_trainer import ClientTrainer
class GINSocialNetworkTrainer(ClientTrainer):
    """FedML client trainer for GIN graph classification on social-network data."""
    def get_model_params(self):
        # Return CPU-resident weights so they can be serialized for aggregation.
        return self.model.cpu().state_dict()
    def set_model_params(self, model_parameters):
        logging.info("set_model_params")
        self.model.load_state_dict(model_parameters)
    def train(self, train_data, device, args):
        """Train locally for args.epochs, periodically evaluating on the
        client's test split (if present); returns (best test accuracy, best weights)."""
        model = self.model
        model.to(device)
        model.train()
        val_data, test_data = None, None
        # Optional splits: missing attributes simply leave these as None.
        try:
            val_data = self.val_data
            test_data = self.test_data
        except:
            pass
        if args.client_optimizer == "sgd":
            optimizer = torch.optim.SGD(
                filter(lambda p: p.requires_grad, model.parameters()),
                lr=args.learning_rate,
                weight_decay=args.weight_decay,
            )
        else:
            optimizer = torch.optim.Adam(
                filter(lambda p: p.requires_grad, model.parameters()),
                lr=args.learning_rate,
                weight_decay=args.weight_decay,
            )
        # Track the best test accuracy and the weights that produced it.
        max_test_acc = 0
        best_model_params = {}
        for epoch in range(args.epochs):
            ngraphs = 0
            acc_sum = 0
            for idx_batch, batch in enumerate(train_data):
                batch.to(device)
                optimizer.zero_grad()
                pred = model(batch)
                label = batch.y
                acc_sum += pred.max(dim=1)[1].eq(label).sum().item()
                loss = model.loss(pred, label)
                loss.backward()
                optimizer.step()
                ngraphs += batch.num_graphs
            acc = acc_sum / ngraphs
            # if val_data:
            #     acc_v, _ = self.test(val_data, device)
            if ((idx_batch + 1) % args.frequency_of_the_test == 0) or (
                idx_batch == len(train_data) - 1
            ):
                if test_data is not None:
                    test_acc, _ = self.test(self.test_data, device)
                    # NOTE(review): this message is labeled "Test accuracy" but
                    # prints the running *training* accuracy `acc` — confirm intended.
                    print(
                        "Epoch = {}, Iter = {}/{}: Test accuracy = {}".format(
                            epoch, idx_batch + 1, len(train_data), acc
                        )
                    )
                    if test_acc > max_test_acc:
                        max_test_acc = test_acc
                        best_model_params = {
                            k: v.cpu() for k, v in model.state_dict().items()
                        }
                    print("Current best = {}".format(max_test_acc))
        return max_test_acc, best_model_params
    def test(self, test_data, device):
        """Evaluate graph-classification accuracy over *test_data*;
        returns (accuracy, model)."""
        logging.info("----------test--------")
        model = self.model
        model.eval()
        model.to(device)
        acc_sum = 0.0
        ngraphs = 0
        for batch in test_data:
            batch.to(device)
            with torch.no_grad():
                pred = model(batch)
                label = batch.y
                acc_sum += pred.max(dim=1)[1].eq(label).sum().item()
            ngraphs += batch.num_graphs
        return acc_sum / ngraphs, model
    def test_on_the_server(
        self, train_data_local_dict, test_data_local_dict, device, args=None
    ) -> bool:
        """Server-side evaluation: test on every client's local split and log
        per-client plus average accuracy."""
        logging.info("----------test_on_the_server--------")
        model_list, score_list = [], []
        for client_idx in test_data_local_dict.keys():
            test_data = test_data_local_dict[client_idx]
            score, model = self.test(test_data, device)
            # for idx in range(len(model_list)):
            #     self._compare_models(model, model_list[idx])
            model_list.append(model)
            score_list.append(score)
            logging.info("Client {}, Test accuracy = {}".format(client_idx, score))
            if args.enable_wandb:
                wandb.log({"Client {} Test/accuracy".format(client_idx): score})
        avg_score = np.mean(np.array(score_list))
        logging.info("Test accuracy = {}".format(avg_score))
        if args.enable_wandb:
            wandb.log({"Test/accuracy": avg_score})
        return True
    def _compare_models(self, model_1, model_2):
        # Debug helper: count mismatching parameter tensors between two models.
        models_differ = 0
        for key_item_1, key_item_2 in zip(
            model_1.state_dict().items(), model_2.state_dict().items()
        ):
            if torch.equal(key_item_1[1], key_item_2[1]):
                pass
            else:
                models_differ += 1
                if key_item_1[0] == key_item_2[0]:
                    # NOTE(review): logging.info uses %-style lazy args; this
                    # message has no placeholder, so key_item_1[0] is dropped /
                    # triggers a formatting error inside logging — confirm.
                    logging.info("Mismatch found at", key_item_1[0])
                else:
                    raise Exception
        if models_differ == 0:
            logging.info("Models match perfectly! :)")
| 4,791 |
tasks/api/urls.py
|
Roman673/django-task-list
| 0 |
2171065
|
from django.urls import path
from .views import (
TaskListAPIView,
TaskCreateAPIView,
TaskDestroyAPIView,
TaskRetrieveAPIView,
TaskUpdateAPIView,
)
# URL namespace for reversing, e.g. reverse('tasks-api:task-detail', args=[pk]).
app_name = 'tasks-api'
urlpatterns = [
    # CRUD endpoints for Task objects, keyed by primary key where needed.
    path('', TaskListAPIView.as_view(), name="tasks-list"),
    path('create/', TaskCreateAPIView.as_view(), name="task-create"),
    path('<int:pk>/detail/', TaskRetrieveAPIView.as_view(), name="task-detail"),
    path('<int:pk>/update/', TaskUpdateAPIView.as_view(), name="task-update"),
    path('<int:pk>/delete/', TaskDestroyAPIView.as_view(), name="task-delete"),
]
| 586 |
tests/test_applications.py
|
theblackfly/protoflow
| 3 |
2171198
|
"""ProtoFlow layers test suite."""
import io
import unittest
import unittest.mock

import numpy as np

import protoflow as pf
class TestGLVQ(unittest.TestCase):
    """Smoke tests for the GLVQ application model."""
    def setUp(self):
        ndata = 100
        nclasses = 5
        input_dim = 10
        self.model = pf.applications.GLVQ(nclasses=nclasses,
                                          input_dim=input_dim,
                                          prototypes_per_class=3)
        self.x = np.random.rand(ndata, input_dim)
        self.y = np.random.randint(0, nclasses, size=(ndata, ))
    def test_prototype_distribution(self):
        # 5 classes x 3 prototypes per class.
        self.assertEqual(self.model.prototype_layer.prototype_distribution,
                         [3, 3, 3, 3, 3])
    # NOTE(review): unittest.mock is a submodule that `import unittest` alone
    # does not load; ensure `import unittest.mock` exists at module level.
    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_summary(self, stdout):
        self.model.summary()
        summary_string = stdout.getvalue()
        stdout.close()
        self.assertIn("Trainable params:", summary_string)
    def test_compile_and_fit(self):
        self.model.compile(optimizer='adam')
        self.model.fit(self.x, self.y, batch_size=64)
    def tearDown(self):
        del self.model
        del self.x
        del self.y
class TestGMLVQ(unittest.TestCase):
    """Smoke tests for the GMLVQ application model (adds a learned mapping)."""
    def setUp(self):
        ndata = 100
        nclasses = 5
        input_dim = 10
        mapping_dim = 2
        self.model = pf.applications.GMLVQ(nclasses=nclasses,
                                           input_dim=input_dim,
                                           mapping_dim=mapping_dim,
                                           prototypes_per_class=3)
        self.x = np.random.rand(ndata, input_dim)
        self.y = np.random.randint(0, nclasses, size=(ndata, ))
    def test_prototype_distribution(self):
        # 5 classes x 3 prototypes per class.
        self.assertEqual(self.model.prototype_layer.prototype_distribution,
                         [3, 3, 3, 3, 3])
    # NOTE(review): unittest.mock requires an explicit module-level import.
    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_summary(self, stdout):
        self.model.summary()
        summary_string = stdout.getvalue()
        stdout.close()
        self.assertIn("Trainable params:", summary_string)
    def test_compile_and_fit(self):
        self.model.compile(optimizer='adam')
        self.model.fit(self.x, self.y, batch_size=64)
    def tearDown(self):
        del self.model
        del self.x
        del self.y
class TestDeepLVQ(unittest.TestCase):
    """Smoke tests for the DeepLVQ application model (adds hidden layers)."""
    def setUp(self):
        ndata = 100
        nclasses = 5
        input_dim = 10
        self.model = pf.applications.DeepLVQ(nclasses=nclasses,
                                             input_dim=input_dim,
                                             hidden_units=[1, 2],
                                             prototypes_per_class=3)
        self.x = np.random.rand(ndata, input_dim)
        self.y = np.random.randint(0, nclasses, size=(ndata, ))
    def test_prototype_distribution(self):
        # 5 classes x 3 prototypes per class.
        self.assertEqual(self.model.prototype_layer.prototype_distribution,
                         [3, 3, 3, 3, 3])
    # NOTE(review): unittest.mock requires an explicit module-level import.
    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_summary(self, stdout):
        self.model.summary()
        summary_string = stdout.getvalue()
        stdout.close()
        self.assertIn("Trainable params:", summary_string)
    def test_compile_and_fit(self):
        self.model.compile(optimizer='adam')
        self.model.fit(self.x, self.y, batch_size=64)
    def tearDown(self):
        del self.model
        del self.x
        del self.y
if __name__ == '__main__':
    # Allow running this suite directly: python test_applications.py
    unittest.main()
| 3,527 |
algorithms_and_data_structures_for_programming_contest/chap12-5/answer.py
|
toku345/practice
| 2 |
2171964
|
#!/usr/bin/env python3
NIL: int = -1  # marker for an unvisited / uncolored vertex

class DFS():
    """Connected-component labelling via iterative depth-first search.

    Every vertex receives a component id ("color"); two vertices can reach
    each other exactly when they share the same color.
    """
    def __init__(self, n, G, debug=False):
        self.n = n              # number of vertices
        self.G = G              # adjacency lists, G[u] = neighbors of u
        self.color = [NIL] * n  # component id per vertex (NIL = unvisited)
        self.debug = debug
        self._debug_print_G()
        self._debug_print_color()

    def assign_color(self):
        """Label every vertex with its component id (1, 2, ...)."""
        component_id = 1
        for vertex in range(self.n):
            if self.color[vertex] == NIL:
                self._dfs(vertex, component_id)
                component_id += 1
        self._debug_print_color()

    def can_reach(self, s, t) -> ():
        """Print 'yes' if s and t lie in the same component, else 'no'."""
        print('yes' if self.color[s] == self.color[t] else 'no')

    def _dfs(self, r: int, c: int):
        """Iterative DFS from root r, coloring every reached vertex with c."""
        stack = [r]
        self.color[r] = c
        while stack:
            u = stack.pop()
            for v in self.G[u]:
                if self.color[v] == NIL:
                    self.color[v] = c
                    stack.append(v)

    def _debug_print_G(self):
        # Dump the adjacency lists when debugging is enabled.
        if not self.debug:
            return
        for i in range(self.n):
            print(f'{i}: {self.G[i]}')

    def _debug_print_color(self):
        # Dump the current color assignment when debugging is enabled.
        if not self.debug:
            return
        print(f'color: {self.color}')
def main():
    """Read an undirected graph and reachability queries from stdin; print
    'yes'/'no' per query."""
    n, m = [int(char) for char in input().split()]
    G = [[] for _ in range(n)] # list of n adjacency lists
    for _ in range(m):
        s, t = [int(char) for char in input().split()]
        G[s].append(t)
        G[t].append(s)
    dfs = DFS(n, G)
    dfs.assign_color()
    q = int(input())
    for _ in range(q):
        s, t = [int(char) for char in input().split()]
        dfs.can_reach(s, t)
if __name__ == '__main__':
    main()
| 1,724 |
simplesynth/synth.py
|
OdysseasKr/simple-synth
| 0 |
2170559
|
from abc import ABC
import numpy as np
import itertools
from synthplayer.oscillators import Sine, Triangle, Square, SquareH, Sawtooth
from synthplayer.oscillators import Pulse, WhiteNoise, Semicircle, MixingFilter
from synthplayer.oscillators import EnvelopeFilter
from synthplayer import params
from .filters import LowPassFilter
# Waveform choices for each oscillator; osc 2 additionally offers WhiteNoise.
osc_1_options = {
    'Sine': Sine,
    'Triangle': Triangle,
    'Square': Square,
    'SquareH': SquareH,
    'Sawtooth': Sawtooth,
    'Pulse': Pulse,
    'Semicircle': Semicircle
}
osc_2_options = {
    'Sine': Sine,
    'Triangle': Triangle,
    'Square': Square,
    'SquareH': SquareH,
    'Sawtooth': Sawtooth,
    'Pulse': Pulse,
    'WhiteNoise': WhiteNoise,
    'Semicircle': Semicircle
}
class Synth(ABC):
    """Two-oscillator subtractive synth: oscillator mix -> ADSR envelope ->
    low-pass filter.

    Parameters are set with :meth:`set_parameters`; a rendered sound is
    obtained with :meth:`get_sound_array`.
    """
    def __init__(self, sr=44100):
        """
        Parameters
        ----------
        sr : int
            Samplerate used for the resulting sounds
        """
        self.sr = sr
        self.set_parameters()
    def set_parameters(self, **kwargs):
        """Sets the parameters of the synth; omitted keys keep their defaults."""
        self._check_parameters_values(kwargs)
        self.osc_1_name = kwargs.get('osc_1', 'Sine')
        self.osc_1 = osc_1_options[self.osc_1_name]
        self.osc_2_name = kwargs.get('osc_2', 'Sine')
        self.osc_2 = osc_2_options[self.osc_2_name]
        self.mix = kwargs.get('mix', 0.5)
        self.phase_1 = kwargs.get('phase_1', 0)
        self.attack = kwargs.get('attack', 0)
        self.decay = kwargs.get('decay', 0)
        self.sustain = kwargs.get('sustain', 1)
        self.sustain_level = kwargs.get('sustain_level', 1)
        self.release = kwargs.get('release', 0)
        self.cutoff = kwargs.get('cutoff', 10000)
    def _check_parameters_values(self, kwargs):
        """Validate the raw keyword arguments; raises AssertionError on bad values."""
        if kwargs.get('osc_1', 'Sine') not in osc_1_options:
            raise AssertionError('Invalid shape for osc 1')
        if kwargs.get('osc_2', 'Sine') not in osc_2_options:
            raise AssertionError('Invalid shape for osc 2')
        if not 0 <= kwargs.get('mix', 0.5) <= 1:
            raise AssertionError('Parameter `mix` should be in the range [0,1]')
        # BUGFIX: this check previously read the key 'phase', but
        # set_parameters() consumes 'phase_1', so the phase was never validated.
        if not 0 <= kwargs.get('phase_1', 0) <= 0.5:
            raise AssertionError('Parameter `phase_1` should be in the range [0,0.5]')
        if kwargs.get('attack', 0) < 0 or kwargs.get('decay', 0) < 0 or kwargs.get('sustain', 0) < 0 or kwargs.get('release', 0) < 0:
            raise AssertionError('ADSR parameters should be >= 0')
        if not 0 <= kwargs.get('sustain_level', 0) <= 1:
            raise AssertionError('Parameter `sustain_level` should be in the range [0,1]')
    def get_parameters(self):
        """Returns a dict with the current paramters"""
        # NOTE(review): ADSR values (attack/decay/sustain/release) are not
        # reported here even though set_parameters accepts them — confirm intended.
        return {
            'osc_1': self.osc_1_name,
            'osc_2': self.osc_2_name,
            'mix': self.mix,
            'phase_1': self.phase_1,
            'cutoff': self.cutoff,
        }
    def _get_raw_data_from_obj(self, obj, duration):
        # Pull `duration` seconds worth of fixed-size sample blocks and flatten.
        num_blocks = int(self.sr*duration//params.norm_osc_blocksize)
        tmp = np.array(list(itertools.islice(obj.blocks(), num_blocks)))
        return tmp.flatten()
    def _hookup_modules(self, note):
        """Creates oscillators with the correct parameters pipeline"""
        osc1 = self.osc_1(note,
                          amplitude=1 - self.mix,
                          phase=self.phase_1,
                          samplerate=self.sr)
        osc2 = self.osc_2(note,
                          amplitude=self.mix,
                          samplerate=self.sr)
        mixer = MixingFilter(osc1, osc2)
        adsr = EnvelopeFilter(mixer,
                              attack=self.attack,
                              decay=self.decay,
                              sustain=self.sustain,
                              sustain_level=self.sustain_level,
                              release=self.release)
        self.out = LowPassFilter(adsr, cutoff=self.cutoff, samplerate=self.sr)
    def get_sound_array(self, note=440, duration=1):
        """Returns a sound for the set parameters
        Returns
        -------
        sound_array : np.ndarray
            The sound for the given note and duration
        """
        self._hookup_modules(note)
        return self._get_raw_data_from_obj(self.out, duration)
| 4,290 |
synthesize_blocks.py
|
aluo-x/shape2prog
| 109 |
2169194
|
from __future__ import print_function
import os
import h5py
import numpy as np
from programs.sample_blocks import sample_batch
def _sample_primitive_range(n_samples, type_offset=0):
    """Draw (shape, program) batches for consecutive primitive types.

    Parameters
    ----------
    n_samples : sequence of int
        Sample count for each consecutive primitive type.
    type_offset : int
        Added to the list index to form the primitive_type id: 0 for the
        basic types, 101 for the extended ones (originally spelled
        ``100 + i + 1``).

    Returns
    -------
    (list, list)
        Per-type data arrays and per-type program (label) arrays.
    """
    data, label = [], []
    for i, num in enumerate(n_samples):
        d, s = sample_batch(num=num, primitive_type=type_offset + i)
        data.append(d)
        label.append(s)
    return data, label


def synthesize_data():
    """
    synthesize the (block, program) pairs
    :return: train_shape, train_prog, val_shape, val_prog
    """
    # == training data ==
    # Per-type sample counts for basic primitive types 0..29.
    basic_counts = [5000,
                    30000, 5000, 5000, 5000, 10000,
                    5000, 5000, 5000, 5000, 30000,
                    15000, 30000, 8000, 5000, 30000,
                    15000, 6000, 6000, 6000, 10000,
                    30000, 10000, 6000, 6000, 6000,
                    30000, 40000, 30000, 10000]
    # Per-type sample counts for extended primitive types 101..115.
    extended_counts = [30000, 30000, 40000, 30000, 30000,
                       30000, 20000, 10000, 40000, 35000,
                       40000, 35000, 35000, 35000, 50000]
    data, label = _sample_primitive_range(basic_counts)
    data_ext, label_ext = _sample_primitive_range(extended_counts, type_offset=101)
    train_data = np.vstack(data + data_ext)
    train_label = np.vstack(label + label_ext)
    # == validation data ==
    # Fixed 640 samples per type for validation.
    data, label = _sample_primitive_range([640] * 30)
    data_ext, label_ext = _sample_primitive_range([640] * 15, type_offset=101)
    val_data = np.vstack(data + data_ext)
    val_label = np.vstack(label + label_ext)
    return train_data, train_label, val_data, val_label
if __name__ == '__main__':
    print('==> synthesizing (part, block_program) pairs')
    train_x, train_y, val_x, val_y = synthesize_data()
    print('Done')
    # Make sure the output directory exists before writing.
    if not os.path.isdir('./data'):
        os.makedirs('./data')
    train_file = './data/train_blocks.h5'
    val_file = './data/val_blocks.h5'
    print('==> saving data')
    # Context managers guarantee the HDF5 files are flushed and closed
    # even if a write fails (the files were previously leaked on error).
    with h5py.File(train_file, 'w') as f_train:
        f_train['data'] = train_x
        f_train['label'] = train_y
    with h5py.File(val_file, 'w') as f_val:
        f_val['data'] = val_x
        f_val['label'] = val_y
    print('Done')
| 2,196 |
lib/utils/trainer.py
|
VIP-Lab-UNIST/CUCPS_public
| 0 |
2171865
|
import time
import os.path as osp
import huepy as hue
import torch
from torch.nn.utils import clip_grad_norm_
from .logger import MetricLogger
from .misc import ship_data_to_cuda, warmup_lr_scheduler, resume_from_checkpoint
class Trainer():
    """Drives the training loop for a detection + re-identification model.

    Iterates epochs and batches, computes the weighted multi-task loss,
    steps the optimizer and LR schedulers, logs progress and writes a
    checkpoint under ``args.path`` after every epoch.
    """

    def __init__(self, args, model, train_loader, optimizer, lr_scheduler, device):
        # args: configuration namespace; hyper-parameters are read from
        # args.train.* (start_epoch, epochs, loss weights, disp_interval, ...).
        self.args = args
        self.model = model
        self.train_loader = train_loader
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        # device: target device used by ship_data_to_cuda.
        self.device = device

    def run(self):
        """Train from args.train.start_epoch up to args.train.epochs.

        Returns None; checkpoints and console logs are side effects.
        """
        print(hue.info(hue.bold(hue.green("Start training from %s epoch"%str(self.args.train.start_epoch)))))
        steps = 0
        for epoch in range(self.args.train.start_epoch, self.args.train.epochs):
            ## Initial epochs
            # Linear LR warm-up over the first epoch only (epoch == 1).
            if epoch == 1 and self.args.train.lr_warm_up:
                warmup_factor = 1. / 1000
                warmup_iters = len(self.train_loader) - 1
                sub_scheduler = warmup_lr_scheduler(self.optimizer, warmup_iters, warmup_factor)
            metric_logger = MetricLogger()
            for iteration, data in enumerate(self.train_loader):
                ## Initial iterations
                # Global step counter; drives display throttling below.
                steps = epoch*len(self.train_loader) + iteration
                if steps % self.args.train.disp_interval == 0:
                    start = time.time()
                # Load data
                images, targets = ship_data_to_cuda(data, self.device)
                # Pass data to model
                loss_dict = self.model(epoch, images, targets)
                # Total loss: weighted sum of RPN objectness/box, RCNN
                # box/classification and re-identification (OIM) losses.
                losses = self.args.train.w_RPN_loss_cls * loss_dict['loss_objectness'] \
                    + self.args.train.w_RPN_loss_box * loss_dict['loss_rpn_box_reg'] \
                    + self.args.train.w_RCNN_loss_bbox * loss_dict['loss_box_reg'] \
                    + self.args.train.w_RCNN_loss_cls * loss_dict['loss_detection'] \
                    + self.args.train.w_OIM_loss_oim * loss_dict['loss_reid']
                self.optimizer.zero_grad()
                losses.backward()
                # Optional gradient clipping (disabled when <= 0).
                if self.args.train.clip_gradient > 0:
                    clip_grad_norm_(self.model.parameters(), self.args.train.clip_gradient)
                self.optimizer.step()
                ## Post iterations
                if epoch == 1 and self.args.train.lr_warm_up:
                    sub_scheduler.step()
                if steps % self.args.train.disp_interval == 0:
                    # Print
                    loss_value = losses.item()
                    state = dict(loss_value=loss_value,
                                 lr=self.optimizer.param_groups[0]['lr'])
                    state.update(loss_dict)
                    # Update logger
                    batch_time = time.time() - start
                    metric_logger.update(batch_time=batch_time)
                    metric_logger.update(**state)
                    # Print log on console
                    metric_logger.print_log(epoch, iteration, len(self.train_loader))
                else:
                    # NOTE(review): this assignment is never read afterwards;
                    # it appears to be dead code.
                    state = None
            ## Post epochs
            self.lr_scheduler.step()
            # Even epochs keep a numbered checkpoint; odd epochs overwrite
            # a rolling "last" checkpoint.
            if epoch % 2 == 0:
                save_name = osp.join(self.args.path, 'checkpoint_epoch%d.pth'%epoch)
            else:
                save_name = osp.join(self.args.path, 'checkpoint_epoch_last.pth')
            torch.save({
                'epoch': epoch,
                'model': self.model.state_dict(),
                'optimizer': self.optimizer.state_dict(),
                'lr_scheduler': self.lr_scheduler.state_dict()
            }, save_name)
            print(hue.good('save model: {}'.format(save_name)))
        return None
| 3,770 |
nx.py
|
Evroc/ProjektTS2021
| 0 |
2171431
|
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import time
# ----------------------- slave automaton (CNC machining) -----------------------
# NOTE: node names and edge labels are Polish strings used as runtime graph
# keys — they must not be translated.
# Shared mutable colour lists: the draw functions append per-node colours
# here and clear them after each render (the options dicts below hold
# references to these lists).
color_map = []
color_map_sla = []
# Fixed layout positions for the slave-automaton nodes.
cords_sl = {"Obiekt w CNC": (0, 0),
            "Obrobka": (2, 0),
            "Odlozenie": (1, -1)
            }
# networkx draw() options for the slave graph.
options_sl = {
    'node_color': color_map_sla,
    'edge_color': 'red',
    'node_size': 800,
    'width': 2,
    'with_labels': True,
    'pos': cords_sl,
    'node_shape': 'D'
}
# Slave-automaton transitions.  NOTE(review): this is a set literal, so
# iteration order is arbitrary — presumably fine for drawing.
edges_sl = {
    # ("Zamkniecie drzwi elementu podrzednego", "Odlozenie"),
    ("Obiekt w CNC", "Obrobka"),
    ("Obrobka", "Odlozenie"),
    ("Odlozenie", "Obiekt w CNC")
}
nodes_sl = [
    #"Zamkniecie drzwi elementu podrzednego"
    "Obiekt w CNC", #1
    "Obrobka",#2
    "Odlozenie"#3
]
#----------------- master automaton
# Layout positions for the master-automaton nodes.
cords = {"Zamkniecie drzwi elementu podrzednego": (-5, 0),
         "Obiekt w podajniku": (-5, 2),
         "Przenoszenie do CNC": (-3, 2),
         "CNC": (-1, 2),
         "Kontrola jakosci": (1, 2),
         "Element odrzucony": (3, 2),
         # second row of nodes
         "Przeniesienie obiektu do pudelek": (2, 1.5),
         "Odbior": (-1, 1.5),
         "Proces zatrzymany": (-4, 1.5),
         }
# networkx draw() options for the master graph.
options_g = {
    'node_color': color_map,
    'edge_color': 'blue',
    'node_size': 18000,
    'width': 0.5,
    'with_labels': True,
    'pos': cords,
    'node_shape': 's',
    'font_size': 13,
}
# Master-automaton transitions (directed edges).
edges = [("Obiekt w podajniku", "Przenoszenie do CNC"),
         ("Obiekt w podajniku", "Proces zatrzymany"),
         ("Przenoszenie do CNC", "CNC"),
         ("CNC", "Kontrola jakosci"),
         ("Kontrola jakosci", "Element odrzucony"),
         ("Kontrola jakosci", "Przeniesienie obiektu do pudelek"),
         #("Element odrzucony", "Obiekt w podajniku"),
         ("Przeniesienie obiektu do pudelek", "Odbior"),
         ("Przeniesienie obiektu do pudelek", "Obiekt w podajniku"),
         ("Odbior", "Proces zatrzymany"),
         ("Proces zatrzymany", "Obiekt w podajniku")
         ]
nodes = ["Obiekt w podajniku", #1
         "Przenoszenie do CNC",#2
         "Proces zatrzymany",#3
         "CNC",#4
         "Kontrola jakosci",#5
         "Element odrzucony",#6
         "Przeniesienie obiektu do pudelek",#7
         "Odbior",#8
         ]
# TODO (maybe): add arrows
# TODO: add x, y coordinates for the labels - matplotlib addition
# Edge labels: Polish transition-event strings shown on the drawing.
labels = {('Obiekt w podajniku', 'Przenoszenie do CNC'): "Sygnał - Nowy element!",
          ("Obiekt w podajniku", "Proces zatrzymany"): "STOP!",
          ("Przenoszenie do CNC", "CNC"): "Element odłożony!",
          ("CNC", "Kontrola jakosci"): "Element gotowy!",
          ("Kontrola jakosci", "Element odrzucony"): "Odrzucono!", ("Kontrola jakosci", "Przeniesienie obiektu do pudelek"): "Przyjęto!",
          ("Przeniesienie obiektu do pudelek", "Odbior"): "Pudełko pełne!", ("Przeniesienie obiektu do pudelek", "Obiekt w podajniku"): "Gotowe!",
          ("Odbior", "Proces zatrzymany"): "Zatrzymanie procesu!",
          ("Proces zatrzymany", "Obiekt w podajniku"): "Wznów!"
          }
g = nx.DiGraph() # master graph
h = nx.DiGraph() # slave graph
# Adding the edges/nodes defined above to the master and slave graphs.
g.add_edges_from(edges)
g.add_nodes_from(nodes)
h.add_edges_from(edges_sl)
h.add_nodes_from(nodes_sl)
# Leftover experiments: drawing the slave graph once at import time.
#plt.figure('Slave automa', figsize=(6, 3))
#nx.draw(h, **options_sl)
#plt.draw()
#plt.show()
#plt.figure('Slave automa', figsize=(6, 3))
def draw_graph_slave(current_state):
    """Render the slave automaton, highlighting ``current_state``.

    The current node is drawn blue, every other node red.  Shows the
    figure in interactive mode and pauses for one second.
    """
    plt.ion()
    plt.figure('Slave automa', figsize=(6, 3))
    # Compute the colours locally instead of appending to the shared
    # color_map_sla list: the original mutate-then-clear pattern left
    # stale entries behind if drawing raised before the clear().
    node_colors = ['blue' if node == current_state else 'red' for node in h]
    opts = dict(options_sl)
    opts['node_color'] = node_colors
    nx.draw(h, **opts)
    plt.show()
    plt.pause(1)
def draw_graph(current_state):
    """Render the master automaton, highlighting ``current_state``.

    The current node is drawn blue, every other node red.  Shows the
    figure in interactive mode and pauses for one second.
    """
    plt.ion()
    plt.figure('Master automa', figsize=(14, 6))
    # Build the colour list locally rather than mutating the shared
    # color_map global, so a failed draw cannot leak stale colours into
    # the next call.
    node_colors = ['blue' if node == current_state else 'red' for node in g]
    opts = dict(options_g)
    opts['node_color'] = node_colors
    nx.draw(g, **opts)
    plt.show()
    plt.pause(1)
| 3,993 |
xcesm/plots/colormap.py
|
Yefee/xcesm
| 12 |
2171354
|
import numpy as np
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import json
import os
# Directory containing this module; colormap.json ships alongside it.
COLOR_PATH = os.path.join(os.path.dirname(__file__))
# Load all named palettes once at import time.  Each value is an array-like
# of colour rows (presumably RGB triples — confirm against colormap.json).
with open(COLOR_PATH + '/colormap.json', 'r') as f:
    colors = json.load(f)
def cmap(name, bins=None):
    """Build a ListedColormap from the palette stored in colormap.json.

    Parameters
    ----------
    name : str
        Key into the module-level ``colors`` dict.
    bins : int, optional
        If given, resample the colormap to ``bins`` discrete colors.

    Returns
    -------
    matplotlib.colors.ListedColormap
    """
    data = np.array(colors[name])
    # Scale into [0, 1] as matplotlib expects (assumes the stored values
    # are non-negative — TODO confirm against the JSON contents).
    data = data / np.max(data)
    # Renamed from `cmap` so the local no longer shadows this function.
    colormap = ListedColormap(data, name=name)
    if isinstance(bins, int):
        # NOTE(review): _resample is a private matplotlib API; newer
        # releases expose Colormap.resampled() instead — confirm the
        # pinned matplotlib version before migrating.
        colormap = colormap._resample(bins)
    return colormap
def subplots(nrow=2, ncol=2, figsize=None, ind=None, **kwarg):
    """Create an nrow x ncol grid of subplots on a PlateCarree projection.

    Parameters
    ----------
    nrow, ncol : int
        Grid dimensions.
    figsize : tuple, optional
        Forwarded to ``plt.figure``.
    ind : iterable of int, optional
        1-based subplot positions that receive the map projection; the
        remaining positions become plain axes.  When omitted, every
        subplot is projected.
    **kwarg
        Forwarded to ``cartopy.crs.PlateCarree``.

    Returns
    -------
    (figure, list of axes) — projected axes first, then plain ones.
    """
    import cartopy.crs as ccrs
    import matplotlib.pyplot as plt

    fig = plt.figure() if figsize is None else plt.figure(figsize=figsize)
    positions = range(1, nrow * ncol + 1)
    projection = ccrs.PlateCarree(**kwarg)
    if ind is None:
        ax = [fig.add_subplot(nrow, ncol, pos, projection=projection)
              for pos in positions]
    else:
        plain_positions = list(set(positions) - set(ind))
        projected = [fig.add_subplot(nrow, ncol, pos, projection=projection)
                     for pos in ind]
        plain = [fig.add_subplot(nrow, ncol, pos) for pos in plain_positions]
        ax = projected + plain
    return fig, ax
def make_patch_spines_invisible(ax):
    """Hide an axes' background patch and all of its spines.

    The frame itself stays enabled so individual spines can later be
    turned back on selectively.
    """
    ax.set_frame_on(True)
    ax.patch.set_visible(False)
    for spine in ax.spines.values():
        spine.set_visible(False)
def change_color_spines(ax, color):
    """Paint every spine, both tick sets and both axis labels of *ax*."""
    for spine in ax.spines.values():
        spine.set_color(color)
    for axis_name in ('x', 'y'):
        ax.tick_params(axis=axis_name, which='both', colors=color)
    ax.xaxis.label.set_color(color)
    ax.yaxis.label.set_color(color)
def mk_stacking_axes(num_axes, fig=None, hsapce=-0.4, color=None, ratio=None, **kwargs):
    """Create ``num_axes`` vertically stacked axes sharing one x-axis.

    Axes alternate their y-axis side (even index: left, odd index: right);
    only the first axis keeps a top x-axis and only the last a bottom one.
    A negative ``hsapce`` overlaps the axes so the plots visually stack.

    Parameters
    ----------
    num_axes : int
        Number of stacked axes to create.
    fig : unused
        NOTE(review): accepted but immediately overwritten — plt.subplots
        always creates a new figure; confirm no caller relies on it.
    hsapce : float
        Spacing passed to subplots_adjust.  The name is a typo of
        "hspace" but is kept for backward compatibility with callers.
    color : sequence, optional
        Per-axis colour applied via change_color_spines.
    ratio : sequence, optional
        Relative height of each axis (gridspec height_ratios).
    **kwargs
        Forwarded to plt.subplots.

    Returns
    -------
    list of matplotlib Axes
    """
    fig, axs = plt.subplots(num_axes, 1, sharex=True, gridspec_kw={'height_ratios':ratio}, **kwargs)
    # generate axes
    # axs = []
    # for i in range(num_axes):
    #     if i == 0:
    #         axs.append(fig.add_subplot(num_axes, 1, i+1,))
    #     else:
    #         axs.append(fig.add_subplot(num_axes, 1, i+1, sharex=axs[0]))
    # if ratio is not None:
    #     for i in range(num_axes):
    #         ax = axs[i]
    #         xmin, xmax = ax.get_xlim()
    #         ymin, ymax = ax.get_ylim()
    #         ax.set_aspect(abs((xmax-xmin)/(ymax-ymin))*ratio[i], adjustable='box-forced')
    # set right and left y axes, all other are invisible
    # NOTE(review): the 'on'/'off' string values for tick_params were
    # deprecated in later matplotlib releases; confirm the pinned version.
    for i, ax in enumerate(axs):
        make_patch_spines_invisible(ax)
        ax.patch.set_alpha(0)
        if i % 2 == 0:
            # Even index: y-axis on the left.
            ax.tick_params(axis='both', which='both', bottom='off', top='off',
                           left='on', labelleft='on', right='off', labelbottom='off')
            ax.spines['left'].set_visible(True)
            ax.yaxis.tick_left()
        else:
            # Odd index: y-axis on the right.
            ax.tick_params(axis='both', which='both', bottom='off', top='off',
                           left='off', right='on',labelright='on', labelbottom='off')
            ax.spines['right'].set_visible(True)
            ax.yaxis.tick_right()
    # add x-axis on top and bottom
    axs[0].spines['top'].set_visible(True)
    axs[0].tick_params(axis='both', which='both', labeltop='on', top='on')
    axs[-1].spines['bottom'].set_visible(True)
    axs[-1].tick_params(axis='both', which='both', labelbottom='on', bottom='on')
    # stacking all axes
    plt.subplots_adjust(hspace=hsapce)
    # change color for all axes
    if color is not None:
        for ax, c in zip(axs, color):
            change_color_spines(ax, c)
    return axs
| 3,644 |
output/models/nist_data/atomic/unsigned_long/schema_instance/nistschema_sv_iv_atomic_unsigned_long_max_exclusive_3_xsd/__init__.py
|
tefra/xsdata-w3c-tests
| 1 |
2172053
|
# Package shim: re-export the generated schema class so it can be imported
# directly from this package instead of the deep generated-module path.
from output.models.nist_data.atomic.unsigned_long.schema_instance.nistschema_sv_iv_atomic_unsigned_long_max_exclusive_3_xsd.nistschema_sv_iv_atomic_unsigned_long_max_exclusive_3 import NistschemaSvIvAtomicUnsignedLongMaxExclusive3

# Explicit public API of this package.
__all__ = [
    "NistschemaSvIvAtomicUnsignedLongMaxExclusive3",
]
| 299 |
datasette_verify/__init__.py
|
simonw/datasette-verify
| 2 |
2171733
|
from datasette import hookimpl
import click
import sqlite3
@hookimpl
def register_commands(cli):
    """Datasette plugin hook: add the ``verify`` sub-command to the CLI."""

    @cli.command()
    @click.argument("files", type=click.Path(exists=True), nargs=-1)
    def verify(files):
        "Verify that SQLite files can be opened using Datasette"
        for file in files:
            conn = sqlite3.connect(str(file))
            try:
                # Cheap probe query: raises DatabaseError when the file
                # is not a valid SQLite database.
                conn.execute("select * from sqlite_master")
            except sqlite3.DatabaseError:
                raise click.ClickException("Invalid database: {}".format(file))
            finally:
                # BUG FIX: the connection was never closed before,
                # leaking one file handle per verified database.
                conn.close()
| 547 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.