| max_stars_repo_path (string, 4–182 chars) | max_stars_repo_name (string, 6–116 chars) | max_stars_count (int64, 0–191k) | id (string, 7 chars) | content (string, 100–10k chars) | size (int64, 100–10k) |
|---|---|---|---|---|---|
seshat/account/tests/test_forms.py
|
XecusM/SESHAT
| 0 |
2170005
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from account import forms, models
class AccountFormsTests(TestCase):
'''
Test all forms for the account application
'''
def setUp(self):
'''
Initial setup for every test
'''
self.first_name = 'Mohamed'
self.last_name = 'Aboel-fotouh'
self.password = '<PASSWORD>'
self.superuser = get_user_model().objects.create_user(
username='xecus',
first_name=self.first_name,
last_name=self.last_name,
password=self.password,
is_active=True,
is_superuser=True
)
self.user = get_user_model().objects.create_user(
username='usename',
first_name=self.first_name,
last_name=self.last_name,
password=self.password,
is_active=True,
)
def test_new_account_form_fields(self):
'''
Test user new account form fields
'''
expected = [
'first_name', 'last_name', 'username',
'password1', 'password2', 'job',
'is_superuser', 'is_active'
]
actual = list(forms.NewForm(user=self.superuser).fields)
self.assertSequenceEqual(expected, actual)
def test_new_account_form_valid_data(self):
'''
Test valid form for creating new account user
'''
form = forms.NewForm(data={
'username': 'another_user',
'first_name': 'Mohamed',
'last_name': 'Aboel-fotouh',
'password1': self.password,
'password2': self.password,
}, user=self.superuser)
self.assertTrue(form.is_valid())
def test_new_account_form_invalid_data(self):
'''
Test invalid form scenarios for creating a new account user
'''
form_username = forms.NewForm(data={
'username': '',
'first_name': 'Mohamed',
'last_name': 'Aboel-fotouh',
'password1': self.password,
'password2': self.password,
}, user=self.superuser)
form_first_name = forms.NewForm(data={
'username': 'xecus',
'first_name': '',
'last_name': 'Aboel-fotouh',
'password1': self.password,
'password2': self.password,
}, user=self.superuser)
form_last_name = forms.NewForm(data={
'username': 'another_user',
'first_name': 'Mohamed',
'last_name': '',
'password1': self.password,
'password2': self.password,
}, user=self.superuser)
self.assertFalse(form_username.is_valid())
self.assertFalse(form_first_name.is_valid())
self.assertFalse(form_last_name.is_valid())
def test_edit_account_form_fields(self):
'''
Test edit account user form fields
'''
expected = [
'first_name', 'last_name', 'username',
'job', 'is_superuser', 'is_active'
]
actual = list(forms.EditForm(
instance=self.user, user=self.superuser
).fields)
self.assertSequenceEqual(expected, actual)
def test_edit_account_form_valid_data(self):
'''
Test valid form for edit account user details
'''
form = forms.EditForm(data={
'username': 'another_user',
'first_name': 'Mohamed',
'last_name': 'Aboel-fotouh',
},
instance=self.user, user=self.superuser)
self.assertTrue(form.is_valid())
def test_edit_account_form_invalid_data(self):
'''
Test invalid form scenarios for editing account user details
'''
form_username = forms.EditForm(data={
'username': '',
'first_name': 'Mohamed',
'last_name': 'Aboel-fotouh',
},
instance=self.user, user=self.superuser)
form_first_name = forms.EditForm(data={
'username': 'another_user',
'first_name': '',
'last_name': 'Aboel-fotouh',
},
instance=self.user, user=self.superuser)
form_last_name = forms.EditForm(data={
'username': 'another_user',
'first_name': 'Mohamed',
'last_name': '',
},
instance=self.user, user=self.superuser)
self.assertFalse(form_username.is_valid())
self.assertFalse(form_first_name.is_valid())
self.assertFalse(form_last_name.is_valid())
def test_user_profile_form_fields(self):
'''
Test user profile form fields
'''
expected = [
'photo', 'email', 'phone', 'birthdate', 'gender',
'x', 'y', 'width', 'height',
]
actual = list(forms.ProfileForm().fields)
self.assertSequenceEqual(expected, actual)
def test_user_profile_form_valid_data(self):
'''
Test valid form for user profile
'''
form = forms.ProfileForm(data={
'email': '<EMAIL>',
'phone': '+999988888',
'birthdate': '1982-5-24',
'gender': models.UserProfile.MALE,
})
self.assertTrue(form.is_valid())
def test_user_profile_form_invalid_data(self):
'''
Test invalid form scenarios for user profile
'''
form_email = forms.ProfileForm(data={
'email': 'user-email.com',
'phone': '+999988888',
'birthdate': '1982-5-24',
'gender': models.UserProfile.MALE,
})
form_phone = forms.ProfileForm(data={
'email': '<EMAIL>',
'phone': 'error',
'birthdate': '1982-5-24',
'gender': models.UserProfile.MALE,
})
form_birthdate = forms.ProfileForm(data={
'email': '<EMAIL>',
'phone': '+999988888',
'birthdate': '15/10/12',
'gender': models.UserProfile.MALE,
})
self.assertFalse(form_email.is_valid())
self.assertFalse(form_phone.is_valid())
self.assertFalse(form_birthdate.is_valid())
def test_user_settings_form_fields(self):
'''
Test user settings form fields
'''
expected = ['language', 'paginate', 'default_page', ]
actual = list(forms.SettingsForm().fields)
self.assertSequenceEqual(expected, actual)
def test_user_settings_form_valid_data(self):
'''
Test valid form for user settings
'''
form = forms.SettingsForm(data={
'language': models.UserSettings.ENGLISH,
'paginate': 30,
'default_page': models.UserSettings.ADMIN,
})
self.assertTrue(form.is_valid())
def test_user_settings_form_invalid_data(self):
'''
Test invalid form scenarios for user settings
'''
form_language = forms.SettingsForm(data={
'language': '',
'paginate': 30,
'default_page': models.UserSettings.ADMIN,
})
form_paginate = forms.SettingsForm(data={
'language': models.UserSettings.ENGLISH,
'paginate': '',
'default_page': models.UserSettings.ADMIN,
})
form_page = forms.SettingsForm(data={
'language': models.UserSettings.ENGLISH,
'paginate': 30,
'default_page': '',
})
self.assertFalse(form_language.is_valid())
self.assertFalse(form_paginate.is_valid())
self.assertFalse(form_page.is_valid())
| 9,800 |
sql/crud.py
|
EliasEriksson/ignition
| 0 |
2171066
|
from typing import Optional
import uuid
from sqlalchemy import exc
from sqlalchemy.orm import Session
from . import models
import schemas
from . import errors
# noinspection PyPackageRequirements
from argon2 import PasswordHasher
from fastapi.security import OAuth2PasswordRequestForm
hasher = PasswordHasher()
class Crud:
session: Session
def __init__(self, session: Session, token: Optional[schemas.token.Token] = None) -> None:
self.session = session
self.token = token
def __enter__(self) -> "Crud":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class User(Crud):
def get_by_id(self, id: uuid.UUID) -> Optional[models.User]:
return user if (
user := self.session.query(models.User).filter(models.User.id == id).first()
) else None
def get_by_email(self, email: str) -> Optional[models.User]:
return user if (
user := self.session.query(models.User).filter(models.User.email == email).first()
) else None
def get_by_token(self, token: str) -> Optional[models.User]:
return user if (
user := self.session.query(models.User).filter(models.User.token == token).first()
) else None
def create(self, user: OAuth2PasswordRequestForm) -> models.User:
db_user = models.User(
email=user.username, password_hash=hasher.hash(user.password),
)
try:
self.session.add(db_user)
self.session.commit()
except exc.IntegrityError:
raise errors.DuplicateEmail(user.username)
self.session.refresh(db_user)
return db_user
def update_by_id(self, id: uuid.UUID, user: schemas.user.UserAuthData) -> Optional[models.User]:
db_user = self.get_by_id(id)
if not db_user or self.token != db_user.token:
return
try:
db_user.email = user.email
db_user.password_hash = hasher.hash(user.password)
self.session.commit()
except exc.IntegrityError:
raise errors.DuplicateEmail(user.email)
return db_user
def delete_by_id(self, id: uuid.UUID) -> Optional[models.User]:
db_user = self.get_by_id(id)
if not db_user or self.token != db_user.token:
return
self.session.delete(db_user)
self.session.commit()
return db_user
class Token(Crud):
def get_by_id(self, id: int) -> Optional[models.Token]:
return token if (
token := self.session.query(models.Token).filter(models.Token.id == id).first()
) else None
def get_by_access_token(self, access_token: str) -> Optional[models.Token]:
return token if (
token := self.session.query(models.Token).filter(models.Token.access_token == access_token).first()
) else None
def create(self, user: models.User) -> None:
if user.token:
return self.update(user.token)
token = models.Token(user=user)
self.session.add(token)
self.session.commit()
self.session.refresh(token)
def update(self, token: schemas.token.Token) -> None:
self.session.execute("select update_expiration_of_row(:id);", {"id": token.id})
self.session.commit()
self.session.refresh(token)
def delete_by_id(self, id: int) -> Optional[models.Token]:
db_token = self.get_by_id(id)
if not db_token:
return
self.session.delete(db_token)
self.session.commit()
return db_token
def delete_by_value(self, access_token: str) -> Optional[models.Token]:
db_token: models.Token = self.session.query(models.Token).filter(models.Token.access_token == access_token).first()
if not db_token:
return
self.session.delete(db_token)
self.session.commit()
return db_token
class Quota(Crud):
def get_by_id(self, id: int) -> Optional[models.Quota]:
return quota if (
quota := self.session.query(models.Quota).filter(models.Quota.id == id).first()
) else None
def create(self, user: models.User) -> None:
if user.quota:
return
quota = models.Quota(user=user)
self.session.add(quota)
self.session.commit()
self.session.refresh(quota)
def update(self, quota: models.Quota) -> None:
pass
def delete_by_id(self, id: int) -> Optional[models.Quota]:
db_quota: models.Quota = self.session.query(models.Quota).filter(models.Quota.id == id).first()
if not db_quota:
return
self.session.delete(db_quota)
self.session.commit()
return db_quota
class Snippet(Crud):
def get_by_id(self, id: uuid.UUID) -> Optional[models.Snippet]:
return snippet if (
snippet := self.session.query(models.Snippet).filter(models.Snippet.id == id).first()
) else None
def create(self, user: models.User, snippet: schemas.snippet.SnippetData) -> models.Snippet:
db_snippet = models.Snippet(
**snippet.dict(),
user=user
)
try:
self.session.add(db_snippet)
self.session.commit()
except exc.IntegrityError:
# the slim chance that there is a duplicate uuid:
# roll back the failed transaction, then retry once
self.session.rollback()
self.session.add(db_snippet)
self.session.commit()
self.session.refresh(db_snippet)
return db_snippet
def update_by_id(self, id: uuid.UUID, snippet: schemas.snippet.SnippetData) -> Optional[models.Snippet]:
db_snippet: models.Snippet = self.get_by_id(id)
if not db_snippet or self.token != db_snippet.user.token:
return
db_snippet.language = snippet.language
db_snippet.code = snippet.code
db_snippet.args = snippet.args
self.session.commit()
self.session.refresh(db_snippet)
return db_snippet
def delete_by_id(self, id: uuid.UUID) -> Optional[models.Snippet]:
db_snippet = self.get_by_id(id)
if not db_snippet:
return
self.session.delete(db_snippet)
self.session.commit()
return db_snippet
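# Usage sketch (not part of the original module): wiring the Crud classes to a
# SQLAlchemy session. The sqlite URL and email below are illustrative values.
if __name__ == "__main__":
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine("sqlite:///ignition.db")  # hypothetical database URL
    with User(sessionmaker(bind=engine)()) as users:
        print(users.get_by_email("someone@example.com"))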
| 6,243 |
fortnite_api/creator_code.py
|
NextChai/py-wrapper
| 20 |
2171373
|
from .account import Account
class CreatorCode:
"""Represents a Creator Code.
Attributes
-----------
user: :class:`Account`
The account behind the creator code, or ``None`` if the payload has no account data.
disabled: :class:`bool`
Whether the Creator Code is disabled or not.
code: :class:`str`
The slug of the Creator Code.
verified: :class:`bool`
Whether the Creator Code is verified or not.
raw_data: :class:`dict`
The raw data from the request. Can be used for saving and re-creating the class.
"""
def __init__(self, data):
self.code = data.get('code')
self.user = Account(data.get('account')) if data.get('account') else None
self.disabled = data.get('status', '').lower() == 'disabled'
self.verified = data.get('verified', False)
self.raw_data = data
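# Usage sketch (not from the original file): the payload keys below mirror what
# __init__ reads; the values are made-up examples.
if __name__ == "__main__":
    payload = {'code': 'ninja', 'account': None, 'status': 'DISABLED', 'verified': True}
    cc = CreatorCode(payload)
    print(cc.code, cc.disabled, cc.verified)  # ninja True True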
| 821 |
KevinSolution/EasyCollection/SortingAndSearching/88.merge-sorted-array.py
|
zenz34/LearningDataStructureAlgorithm
| 0 |
2171356
|
from typing import List

class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
self.merge0(nums1, m, nums2, n)
def merge0(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
i = m - 1
j = n - 1
k = len(nums1) - 1
while i >= 0 and j >= 0:
if nums1[i] < nums2[j]:
nums1[k] = nums2[j]
j -= 1
else:
nums1[k] = nums1[i]
i -= 1
k -= 1
nums1[:j + 1] = nums2[:j + 1]
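# Quick check (not part of the original solution): nums1 carries m real values
# followed by n zero slots that receive the merged tail of nums2.
if __name__ == "__main__":
    nums1 = [1, 2, 3, 0, 0, 0]
    Solution().merge(nums1, 3, [2, 5, 6], 3)
    print(nums1)  # [1, 2, 2, 3, 5, 6]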
| 634 |
networking_cisco/db/migration/alembic_migrations/versions/liberty/expand/1e9e22602685_ha_for_routing_service_in_cisco_devices.py
|
mail2nsrajesh/networking-cisco
| 8 |
2171451
|
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""ha_for_routing_service_in_cisco_devices
Revision ID: 1e9e22602685
Revises: 2921fe565328
Create Date: 2015-09-28 09:33:27.294138
"""
# revision identifiers, used by Alembic.
revision = '1e9e22602685'
down_revision = '2921fe565328'
from alembic import op
import sqlalchemy as sa
from networking_cisco.plugins.cisco.extensions import ha
ha_states = sa.Enum('ACTIVE', 'STANDBY', name='ha_states')
def upgrade():
op.create_table('cisco_router_ha_settings',
sa.Column('router_id', sa.String(36), nullable=True),
sa.Column('ha_type', sa.String(255), nullable=True),
sa.Column('redundancy_level', sa.Integer,
server_default=str(ha.MIN_REDUNDANCY_LEVEL)),
sa.Column('priority', sa.Integer, nullable=True),
sa.Column('probe_connectivity', sa.Boolean, nullable=True),
sa.Column('probe_target', sa.String(64), nullable=True),
sa.Column('probe_interval', sa.Integer, nullable=True),
sa.Column('state', ha_states, server_default='ACTIVE'),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('router_id')
)
op.create_table('cisco_router_ha_groups',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('ha_type', sa.String(255), nullable=True),
sa.Column('group_identity', sa.String(255), nullable=True),
sa.Column('ha_port_id', sa.String(36), nullable=False),
sa.Column('extra_port_id', sa.String(36), nullable=True),
sa.Column('subnet_id', sa.String(36), nullable=True),
sa.Column('user_router_id', sa.String(36), nullable=True),
sa.Column('timers_config', sa.String(255), nullable=True),
sa.Column('tracking_config', sa.String(255), nullable=True),
sa.Column('other_config', sa.String(255), nullable=True),
sa.ForeignKeyConstraint(['ha_port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['extra_port_id'], ['ports.id'],
ondelete='SET NULL'),
sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id']),
sa.ForeignKeyConstraint(['user_router_id'], ['routers.id']),
sa.PrimaryKeyConstraint('ha_port_id')
)
op.create_table('cisco_router_redundancy_bindings',
sa.Column('redundancy_router_id', sa.String(36)),
sa.Column('priority', sa.Integer),
sa.Column('state', ha_states, server_default='STANDBY'),
sa.Column('user_router_id', sa.String(36)),
sa.ForeignKeyConstraint(['redundancy_router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_router_id'], ['routers.id']),
sa.PrimaryKeyConstraint('redundancy_router_id')
)
| 3,524 |
submodules/ethio/laser_stack/laser_stack_app.py
|
BerkeleyLab/CMOC
| 3 |
2169891
|
#!/usr/bin/python
import os
import sys
import numpy as np
import logging
base_dir = os.path.dirname(os.path.abspath(__file__)) or '.'
pspeps_dir = os.path.join(base_dir, '..')
if pspeps_dir not in sys.path:
sys.path.insert(0, pspeps_dir)
from pspeps_io.utilities import CommonUtilities
from pspeps_io.interfaces import WormholeInterface, LocalbusInterface
from pspeps_io.fpga_cores import SipFmc110
from pspeps_io.utilities import print_reg
from twisted.internet import defer
from laser_stack_io import LaserStackIO
logger = logging.getLogger(__name__)
class LaserStackApp(LaserStackIO, WormholeInterface, CommonUtilities):
""" ML605 + FMC110 Application"""
def __init__(self, **kwargs):
super(LaserStackApp, self).__init__(**kwargs)
print self.dac_len
json_path = os.path.join(pspeps_dir, 'pspeps_io/board_support.json')
self.core_fmc110 = SipFmc110(base=0x100000, path=json_path, core_name='fmc110')
self.regmap.update(self.core_fmc110.regmap)
self.reg_addr_dict.update(self.core_fmc110.reg_addr_dict)
@defer.inlineCallbacks
def init(self):
reg_dict = {'FMC110_CPLD_CTRL_REG0': 0x10}
self.write_reg_dict(reg_dict)
reg_dict = {
'FMC110_AD9517_CTRL_REG0x199': 0x33,
'FMC110_AD9517_CTRL_REG0x230': 0x1,}
self.write_spi_dict(reg_dict)
# a Python dict cannot hold a duplicate REG0x230 key (the first write would be
# silently dropped), so toggle it back to 0 in a second transaction
reg_dict = {
'FMC110_AD9517_CTRL_REG0x230': 0x0,
'FMC110_AD9517_CTRL_REG0x232': 0x1,}
self.write_spi_dict(reg_dict)
reg_dict = {
'FMC110_ADS5400_CTRL0_REG0x5': 0xb8, # enable sync mode
'FMC110_ADS5400_CTRL1_REG0x5': 0xb8,}
self.write_spi_dict(reg_dict)
yield self.adc_init()
self.dac_init()
self.write_dac_wfm1()
alist = [100,101,102]
dlist = [3,self.adc_len, self.dac_len]
self.write_mem_gate(alist,dlist)
alist = [0]
dlist = [1]
self.write_mem_gate(alist,dlist)
@defer.inlineCallbacks
def adc_init(self):
reg_dict = {
'FMC110_ADS5400_CTRL0_REG0x6': 0xc4, # enable test pattern
'FMC110_ADS5400_CTRL1_REG0x6': 0xc4,}
self.write_spi_dict(reg_dict)
reg_dict = {
'FMC110_ADS5400_PHY0_COMMAND': 0x3, # reset clock buffer and idelay
'FMC110_ADS5400_PHY1_COMMAND': 0x3,}
self.write_spi_dict(reg_dict)
reg_dict = {
'FMC110_ADS5400_PHY0_COMMAND': 0x4, # reset ISERDESs, when the clocks are stable
'FMC110_ADS5400_PHY1_COMMAND': 0x4,}
self.write_spi_dict(reg_dict)
reg_dict = {
'FMC110_ADS5400_PHY0_COMMAND': 0x8, # start_align, io_reset, clk_reset, delay_reset
'FMC110_ADS5400_PHY1_COMMAND': 0x8, # start_align, io_reset, clk_reset, delay_reset
'FMC110_ADS5400_PHY0_CONTROL': 0x10, # phy pattern check clear
'FMC110_ADS5400_PHY1_CONTROL': 0x10,} # phy pattern check clear
self.write_spi_dict(reg_dict)
regs_want = []
regs_want.append('FMC110_ADS5400_PHY0_COMMAND')
regs_want.append('FMC110_ADS5400_PHY0_CONTROL')
regs_want.append('FMC110_ADS5400_PHY1_COMMAND')
regs_want.append('FMC110_ADS5400_PHY1_CONTROL')
res = yield self.read_cmd_regs(regs_want)
try:
if (res[0] != 0x1fff0001):
raise ValueError('ADCPHY0 align error: '+hex(res[0]))
if (res[1] != 0x0):
raise ValueError('ADCPHY0 pattern check error: '+hex(res[1]))
if (res[2] != 0x1fff0001):
raise ValueError('ADCPHY1 align error: '+hex(res[2]))
if (res[3] != 0x0):
raise ValueError('ADCPHY1 pattern check error: '+hex(res[3]))
except ValueError as err:
logger.error("ads5400_init : %r" % err)
# raise
reg_dict = {
'FMC110_ADS5400_PHY0_COMMAND': 0x0, # disable start_align
'FMC110_ADS5400_PHY1_COMMAND': 0x0, # disable start_align
'FMC110_ADS5400_CTRL0_REG0x6': 0x4, # disable training pattern
'FMC110_ADS5400_CTRL1_REG0x6': 0x4,} # disable training pattern
self.write_spi_dict(reg_dict)
regs_want.append('FMC110_ADS5400_PHY0_TAP_VAL0')
regs_want.append('FMC110_ADS5400_PHY0_TAP_VAL1')
regs_want.append('FMC110_ADS5400_PHY0_TAP_VAL2')
regs_want.append('FMC110_ADS5400_PHY0_TAP_VAL3')
regs_want.append('FMC110_ADS5400_PHY1_TAP_VAL0')
regs_want.append('FMC110_ADS5400_PHY1_TAP_VAL1')
regs_want.append('FMC110_ADS5400_PHY1_TAP_VAL2')
regs_want.append('FMC110_ADS5400_PHY1_TAP_VAL3')
res = yield self.read_cmd_regs(regs_want)
for reg in zip(regs_want, map(hex, res)):
logger.info(reg)
def dac_init(self):
reg_dict = {
'FMC110_DAC5681Z_PHY0_CONTROL': 0x4, # drive TXENABLE high
'FMC110_DAC5681Z_PHY1_CONTROL': 0x4,} # drive TXENABLE high
self.write_spi_dict(reg_dict)
def align_adc(self, adc_in, bits=16):
clk4_div = 8 # ad9517 0x199
adc_out = adc_in
for i, dat in enumerate(adc_in):
dat_sync = map(np.bitwise_and, dat[0:clk4_div], [0x1000]*clk4_div)
first_index = list(dat_sync).index(0x1000)
logger.debug("adc chan: %d, align_index: %d"%(i, first_index))
print i, first_index
adc_out[i] = np.append(adc_in[i][first_index:], [0]*first_index)
return adc_out
def print_adc_len(self, dat, bits=16):
for chan_dat in dat:
print 'length:',len(chan_dat)
# print map(decode_2s_comp,chan_dat,[bits]*len(chan_dat))
return dat
def read_adc_wfm(self):
d = self.read_adc_dpram(self.adc_addr_list, self.adc_read_len)
d.addCallback(self.calc_adc_wfm, self.adc_bits)
return d
def read_adc_wfm_addr(self,addr_list):
addrall,indexlist = [],[]
length = 0
for addr in addr_list:
index = (length,length+len(addr))
indexlist.append(index)
length = length + len(addr)
addrall = addrall + addr
# print 'addall',addrall
# print 'indexlist',indexlist
d = self.read_mem_gate(addrall)
d.addCallback(self.calc_adc_wfm_seperate,indexlist,self.adc_bits)
return d
def read_adc_print(self):
d = self.read_adc_dpram(self.adc_addr_list, self.adc_read_len)
d.addCallback(self.print_adc_wfm, self.adc_bits)
def read_adc_plot(self):
d = self.read_adc_dpram(self.adc_addr_list, self.adc_read_len)
d.addCallback(self.align_adc, self.adc_bits)
d.addCallback(self.update_adc_wfm, self.adc_bits)
def write_dac_wfm0(self):
# XXX automatic register map decoding
d0 = self.prep_sin_data(self.dac_len)
d1 = self.prep_sin_data(self.dac_len)
d0 = [0]*64+d0[0:64]
dac0_addr = 0x31000
dac1_addr = 0x41000
self.write_dac_dpram(dac0_addr, d0)
self.write_dac_dpram(dac1_addr, d1)
def write_dac_wfm1(self):
# XXX automatic register map decoding
d0 = self.prep_zeros_data(self.dac_len)
d1 = self.prep_zeros_data(self.dac_len)
dac0_addr = 0x31000
dac1_addr = 0x41000
self.write_dac_dpram(dac0_addr, d0)
self.write_dac_dpram(dac1_addr, d1)
def write_slowdac_wfms(self,input_dicts):
self.write_slowdac_wfm(input_dicts.next())
def write_slowdac_wfm(self,input_dict={200:0x42,201:0x321,202:0x67d,203:0x8fa,204:0xb03,205:0xca5,206:0xd0d,207:0xfff}):
alist = input_dict.keys()
dlist = input_dict.values()
self.write_mem_gate(alist,dlist)
def get_ads5400_temp(self):
reg_names = ['FMC110_ADS5400_CTRL0_REG0x8', 'FMC110_ADS5400_CTRL1_REG0x8']
d = self.read_cmd_regs(reg_names).addCallback(self.core_fmc110.decode_ads5400_temp)
return d
def get_config_rom(self):
return self.get_lbnl_rom(0x200000)
def get_fmc110_fcnt(self):
freq_dict = {
'CMD_CLK': 0,'ADC0': 1,'DAC0':2,
'TRIGGER':3,'ADC1':4,'DAC1':5,'TO_FPGA':6}
d = self.get_fmc_fcnt('FMC110',freq_dict)
return d
@defer.inlineCallbacks
def read_mon(self):
d = yield self.get_fmc110_fcnt()
print_reg(d)
d = yield self.get_ads5400_temp()
print_reg(d)
@defer.inlineCallbacks
def read_diag(self):
regs_want = []
for i in range(0,0x9+1):
regs_want.append('FMC110_ADS5400_CTRL0_REG'+hex(i))
# for i in range(0,0x9+1):
# regs_want.append('FMC110_ADS5400_CTRL1_REG'+hex(i))
regs_want.append('FMC110_ADS5400_PHY0_CONTROL')
regs_want.append('FMC110_ADS5400_PHY0_COMMAND')
regs_want.append('FMC110_ADS5400_PHY1_CONTROL')
regs_want.append('FMC110_ADS5400_PHY1_COMMAND')
regs_want.append('FMC110_FMC110_CTRL_CONTROL')
res = yield self.read_cmd_regs(regs_want)
for reg in zip(regs_want, map(hex, res)):
logger.info(reg)
| 9,084 |
hooks/post_gen_project.py
|
LiamRMoore/cookiecutter-ds-python
| 0 |
2169731
|
""" post_gen_project.py
post-creation hooks to remove unnecessary files
"""
import os
import shutil
import warnings
def remove(filepath):
if os.path.isfile(filepath):
os.remove(filepath)
elif os.path.isdir(filepath):
shutil.rmtree(filepath)
create_aws_scripts = '{{cookiecutter.use_aws_ec2_instance}}' == 'y'
if not create_aws_scripts:
# remove relative file nested inside the generated folder
remove(os.path.join('{{cookiecutter.repo_name}}', 'bin', 'connect_to_aws'))
remove(os.path.join('{{cookiecutter.repo_name}}', 'bin', 'connect_to_notebook'))
remove(os.path.join('{{cookiecutter.repo_name}}', 'bin', 'update_ssh_config'))
else:
# sanity check
if not os.path.isfile('{{ cookiecutter.aws_ec2_ssh_key }}'):
warnings.warn("SSH key not found at {{ cookiecutter.aws_ec2_ssh_key }}!")
# remove absolute path to file nested inside the generated folder
#remove(os.path.join(os.getcwd(), '{{cookiecutter.repo_name}}', 'file_one.py'))
| 1,003 |
projects/messengerBot/messenger_bot.py
|
singularity-bit/univ
| 0 |
2170135
|
from fbchat import log, Client
from fbchat.models import *
client = Client("100008489650804", "Vasile.Grosu98")
#thread_id='100005919199300'
thread_type=ThreadType.USER
# print("Own id: {}".format(client.uid))
# client.send(Message(text="test msg"), thread_id=thread_id, thread_type=ThreadType.USER)
# #client.logout()
class EchoBot(Client):
def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs):
self.markAsDelivered(thread_id, message_object.uid)
self.markAsRead(thread_id)
log.info("{} from {} in {}".format(message_object, thread_id, thread_type.name))
# Echo back any message whose text is "gg2"
if message_object.text == "gg2":
self.send(message_object, thread_id=thread_id, thread_type=thread_type)
client = EchoBot("100008489650804", "Vasile.Grosu98")
client.listen()
| 865 |
task/w2/practic/6-n chisel.py
|
beregok/pythontask
| 1 |
2171569
|
# Given three integers, determine how many of them are equal. The program
# should print one of the numbers: 3 (if all are equal), 2 (if two are equal)
# or 0 (if all the numbers are different).
#
# ## Input format
#
# Three integers are entered.
#
# ## Output format
#
# Print the answer to the problem.
a = int(input())
b = int(input())
c = int(input())
if (a == b) and (a == c) and (b == c):
print(3)
elif (a == b) or (a == c) or (b == c):
print(2)
else:
print(0)
| 468 |
hera_sim/tests/test_utils.py
|
ninastijepovic/MasterThesis
| 0 |
2170845
|
from __future__ import print_function
import unittest
from hera_sim import utils, noise
from hera_sim.data import DATA_PATH
import numpy as np
import aipy
import nose.tools as nt
import os
np.random.seed(0)
class TestUtils(unittest.TestCase):
def test_gen_delay_filter(self):
np.random.seed(0)
fqs = np.linspace(.1, .2, 100, endpoint=False)
bl_len_ns = 50.0
standoff = 0.0
df = utils.gen_delay_filter(fqs, bl_len_ns, standoff=standoff, filter_type='tophat')
nt.assert_almost_equal(np.sum(df), 11)
df = utils.gen_delay_filter(fqs, bl_len_ns, standoff=standoff, filter_type='gauss')
nt.assert_almost_equal(np.sum(df), 3.133285343289006)
df = utils.gen_delay_filter(fqs, bl_len_ns, standoff=standoff, filter_type='trunc_gauss')
nt.assert_almost_equal(np.sum(df), 3.1332651717678575)
df = utils.gen_delay_filter(fqs, bl_len_ns, standoff=standoff, filter_type='none')
nt.assert_almost_equal(np.sum(df), 100)
df = utils.gen_delay_filter(fqs, bl_len_ns, standoff=standoff, filter_type='tophat',
min_delay=100.0)
nt.assert_almost_equal(np.sum(df), 0)
df = utils.gen_delay_filter(fqs, bl_len_ns, standoff=standoff, filter_type='tophat',
max_delay=50.0)
nt.assert_almost_equal(np.sum(df), 11)
def test_rough_delay_filter(self):
np.random.seed(0)
lsts = np.linspace(0, 2*np.pi, 200)
fqs = np.linspace(.1, .2, 100, endpoint=False)
bl_len_ns = 50.0
standoff = 0.0
data = noise.white_noise((len(lsts), len(fqs)))
dfilt = utils.rough_delay_filter(data, fqs, bl_len_ns, standoff=standoff, filter_type='gauss')
dfft = np.mean(np.abs(np.fft.ifft(dfilt, axis=1)), axis=0)
nt.assert_true(np.isclose(dfft[20:-20], 0.0).all())
def test_gen_fringe_filter(self):
np.random.seed(0)
lsts = np.linspace(0, 2*np.pi, 200)
fqs = np.linspace(.1, .2, 100, endpoint=False)
bl_len_ns = 50.0
FRF = np.load(os.path.join(DATA_PATH, "H37_FR_Filters_small.npz"))
fr_filt = FRF['PB_rms'][0].T
fr_frates = FRF['frates']
fr_freqs = FRF['freqs'] / 1e9
ff = utils.gen_fringe_filter(lsts, fqs, bl_len_ns, filter_type='none')
nt.assert_true(np.isclose(ff, 1.0).all())
ff = utils.gen_fringe_filter(lsts, fqs, bl_len_ns, filter_type='tophat')
nt.assert_almost_equal(np.sum(ff[50]), np.sum(ff[-50]), 41)
ff = utils.gen_fringe_filter(lsts, fqs, bl_len_ns, filter_type='gauss', fr_width=1e-4)
nt.assert_almost_equal(np.sum(ff[50]), 63.06179070109816)
ff = utils.gen_fringe_filter(lsts, fqs, bl_len_ns, filter_type='custom', FR_filter=fr_filt, FR_frates=fr_frates, FR_freqs=fr_freqs)
nt.assert_almost_equal(np.sum(ff[50]), 14.66591593210259, places=3)
def test_rough_fringe_filter(self):
np.random.seed(0)
lsts = np.linspace(0, 2*np.pi, 400)
fqs = np.linspace(.1, .2, 100, endpoint=False)
bl_len_ns = 50.0
FRF = np.load(os.path.join(DATA_PATH, "H37_FR_Filters_small.npz"))
fr_filt = FRF['PB_rms'][0].T
fr_frates = FRF['frates']
fr_freqs = FRF['freqs'] / 1e9
data = noise.white_noise((len(lsts), len(fqs)))
dfilt = utils.rough_fringe_filter(data, lsts, fqs, bl_len_ns, filter_type='gauss', fr_width=1e-4)
dfft = np.mean(np.abs(np.fft.ifft(dfilt, axis=0)), axis=1)
nt.assert_true(np.isclose(dfft[50:150], 0.0).all())
def test_bl_vec():
bl = 1
assert len(utils._get_bl_len_vec(bl)) == 3
bl = (0, 1)
assert len(utils._get_bl_len_vec(bl)) == 3
bl = [0, 1]
assert len(utils._get_bl_len_vec(bl)) == 3
bl = np.array([0, 1, 2])
assert len(utils._get_bl_len_vec(bl)) == 3
def test_delay_filter_norm():
N = 50
fqs = np.linspace(0.1, 0.2, N)
tsky = np.ones(N)
np.random.seed(1234) # set the seed for reproducibility.
out = 0
nreal = 5000
for i in range(nreal):
_noise = tsky * noise.white_noise(N)
outnoise = utils.rough_delay_filter(_noise, fqs, 30, normalize=1)
out += np.sum(np.abs(outnoise)**2)
out /= nreal
print((out, np.sum(tsky**2)))
assert np.isclose(out, np.sum(tsky**2), atol=0, rtol=1e-2)
if __name__ == '__main__':
unittest.main()
| 4,429 |
planetutils/elevation_tile_merge.py
|
xu183255/planetutils
| 143 |
2171591
|
#!/usr/bin/env python
from __future__ import absolute_import, unicode_literals, print_function
import argparse
import sys
import fnmatch
import os
import subprocess
import tempfile
from . import log
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--scale', help="Resample to 8 bit with (min,max) range")
parser.add_argument('outpath', help='Output filename')
parser.add_argument('inpath', help='Input directory')
args = parser.parse_args()
outpath = args.outpath
tmppath = args.outpath
if args.scale and len(args.scale.split(',')) != 2:
print("Must provide min, max values")
sys.exit(1)
elif args.scale:
# Output to tmp file
_, tmppath = tempfile.mkstemp(suffix='.tif')
matches = []
for root, dirnames, filenames in os.walk(args.inpath):
for filename in fnmatch.filter(filenames, '*.tif'):
matches.append(os.path.join(root, filename))
if len(matches) == 0:
print("No input files")
sys.exit(0)
print("Found %s files:"%len(matches))
for i in matches:
print("\t%s"%(i))
# gdal_merge.py -init 0 -o out.tif
print("Merging... %s"%(tmppath))
cmd = ['gdal_merge.py', '-init', '0', '-o', tmppath]
cmd += matches
p = subprocess.check_call(cmd)
# gdal_translate -of GTiff -ot Byte -scale 0 255 0 255 out.tif out8.tif
if args.scale:
print("Scaling: %s -> %s"%(tmppath, outpath))
a = args.scale.split(",")
cmd = ['gdal_translate', '-of', 'GTiff', '-ot', 'Byte', '-scale', a[0], a[1], '0', '255', tmppath, outpath]
subprocess.check_call(cmd)
# cleanup
try: os.unlink('%s.aux.xml'%outpath)
except OSError: pass
try: os.unlink(tmppath)
except OSError: pass
if __name__ == '__main__':
main()
| 1,825 |
src/libdoc2json/__main__.py
|
Mohorelien/robotframework-LibDoc2Json
| 9 |
2171605
|
import sys
import os
from robot.libdoc import libdoc
from .libdoc2json import libdoc2json
def main():
args = sys.argv[1:]
if len(args) != 2:
print(f'Usage: python -m libdoc2json <LIBRARY or *.robot or *.py> <Outputfile.json>!\n'
f'Example: python -m libdoc2json SeleniumLibrary SeleniumLibrary4.0.json\n'
f'\nArguments: {args}')
else:
libdoc(args[0], 'tml_file.xml')
libdoc2json('tml_file.xml', args[1])
os.remove('tml_file.xml')
if __name__ == "__main__":
main()
| 544 |
utils/Logger.py
|
TinaBBB/bayesian-critiquing-recommender
| 3 |
2168051
|
import os
import sys
import time
import logging
class Logger:
def __init__(self, log_dir):
self.logger = logging.getLogger('RecSys')
self.logger.setLevel(logging.INFO)
# File handler
self.log_dir = self.get_log_dir(log_dir)
fh = logging.FileHandler(os.path.join(self.log_dir, 'log.txt'))
fh.setLevel(logging.DEBUG)
fh_format = logging.Formatter('%(asctime)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
fh.setFormatter(fh_format)
self.logger.addHandler(fh)
# Console handler
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
ch_format = logging.Formatter('%(message)s')
ch.setFormatter(ch_format)
self.logger.addHandler(ch)
def info(self, msg):
self.logger.info(msg)
def get_log_dir(self, log_dir):
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_dirs = os.listdir(log_dir)
if len(log_dirs) == 0:
idx = 0
else:
idx_list = sorted([int(d.split('_')[0]) for d in log_dirs])
idx = idx_list[-1] + 1
cur_log_dir = '%d_%s' % (idx, time.strftime('%Y%m%d-%H%M'))
full_log_dir = os.path.join(log_dir, cur_log_dir)
if not os.path.exists(full_log_dir):
os.mkdir(full_log_dir)
return full_log_dir
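# Usage sketch (not part of the original class): each Logger instance creates a
# fresh run directory such as logs/0_20240101-1200/ holding log.txt.
if __name__ == "__main__":
    logger = Logger('logs')  # 'logs' is an illustrative directory name
    logger.info('run started')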
| 1,378 |
tests/test_not_infinite.py
|
roniemartinez/DocCron
| 3 |
2170378
|
from datetime import datetime, timedelta
from dateutil.tz import tzlocal
import doccron
def bar() -> None:
pass
def test_non_infinite_jobs() -> None:
next_minute = datetime.now(tz=tzlocal()).replace(second=0, microsecond=0) + timedelta(minutes=1)
bar.__doc__ = """
/etc/crontab::
{} {} {} {} * {}
""".format(
next_minute.minute,
next_minute.hour,
next_minute.day,
next_minute.month,
next_minute.year,
)
jobs_found = False
for next_schedule, function_object in doccron.run_jobs(simulate=True):
assert next_schedule == datetime.now(tz=tzlocal()).replace(second=0, microsecond=0) + timedelta(minutes=1)
jobs_found = True
assert jobs_found
| 743 |
re/buuctf/Youngter-drive/exp.py
|
dejavudwh/WriteUp
| 0 |
2170361
|
s = 'QWERTYUIOPASDFGHJKLZXCVBNMqwertyuiopasdfghjklzxcvbnm' #off_418000
d = 'TOiZiZtOrYaToUwPnToBsOaOapsyS' #off_418004
flag = ''
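# Undo the binary's alternating encode: characters at even indices were left
# untouched; odd indices were substituted through the alphabet table s, so they
# are inverted with +96 for uppercase hits and +38 for lowercase hits.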
for i in range(len(d)):
if i % 2 == 0:
flag = flag + d[i]
else:
if(d[i].isupper()):
flag = flag + chr(s.find(d[i]) + 96)
else:
flag = flag + chr(s.find(d[i]) + 38)
print(flag)
| 361 |
pymc3/ode/utils.py
|
Dr-G/pymc3
| 0 |
2165412
|
import numpy as np
import theano
import theano.tensor as tt
def augment_system(ode_func, n, m):
"""
Function to create augmented system.
Take a function which specifies a set of differential equations and return
a compiled function which allows for computation of gradients of the
differential equation's solution with respect to the parameters.
Parameters
----------
ode_func : function
Differential equation. Returns array-like.
n : int
Number of rows of the sensitivity matrix.
m : int
Number of columns of the sensitivity matrix.
Returns
-------
system : function
Augmented system of differential equations.
"""
# Present state of the system
t_y = tt.vector("y", dtype=theano.config.floatX)
t_y.tag.test_value = np.zeros((n,))
# Parameter(s). Should be a vector to allow for generalization to multiparameter
# systems of ODEs. Is m dimensional because it includes all ode parameters as well as initial conditions
t_p = tt.vector("p", dtype=theano.config.floatX)
t_p.tag.test_value = np.zeros((m,))
# Time. Allow for non-autonomous systems of ODEs to be analyzed
t_t = tt.scalar("t", dtype=theano.config.floatX)
t_t.tag.test_value = 2.459
# Present state of the gradients:
# Will always be 0 unless the parameter is the initial condition
# Entry i,j is partial of y[i] wrt to p[j]
dydp_vec = tt.vector("dydp", dtype=theano.config.floatX)
dydp_vec.tag.test_value = np.zeros(n * m)
dydp = dydp_vec.reshape((n, m))
# Stack the results of the ode_func into a single tensor variable
yhat = ode_func(t_y, t_t, t_p)
if not isinstance(yhat, (list, tuple)):
yhat = (yhat,)
t_yhat = tt.stack(yhat, axis=0)
# Now compute gradients
J = tt.jacobian(t_yhat, t_y)
Jdfdy = tt.dot(J, dydp)
grad_f = tt.jacobian(t_yhat, t_p)
# This is the time derivative of dydp
ddt_dydp = (Jdfdy + grad_f).flatten()
system = theano.function(
inputs=[t_y, t_t, t_p, dydp_vec],
outputs=[t_yhat, ddt_dydp],
on_unused_input="ignore",
)
return system
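# Usage sketch (not part of the original module): augment exponential decay,
# dy/dt = -p[0]*y, with n=1 state and m=2 sensitivity parameters (the rate
# constant plus the initial condition).
if __name__ == "__main__":
    decay = augment_system(lambda y, t, p: [-p[0] * y[0]], n=1, m=2)
    y0 = np.array([1.0], dtype=theano.config.floatX)
    p0 = np.array([0.5, 1.0], dtype=theano.config.floatX)
    yhat, ddt_dydp = decay(y0, 2.0, p0, np.zeros(2, dtype=theano.config.floatX))
    print(yhat, ddt_dydp)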
| 2,237 |
low_level_interface/CO2_oil_mixture_Zhelezny.py
|
gu997/mixture
| 0 |
2171278
|
import numpy as np
import matplotlib.pyplot as plt
import CoolProp.CoolProp as CP
from scipy.optimize import fsolve
import compressore as c
import grafici_termodinamici as gt
# =============================================================================
# "<NAME>"
# a1 = 6.59664
# a2 = -1.61705E+03
# a3 = 7.00550E+04
# a4 = 3.88634
# a5 = -1.46846E+03
# a6 = 1.19438E+05
# a7 = 3.40692E-01
# a8 = -2.54276E+02
# a9 = 2.11410E+04
#
# plt.figure(dpi=200)
# plt.grid()
# T=np.linspace(0,10)+273.15
# omega=0.05#np.linspace(0.05,0.4,5)
# #np.log10(P) = a1 + a2/T + a3/(T**2) + (a4 + a5/T + a6/(T**2))*np.log10(omega) + (a7 + a8/T + a9/(T**2))*np.log10(omega)**2
# P = 10**(a1 + a2/T + a3/(T**2) + (a4 + a5/T + a6/(T**2))*np.log10(omega) + (a7 + a8/T + a9/(T**2))*np.log10(omega)**2)
# plt.plot(T,P)
#
# omega=0.1#np.linspace(0.05,0.4,5)
# #np.log10(P) = a1 + a2/T + a3/(T**2) + (a4 + a5/T + a6/(T**2))*np.log10(omega) + (a7 + a8/T + a9/(T**2))*np.log10(omega)**2
# P = 10**(a1 + a2/T + a3/(T**2) + (a4 + a5/T + a6/(T**2))*np.log10(omega) + (a7 + a8/T + a9/(T**2))*np.log10(omega)**2)
# plt.plot(T,P)
#
# omega=0.2#np.linspace(0.05,0.4,5)
# #np.log10(P) = a1 + a2/T + a3/(T**2) + (a4 + a5/T + a6/(T**2))*np.log10(omega) + (a7 + a8/T + a9/(T**2))*np.log10(omega)**2
# P = 10**(a1 + a2/T + a3/(T**2) + (a4 + a5/T + a6/(T**2))*np.log10(omega) + (a7 + a8/T + a9/(T**2))*np.log10(omega)**2)
# plt.plot(T,P)
#
# omega=0.4#np.linspace(0.05,0.4,5)
# #np.log10(P) = a1 + a2/T + a3/(T**2) + (a4 + a5/T + a6/(T**2))*np.log10(omega) + (a7 + a8/T + a9/(T**2))*np.log10(omega)**2
# P = 10**(a1 + a2/T + a3/(T**2) + (a4 + a5/T + a6/(T**2))*np.log10(omega) + (a7 + a8/T + a9/(T**2))*np.log10(omega)**2)
# plt.plot(T,P)
#
# omega=0.99#np.linspace(0.05,0.4,5)
# #np.log10(P) = a1 + a2/T + a3/(T**2) + (a4 + a5/T + a6/(T**2))*np.log10(omega) + (a7 + a8/T + a9/(T**2))*np.log10(omega)**2
# P = 10**(a1 + a2/T + a3/(T**2) + (a4 + a5/T + a6/(T**2))*np.log10(omega) + (a7 + a8/T + a9/(T**2))*np.log10(omega)**2)
# plt.plot(T,P)
#
# omega=1#np.linspace(0.05,0.4,5) # no: at 100% it behaves practically like 40%
# #np.log10(P) = a1 + a2/T + a3/(T**2) + (a4 + a5/T + a6/(T**2))*np.log10(omega) + (a7 + a8/T + a9/(T**2))*np.log10(omega)**2
# P = 10**(a1 + a2/T + a3/(T**2) + (a4 + a5/T + a6/(T**2))*np.log10(omega) + (a7 + a8/T + a9/(T**2))*np.log10(omega)**2)
# plt.plot(T,P)
# =============================================================================
"Zhelezny (2007)"
a1 = 73.7537
a2 = -36.4423
a3 = -37.0377
a4 = -1.816879
a5 = 0.973128
b1 = 303.897
b2 = -303.666
b3 = -1.22524
b4 = 0.225542
c1 = 6.79411
c2 = -5.6879
c3 = -0.624642
def P_s(w,T_bub): # valid only inside the two-phase dome
P_c = (a1+a2*w + a3*w**2)/(1 + a4*w + a5*w**2) # pseudo-critical pressure in bar
T_c = (b1 + b2*w)/(1 + b3*w + b4*w**2)
alpha = (c1+c2*w)/(1+c3*w)
tau=np.log(T_c/T_bub)
#np.log(P_c/P_s)= alpha*tau + 5.957*tau**2.64
#np.log(P_s) = np.log(P_c) - alpha*tau - 5.957*tau**2.64
P_ss=np.exp( np.log(P_c) - alpha*tau - 5.957*tau**2.64 )
return P_ss*10**5
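# Example call (illustrative, not part of the original script): P_s(0.10, 273.15)
# gives the bubble pressure in Pa for a 10% oil mass fraction at 0 degC.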
def f_P_s(T_bub,w):
#print(T_bub)
#print(w)
#print(P_s(w,T_bub))
return P_s(w,T_bub)-P[0]
T=np.zeros(4)
P=np.zeros(4)
H=np.zeros(4)
H_f=np.zeros(4)
H_o=np.zeros(4)
Q=np.zeros(4)
S=np.zeros(4)
w=np.ones(4)
T_ref=273.15
lib="REFPROP"
fld = CP.AbstractState(lib, "CO2")
oil = CP.AbstractState("INCOMP", "SAB")
"punto 2"
k=0.03 #fraz di olio nel totale massa olio/massa totale
#w =frazione di olio nel liquido massa olio/massa liquida
T[2]=40+T_ref
P[2]=95*10**5
fld.update(CP.PT_INPUTS, P[2], T[2])
H_f[2]=fld.hmass()
oil.update(CP.PT_INPUTS, P[2], T[2])
H_o[2]=oil.hmass()
H[2]=k*H_o[2] + (1-k)*H_f[2]
"punto 3"
H[3]=H[2]
T[3]=-5+T_ref
fld.update(CP.HmassT_INPUTS, H[3], T[3])
P[3]=fld.p() # trial value for P
def P_3(p3):
P[3]=p3
oil.update(CP.PT_INPUTS, P[3], T[3])
H_o[3]=oil.hmass()
H_f[3]=( H[3]- k*H_o[3] )/(1-k)
fld.update(CP.HmassT_INPUTS, H_f[3], T[3])
Q[3]=fld.Q()
# =============================================================================
# fld.update(CP.QT_INPUTS, 0, T[3])
# H_f_l = fld.hmass()
#
#
# fld.update(CP.QT_INPUTS, 1, T[3])
# H_f_v = fld.hmass()
#
# Q[3] = ( H_f[3] - H_f_l )/(H_f_v-H_f_l)
# =============================================================================
# =============================================================================
# fld.update(CP.HmassT_INPUTS, H_f[2], T[3])
# fld.Q()
# fld.p()
# =============================================================================
z_l=(1-k-Q[3]+k*Q[3])/(1-Q[3]+k*Q[3])
w[3]=1-z_l
return P_s(w[3],T[3])-p3
P[3]=fsolve(P_3, P[3])
"punto 0"
P[0]=P[3]
w[0]= 0.56 #è ottimizzabile siamo noi a deciderlo
Q_tot=1-k/w[0]
z_l=1-w[0]
Q[0]=(1-z_l-k)/(1-z_l-k+z_l*k)
T[0]=T[3] # initialize
T[0]=fsolve(f_P_s,T[0],args=(w[0]))
oil.update(CP.PT_INPUTS, P[0], T[0])
H_o[0]=oil.hmass()
# =============================================================================
# fld.update(CP.QT_INPUTS, 0, T[0])
# H_f_l = fld.hmass()
#
#
# fld.update(CP.QT_INPUTS, 1, T[0])
# H_f_v = fld.hmass()
#
# H_f[0] = H_f_l*(1-Q[0]) + H_f_v*Q[0]
# =============================================================================
fld.update(CP.QT_INPUTS, Q[0], T[0])
H_f[0]=fld.hmass()
H[0] = k*H_o[0] + (1-k)*H_f[0]
"punto 1"
P[1]=P[2]
T[1]=c.Temperatura_mandata(T[3]-T_ref, T[0]-T_ref, P[1]*10**-5)+T_ref
fld.update(CP.PT_INPUTS, P[1], T[1])
H_f[1]=fld.hmass()
oil.update(CP.PT_INPUTS, P[1], T[1])
H_o[1]=oil.hmass()
H[1]=k*H_o[1] + (1-k)*H_f[1]
COP=(H[0]-H[2])/(H[1]-H[0])/Q[0]
print('COP with oil='+str(COP))
# =============================================================================
# P=P/100000
# H=H/1000
#
# plt.figure(dpi=200)
# plt.grid()
# plt.xlabel('H [kJ/kg]')
# plt.ylabel('P [bar]')
# plt.plot(H,P,'r')
# plt.plot([H[3],H[0]],[P[3],P[0]],'r')
# =============================================================================
# =============================================================================
# plt.figure(dpi=200)
# plt.grid()
# plt.xlabel('T [°C]')
# plt.ylabel('P [bar]')
#
# n=6
# m=50
# ww=np.linspace(0,0.6,n)
# TT=np.linspace(-20,30,m)+T_ref
# PP=np.zeros(m)
#
# for i in range(n):
# plt.plot(TT-T_ref,P_s(ww[i],TT)/100000)
#
# for i in range(m):
# fld.update(CP.QT_INPUTS, 0, TT[i])
# PP[i]=fld.p()
#
# plt.plot(TT-T_ref,PP/100000,'--')
#
# fld.p_critical()
# fld.T_critical()-273.15
# =============================================================================
"CICLO SOLO CO2"
t=np.zeros(4)
p=np.zeros(4)
h=np.zeros(4)
"punto 2"
t[2]=T[2]
p[2]=P[2]
fld.update(CP.PT_INPUTS, p[2], t[2])
h[2]=fld.hmass()
"punto 3"
h[3]=h[2]
t[3]=T[3]
fld.update(CP.HmassT_INPUTS, h[3], t[3])
p[3]=fld.p() # trial value for P
"point 0"
p[0]=p[3]
fld.update(CP.PQ_INPUTS, p[0], 1)
t[0]=fld.T()
h[0]=fld.hmass()
"punto 1"
p[1]=p[2]
t[1]=c.Temperatura_mandata(t[3]-T_ref, t[0]-T_ref, p[1]*10**-5)+T_ref
fld.update(CP.PT_INPUTS, p[1], t[1])
h[1]=fld.hmass()
cop=(h[0]-h[2])/(h[1]-h[0])
print('COP without oil='+str(cop))
plt.figure(dpi=200)
gt.grafico_PH_super_semplice(P/100000,H/1000, 'r',0)
gt.grafico_PH_super_semplice(p/100000,h/1000, 'b',1)
| 7,274 |
newspaper_TextBlob_sentimental_analysis/tweet_collector/get_tweets2.py
|
dashnak90/Docker-data-pipeline
| 0 |
2171114
|
import config
from tweepy import OAuthHandler, Cursor, API
from tweepy.streaming import StreamListener
import logging
import pymongo
# create a connection - (create engine)
client = pymongo.MongoClient('mongodb') #name of the docker container
db = client.tweetsdb #the name of a database*(autom created)
collection = db.tweet_data #create a collection
def authenticate():
"""Function for handling Twitter Authentication. Please note
that this script assumes you have a file called config.py
which stores the 2 required authentication tokens:
1. API_KEY
2. API_SECRET
See course material for instructions on getting your own Twitter credentials.
"""
auth = OAuthHandler(config.API_KEY, config.API_SECRET)
return auth
if __name__ == '__main__':
auth = authenticate()
api = API(auth)
newspapers = ['washingtonpost', 'nytimes'] # choosing 2 different newspapers; avoid shadowing the builtin `list`
for i in newspapers:
cursor = Cursor(
api.user_timeline,
id = i,
tweet_mode = 'extended'
)
for status in cursor.items(100): #get 100 tweets per newspaper
text = status.full_text
# take extended tweets into account
# TODO: CHECK
if 'extended_tweet' in dir(status):
text = status.extended_tweet.full_text
if 'retweeted_status' in dir(status):
r = status.retweeted_status
if 'extended_tweet' in dir(r):
text = r.extended_tweet.full_text
tweet = {
'text': text,
'username': status.user.screen_name
}
collection.insert_one(tweet) #insert into Mongodb
print(tweet)
| 1,787 |
polygon_area_calc/main.py
|
DarkRosaleen/code_camp
| 1 |
2169774
|
import shape_calculator
def main():
rect = shape_calculator.Rectangle(5, 10)
print(rect.get_area())
rect.set_width(3)
print(rect.get_perimeter())
print(rect)
sq = shape_calculator.Square(9)
print(sq.get_area())
sq.set_side(4)
print(sq.get_diagonal())
print(sq)
if __name__ == "__main__":
main()
| 343 |
nowtrade/trading_fee.py
|
lohithn4/NowTrade
| 87 |
2171372
|
"""
Trading fee module contains all the different types of trading fee classes
that can be applied to a trading profile.
"""
from nowtrade import logger
class TradingFee(object):
"""
The base class for all trading fee classes.
Simply initializes the logger for now.
"""
def __init__(self):
self.logger = logger.Logger(self.__class__.__name__)
class StaticFee(TradingFee):
"""
The StaticFee simply applies the same static fee to all entry/exit trades.
"""
def __init__(self, fee):
TradingFee.__init__(self)
self.fee = fee
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'StaticFee(fee=%s)' %self.fee
def __repr__(self):
return 'StaticFee(fee=%s)' %self.fee
def get_fee(self, price, shares): # pylint: disable=unused-argument
"""
Given a symbol price and the amount of shares purchased, this method
will return the fees incurred for the trade.
"""
return self.fee
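# Usage sketch (not part of the original module): a flat commission applied to
# every trade regardless of price or share count.
if __name__ == "__main__":
    fee = StaticFee(9.99)
    print(fee.get_fee(price=150.25, shares=100))  # 9.99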
| 1,021 |
Nova pasta (2)/Trabalho no VS/clientes.py
|
cristest/python
| 0 |
2171208
|
class cliente:
    def __init__(self, nome, telefone):
        self.nome = nome
        self.telefone = telefone

class Conta:
    def __init__(self, saldo, lista_de_clientes, numero):
        self.saldo = saldo
        self.lista_de_clientes = lista_de_clientes
        self.numero = numero
    def Resumo(self):
        print("CC numero {} - saldo {}".format(self.numero, self.saldo))
    def Saque(self, valor):
        # the original body was unfinished; completed as a plausible withdrawal guard
        if self.saldo >= valor:
            self.saldo -= valor
    def Deposito(self, valor):
        # the original signature was unfinished; completed as a plausible deposit
        self.saldo += valor
| 399 |
examples/offline/utils.py
|
BFAnas/tianshou
| 0 |
2169567
|
from typing import Tuple
import d4rl
import gym
import h5py
import numpy as np
from tianshou.data import ReplayBuffer
from tianshou.utils import RunningMeanStd
def load_buffer_d4rl(expert_data_task: str) -> ReplayBuffer:
dataset = d4rl.qlearning_dataset(gym.make(expert_data_task))
replay_buffer = ReplayBuffer.from_data(
obs=dataset["observations"],
act=dataset["actions"],
rew=dataset["rewards"],
done=dataset["terminals"],
obs_next=dataset["next_observations"]
)
return replay_buffer
def load_buffer(buffer_path: str) -> ReplayBuffer:
with h5py.File(buffer_path, "r") as dataset:
buffer = ReplayBuffer.from_data(
obs=dataset["observations"],
act=dataset["actions"],
rew=dataset["rewards"],
done=dataset["terminals"],
obs_next=dataset["next_observations"]
)
return buffer
def normalize_all_obs_in_replay_buffer(
replay_buffer: ReplayBuffer
) -> Tuple[ReplayBuffer, RunningMeanStd]:
# compute obs mean and var
obs_rms = RunningMeanStd()
obs_rms.update(replay_buffer.obs)
_eps = np.finfo(np.float32).eps.item()
# normalize obs
replay_buffer._meta["obs"] = (replay_buffer.obs -
obs_rms.mean) / np.sqrt(obs_rms.var + _eps)
replay_buffer._meta["obs_next"] = (replay_buffer.obs_next -
obs_rms.mean) / np.sqrt(obs_rms.var + _eps)
return replay_buffer, obs_rms
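# Usage sketch (requires the d4rl package and a downloadable dataset; the task
# name below is an example): load an expert buffer, then normalize it.
if __name__ == "__main__":
    buf = load_buffer_d4rl("halfcheetah-expert-v2")
    buf, obs_rms = normalize_all_obs_in_replay_buffer(buf)
    print(len(buf), obs_rms.mean.shape)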
| 1,512 |
src/clever/__init__.py
|
deeso/clever-clown
| 0 |
2170889
|
import dns.message
import dns.name
import dns.query
import dns.rdatatype
from socket import AF_INET as ip4, AF_INET6 as ip6
RECORD_TYPES_ENUMS = {
dns.rdatatype.A: 'A',
dns.rdatatype.A6: 'A6',
dns.rdatatype.AAAA: 'AAAA',
dns.rdatatype.AFSDB: 'AFSDB',
dns.rdatatype.ANY: 'ANY',
dns.rdatatype.APL: 'APL',
dns.rdatatype.AVC: 'AVC',
dns.rdatatype.AXFR: 'AXFR',
dns.rdatatype.CAA: 'CAA',
dns.rdatatype.CDNSKEY: 'CDNSKEY',
dns.rdatatype.CDS: 'CDS',
dns.rdatatype.CERT: 'CERT',
dns.rdatatype.CNAME: 'CNAME',
dns.rdatatype.CSYNC: 'CSYNC',
dns.rdatatype.DHCID: 'DHCID',
dns.rdatatype.DLV: 'DLV',
dns.rdatatype.DNAME: 'DNAME',
dns.rdatatype.DNSKEY: 'DNSKEY',
dns.rdatatype.DS: 'DS',
dns.rdatatype.EUI48: 'EUI48',
dns.rdatatype.EUI64: 'EUI64',
dns.rdatatype.GPOS: 'GPOS',
dns.rdatatype.HINFO: 'HINFO',
dns.rdatatype.HIP: 'HIP',
dns.rdatatype.IPSECKEY: 'IPSECKEY',
dns.rdatatype.ISDN: 'ISDN',
dns.rdatatype.IXFR: 'IXFR',
dns.rdatatype.KEY: 'KEY',
dns.rdatatype.KX: 'KX',
dns.rdatatype.LOC: 'LOC',
dns.rdatatype.MAILA: 'MAILA',
dns.rdatatype.MAILB: 'MAILB',
dns.rdatatype.MB: 'MB',
dns.rdatatype.MD: 'MD',
dns.rdatatype.MF: 'MF',
dns.rdatatype.MG: 'MG',
dns.rdatatype.MINFO: 'MINFO',
dns.rdatatype.MR: 'MR',
dns.rdatatype.MX: 'MX',
dns.rdatatype.NAPTR: 'NAPTR',
dns.rdatatype.NONE: 'NONE',
dns.rdatatype.NS: 'NS',
dns.rdatatype.NSAP: 'NSAP',
dns.rdatatype.NSAP_PTR: 'NSAP_PTR',
dns.rdatatype.NSEC: 'NSEC',
dns.rdatatype.NSEC3: 'NSEC3',
dns.rdatatype.NSEC3PARAM: 'NSEC3PARAM',
dns.rdatatype.NULL: 'NULL',
dns.rdatatype.NXT: 'NXT',
dns.rdatatype.OPT: 'OPT',
dns.rdatatype.PTR: 'PTR',
dns.rdatatype.PX: 'PX',
dns.rdatatype.RP: 'RP',
dns.rdatatype.RRSIG: 'RRSIG',
dns.rdatatype.RT: 'RT',
dns.rdatatype.SIG: 'SIG',
dns.rdatatype.SOA: 'SOA',
dns.rdatatype.SPF: 'SPF',
dns.rdatatype.SRV: 'SRV',
dns.rdatatype.SSHFP: 'SSHFP',
dns.rdatatype.TA: 'TA',
dns.rdatatype.TKEY: 'TKEY',
dns.rdatatype.TLSA: 'TLSA',
dns.rdatatype.TSIG: 'TSIG',
dns.rdatatype.TXT: 'TXT',
dns.rdatatype.UNSPEC: 'UNSPEC',
dns.rdatatype.URI: 'URI',
dns.rdatatype.WKS: 'WKS',
dns.rdatatype.X25: 'X25',
}
RECORD_TYPES = {
'A': dns.rdatatype.A,
'A6': dns.rdatatype.A6,
'AAAA': dns.rdatatype.AAAA,
'AFSDB': dns.rdatatype.AFSDB,
'ANY': dns.rdatatype.ANY,
'APL': dns.rdatatype.APL,
'AVC': dns.rdatatype.AVC,
'AXFR': dns.rdatatype.AXFR,
'CAA': dns.rdatatype.CAA,
'CDNSKEY': dns.rdatatype.CDNSKEY,
'CDS': dns.rdatatype.CDS,
'CERT': dns.rdatatype.CERT,
'CNAME': dns.rdatatype.CNAME,
'CSYNC': dns.rdatatype.CSYNC,
'DHCID': dns.rdatatype.DHCID,
'DLV': dns.rdatatype.DLV,
'DNAME': dns.rdatatype.DNAME,
'DNSKEY': dns.rdatatype.DNSKEY,
'DS': dns.rdatatype.DS,
'EUI48': dns.rdatatype.EUI48,
'EUI64': dns.rdatatype.EUI64,
'GPOS': dns.rdatatype.GPOS,
'HINFO': dns.rdatatype.HINFO,
'HIP': dns.rdatatype.HIP,
'IPSECKEY': dns.rdatatype.IPSECKEY,
'ISDN': dns.rdatatype.ISDN,
'IXFR': dns.rdatatype.IXFR,
'KEY': dns.rdatatype.KEY,
'KX': dns.rdatatype.KX,
'LOC': dns.rdatatype.LOC,
'MAILA': dns.rdatatype.MAILA,
'MAILB': dns.rdatatype.MAILB,
'MB': dns.rdatatype.MB,
'MD': dns.rdatatype.MD,
'MF': dns.rdatatype.MF,
'MG': dns.rdatatype.MG,
'MINFO': dns.rdatatype.MINFO,
'MR': dns.rdatatype.MR,
'MX': dns.rdatatype.MX,
'NAPTR': dns.rdatatype.NAPTR,
'NONE': dns.rdatatype.NONE,
'NS': dns.rdatatype.NS,
'NSAP': dns.rdatatype.NSAP,
'NSAP_PTR': dns.rdatatype.NSAP_PTR,
'NSEC': dns.rdatatype.NSEC,
'NSEC3': dns.rdatatype.NSEC3,
'NSEC3PARAM': dns.rdatatype.NSEC3PARAM,
'NULL': dns.rdatatype.NULL,
'NXT': dns.rdatatype.NXT,
'OPT': dns.rdatatype.OPT,
'PTR': dns.rdatatype.PTR,
'PX': dns.rdatatype.PX,
'RP': dns.rdatatype.RP,
'RRSIG': dns.rdatatype.RRSIG,
'RT': dns.rdatatype.RT,
'SIG': dns.rdatatype.SIG,
'SOA': dns.rdatatype.SOA,
'SPF': dns.rdatatype.SPF,
'SRV': dns.rdatatype.SRV,
'SSHFP': dns.rdatatype.SSHFP,
'TA': dns.rdatatype.TA,
'TKEY': dns.rdatatype.TKEY,
'TLSA': dns.rdatatype.TLSA,
'TSIG': dns.rdatatype.TSIG,
'TXT': dns.rdatatype.TXT,
'UNSPEC': dns.rdatatype.UNSPEC,
'URI': dns.rdatatype.URI,
'WKS': dns.rdatatype.WKS,
'X25': dns.rdatatype.X25,
}
def get_addr_family(af_str):
if af_str == 'ipv6':
return ip6
else:
return ip4
def get_record_type(rt):
return RECORD_TYPES.get(rt, None)
def get_record_str(rt_num):
return RECORD_TYPES_ENUMS.get(rt_num, None)
def get_basic_message(domain_name, query_type='A', qid=None):
if query_type not in RECORD_TYPES:
return None
qname = dns.name.from_text(domain_name)
q = dns.message.make_query(qname, RECORD_TYPES.get(query_type))
if isinstance(qid, int):
q.id = qid
return q
def basic_udp_query(domain_name, query_type, dns_server, dns_port,
af='ipv4', qid=None):
q = get_basic_message(domain_name, query_type, qid=qid)
if q is None:
return None
return dns.query.udp(q, dns_server, port=dns_port, af=get_addr_family(af))
def basic_tcp_query(domain_name, query_type, dns_server, dns_port,
af='ipv4', qid=None):
q = get_basic_message(domain_name, query_type, qid=qid)
if q is None:
return None
return dns.query.tcp(q, dns_server, port=dns_port, af=get_addr_family(af))
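# Usage sketch (not part of the original module; performs a real network
# query): a one-off A-record lookup against a public resolver.
if __name__ == "__main__":
    resp = basic_udp_query("example.com", "A", "8.8.8.8", 53)
    if resp is not None:
        print(resp.answer)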
| 5,617 |
setup.py
|
zkbt/mosasaurus
| 2 |
2170833
|
#!/usr/bin/env python
# The template for this setup.py came from <NAME>,
# who I understand took it from <NAME>. And then Geert
# Barentsen and <NAME> helped explain a few
# more neat tips. Thanks all!
import os, sys
from setuptools import setup, find_packages
# Prepare and send a new release to PyPI
if "release" in sys.argv[-1]:
os.system("python setup.py sdist")
# uncomment this to test out on test.pypi.com/project/tess-zap
# os.system("twine upload --repository-url https://test.pypi.org/legacy/ dist/*")
os.system("twine upload dist/*")
os.system("rm -rf dist/tesszap*")
sys.exit()
# a little kludge to be able to get the version number from the __init__.py
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
builtins.__MOSASAURUS_SETUP__ = True
import mosasaurus
version = mosasaurus.__version__
# pull the long description from the readme
def readme():
with open('README.md') as f:
return f.read()
setup(name = "mosasaurus",
version = version,
description = "Tools for extracting chromatic light curves from MultiObject Spectra.",
long_description = readme(),
author = "<NAME>",
author_email = "<EMAIL>",
url = "https://github.com/zkbt/mosasaurus",
packages = find_packages(),
package_data = {'mosasaurus': [ '../data/LDSS3C/vph-red/*',
'../data/LDSS3C/vph-all/*']},
include_package_data=True,
scripts = [],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Astronomy'
],
install_requires=['numpy', 'astropy', 'astroquery', 'scipy', 'matplotlib', 'craftroom'], #'emcee', 'corner',
dependency_links=['git+https://github.com/zkbt/craftroom.git@master#egg=craftroom'],
zip_safe=False,
license='MIT',
)
| 1,899 |
FireBolt/firewall.py
|
shashi28/nuts
| 1 |
2171675
|
import platform
import pydivert
from pydivert.windivert import *
from pydivert.winutils import *
from pydivert.enum import *
from pydivert.models import *
from pydivert.decorators import *
from PyQt4.QtCore import *
import impacket
from impacket.ImpactDecoder import EthDecoder
version = '1.0'
class Bolt(QThread):
def __init__(self,parent = None):
super(Bolt,self).__init__(parent)
self.block = True
driver_dir = os.path.join(os.path.realpath(os.curdir), "lib", version)
if platform.architecture()[0] == "32bit":
driver_dir = os.path.join(driver_dir, "x86")
else:
driver_dir = os.path.join(driver_dir, "amd64")
os.chdir(driver_dir)
reg_key = r"SYSTEM\CurrentControlSet\Services\WinDivert" + version
dll_path = os.path.join(driver_dir, "WinDivert.dll")
self.dev = WinDivert(dll_path)
self.dev.register()
self.decoder = EthDecoder()
def drop(self):
with Handle(filter=self.filter,layer=Layer.NETWORK,priority=0,flags=0) as handle:
while self.block:
rawdata = handle.recv()
self.pkt = self.dev.parse_packet(rawdata)
protocol = self.calcProtocol()
self.emit(SIGNAL('tableinput(QString,QString,QString,QString,QString,QString)'),str(self.pkt.src_addr),str(self.pkt.dst_addr),str(protocol),str(self.pkt.src_port),str(self.pkt.dst_port),str(self.pkt))
def calcProtocol(self):
if self.pkt.ipv4_hdr is not None:
if self.pkt.ipv4_hdr.Protocol == 1:
return 'icmp'
elif self.pkt.ipv4_hdr.Protocol == 6:
return 'tcp'
elif self.pkt.ipv4_hdr.Protocol == 17:
return 'udp'
def run(self):
self.drop()
self.exec_()
def setFilter(self,filtr):
self.filter = str(filtr)
self.block = True
def handle_slot_stop(self):
self.block = False
| 1,979 |
resources/python/KemendagriKTP/xlsToCsv.py
|
freezyoff/kosan-server
| 0 |
2171456
|
import pandas as exporter
import glob
import os
def convert(src, dest):
read_file = exporter.read_excel(src)
read_file.to_csv(dest, index = None, header=True)
# convert all files in directory
# @param srcDir (list) - source dir path
# @param srcExt (string) - source file extension
# @param destDir (string) - destination path
def convertAll(srcDir, srcExt, destDir):
for item in glob.glob(srcDir+"/*."+str(srcExt)):
list = item.split("/")
filename = list[len(list)-1]
exportToCsv(item, destDir+"/"+filename.replace("."+srcExt,".csv"))
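# Example calls (paths are illustrative):
# convert("data/ktp.xls", "data/ktp.csv")
# convertAll("data/xls", "xls", "data/csv")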
| 539 |
rplidar_ws/lidarDrive.py
|
OjasMor/donkeycar-LIDAR
| 0 |
2171193
|
#!/usr/bin/env python3
"""
Scripts to drive a donkey 2 car
Usage:
manage.py (drive)
Options:
-h --help Show this screen.
"""
import os
import time
import math
import numpy as np
import cv2
import threading
from picamera.array import PiRGBArray
from docopt import docopt
from lidarInfo import *
import donkeycar as dk
from donkeycar.parts.datastore import TubHandler
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
from donkeycar.parts.camera import PiCamera
class MyCVController:
'''
CV based controller
'''
throttle = 0.2
def run(self, cam_img):
steering = 0
throttle = 0
left_ang_avg = 0
left_dis_avg = 0
right_ang_avg = 0
right_dis_avg = 0
cent_ang_avg = 0
cent_dis_avg = 0
        final_ang_avg = 0
        final_dis_avg = 0
recording = False
frame = cam_img
data = getScan()
length = len(data[0])
# print (data)
data_counter = 1
# Here we split the 180 degree zone in front of the car into 3 segments: right, left, and center
for i in range (len(data[0])):
if data[0][i] >= 90.0 and data[0][i] < 270.0:
continue
elif data[1][i] == 0:
data[1][i] = 10000
if data[1][i] != 0 and data[0][i] >= 30.0 and data[0][i] < 90.0:
#left_ang_avg = left_ang_avg + data[0][i]
left_ang_avg = 60
left_dis_avg = left_dis_avg + data[1][i]
data_counter = data_counter + 1
elif data[1][i] != 0 and data[0][i] >= 270.0 and data[0][i] < 330.0:
#right_ang_avg = right_ang_avg + data[0][i]
right_ang_avg = 300
right_dis_avg = right_dis_avg + data[1][i]
data_counter = data_counter + 1
            elif data[1][i] != 0 and ((data[0][i] < 30.0 and data[0][i] > 0) or (data[0][i] >= 330.0)):  # center segment: within 30 degrees of straight ahead
#cent_ang_avg = cent_ang_avg + data[0][i]
cent_ang_avg = 0
cent_dis_avg = cent_dis_avg + data[1][i]
data_counter = data_counter + 1
else:
continue
# Average all the data points scanned by the Lidar in each of the 3 segments
#left_ang_avg = left_ang_avg / length
right_dis_avg = right_dis_avg / data_counter
#right_ang_avg = right_ang_avg / length
left_dis_avg = left_dis_avg / data_counter
#cent_ang_avg = cent_ang_avg / length
cent_dis_avg = cent_dis_avg / data_counter
# Radian/Cartesian conversions
right_y = math.sin(math.pi/3) * right_dis_avg
right_x = math.cos(math.pi/3) * right_dis_avg
left_y = math.sin(math.pi/3) * left_dis_avg
left_x = -1 * math.cos(math.pi/3) * left_dis_avg
center_y = cent_dis_avg
sum_x = round(left_x + right_x,2)
sum_y = round(center_y - (left_y + right_y)/2,2)
if sum_y < 100:
sum_y = 100
sum_angle = math.atan2(sum_x,sum_y) * 180.0 / math.pi
# print (left_x, left_y, right_x, right_y, center_y, sum_x, sum_y, sum_angle) --For debugging
print (left_dis_avg, cent_dis_avg, right_dis_avg)
if (sum_angle > -90 and sum_angle < 0):
steering = (((sum_angle + 90) * 1) / 90) + 0
else:
steering = (((sum_angle - 0) * 1) / 90) - 1
print (steering)
        if (right_dis_avg < 600 and left_dis_avg < 600 and cent_dis_avg < 370) or cent_dis_avg < 250:  # the 250 close-range cutoff on the center reading is an assumed value
throttle = 0
else:
throttle = 0.21
print (throttle)
return steering ,throttle, recording
# ----------Default code below, no need to edit below this line----------
def drive(cfg):
'''
Construct a working robotic vehicle from many parts.
Each part runs as a job in the Vehicle loop, calling either
it's run or run_threaded method depending on the constructor flag `threaded`.
All parts are updated one after another at the framerate given in
cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
Parts may have named outputs and inputs. The framework handles passing named outputs
to parts requesting the same named input.
'''
# Initialize car
V = dk.vehicle.Vehicle()
# Camera
camera = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
V.add(camera, outputs=['cam/image_array'], threaded=True)
# Controller
V.add(MyCVController(),
inputs=['cam/image_array'],
outputs=['steering', 'throttle', 'recording'])
# Sombrero
if cfg.HAVE_SOMBRERO:
from donkeycar.parts.sombrero import Sombrero
s = Sombrero()
# Drive train setup
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PC$
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
| 5,193 |
day1/day1.py
|
alexmotoc/AoC17
| 0 |
2169892
|
with open('day1_input.txt') as file:
    input = file.read().strip()  # strip the trailing newline so only digits are compared
# Part 1
sum = 0
for i in range(len(input) - 1):
if input[i] == input[i + 1]:
sum += int(input[i])
if input[len(input) - 1] == input[0]:
sum += int(input[0])
print(sum)
# Part 2
sum = 0
forward = len(input) // 2  # integer division: range() requires an int
for i in range(forward):
if input[i] == input[i + forward]:
sum += int(input[i])
for i in range(forward, len(input)):
if input[i] == input[(i + forward) % len(input)]:
sum += int(input[i])
print(sum)
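# Reference examples from the puzzle statement: part 1 gives "1122" -> 3 and
# "91212129" -> 9; part 2 gives "1212" -> 6 and "123123" -> 12.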
| 518 |
flaat/caches.py
|
BorjaEst/flaat
| 1 |
2171398
|
from cachetools import LRUCache, TTLCache
from flaat.user_infos import UserInfos
class UserInfoCache(LRUCache):
"""This caches user_infos for access tokens for an unspecified time.
Before yielding UserInfos, the validity of user infos is checked."""
def __getitem__(self, key):
def _fail(msg):
self.__delitem__(key)
raise KeyError(msg)
item = super().__getitem__(key)
if isinstance(item, UserInfos):
if item.valid_for_secs is None:
_fail("Cache entry validity can not be determined")
if item.valid_for_secs <= 0: # pragma: no cover
_fail("Cache entry has expired")
return item
# cache at most 1024 user infos until they are expired
user_infos_cache = UserInfoCache(maxsize=1024)
# cache issuer configs for an hour
issuer_config_cache = TTLCache(maxsize=128, ttl=3600)
# cache access_token_issuer mappings indefinitely
access_token_issuer_cache = LRUCache(maxsize=1024)
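# A minimal usage sketch (the token string and UserInfos instance are
# hypothetical placeholders, not values produced by this module):
#   user_infos_cache["access-token-abc"] = some_user_infos
#   try:
#       infos = user_infos_cache["access-token-abc"]
#   except KeyError:
#       pass  # entry was missing, of unknown validity, or expired (and evicted)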
| 1,003 |
modbus_client/types.py
|
KrystianD/modbus_client
| 0 |
2166512
|
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, Tuple, Union
class RegisterType(Enum):
Coil = 1
DiscreteInputs = 2
InputRegister = 4
HoldingRegister = 5
class RegisterValueType(str, Enum):
S16 = 'int16'
U16 = 'uint16'
S32BE = 'int32be'
S32LE = 'int32le'
U32BE = 'uint32be'
U32LE = 'uint32le'
F32BE = 'float32be'
F32LE = 'float32le'
RegisterValue = Union[int, bool]
@dataclass
class ModbusReadSession:
registers_dict: Dict[Tuple[RegisterType, int], RegisterValue] = field(default_factory=dict)
__all__ = [
"RegisterType",
"ModbusReadSession",
"RegisterValueType",
]
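# Illustrative only: a read session maps (register type, address) to a value.
#   session = ModbusReadSession()
#   session.registers_dict[(RegisterType.HoldingRegister, 100)] = 1234
#   session.registers_dict[(RegisterType.Coil, 3)] = True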
| 679 |
project/api/serializers.py
|
asgray/devroastproject
| 1 |
2171647
|
from rest_framework.fields import SerializerMethodField
from rest_framework.serializers import ModelSerializer
from project.models import Project, Comment, Tag, User, Vote
class TagSerializer(ModelSerializer):
class Meta:
model = Tag
fields = ['id', 'tagname', 'description']
class VoteSerializer(ModelSerializer):
class Meta:
model = Vote
fields = ['id', 'user', 'comment', 'positive']
class CommentSerializer(ModelSerializer):
replies = SerializerMethodField()
# tags = TagSerializer(source='tag_set', many=True)
username = SerializerMethodField()
votes = SerializerMethodField()
class Meta:
model = Comment
fields = '__all__'
def get_replies(self, obj):
return CommentSerializer(obj.replies.filter(prompt__isnull=False), many=True).data
def get_username(self, obj):
return User.objects.get(username=obj.user).username
def get_votes(self, obj):
# query votes related to the comment
votes = VoteSerializer(Vote.objects.filter(comment=obj.id).select_related().order_by('user'), many=True).data
# query users related to the votes
related_users = User.objects.filter(pk__in=[vote['user'] for vote in votes]).order_by('id')
# extract user names
user_names = [user.username for user in related_users]
# add user names to votes (both queries are sorted by the same value)
for i in range(len(votes)):
votes[i]['username'] = user_names[i]
return votes
class ProjectSerializer(ModelSerializer):
comments = SerializerMethodField()
comment_count = SerializerMethodField()
username = SerializerMethodField()
tags = SerializerMethodField()
class Meta:
model = Project
fields = '__all__'
def get_comments(self, obj):
return CommentSerializer(obj.comments.filter(prompt__isnull=True), many=True).data
def get_comment_count(self, obj):
return obj.comments.count()
def get_username(self, obj):
return User.objects.get(username=obj.user).username
def get_tags(self, obj):
return TagSerializer(Tag.objects.filter(project=obj), many=True).data
| 2,242 |
network/discovery.py
|
andycavatorta/thirtybirds3.0
| 2 |
2171360
|
#!/usr/bin/python
"""
Intended use:
Multiple hosts on a LAN can use this script to create a self-assembling network.
One host is configured as the server and listens for IP broadcast messages
on a specific IP and port.
Other hosts are configured as clients which send broadcast messages containing
the client hostname and IP on a specific IP and port.
When the server receives a broadcast message from a client, it sends a
return message containing the server hostname and IP.
Both client and server report the interaction to a discovery_update_receiver method that is passed
into this module's init function.
This script may eventually support other methods such as connection brokers.
"""
import json
import os
import socket
import struct
import sys
import threading
import time
import yaml
import zmq
root_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(root_path[0:root_path.find("/thirtybirds")])
from thirtybirds3.reporting.exceptions import capture_exceptions
from . import Network_Defaults
#####################
##### RESPONDER #####
#####################
@capture_exceptions.Class
class Responder(threading.Thread):
def __init__(
self,
hostname,
local_ip,
discovery_multicast_group,
discovery_multicast_port,
discovery_response_port,
discovery_update_receiver,
caller_period):
threading.Thread.__init__(self)
self.hostname = hostname
self.local_ip = local_ip
self.discovery_multicast_port = discovery_multicast_port
self.discovery_response_port = discovery_response_port
self.discovery_update_receiver = discovery_update_receiver
self.caller_period = caller_period
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((discovery_multicast_group, discovery_multicast_port))
self.multicast_request = struct.pack("4sl", socket.inet_aton(discovery_multicast_group), socket.INADDR_ANY)
self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, self.multicast_request)
#self.last_responese_by_ip = {}
def response(self, remoteIP, msg_json): # response sends the local IP to the remote device
#if remoteIP in self.last_responese_by_ip.keys():
# if self.last_responese_by_ip[remoteIP] + (self.caller_period * 2) > time.time():
# return
#else:
# self.last_responese_by_ip[remoteIP] = time.time()
        context = zmq.Context()
        zmq_socket = context.socket(zmq.PAIR)  # named to avoid shadowing the stdlib socket module
        zmq_socket.connect("tcp://%s:%s" % (remoteIP, self.discovery_response_port))
        zmq_socket.send(msg_json)
        zmq_socket.close()
def run(self):
while True:
msg_json = self.sock.recv(1024)
msg_d = yaml.safe_load(msg_json)
remoteIP = msg_d["ip"]
print("remote ip discovered by thirtybirds:",remoteIP)
msg_d["status"] = "device_discovered"
if self.discovery_update_receiver:
resp_d = self.discovery_update_receiver(msg_d)
resp_json = json.dumps({"ip":self.local_ip,"hostname":socket.gethostname()})
resp_json = str.encode(resp_json)
self.response(remoteIP,resp_json)
##################
##### CALLER #####
##################
@capture_exceptions.Class
class Caller_Send(threading.Thread):
def __init__(self, local_hostname, local_ip, discovery_multicast_group, discovery_multicast_port, caller_period):
threading.Thread.__init__(self)
self.discovery_multicast_group = discovery_multicast_group
self.discovery_multicast_port = discovery_multicast_port
self.caller_period = caller_period
self.multicast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.multicast_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
self.msg_d = {"ip":local_ip,"hostname":local_hostname}
self.msg_json = json.dumps(self.msg_d)
self.mcast_msg = bytes(self.msg_json, 'utf-8')
self.active = True
self.lock = threading.Lock()
    def set_active(self, val):
        self.lock.acquire()
        self.active = val
        self.lock.release()
    def run(self):
        while True:
            self.lock.acquire()
            active = bool(self.active)
            self.lock.release()
if active == True:
self.multicast_socket.sendto(self.mcast_msg, (self.discovery_multicast_group, self.discovery_multicast_port))
time.sleep(self.caller_period)
@capture_exceptions.Class
class Caller_Recv(threading.Thread):
def __init__(self, recv_port, discovery_update_receiver, caller_send):
threading.Thread.__init__(self)
self.discovery_update_receiver = discovery_update_receiver
self.caller_send = caller_send
self.listen_context = zmq.Context()
self.listen_sock = self.listen_context.socket(zmq.PAIR)
self.listen_sock.bind("tcp://*:%d" % recv_port)
self.msg = ""
self.server_ip = ""
def run(self):
while True:
msg_json = self.listen_sock.recv()
msg_d = yaml.safe_load(msg_json)
msg_d["status"] = "device_discovered"
if self.discovery_update_receiver:
self.discovery_update_receiver(msg_d)
###################
##### WRAPPER #####
###################
@capture_exceptions.Class
class Discovery():
def __init__(
self,
ip_address,
hostname,
controller_hostname,
discovery_multicast_group,
discovery_multicast_port,
discovery_response_port,
caller_period,
discovery_update_receiver,
exception_receiver,
status_receiver
):
capture_exceptions.init(exception_receiver)
self.ip_address = ip_address
self.hostname = hostname
self.controller_hostname = controller_hostname
self.discovery_multicast_group = discovery_multicast_group
self.discovery_multicast_port = discovery_multicast_port
self.discovery_response_port = discovery_response_port
self.caller_period = caller_period
self.discovery_update_receiver = discovery_update_receiver
self.status_receiver = status_receiver
self.exception_receiver = exception_receiver
self.role = Network_Defaults.DISCOVERY_ROLE_RESPONDER if hostname == controller_hostname else Network_Defaults.DISCOVERY_ROLE_CALLER
self.server_ip = ""
self.status_receiver.collect("starting",self.status_receiver.types.INITIALIZATIONS)
if self.role == Network_Defaults.DISCOVERY_ROLE_RESPONDER:
self.responder = Responder(
self.hostname,
self.ip_address,
self.discovery_multicast_group,
self.discovery_multicast_port,
self.discovery_response_port,
self.discovery_update_receiver,
self.caller_period
)
self.responder.daemon = True
self.responder.start()
if self.role == Network_Defaults.DISCOVERY_ROLE_CALLER:
self.caller_send = Caller_Send(
self.hostname,
self.ip_address,
self.discovery_multicast_group,
self.discovery_multicast_port,
self.caller_period
)
self.caller_recv = Caller_Recv(
self.discovery_response_port,
self.discovery_update_receiver,
self.caller_send
)
self.caller_recv.daemon = True
self.caller_send.daemon = True
self.caller_recv.start()
self.caller_send.start()
self.status_receiver.collect("started",self.status_receiver.types.INITIALIZATIONS)
def start_caller(self):
if self.role == Network_Defaults.DISCOVERY_ROLE_CALLER:
self.caller_send.set_active(True)
def end_caller(self):
if self.role == Network_Defaults.DISCOVERY_ROLE_CALLER:
self.caller_send.set_active(False)
| 8,337 |
kedb/files/wsgi.py
|
salt-formulas/salt-formula-kedb
| 0 |
2171452
|
{% from "kedb/map.jinja" import server with context %}
import os
import sys
sys.stdout = sys.stderr
import site
site.addsitedir('/srv/kedb/lib/python{{ server.python_version }}/site-packages')
#os.environ['PYTHON_EGG_CACHE'] = '/www/lostquery.com/mod_wsgi/egg-cache'
sys.path.append('/srv/kedb/kedb')
sys.path.append('/srv/kedb/site')
os.environ['DJANGO_SETTINGS_MODULE'] = 'core.settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
| 495 |
tests/controller/test_system.py
|
vyahello/trump-bullet
| 0 |
2168567
|
import pytest
from app import PropertyError
from app.controller.systems import SystemProperty
from app.model.character import Character
from app.model.images import ScreenImages
from app.model.visual import Clock
def test_system_images(system_property: SystemProperty) -> None:
assert isinstance(system_property.images(), ScreenImages)
def test_system_character(system_property: SystemProperty) -> None:
assert isinstance(system_property.character(), Character)
def test_system_clock(system_property: SystemProperty) -> None:
assert isinstance(system_property.clock(), Clock)
def test_system_empty_bullets(system_property: SystemProperty) -> None:
assert not system_property.bullets()
def test_system_is_run(system_property: SystemProperty) -> None:
assert system_property.is_run is True
def test_set_system_is_run(system_property: SystemProperty) -> None:
system_property.is_run = False
assert system_property.is_run is False
def test_set_error_system_is_run(system_property: SystemProperty) -> None:
with pytest.raises(PropertyError):
system_property.is_run = None
def test_system_last_move(system_property: SystemProperty) -> None:
assert system_property.last_move == "right"
def test_set_system_last_move(system_property: SystemProperty) -> None:
system_property.last_move = "left"
assert system_property.last_move == "left"
def test_set_error_system_last_move(system_property: SystemProperty) -> None:
with pytest.raises(PropertyError):
system_property.last_move = None
| 1,559 |
rest_registration/utils/common.py
|
FelipeSanchezCalzada/django-rest-registration
| 0 |
2171403
|
from typing import Any, Callable, Union
LazyBool = Callable[[], bool]
def identity(value: Any) -> Any:
return value
def implies(premise: bool, conclusion: Union[bool, LazyBool]) -> bool:
"""
Calculate material implication for given premise and conclusion.
The conclusion may be lazy evaluated if it is of LazyBool type.
>>> implies(True, True)
True
>>> implies(True, False)
False
>>> implies(False, True)
True
>>> implies(False, False)
True
>>> implies(True, lambda: True)
True
>>> implies(True, lambda: False)
False
>>> implies(False, lambda: True)
True
>>> implies(False, lambda: False)
True
"""
if not premise:
return True
_conclusion = conclusion() if callable(conclusion) else conclusion
return _conclusion
| 821 |
esmvalcore/preprocessor/_derive/gtfgco2.py
|
aperezpredictia/ESMValCore
| 1 |
2166776
|
"""Derivation of variable `gtfgco2`."""
import iris
import numpy as np
from ._baseclass import DerivedVariableBase
def calculate_total_flux(fgco2_cube, cube_area):
"""
Calculate the area of unmasked cube cells.
Requires a cube with two spacial dimensions. (no depth coordinate).
Parameters
----------
cube: iris.cube.Cube
Data Cube
cube_area: iris.cube.Cube
Cell area Cube
Returns
-------
numpy.array:
An numpy array containing the total flux of CO2.
"""
data = []
times = fgco2_cube.coord('time')
fgco2_cube.data = np.ma.array(fgco2_cube.data)
for time_itr in np.arange(len(times.points)):
total_flux = fgco2_cube[time_itr].data * cube_area.data
total_flux = np.ma.masked_where(fgco2_cube[time_itr].data.mask,
total_flux)
data.append(total_flux.sum())
######
# Create a small dummy output array
data = np.array(data)
return data
class DerivedVariable(DerivedVariableBase):
"""Derivation of variable `gtfgco2`."""
@staticmethod
def required(project):
"""Declare the variables needed for derivation."""
required = [
{
'short_name': 'fgco2',
'mip': 'Omon'
},
{
'short_name': 'areacello',
'mip': 'fx'
},
]
if project == 'CMIP6':
required = [
{
'short_name': 'fgco2',
'mip': 'Omon'
},
{
'short_name': 'areacello',
'mip': 'Ofx'
},
]
return required
@staticmethod
def calculate(cubes):
"""Compute longwave cloud radiative effect."""
fgco2_cube = cubes.extract_strict(
iris.Constraint(name='surface_downward_mass_flux_of_carbon_dioxide'
'_expressed_as_carbon'))
        try:
            cube_area = cubes.extract_strict(iris.Constraint(name='cell_area'))
        except iris.exceptions.ConstraintMismatchError:
            raise ValueError(
                'A cell_area (areacello) cube is required to derive gtfgco2')
total_flux = calculate_total_flux(fgco2_cube, cube_area)
# Dummy result cube
result = fgco2_cube.collapsed(
['latitude', 'longitude'],
iris.analysis.MEAN,
)
result.units = fgco2_cube.units * cube_area.units
result.data = total_flux
return result
| 2,526 |
pyaz/mysql/server/ad_admin/__init__.py
|
py-az-cli/py-az-cli
| 0 |
2167639
|
from .... pyaz_utils import _call_az
def create(resource_group, server_name, display_name=None, no_wait=None, object_id=None):
'''
Create an Active Directory administrator for MySQL server.
Required Parameters:
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- server_name -- Name of the server. The name can contain only lowercase letters, numbers, and the hyphen (-) character. Minimum 3 characters and maximum 63 characters.
Optional Parameters:
- display_name -- Display name of the Azure AD administrator user or group.
- no_wait -- Do not wait for the long-running operation to finish.
- object_id -- The unique ID of the Azure AD administrator.
'''
return _call_az("az mysql server ad-admin create", locals())
def list(resource_group, server_name):
'''
List all Active Directory Administrators for MySQL server.
Required Parameters:
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- server_name -- Name of the server. The name can contain only lowercase letters, numbers, and the hyphen (-) character. Minimum 3 characters and maximum 63 characters.
'''
return _call_az("az mysql server ad-admin list", locals())
def delete(resource_group, server_name, yes=None):
'''
Delete an Active Directory Administrator for MySQL server.
Required Parameters:
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- server_name -- Name of the server. The name can contain only lowercase letters, numbers, and the hyphen (-) character. Minimum 3 characters and maximum 63 characters.
Optional Parameters:
- yes -- Do not prompt for confirmation.
'''
return _call_az("az mysql server ad-admin delete", locals())
def show(resource_group, server_name):
'''
Get Active Directory Administrator information for a MySQL server.
Required Parameters:
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- server_name -- Name of the server. The name can contain only lowercase letters, numbers, and the hyphen (-) character. Minimum 3 characters and maximum 63 characters.
'''
return _call_az("az mysql server ad-admin show", locals())
def wait(resource_group, server_name, created=None, custom=None, deleted=None, exists=None, interval=None, timeout=None, updated=None):
'''
Place the CLI in a waiting state until a condition of the MySQL server Active Directory Administrator is met.
Required Parameters:
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- server_name -- Name of the server. The name can contain only lowercase letters, numbers, and the hyphen (-) character. Minimum 3 characters and maximum 63 characters.
Optional Parameters:
- created -- wait until created with 'provisioningState' at 'Succeeded'
- custom -- Wait until the condition satisfies a custom JMESPath query. E.g. provisioningState!='InProgress', instanceView.statuses[?code=='PowerState/running']
- deleted -- wait until deleted
- exists -- wait until the resource exists
- interval -- polling interval in seconds
- timeout -- maximum wait in seconds
- updated -- wait until updated with provisioningState at 'Succeeded'
'''
return _call_az("az mysql server ad-admin wait", locals())
| 3,638 |
src/kubeseal_auto/kubeseal.py
|
shini4i/kubeseal-auto
| 0 |
2170883
|
import subprocess
from tempfile import NamedTemporaryFile
import click
import questionary
import yaml
from colorama import Fore
from icecream import ic
from kubeseal_auto.cluster import Cluster
class Kubeseal:
def __init__(self, select_context: bool, certificate=None):
self.detached_mode = False
if certificate is not None:
click.echo("===> Working in a detached mode")
self.detached_mode = True
self.certificate = certificate
else:
cluster = Cluster(select_context=select_context)
self.controller_name = cluster.get_controller_name()
self.controller_namespace = cluster.get_controller_namespace()
self.current_context_name = cluster.get_context()
self.namespaces_list = cluster.get_all_namespaces()
self.temp_file = NamedTemporaryFile()
def __del__(self):
click.echo("===> Removing temporary file")
self.temp_file.close()
def collect_parameters(self) -> dict:
if self.detached_mode:
namespace = questionary.text(
"Provide namespace for the new secret"
).unsafe_ask()
else:
namespace = questionary.select(
"Select namespace for the new secret",
choices=self.namespaces_list,
).unsafe_ask()
secret_type = questionary.select(
"Select secret type to create",
choices=["generic", "tls", "docker-registry"],
).unsafe_ask()
secret_name = questionary.text("Provide name for the new secret").unsafe_ask()
return {"namespace": namespace, "type": secret_type, "name": secret_name}
def create_generic_secret(self, secret_params: dict):
click.echo(
"===> Provide literal entry/entries one per line: "
f"[{Fore.CYAN}literal{Fore.RESET}] key=value "
f"[{Fore.CYAN}file{Fore.RESET}] filename"
)
secrets = questionary.text(
"Secret Entries one per line", multiline=True
).unsafe_ask()
ic(secrets)
click.echo("===> Generating a temporary generic secret yaml file")
secret_entries = ""
for secret in secrets.split():
if "=" in secret:
secret_entries = f"{secret_entries} --from-literal={secret}"
else:
secret_entries = f"{secret_entries} --from-file={secret}"
command = (
f"kubectl create secret generic {secret_params['name']} {secret_entries} "
f"--namespace {secret_params['namespace']} --dry-run=client -o yaml "
f"> {self.temp_file.name}"
)
ic(command)
subprocess.call(command, shell=True)
def create_tls_secret(self, secret_params: dict):
click.echo("===> Generating a temporary tls secret yaml file")
command = (
f"kubectl create secret tls {secret_params['name']} "
f"--namespace {secret_params['namespace']} --key tls.key --cert tls.crt "
f"--dry-run=client -o yaml > {self.temp_file.name}"
)
ic(command)
subprocess.call(command, shell=True)
def create_regcred_secret(self, secret_params: dict):
click.echo("===> Generating a temporary tls secret yaml file")
docker_server = questionary.text("Provide docker-server").unsafe_ask()
docker_username = questionary.text("Provide docker-username").unsafe_ask()
docker_password = questionary.text("Provide docker-password").unsafe_ask()
command = (
f"kubectl create secret docker-registry {secret_params['name']} "
f"--namespace {secret_params['namespace']} "
f"--docker-server={docker_server} "
f"--docker-username={docker_username} "
f"--docker-password={<PASSWORD>} "
f"--dry-run=client -o yaml > {self.temp_file.name}"
)
ic(command)
subprocess.call(command, shell=True)
def seal(self, secret_name: str):
click.echo("===> Sealing generated secret file")
if self.detached_mode:
command = (
f"kubeseal --format=yaml "
f"--cert={self.certificate} < {self.temp_file.name} "
f"> {secret_name}.yaml"
)
else:
command = (
f"kubeseal --format=yaml "
f"--context={self.current_context_name} "
f"--controller-namespace={self.controller_namespace} "
f"--controller-name={self.controller_name} < {self.temp_file.name} "
f"> {secret_name}.yaml"
)
ic(command)
subprocess.call(command, shell=True)
self.append_argo_annotation(filename=f"{secret_name}.yaml")
click.echo("===> Done")
@staticmethod
def parse_existing_secret(secret_name: str):
try:
with open(secret_name, "r") as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError:
click.echo("Provided file is an invalid yaml. Aborting.")
exit(1)
except FileNotFoundError:
click.echo("Provided file does not exists. Aborting.")
exit(1)
def merge(self, secret_name: str):
click.echo(f"===> Updating {secret_name}")
if self.detached_mode:
command = (
f"kubeseal --format=yaml --merge-into {secret_name} "
f"--cert={self.certificate} < {self.temp_file.name} "
)
else:
command = (
f"kubeseal --format=yaml --merge-into {secret_name} "
f"--context={self.current_context_name} "
f"--controller-namespace={self.controller_namespace} "
f"--controller-name={self.controller_name} < {self.temp_file.name}"
)
ic(command)
subprocess.call(command, shell=True)
self.append_argo_annotation(filename=secret_name)
click.echo("===> Done")
def append_argo_annotation(self, filename: str):
"""
This method is used to append an annotations that will allow
ArgoCD to process git repository which has SealedSecrets before
the related controller is deployed in the cluster
Parameters:
filename: the filename of the resulting yaml file
"""
secret = self.parse_existing_secret(filename)
click.echo("===> Appending ArgoCD related annotations")
secret["metadata"]["annotations"] = {
"argocd.argoproj.io/sync-options": "SkipDryRunOnMissingResource=true"
}
with open(filename, "w") as file:
yaml.dump(secret, file)
def fetch_certificate(self):
"""
This method downloads a certificate that can be used in the future
to encrypt secrets without direct access to the cluster
"""
click.echo("===> Downloading certificate for kubeseal...")
command = (
f"kubeseal --controller-namespace {self.controller_namespace} "
f"--context={self.current_context_name} "
f"--controller-name {self.controller_name} --fetch-cert "
f"> {self.current_context_name}-kubeseal-cert.crt"
)
ic(command)
subprocess.call(command, shell=True)
click.echo(
f"===> Saved to {Fore.CYAN}{self.current_context_name}-kubeseal-cert.crt"
)
| 7,541 |
smarthome_kvant/remote_control/urls.py
|
S1r0pchik/SmartHome_Kvant
| 6 |
2167573
|
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.index, name="home"),
path('led/<int:num>', views.led_1, name="led_1"),
path('termometr', views.termometr, name="termometr"),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/', include('accounts.urls')),
path('accounts/', include('allauth.urls')),
]
| 385 |
src/PhoneticArray.py
|
zhu2qian1/WordGenerator
| 0 |
2171690
|
from typing import OrderedDict as OrderedDictType
class PhoneticArray:
def __init__(self, phonetic_array: OrderedDictType):
self.__phonetic_array = phonetic_array
def __repr__(self):
return repr(self.__phonetic_array)
    @property
    def written(self) -> str:
        return "".join(self.__phonetic_array.keys())
    @property
    def features(self) -> list:
        return list(self.__phonetic_array.values())
    @property
    def chart(self) -> list:
        return [list(self.__phonetic_array.keys()), list(self.__phonetic_array.values())]
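# Illustrative usage (the phonemes and feature labels are made up):
#   from collections import OrderedDict
#   pa = PhoneticArray(OrderedDict([("p", ["voiceless", "bilabial"]),
#                                   ("a", ["open", "front"])]))
#   pa.written   # -> "pa"
#   pa.features  # -> [["voiceless", "bilabial"], ["open", "front"]]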
| 595 |
kerasTUT/3-backend.py
|
hebangyi/tutorials
| 10,786 |
2168603
|
"""
To know more or get code samples, please visit my website:
https://mofanpy.com/tutorials/
Or search: 莫烦Python
Thank you for supporting!
"""
# please note, all tutorial code is running under python3.5.
# If you use a version like python2.7, please modify the code accordingly
# 3 - backend
"""
Details are showing in the video.
----------------------
Method 1:
If you have run Keras at least once, you will find the Keras configuration file at:
~/.keras/keras.json
If it isn't there, you can create it.
The default configuration file looks like this:
{
"image_dim_ordering": "tf",
"epsilon": 1e-07,
"floatx": "float32",
"backend": "theano"
}
Simply change the field backend to either "theano" or "tensorflow",
and Keras will use the new configuration next time you run any Keras code.
----------------------------
Method 2:
define this before import keras:
>>> import os
>>> os.environ['KERAS_BACKEND']='theano'
>>> import keras
Using Theano backend.
"""
| 992 |
data_visualization_gui/src/dynamic.py
|
bbueno5000/DataGUI
| 0 |
2169267
|
"""
DOCSTRING
"""
# standard
import datetime
# non-standard
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas_datareader.data as web
app = dash.Dash()
stock = 'GOOGL'
start = datetime.datetime(2015, 1, 1)
end = datetime.datetime(2018, 2, 8)
df = web.DataReader(stock, 'google', start, end)
df.reset_index(inplace=True)
df.set_index("Date", inplace=True)
df = df.drop("Symbol", axis=1)
app.layout = html.Div(children=[html.Div(children='ticker symbol'),
dcc.Input(id='input', value='', type='text'),
html.Div(id='output-graph')])
@app.callback(dash.dependencies.Output(component_id='output-graph', component_property='children'),
[dash.dependencies.Input(component_id='input', component_property='value')])
def update_value(input_data):
"""
DOCSTRING
"""
start = datetime.datetime(2015, 1, 1)
end = datetime.datetime.now()
df = web.DataReader(input_data, 'google', start, end)
df.reset_index(inplace=True)
df.set_index("Date", inplace=True)
df = df.drop("Symbol", axis=1)
return dcc.Graph(id='example-graph',
figure={'data':[{'x':df.index,
'y':df.Close,
'type':'line',
'name':input_data}],
'layout':{'title':input_data}})
app.run_server(debug=True)
| 1,493 |
vmaig_blog/uwsgi-2.0.14/t/sharedarea/sharedarea_incdec.py
|
StanYaha/Blog
| 1 |
2171450
|
import uwsgi
import unittest
class SharedareaTest(unittest.TestCase):
def setUp(self):
uwsgi.sharedarea_write(0, 0, '\0' * 64)
def test_32(self):
uwsgi.sharedarea_write32(0, 0, 17)
self.assertEqual(uwsgi.sharedarea_read32(0, 0), 17)
def test_inc32(self):
uwsgi.sharedarea_write32(0, 4, 30)
uwsgi.sharedarea_inc32(0, 4, 3)
self.assertEqual(uwsgi.sharedarea_read32(0, 4), 33)
def test_dec32(self):
uwsgi.sharedarea_write32(0, 5, 30)
uwsgi.sharedarea_dec32(0, 5, 4)
self.assertEqual(uwsgi.sharedarea_read32(0, 5), 26)
def test_inc64(self):
uwsgi.sharedarea_write64(0, 8, 17 * (1024 ** 5))
uwsgi.sharedarea_inc64(0, 8, 1)
self.assertEqual(uwsgi.sharedarea_read64(0, 8), 17 * (1024 ** 5) + 1)
def test_dec64(self):
uwsgi.sharedarea_write64(0, 8, 30 * (1024 ** 5))
uwsgi.sharedarea_dec64(0, 8, 30 * (1024 ** 5) - 1)
self.assertEqual(uwsgi.sharedarea_read64(0, 8), 1)
unittest.main()
| 1,034 |
src/plotting.py
|
0-k/Absolute-Mehrheit
| 0 |
2170724
|
import matplotlib.pyplot as plt
import math
import seaborn as sns
from simulation import Simulation
import parties
import coalitions
def plot_seats_by_coalitions(seats_by_coalition):
rows = 2
columns = int(math.ceil(len(coalitions.ALL) / rows))
fig, axs = plt.subplots(rows, columns)
idx = 0
for coalition in coalitions.ALL:
plot_idx = idx // columns, idx % columns
axs[plot_idx].hist(seats_by_coalition[coalition.name], bins=10, color='black')
axs[plot_idx].set_title(coalition.name)
axs[plot_idx].set_xlim(151, 450)
axs[plot_idx].axvline(300, color='red', linestyle='--')
idx += 1
plt.show()
def plot_correlation(corr):
sns.heatmap(corr, annot=True)
plt.show()
if __name__ == '__main__':
sim = Simulation(parties.ALL)
plot_seats_by_coalitions(sim)
| 846 |
http_status_error.py
|
Heckie75/kodi-submodule-rssaddon
| 0 |
2171301
|
class HttpStatusError(Exception):
    message = ""
    def __init__(self, msg):
        super().__init__(msg)  # so str(exc) carries the message
        self.message = msg
| 109 |
mitmproxy/web/master.py
|
jvillacorta/mitmproxy
| 0 |
2171098
|
from __future__ import absolute_import, print_function, division
import sys
import collections
import tornado.httpserver
import tornado.ioloop
from typing import Optional # noqa
from mitmproxy import builtins
from mitmproxy import controller
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import options
from mitmproxy.web import app
from netlib.http import authentication
class Stop(Exception):
pass
class WebFlowView(flow.FlowView):
def __init__(self, store):
super(WebFlowView, self).__init__(store, None)
def _add(self, f):
super(WebFlowView, self)._add(f)
app.ClientConnection.broadcast(
type="UPDATE_FLOWS",
cmd="add",
data=app.convert_flow_to_json_dict(f)
)
def _update(self, f):
super(WebFlowView, self)._update(f)
app.ClientConnection.broadcast(
type="UPDATE_FLOWS",
cmd="update",
data=app.convert_flow_to_json_dict(f)
)
def _remove(self, f):
super(WebFlowView, self)._remove(f)
app.ClientConnection.broadcast(
type="UPDATE_FLOWS",
cmd="remove",
data=dict(id=f.id)
)
def _recalculate(self, flows):
super(WebFlowView, self)._recalculate(flows)
app.ClientConnection.broadcast(
type="UPDATE_FLOWS",
cmd="reset"
)
class WebState(flow.State):
def __init__(self):
super(WebState, self).__init__()
self.view._close()
self.view = WebFlowView(self.flows)
self._last_event_id = 0
self.events = collections.deque(maxlen=1000)
def add_log(self, e, level):
self._last_event_id += 1
entry = {
"id": self._last_event_id,
"message": e,
"level": level
}
self.events.append(entry)
app.ClientConnection.broadcast(
type="UPDATE_EVENTLOG",
cmd="add",
data=entry
)
def clear(self):
super(WebState, self).clear()
self.events.clear()
app.ClientConnection.broadcast(
type="UPDATE_EVENTLOG",
cmd="reset",
data=[]
)
class Options(options.Options):
def __init__(
self,
intercept=None, # type: Optional[str]
            wdebug=False,  # type: bool
wport=8081, # type: int
wiface="127.0.0.1", # type: str
wauthenticator=None, # type: Optional[authentication.PassMan]
wsingleuser=None, # type: Optional[str]
whtpasswd=None, # type: Optional[str]
**kwargs
):
self.wdebug = wdebug
self.wport = wport
self.wiface = wiface
self.wauthenticator = wauthenticator
self.wsingleuser = wsingleuser
self.whtpasswd = whtpasswd
self.intercept = intercept
super(Options, self).__init__(**kwargs)
# TODO: This doesn't belong here.
def process_web_options(self, parser):
if self.wsingleuser or self.whtpasswd:
if self.wsingleuser:
if len(self.wsingleuser.split(':')) != 2:
return parser.error(
"Invalid single-user specification. Please use the format username:password"
)
username, password = self.wsingleuser.split(':')
self.wauthenticator = authentication.PassManSingleUser(username, password)
elif self.whtpasswd:
try:
self.wauthenticator = authentication.PassManHtpasswd(self.whtpasswd)
except ValueError as v:
return parser.error(v.message)
else:
self.wauthenticator = None
class WebMaster(flow.FlowMaster):
def __init__(self, server, options):
super(WebMaster, self).__init__(options, server, WebState())
self.addons.add(options, *builtins.default_addons())
self.app = app.Application(
self, self.options.wdebug, self.options.wauthenticator
)
# This line is just for type hinting
self.options = self.options # type: Options
if options.rfile:
try:
self.load_flows_file(options.rfile)
except exceptions.FlowReadException as v:
self.add_log(
"Could not read flow file: %s" % v,
"error"
)
if options.outfile:
err = self.start_stream_to_path(
options.outfile[0],
options.outfile[1]
)
if err:
print("Stream file error: {}".format(err), file=sys.stderr)
sys.exit(1)
if self.options.app:
self.start_app(self.options.app_host, self.options.app_port)
def run(self): # pragma: no cover
iol = tornado.ioloop.IOLoop.instance()
http_server = tornado.httpserver.HTTPServer(self.app)
http_server.listen(self.options.wport)
iol.add_callback(self.start)
tornado.ioloop.PeriodicCallback(lambda: self.tick(timeout=0), 5).start()
try:
print("Server listening at http://{}:{}".format(
self.options.wiface, self.options.wport), file=sys.stderr)
iol.start()
except (Stop, KeyboardInterrupt):
self.shutdown()
def _process_flow(self, f):
if self.state.intercept and self.state.intercept(
f) and not f.request.is_replay:
f.intercept(self)
f.reply.take()
return f
@controller.handler
def request(self, f):
super(WebMaster, self).request(f)
return self._process_flow(f)
@controller.handler
def response(self, f):
super(WebMaster, self).response(f)
return self._process_flow(f)
@controller.handler
def error(self, f):
super(WebMaster, self).error(f)
return self._process_flow(f)
def add_log(self, e, level="info"):
super(WebMaster, self).add_log(e, level)
return self.state.add_log(e, level)
| 6,190 |
MyHelloWorldKugan/consts.py
|
thivi/MyHelloWorld
| 0 |
2169990
|
name = 'MyHelloWorldKugan'
version = "0.6.9-dev0"
author = '<NAME> <<EMAIL>>'
homepage = 'https://github.com/sathiyakugan/MyHelloWorld'
default_user_agent = '{}/{} (+{})'.format(name, version, homepage)
default_json_headers = [
('Content-Type', 'application/json'),
('Cache-Control', 'no-store'),
('Pragma', 'no-cache'),
]
| 336 |
main.py
|
Almas-Ali/Gender-Guise-App
| 0 |
2171380
|
from tkinter import *
from tkinter.messagebox import *
root = Tk()
def about():
showinfo("About","Guise Your Gender version 1.0")
def author():
showinfo('Author','This App is made by Md. <NAME>\n\<NAME>')
mainmenu = Menu(root)
mainmenu.add_command(label='About', command=about)
#mainmenu.add_separator()
mainmenu.add_command(label='Author', command=author)
mainmenu.add_command(label="Exit", command=quit)
root.config(menu=mainmenu)
def nameTest():
    if name.get() == '':
        showinfo('Result', 'You haven’t entered any name in the box.')
        return
    try:
        nam = name.get().lower()
        enm = nam[-1]
        enm2 = nam[-2:]
        enm3 = nam[-3:]
    except IndexError:
        pass
else:
if enm == 'a':
showinfo('Result','You are Female.')
elif enm == 'b':
showinfo('Result','You are Male.')
elif enm == 'c':
showinfo('Result','You are Male. ')
elif enm == 'd':
showinfo('Result','You are Male. ')
elif enm == 'e':
if nam == 'rone':
showinfo('Result','You can be Male/Female.')
else:
showinfo('Result','You are Male.')
elif enm == 'f':
showinfo('Result','You are *Male. ')
elif enm == 'g':
showinfo('Result','You are *Male. ')
elif enm == 'h':
showinfo('Result','You are *Male. ')
elif enm == 'i':
if nam == 'roni':
showinfo('Result','You can be Male/Female.')
elif nam == 'ali':
showinfo('Result','You are Male.')
else:
showinfo('Result','You are Female. ')
elif enm == 'j':
showinfo('Result','You are *Male. ')
elif enm == 'k':
showinfo('Result','You are *Male. ')
elif enm == 'l':
showinfo('Result','You are Male.')
elif enm == 'm':
if nam == 'mim':
showinfo('Result','You can be Male/Female.')
elif enm3 == 'mim':
showinfo('Result','You can be Female.')
else:
showinfo('Result','You are *Male. ')
elif enm == 'n':
if enm3 == 'lin':
showinfo('Result','You are Female')
elif enm3 == 'rin':
showinfo('Result','You are Female')
else:
showinfo('Result','You are Male')
elif enm == 'o':
showinfo('Result','You are Male. ')
elif enm == 'p':
showinfo('Result','You are *Male. ')
elif enm == 'q':
showinfo('Result','You are *Male. ')
elif enm == 'r':
if nam == 'nur':
showinfo('Result','You are Male. ')
else:
                showinfo('Result', 'You are Male.')
elif enm == 's':
showinfo('Result','You are Male. ')
elif enm == 't':
showinfo('Result','You are *Male. ')
elif enm == 'u':
showinfo('Result','You are Female. ')
elif enm == 'v':
showinfo('Result','You are *Male. ')
elif enm == 'w':
showinfo('Result','You are *Male. ')
elif enm == 'x':
showinfo('Result','You are *Male. ')
elif enm == 'y':
if nam == 'rony':
showinfo('Result','You can be Male/Female.')
else:
showinfo('Result','You are Female.')
elif enm == 'z':
showinfo('Result','You are Male. ')
else:
showinfo('Result','Error Name! ')
root.title('Guise Your Gender ')
root.geometry("500x400")
f1 = Frame(root, bg="lightblue", borderwidth=6, relief=SUNKEN)
f1.pack(side='top', fill='x')
f2 = Frame(root, bg='black')
f2.pack(side='bottom',fill="x")
f3 = Frame(root, bg='lightblue')
f3.pack(side='top',fill='both')
Label(f1, text='Guise Your Gender', fg='blue', bg="lightblue", font="Arial 18 bold").pack(padx=20,pady=40)
Label(f1, text='Enter Your Name : ', bg="lightblue", font='Arial 8 bold', fg='orange').pack(pady=20)
Label(f2, text="© Copyright collected by Md. Almas Ali.",fg='red', bg='black', font="Arial 8 bold").pack()
name = StringVar()
Entry(f1, textvariable=name, bg='lightblue',fg='red', font="Arial 8 italic").pack()
Button(f1, text='Test', bg='red', fg='white', command=nameTest).pack(pady=30)
photo = PhotoImage(file='./img/author.png')
Label(f3, image=photo).pack()
Label(f3, text='Md. <NAME>', font='Arial 10', fg='darkblue', bg='lightblue').pack(pady=8)
Label(f3, text='Diploma in Engineering in Computer Technology', fg='darkblue', bg='lightblue', font='Arial 6').pack()
Label(f3, text='2nd Semester, 2nd Shift', fg='darkblue', bg='lightblue', font='Arial 6').pack()
Label(f3, text='Chapai Nawabganj Polytechnic Institute, Chapai Nawabganj.', fg='darkblue', bg='lightblue', font='Arial 6').pack()
Label(f3, text='Contact: <EMAIL>', fg='darkblue', bg='lightblue', font='Arial 6').pack()
root.mainloop()
| 4,265 |
src/posts/models.py
|
ashutoshsuman99/Web-Blog-D19
| 0 |
2171207
|
from __future__ import unicode_literals
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import pre_save
from django.utils.text import slugify
# Create your models here.
# MVC MODEL VIEW CONTROLLER
def upload_location(instance, filename):
#filebase, extension = filename.split(".")
#return "%s/%s.%s" %(instance.id, instance.id, extension)
return "%s/%s" %(instance.id, filename)
class Post(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1)
title = models.CharField(max_length=120)
slug = models.SlugField(unique=True)
image = models.ImageField(upload_to=upload_location,
null=True,
blank=True,
width_field="width_field",
height_field="height_field")
height_field = models.IntegerField(default=0)
width_field = models.IntegerField(default=0)
content = models.TextField()
updated = models.DateTimeField(auto_now=True, auto_now_add=False)
timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
def __unicode__(self):
return self.title
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("posts:detail", kwargs={"id": self.id})
class Meta:
ordering = ["-timestamp", "-updated"]
def pre_save_post_receiver(sender, instance, *args, **kwargs):
    slug = slugify(instance.title)
    exists = Post.objects.filter(slug=slug).exists()
    if exists:
        slug = "%s-%s" % (slug, instance.id)
    instance.slug = slug
pre_save.connect(pre_save_post_receiver, sender=Post)
| 1,679 |
nsd1802/python/day07/bear2.py
|
MrWangwf/nsd1806
| 0 |
2171570
|
class Vendor:
def __init__(self, phone, email):
self.phone = phone
self.email = email
def call(self):
print('calling %s' % self.phone)
class BearToy:
def __init__(self, color, size, phone, email):
        self.color = color  # bind the attribute to the instance
self.size = size
self.vendor = Vendor(phone, email)
if __name__ == '__main__':
bigbear = BearToy('Brown', 'Middle', '4008009999', '<EMAIL>')
print(bigbear.color)
bigbear.vendor.call()
| 486 |
sharedVolume/printall.py
|
hdnndh/simple_api
| 1 |
2171547
|
import sqlite3
import pandas as pd
conn = sqlite3.connect('fruit.db')
c = conn.cursor()
print(pd.read_sql_query("SELECT * FROM fruit", conn))
| 144 |
CIE-Pastpaper/CIE pastpaper .py
|
YHPeter/Python-Spider
| 2 |
2171610
|
# -*- coding: utf-8 -*-
from time import sleep
from selenium import webdriver
web = 'https://cie.fraft.org/'
def download_pdf(lnk,name,t):
chrome_options = webdriver.ChromeOptions()
download_folder = "c:\\Users\\Peter\\Desktop"
profile = {"plugins.plugins_list": [{"enabled": False,
"name": "Chrome PDF Viewer"}],
"profile.default_content_settings.popups": 0,
"download.default_directory": download_folder,
"download.extensions_to_open": ""}
chrome_options.add_experimental_option("prefs", profile)
print("Downloading file from link: {}".format(lnk))
driver = webdriver.Chrome(r"chromedriver.exe", options=chrome_options)
driver.get(lnk)
sleep(t)
driver.find_element_by_xpath('//*[@id="download"]').click()
sleep(0.1)
filename = name
print("File: {}".format(filename))
print("Status: Download Complete.")
driver.close()
driver.quit()
objec = 9707 #subject code
season = ['w']#m or s or w
year = [18] #test year
typ = ['ms','qp']# answer paper or question paper
paper = range(1,7+1)#paper code
con = range(1,3+1)#paper conponent
for s in season:
for y in year:
for t in typ:
for p in paper:
for c in con:
name = str(objec)+'_'+str(s)+str(y)+'_'+str(t)+'_'+str(p)+str(c)+'.pdf'
url = web+str(objec)+'_'+str(s)+str(y)+'_'+str(t)+'_'+str(p)+str(c)+'.pdf'
download_pdf(url,name,6)
download_pdf(web+str(objec)+'_'+str(s)+str(y)+'_'+'gt'+'.pdf',str(objec)+'_'+str(s)+str(y)+'_'+'gt'+'.pdf',8)
        download_pdf(web+str(objec)+'_'+str(s)+str(y)+'_'+'er'+'.pdf',str(objec)+'_'+str(s)+str(y)+'_'+'er'+'.pdf',20)
| 1,820 |
client/test/test_pipeline/test_dsl/test_io_types/mock_step.py
|
qiaoshuangshuang/PaddleFlow
| 23 |
2171399
|
#!/usr/bin/env python3
""" mock paddleflow.pipeline.dsl.steps.Step
"""
class Step(object):
""" mock paddleflow.pipeline.dsl.steps.Step
"""
def __init__(self, name):
""" init
"""
self.name = name
| 234 |
function/python/brightics/function/extraction/test/add_shift_test.py
|
parkjh80/studio
| 202 |
2168784
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.function.extraction import add_shift
from brightics.common.datasets import load_iris
import unittest
import math
import HtmlTestRunner
import os
class AddShiftTest(unittest.TestCase):
def setUp(self):
print("*** Add Shift UnitTest Start ***")
self.iris = load_iris()
def tearDown(self):
print("*** Add Shift UnitTest End ***")
def test_add_shift1(self):
add_shift_out = add_shift(table=self.iris, input_col='sepal_length', shift_list=[-2, -1, 0, 1, 2])
table = add_shift_out['out_table'].values.tolist()
self.assertListEqual(table[0][:8], [5.1, 3.5, 1.4, 0.2, 'setosa', 4.7, 4.9, 5.1])
self.assertTrue(math.isnan(table[0][8]))
self.assertTrue(math.isnan(table[0][9]))
self.assertListEqual(table[1][:9], [4.9, 3.0, 1.4, 0.2, 'setosa', 4.6, 4.7, 4.9, 5.1])
self.assertTrue(math.isnan(table[0][9]))
self.assertListEqual(table[2], [4.7, 3.2, 1.3, 0.2, 'setosa', 5.0, 4.6, 4.7, 4.9, 5.1])
self.assertListEqual(table[3], [4.6, 3.1, 1.5, 0.2, 'setosa', 5.4, 5.0, 4.6, 4.7, 4.9])
self.assertListEqual(table[4], [5.0, 3.6, 1.4, 0.2, 'setosa', 4.6, 5.4, 5.0, 4.6, 4.7])
def test_add_shift2(self):
add_shift_out = add_shift(table=self.iris, input_col='sepal_length', shift_list=[1], order_by=['sepal_width'], group_by=['species'])
table = add_shift_out['out_table'].values.tolist()
self.assertListEqual(table[0][:5], [4.5, 2.3, 1.3, 0.3, 'setosa'])
self.assertTrue(math.isnan(table[0][5]))
self.assertListEqual(table[1], [4.4, 2.9, 1.4, 0.2, 'setosa', 4.5])
self.assertListEqual(table[2], [4.9, 3.0, 1.4, 0.2, 'setosa', 4.4])
self.assertListEqual(table[3], [4.8, 3.0, 1.4, 0.1, 'setosa', 4.9])
if __name__ == '__main__':
filepath = os.path.dirname(os.path.abspath(__file__))
    reportFolder = filepath + "/../../../../../../../reports"
    unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(combine_reports=True, output=reportFolder))
| 2,638 |
favfilter.py
|
rbn42/python-mpd-script
| 0 |
2170262
|
#!/usr/bin/python
import os.path
KEYS = 'time', 'title', 'artist', 'album', 'track' # 'pos'
_data = []
def parse2str(item):
# 2016-12-05 09:44:13 Mon NZDT
    # keep only the last two path components, so entries still match after
    # files are moved or the directory layout changes
    # the time tag acts as a safeguard: even with duplicate filenames, the
    # chance of a collision should be small
path = item['file']
path = '/'.join(path.split('/')[-2:])
keys = sorted([k for k in item if k in KEYS])
values = [path] + [item[k] for k in keys]
    return str(values)
def getData():
    path = os.path.expanduser('~/.mpd/dislike')
    with open(path) as f:
        entries = [eval(line) for line in f]
    result = []
    for item in entries:
        _str = parse2str(item)
        result.append(_str)
    return set(result)
def dislike(item):
    # if len(_data) < 1:
    #     _data.append(getData())
    _str = parse2str(item)
    return _str in getData()
if __name__ == '__main__':
print(getData())
| 918 |
EvaluationUtils/vision_metrics.py
|
oronnir/CAST
| 8 |
2166466
|
import numpy as np
from numpy import dot
from numpy.linalg import norm
class CVMetrics:
@staticmethod
def cosine_similarity(a, b):
return dot(a, b)/(norm(a)*norm(b))
@staticmethod
def range_intersection(a_start, a_end, b_start, b_end):
if a_end < b_start or a_start > b_end:
return 0
last = min(a_end, b_end)
first = max(a_start, b_start)
return last - first
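    # e.g. range_intersection(0, 10, 5, 20) == 5; disjoint ranges return 0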
@staticmethod
def bb_intersection_over_union(box_a, box_b):
"""intersection over union of axis aligned bounding boxes"""
horizontal_intersection = CVMetrics.range_intersection(box_a.X, box_a.X + box_a.Width, box_b.X,
box_b.X + box_b.Width)
vertical_intersection = CVMetrics.range_intersection(box_a.Y, box_a.Y + box_a.Height, box_b.Y,
box_b.Y + box_b.Height)
intersection_area = horizontal_intersection * vertical_intersection
union_area = box_a.area() + box_b.area() - intersection_area
iou = float(intersection_area) / union_area
return iou
@staticmethod
def matching_bbox_sets(bbox_arr_a, bbox_arr_b, min_iou):
ious = np.zeros([len(bbox_arr_a), len(bbox_arr_b)])
for index_a in range(len(bbox_arr_a)):
for index_b in range(len(bbox_arr_b)):
ious[index_a, index_b] = CVMetrics.bb_intersection_over_union(bbox_arr_a[index_a], bbox_arr_b[index_b])
ious_copy = ious.copy()
a_to_b_match = dict()
while ious.any():
linear_index = ious.argmax()
x, y = np.unravel_index(linear_index, shape=[len(bbox_arr_a), len(bbox_arr_b)])
if ious[x, y] < min_iou:
return a_to_b_match, ious_copy
a_to_b_match[x] = y
ious[x, :] = 0
ious[:, y] = 0
return a_to_b_match, ious_copy
@staticmethod
def precision_recall_at_iou(gt_dict, pred_dict, min_iou):
        ids_superset = set(gt_dict.keys()) | set(pred_dict.keys())
ordered_ids = sorted(ids_superset)[5:-5]
# book level/video level
fp = 0
tp = 0
fn = 0
        for frame_id in ordered_ids:
            if frame_id not in gt_dict:
                fp += len(pred_dict[frame_id])  # all predicted boxes in this frame are false positives
                continue
            if frame_id not in pred_dict:
                fn += len(gt_dict[frame_id])  # all ground-truth boxes in this frame are missed
                continue
ordered_gt = list(gt_dict[frame_id].values())
ordered_pred = list(pred_dict[frame_id].values())
gt_pred_match, _ = CVMetrics.matching_bbox_sets(ordered_gt, ordered_pred, min_iou)
tp += len(gt_pred_match)
missed_boxes = len(ordered_gt) - len(gt_pred_match)
false_detections = len(ordered_pred) - len(gt_pred_match)
            # greedy matching never matches more boxes than either set holds,
            # so these counts cannot go negative
            assert missed_boxes >= 0 and false_detections >= 0
fp += false_detections
fn += missed_boxes
precision = 1.0 * tp / (fp + tp)
recall = 1.0 * tp / (tp + fn)
return precision, recall
| 3,116 |
tests/graphics/data_samples.py
|
Ze1598/quotespy
| 2 |
2171496
|
default_settings_lyrics = {
"font_family": "arial.ttf",
"font_size": 250,
"size": [2800, 2800],
"color_scheme": ["#000000", "#ffffff"],
"wrap_limit": 20,
"margin_bottom": 0
}
default_settings_lyrics_returned = {
"font_family": "arial.ttf",
"font_size": 250,
"size": [2800, 2800],
"color_scheme": [(0, 0, 0), (255, 255, 255)],
"wrap_limit": 20,
"margin_bottom": 0
}
default_settings_quote = {
"font_family": "arial.ttf",
"font_size": 200,
"size": [3840, 2160],
"color_scheme": ["#000000", "#ffffff"],
"wrap_limit": 32,
"margin_bottom": 0
}
default_settings_quote_returned = {
"font_family": "arial.ttf",
"font_size": 200,
"size": [3840, 2160],
"color_scheme": [(0, 0, 0), (255, 255, 255)],
"wrap_limit": 32,
"margin_bottom": 0
}
valid_custom_settings = {
"font_family": "arial.ttf",
"font_size": 250,
"size": [2800, 2800],
"color_scheme": ["#000", "#fff"],
"wrap_limit": 20,
"margin_bottom": 15
}
valid_custom_settings_returned = {
"font_family": "arial.ttf",
"font_size": 250,
"size": [2800, 2800],
"color_scheme": [(0, 0, 0), (255, 255, 255)],
"wrap_limit": 20,
"margin_bottom": 15
}
valid_custom_settings_rgba = {
"font_family": "arial.ttf",
"font_size": 100,
"size": [1800, 1800],
"color_scheme": ["rgba(0, 0, 0, 0)", "#ffffff"],
"wrap_limit": 32,
"margin_bottom": 30
}
valid_custom_settings_rgba_returned = {
"font_family": "arial.ttf",
"font_size": 100,
"size": [1800, 1800],
"color_scheme": ["rgba(0, 0, 0, 0)", (255, 255, 255)],
"wrap_limit": 32,
"margin_bottom": 30
}
valid_custom_settings_none_bg = {
"font_family": "arial.ttf",
"font_size": 100,
"size": [1800, 1800],
"color_scheme": [None, "#ffffff"],
"wrap_limit": 32,
"margin_bottom": 30
}
valid_custom_settings_none_bg_returned = {
"font_family": "arial.ttf",
"font_size": 100,
"size": [1800, 1800],
"color_scheme": [None, (255, 255, 255)],
"wrap_limit": 32,
"margin_bottom": 30
}
missing_font_family = {
"font_size": 250,
"size": [2800, 2800],
"color_scheme": ["#000", "#fff"],
"wrap_limit": 20,
"margin_bottom": 312.5
}
missing_font_size = {
"font_family": "Inkfree.ttf",
"size": [2800, 2800],
"color_scheme": ["#000", "#fff"],
"wrap_limit": 20,
"margin_bottom": 312.5
}
missing_size = {
"font_family": "Inkfree.ttf",
"font_size": 250,
"color_scheme": ["#000", "#fff"],
"wrap_limit": 20,
"margin_bottom": 312.5
}
missing_color_scheme = {
"font_family": "Inkfree.ttf",
"font_size": 250,
"size": [2800, 2800],
"wrap_limit": 20,
"margin_bottom": 312.5
}
missing_wrap_limit = {
"font_family": "Inkfree.ttf",
"font_size": 250,
"size": [2800, 2800],
"color_scheme": ["#000", "#fff"],
"margin_bottom": 312.5
}
missing_margin = {
"font_family": "Inkfree.ttf",
"font_size": 250,
"size": [2800, 2800],
"color_scheme": ["#000", "#fff"],
"wrap_limit": 20,
}
invalid_font_family = {
"font_family": "test",
"font_size": 250,
"size": [2800, 2800],
"color_scheme": ["#000", "#fff"],
"wrap_limit": 20,
"margin_bottom": 312.5
}
invalid_font_size = {
"font_family": "Inkfree.ttf",
"font_size": "test",
"size": [2800, 2800],
"color_scheme": ["#000", "#fff"],
"wrap_limit": 20,
"margin_bottom": 312.5
}
invalid_size_length = {
"font_family": "Inkfree.ttf",
"font_size": 250,
"size": [2800, 2800, "test"],
"color_scheme": ["#000", "#fff"],
"wrap_limit": 20,
"margin_bottom": 312.5
}
invalid_size_value = {
"font_family": "Inkfree.ttf",
"font_size": 250,
"size": [2800, "test"],
"color_scheme": ["#000", "#fff"],
"wrap_limit": 20,
"margin_bottom": 312.5
}
invalid_color_scheme_length = {
"font_family": "Inkfree.ttf",
"font_size": 250,
"size": [2800, 2800],
"color_scheme": ["#000", "#fff", "test"],
"wrap_limit": 20,
"margin_bottom": 312.5
}
invalid_color_scheme_value = {
"font_family": "Inkfree.ttf",
"font_size": 250,
"size": [2800, 2800],
"color_scheme": ["test", "#fff"],
"wrap_limit": 20,
"margin_bottom": 312.5
}
invalid_color_scheme_rgba = {
"font_family": "arial.ttf",
"font_size_text": 100,
"font_size_header": 80,
"size": [1800, 1800],
"color_scheme": ["#15202b", "rgba(0, 0, 255, -1)"],
"wrap_limit": 32,
"margin_bottom": 30
}
invalid_wrap_limit = {
"font_family": "arial.ttf",
"font_size": 250,
"size": [2800, 2800],
"color_scheme": ["#000", "#fff"],
"wrap_limit": "test",
"margin_bottom": 312.5
}
invalid_margin_bottom = {
"font_family": "arial.ttf",
"font_size": 250,
"size": [2800, 2800],
"color_scheme": ["#000", "#fff"],
"wrap_limit": 20,
"margin_bottom": "test"
}
valid_info = {
"title": "info1",
"text": "test test test test test"
}
missing_title = {
"text": "test test test test test"
}
invalid_title = {
"title": 123,
"text": "test test test test test"
}
missing_text = {
"title": "info1",
}
invalid_text = {
"title": "info1",
"text": 123
}
valid_info_list = [
{
"title": "info1",
"text": "test test test test test"
},
{
"title": "info2",
"text": "test test test test test"
},
{
"title": "info3",
"text": "test test test test test"
},
]
| 5,492 |
web/views/studyrecord.py
|
jy-cw/PerfectCRM
| 0 |
2170869
|
#!/usr/bin/env python
#coding=utf-8
from stark.service.stark import site, ModelStark,get_datetime_text
from django.urls import path
from web.models import *
from django.utils.safestring import mark_safe
from django.shortcuts import HttpResponse, redirect, render
from django.db.models import Q
import datetime
from django.http import JsonResponse
class StudyConfig(ModelStark):
list_display = ["student", "course_record", "record", "score"]
def patch_late(self, request, queryset):
queryset.update(record="late")
patch_late.text = "迟到"
actions = [patch_late]
| 585 |
dictionaries lab/05. Word Synonyms.py
|
nrgxtra/fundamentals
| 0 |
2171387
|
n = int(input())
dd = {}
for i in range(n):
word = input()
synonyms = input()
if word not in dd:
dd[word] = []
dd[word].append(synonyms)
for (word, synonym) in dd.items():
print(f"{word} - {', '.join(synonym)}")
| 254 |
john_zelle_python3/three_button_monte.py
|
alirkaya/programming-textbook-solutions
| 0 |
2168688
|
from random import sample
from graphics import *
class Button:
"""A button is a labeled rectangle in a window .
It is activated or deactivated with the activate()
and deactivate() methods. The clicked (p) method
returns true if the button is active and p is inside it."""
def __init__(self, win, center, width, height, label):
""" Creates a rectangular button, eg:
qb = Button(my_win, center_point, width, height, 'Quit') """
w, h = width / 2.0, height / 2.0
x, y = center.getX(), center.getY()
self.x_max, self.x_min = x + w, x - w
self.y_max, self.y_min = y + h, y - h
p1 = Point(self.x_min, self.y_min)
p2 = Point(self.x_max, self.y_max)
self.rect = Rectangle(p1, p2)
self.rect.setFill('lightgray')
self.rect.draw(win)
self.label = Text(center, label)
self.label.draw(win)
self.deactivate()
def clicked(self, p):
""" Returns True if button active and p is inside """
return self.active and self.x_min <= p.getX() <= self.x_max and self.y_min <= p.getY() <= self.y_max
def get_label(self):
""" Returns the label string of this button. """
return self.label.getText()
def activate(self):
""" Sets this button to 'active'. """
self.label.setFill('black')
self.rect.setWidth(2)
self.active = True
def deactivate(self):
""" Sets this button to 'inactive'. """
self.label.setFill('darkgrey')
self.rect.setWidth(1)
self.active = False
def undraw(self):
self.rect.undraw()
self.label.undraw()
def main():
win = GraphWin('Three Button Monte!', 640, 480)
win.setCoords(-16, -12, 16, 12)
door1 = Button(win, Point(-9, 1), 6, 9, 'Door 1')
door1.activate()
door2 = Button(win, Point(0, 1), 6, 9, 'Door 2')
door2.activate()
door3 = Button(win, Point(9, 1), 6, 9, 'Door 3')
door3.activate()
welcome_text = Text(Point(0, 8), 'Please! Select a door to start the game.')
welcome_text.setSize(16)
welcome_text.draw(win)
    prizes = sample(['car', 'goat', 'goat'], k=3)
while True:
pt = win.getMouse()
if door1.clicked(pt):
door_number = 0
break
if door2.clicked(pt):
door_number = 1
break
if door3.clicked(pt):
door_number = 2
break
welcome_text.undraw()
    if prizes[door_number] == 'car':
message = Text(Point(0, -7), 'You Win!!!')
message.setStyle('bold')
message.setSize(16)
message.draw(win)
else:
message = Text(Point(0, -7), 'You Lost!')
message.draw(win)
win.getMouse()
if __name__ == '__main__':
main()
| 2,780 |
ga4gh/htsget/compliance/file_validator.py
|
jmtcsngr/htsget-compliance
| 1 |
2171545
|
import os
class FileValidator(object):
SUCCESS = 1
FAILURE = -1
def __init__(self, returned_fp, expected_fp):
self.set_returned_fp(returned_fp)
self.set_expected_fp(expected_fp)
def validate(self):
result = FileValidator.SUCCESS
string_returned = self.load(self.get_returned_fp())
string_expected = self.load(self.get_expected_fp())
if string_returned != string_expected:
result = FileValidator.FAILURE
return result
def load(self, fp):
s = ""
if fp.endswith(".sam"):
s = self.load_sam(fp)
elif fp.endswith(".bam"):
s = self.load_bam(fp)
return s
def load_sam(self, fp):
s = []
header = True
with open(fp, "r") as f:
for line in f.readlines():
if header:
if not line.startswith("@"):
header = False
if not header:
ls = line.rstrip().split("\t")
s.append("\t".join(ls[:11]))
return "\n".join(s) + "\n"
def load_bam(self, fp):
s = []
for line in os.popen("samtools view " + fp).readlines():
ls = line.rstrip().split("\t")
s.append("\t".join(ls[:11]))
return "\n".join(s) + "\n"
def set_returned_fp(self, returned_fp):
self.returned_fp = returned_fp
def get_returned_fp(self):
return self.returned_fp
def set_expected_fp(self, expected_fp):
self.expected_fp = expected_fp
def get_expected_fp(self):
return self.expected_fp
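# Hedged usage sketch (file names are hypothetical): compare a returned
# alignment file against the expected one. Optional SAM tags are ignored,
# since load_sam/load_bam keep only the first 11 columns of each record.
#   validator = FileValidator("returned.sam", "expected.sam")
#   assert validator.validate() == FileValidator.SUCCESS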
| 1,711 |
src/sqs.py
|
alphagov-mirror/verify-event-recorder-service
| 1 |
2170754
|
def fetch_single_message(sqs_client, queue_url):
response = sqs_client.receive_message(
QueueUrl=queue_url,
MaxNumberOfMessages=1,
VisibilityTimeout=300, # 5 min timeout - any failed messages can be picked up by a later lambda
WaitTimeSeconds=0, # Don't wait for messages - if there aren't any left, then this lambda's job is done
)
return response['Messages'][0] if 'Messages' in response and response['Messages'] else None
def delete_message(sqs_client, queue_url, message):
sqs_client.delete_message(
QueueUrl=queue_url,
ReceiptHandle=message['ReceiptHandle']
)
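# Hedged usage sketch (queue URL is hypothetical; assumes boto3 is available):
#   import boto3
#   sqs_client = boto3.client("sqs")
#   message = fetch_single_message(sqs_client, queue_url)
#   if message is not None:
#       ...  # process the message body
#       delete_message(sqs_client, queue_url, message)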
| 637 |
test/test_emd_multi.py
|
dougalsutherland/POT
| 1 |
2171579
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 10 09:56:06 2017
@author: rflamary
"""
import numpy as np
import pylab as pl
import ot
from ot.datasets import get_1D_gauss as gauss
reload(ot.lp)
#%% parameters
n=5000 # nb bins
# bin positions
x=np.arange(n,dtype=np.float64)
# Gaussian distributions
a=gauss(n,m=20,s=5) # m= mean, s= std
ls= range(20,1000,10)
nb=len(ls)
b=np.zeros((n,nb))
for i in range(nb):
b[:,i]=gauss(n,m=ls[i],s=10)
# loss matrix
M=ot.dist(x.reshape((n,1)),x.reshape((n,1)))
#M/=M.max()
#%%
print('Computing {} EMD '.format(nb))
# emd loss 1 proc
ot.tic()
emd_loss4=ot.emd2(a,b,M,1)
ot.toc('1 proc : {} s')
# emd loss multipro proc
ot.tic()
emd_loss4=ot.emd2(a,b,M)
ot.toc('multi proc : {} s')
| 756 |
finrl_meta/env_execution_optimizing/order_execution_qlib/trade/observation/teacher_obs.py
|
eitin-infant/FinRL-Meta
| 214 |
2171046
|
import pandas as pd
import numpy as np
from gym.spaces import Discrete, Box, Tuple, MultiDiscrete
import math
import json
from .obs_rule import RuleObs
class TeacherObs(RuleObs):
"""
    The observation used for the OPD method.
    Consists of the public state (raw features), the private state, and seqlen.
"""
def get_obs(
self, raw_df, feature_dfs, t, interval, position, target, is_buy, max_step_num, interval_num, *args, **kargs,
):
if t == -1:
self.private_states = []
public_state = self.get_feature_res(feature_dfs, t, interval, whole_day=True)
private_state = np.array([position / target, (t + 1) / max_step_num])
self.private_states.append(private_state)
list_private_state = np.concatenate(self.private_states)
list_private_state = np.concatenate(
(list_private_state, [0.0] * 2 * (interval_num + 1 - len(self.private_states)),)
)
seqlen = np.array([interval])
assert not (
np.isnan(list_private_state).any() | np.isinf(list_private_state).any()
), f"{private_state}, {target}"
assert not (np.isnan(public_state).any() | np.isinf(public_state).any()), f"{public_state}"
return np.concatenate((public_state, list_private_state, seqlen))
class RuleTeacher(RuleObs):
""" """
def get_obs(
self, raw_df, feature_dfs, t, interval, position, target, is_buy, max_step_num, interval_num, *args, **kargs,
):
if t == -1:
self.private_states = []
public_state = feature_dfs[0].reshape(-1)[: 6 * 240]
private_state = np.array([position / target, (t + 1) / max_step_num])
teacher_action = self.get_feature_res(feature_dfs, t, interval)[-self.features[1]["size"] :]
self.private_states.append(private_state)
list_private_state = np.concatenate(self.private_states)
list_private_state = np.concatenate(
(list_private_state, [0.0] * 2 * (interval_num + 1 - len(self.private_states)),)
)
seqlen = np.array([interval])
return np.concatenate((teacher_action, public_state, list_private_state, seqlen))
| 2,162 |
tests/test_iter.py
|
JoshKarpel/hypoxia
| 1 |
2171352
|
import itertools
import pytest
from hypoxia import Iter, Some, Nun
HELLO_WORLD = 'Hello world!'
@pytest.fixture(scope = 'function')
def char_iter():
return Iter(c for c in HELLO_WORLD)
@pytest.fixture(scope = 'function')
def int_iter():
return Iter(range(5))
def test_zip(char_iter, int_iter):
x = int_iter.zip(char_iter)
assert list(x) == [(0, 'H'), (1, 'e'), (2, 'l'), (3, 'l'), (4, 'o')]
def test_and(char_iter, int_iter):
x = int_iter & char_iter
assert list(x) == [(0, 'H'), (1, 'e'), (2, 'l'), (3, 'l'), (4, 'o')]
def test_unzip():
foo = Iter(range(3)).zip(range(3), range(3))
a, b, c = foo.unzip()
assert a == b == c == [0, 1, 2]
def test_enumerate():
d = Iter(('a', 'b', 'c'))
assert tuple(d.enumerate()) == ((0, 'a'), (1, 'b'), (2, 'c'))
def test_map_returns_iter(char_iter):
assert type(char_iter.map(lambda c: c.upper())) == Iter
def test_map_doubler(int_iter):
assert tuple(int_iter.map(lambda x: 2 * x)) == (0, 2, 4, 6, 8)
def test_reduce(int_iter):
assert int_iter.reduce(lambda acc, elem: max(acc, elem)) == 4
def test_reduce_with_initial(int_iter):
assert int_iter.reduce(lambda acc, elem: max(acc, elem), initial = 10) == 10
def test_filter_returns_iter(char_iter):
assert type(char_iter.filter(lambda c: c != 'l')) == Iter
def test_filter_evens(int_iter):
assert tuple(int_iter.filter(lambda x: x % 2 == 0)) == (0, 2, 4)
def test_all_true(int_iter):
assert int_iter.map(lambda x: x < 10).all()
def test_all_false(int_iter):
assert not int_iter.map(lambda x: x > 3).all()
def test_any_true(int_iter):
assert int_iter.map(lambda x: x == 0).any()
def test_any_false(int_iter):
assert not int_iter.map(lambda x: x < 0).any()
def test_on_list():
d = Iter([0, 1, 2, 3, 4])
assert tuple(d.map(lambda x: 2 * x)) == (0, 2, 4, 6, 8)
def test_count():
count = Iter.count(start = 5, step = 2)
assert next(count) == 5
assert next(count) == 7
assert next(count) == 9
assert next(count) == 11
def test_repeat():
repeat = Iter.repeat(True)
for _ in range(100):
assert next(repeat)
def test_repeat_limit():
repeat = Iter.repeat(True, 10)
assert list(repeat) == [True for _ in range(10)]
def test_chain(char_iter, int_iter):
x = char_iter.chain(int_iter)
assert list(x) == ['H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', '!', 0, 1, 2, 3, 4]
def test_add(char_iter, int_iter):
x = char_iter + int_iter
assert list(x) == ['H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', '!', 0, 1, 2, 3, 4]
def test_zip_longest():
x = Iter(range(3)).zip_longest(range(2))
assert list(x) == [(0, 0), (1, 1), (2, None)]
def test_zip_longest_other_way():
x = Iter(range(2)).zip_longest(range(3))
assert list(x) == [(0, 0), (1, 1), (None, 2)]
def test_or():
x = Iter(range(3)) | range(2)
assert list(x) == [(0, 0), (1, 1), (2, None)]
def test_star_map():
def pow(x, y):
return x ** y
x = Iter(range(4)).zip(range(4)).star_map(pow)
assert list(x) == [0 ** 0, 1 ** 1, 2 ** 2, 3 ** 3]
def test_filter_map(int_iter):
def fm(x):
if x % 2 == 0:
return Some(x ** 2)
else:
return Nun()
x = int_iter.filter_map(fm)
assert list(x) == [0, 4, 16]
def test_compress():
x = Iter('hello!')
selectors = [0, 1, 0, 1, 1, 0]
assert ''.join(x.compress(selectors)) == 'elo'
def test_partition(int_iter):
even, odd = int_iter.partition(lambda x: x % 2 == 0)
assert even == [0, 2, 4]
assert odd == [1, 3]
def test_for_each(int_iter, mocker):
mock = mocker.MagicMock()
int_iter.for_each(mock)
assert mock.call_count == 5
assert mock.call_args_list == [((x,),) for x in range(5)]
def test_star_for_each(char_iter, int_iter, mocker):
mock = mocker.MagicMock()
char_iter.zip(int_iter).star_for_each(mock)
assert mock.call_count == 5
assert mock.call_args_list == [(('H', 0),), (('e', 1),), (('l', 2),), (('l', 3),), (('o', 4),)]
def test_max(int_iter):
assert int_iter.max() == 4
def test_max_by_key(int_iter):
assert int_iter.max(lambda x: -x) == 0
def test_min(int_iter):
assert int_iter.min() == 0
def test_min_by_key(int_iter):
assert int_iter.min(lambda x: -x) == 4
def test_sum(int_iter):
assert int_iter.sum() == 1 + 2 + 3 + 4
def test_sum_with_start(int_iter):
assert int_iter.sum(start = 5) == 5 + 1 + 2 + 3 + 4
def test_mul(int_iter):
next(int_iter) # skip 0
assert int_iter.mul() == 2 * 3 * 4
def test_mul_with_initial(int_iter):
next(int_iter) # skip 0
assert int_iter.mul(initial = 5) == 2 * 3 * 4 * 5
def test_dot():
a = Iter(range(3))
b = Iter(range(4))
    assert a.dot(b) == 0 ** 2 + 1 ** 2 + 2 ** 2  # zip-style truncation assumed
def test_matmul():
a = Iter(range(3))
b = Iter(range(4))
    assert a @ b == 0 ** 2 + 1 ** 2 + 2 ** 2  # zip-style truncation assumed
def test_find(char_iter):
assert char_iter.find(lambda c: c == 'l').unwrap() == 'l'
def test_position(char_iter):
assert char_iter.position(lambda c: c == 'l').unwrap() == 2
def test_find_position(char_iter):
assert char_iter.find_position(lambda c: c == 'l').unwrap() == (2, 'l')
def test_find_with_no_match(char_iter):
assert char_iter.find(lambda c: c == 'z').is_nun()
def test_position_with_no_match(char_iter):
assert char_iter.position(lambda c: c == 'z').is_nun()
def test_find_position_with_no_match(char_iter):
assert char_iter.find_position(lambda c: c == 'z').is_nun()
def test_collect(int_iter):
assert int_iter.collect(tuple) == (0, 1, 2, 3, 4)
def test_join(char_iter):
assert char_iter.join('-') == 'H-e-l-l-o- -w-o-r-l-d-!'
def test_sorted(char_iter):
assert char_iter.sorted().join() == ''.join(sorted(HELLO_WORLD))
def test_sorted_with_reversed(char_iter):
assert char_iter.sorted(reversed = True).join() == ''.join(sorted(HELLO_WORLD, reverse = True))
def test_skip_while(char_iter):
assert char_iter.skip_while(lambda c: c in 'Hello').join() == ' world!'
def test_take_while(char_iter):
assert char_iter.take_while(lambda c: c in 'Hello').join() == 'Hello'
def test_product():
x = Iter('ABCD').product(repeat = 2).collect(list)
assert len(x) == 16
assert x == [('A', 'A'), ('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'A'), ('B', 'B'), ('B', 'C'), ('B', 'D'), ('C', 'A'), ('C', 'B'), ('C', 'C'), ('C', 'D'), ('D', 'A'), ('D', 'B'), ('D', 'C'), ('D', 'D')]
def test___mul__():
x = Iter('ABCD')
y = Iter('ABCD')
z = (x * y).collect(list)
assert len(z) == 16
assert z == [('A', 'A'), ('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'A'), ('B', 'B'), ('B', 'C'), ('B', 'D'), ('C', 'A'), ('C', 'B'), ('C', 'C'), ('C', 'D'), ('D', 'A'), ('D', 'B'), ('D', 'C'), ('D', 'D')]
def test_permutations():
x = Iter('ABCD').permutations(r = 2).collect(list)
assert len(x) == 12
assert x == [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'A'), ('B', 'C'), ('B', 'D'), ('C', 'A'), ('C', 'B'), ('C', 'D'), ('D', 'A'), ('D', 'B'), ('D', 'C')]
def test_combinations():
x = Iter('ABCD').combinations(2).collect(list)
assert len(x) == 6
assert x == [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'C'), ('B', 'D'), ('C', 'D')]
def test_combinations_with_replacement():
x = Iter('ABCD').combinations_with_replacement(2).collect(list)
assert len(x) == 10
assert x == [('A', 'A'), ('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'B'), ('B', 'C'), ('B', 'D'), ('C', 'C'), ('C', 'D'), ('D', 'D')]
def test_cycle():
x = Iter('ABCD').cycle()
cycle = itertools.cycle('ABCD')
for _ in range(1000):
assert next(x) == next(cycle)
| 7,726 |
xview3/augmentations/copy_paste.py
|
BloodAxe/xView3-The-First-Place-Solution
| 39 |
2171005
|
import math
import random
from typing import List, Union, Tuple, Callable
import albumentations as A
import cv2
import numpy as np
from sklearn.utils import compute_sample_weight
__all__ = ["CopyPasteAugmentation"]
class CopyPasteAugmentation(A.DualTransform):
def __init__(
self,
images: List[str],
bboxes: List,
labels: List[int],
transform: A.Compose,
read_image_fn: Callable,
class_weights: Union[str, np.ndarray] = "balanced",
seamless_clone_p=0,
always_apply=False,
p=0.5,
):
super().__init__(always_apply=always_apply, p=p)
self.seamless_clone_p = seamless_clone_p
self.images = images
self.bboxes = np.asarray(bboxes, dtype=int)
self.labels = np.asarray(labels, dtype=int)
self.transform = transform
self.read_image = read_image_fn
if class_weights == "balanced":
sample_weights = compute_sample_weight("balanced", labels)
else:
sample_weights = compute_sample_weight(class_weights, labels)
self.sample_weights = sample_weights
@property
def targets(self):
return {
"image": self.apply,
"bboxes": self.apply_to_bboxes,
}
@property
def targets_as_params(self):
return "image", "bboxes"
def get_params_dependent_on_targets(self, params):
image = params["image"]
rows, cols = image.shape[:2]
bboxes = params["bboxes"]
bboxes = A.convert_bboxes_from_albumentations(bboxes, "pascal_voc", rows, cols)
# Compute average object size
if len(bboxes) != 0:
bboxes = np.array(bboxes)[:, :4]
median_size = np.median(np.sqrt((bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])))
else:
median_size = None
seed_image, seed_label = self._select_box()
if median_size is not None:
seed_size = math.sqrt(seed_image.shape[0] * seed_image.shape[1])
scale = min(6.0, max(0.1, random.gauss(median_size / seed_size, 0.5)))
seed_image = cv2.resize(seed_image, dsize=None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
# Paint used regions
mask = np.ones((rows, cols), dtype=np.uint8) * 255
for (x1, y1, x2, y2) in bboxes:
mask[int(y1) : int(y2), int(x1) : int(x2)] = 0
mask = cv2.distanceTransform(mask, cv2.DIST_L2, 3, dstType=cv2.CV_32F)
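        # Each pixel now holds its L2 distance to the nearest ground-truth box.
        # Border bands are zeroed below so the patch stays fully inside the
        # image, and a paste centre is accepted only where the distance exceeds
        # the seed patch's larger side, conservatively preventing overlap.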
max_size = max(seed_image.shape[0], seed_image.shape[1])
half_width = seed_image.shape[1] // 2
half_height = seed_image.shape[0] // 2
mask[: half_height + 1, :] = 0
mask[:, : half_width + 1] = 0
mask[mask.shape[0] - half_height - 1 :, :] = 0
mask[:, mask.shape[1] - half_width - 1 :] = 0
local_max = mask > max_size
if not local_max.any():
return {}
nz_rows, nz_cols = np.nonzero(local_max)
index = random.choice(np.arange(len(nz_rows)))
x1 = nz_cols[index] - seed_image.shape[1] // 2
y1 = nz_rows[index] - seed_image.shape[0] // 2
x2 = x1 + seed_image.shape[1]
y2 = y1 + seed_image.shape[0]
return {
"seed_image": seed_image,
"seed_bbox": (x1, y1, x2, y2),
"seed_p": ((x1 + x2) // 2, (y1 + y2) // 2),
"seed_label": seed_label,
"use_seamless_clone": self.seamless_clone_p > random.random(),
}
def apply(self, img, seed_image=None, seed_p=None, use_seamless_clone=False, seed_bbox=None, **params):
if seed_image is not None:
if use_seamless_clone:
mask = np.ones(seed_image.shape[:2], dtype=np.uint8) * 255
mask[0, :] = 0
mask[:, 0] = 0
mask[mask.shape[0] - 1, :] = 0
mask[:, mask.shape[1] - 1] = 0
return cv2.seamlessClone(
src=np.ascontiguousarray(seed_image),
dst=np.ascontiguousarray(img),
mask=mask,
p=seed_p,
flags=cv2.NORMAL_CLONE,
)
else:
x1, y1, x2, y2 = seed_bbox
img_hard = img.copy()
img_hard[y1:y2, x1:x2] = seed_image
return img_hard
return img
def apply_to_bboxes(self, bboxes, seed_bbox=None, seed_label=None, **params):
if seed_bbox is not None:
t = A.convert_bbox_to_albumentations(seed_bbox, "pascal_voc", params["rows"], params["cols"])
bboxes = bboxes + [(*t, seed_label)]
return bboxes
def _select_box(self) -> Tuple[np.ndarray, int]:
n = len(self.images)
(index,) = random.choices(np.arange(n), self.sample_weights, k=1)
image = self.read_image(self.images[index])
x1, y1, x2, y2 = self.bboxes[index]
roi = slice(y1, y2), slice(x1, x2)
seed_image = image[roi]
seed_label = self.labels[index]
# Augment
seed_image = self.transform(image=seed_image)["image"]
return seed_image, seed_label
def get_transform_init_args_names(self):
return ()
| 5,243 |
prisoner/__init__.py
|
chenyuyou/Behavior-template
| 0 |
2170712
|
from otree.api import *
doc = """
This is a one-shot "Prisoner's Dilemma". Two players are asked separately
whether they want to cooperate or defect. Their choices directly determine the
payoffs.
"""
class Constants(BaseConstants):
name_in_url = 'prisoner'
players_per_group = 2
num_rounds = 1
instructions_template = 'prisoner/instructions.html'
    # payoff if 1 player defects and the other cooperates
betray_payoff = cu(300)
betrayed_payoff = cu(0)
# payoff if both players cooperate or both defect
both_cooperate_payoff = cu(200)
both_defect_payoff = cu(100)
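    # Resulting payoff matrix (row = my decision, column = the other's):
    #                Cooperate   Defect
    #   Cooperate       200         0
    #   Defect          300       100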
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
class Player(BasePlayer):
decision = models.StringField(
choices=[['Cooperate', 'Cooperate'], ['Defect', 'Defect']],
doc="""This player's decision""",
widget=widgets.RadioSelect,
)
# FUNCTIONS
def set_payoffs(group: Group):
for p in group.get_players():
set_payoff(p)
def other_player(player: Player):
return player.get_others_in_group()[0]
def set_payoff(player: Player):
payoff_matrix = dict(
Cooperate=dict(
Cooperate=Constants.both_cooperate_payoff, Defect=Constants.betrayed_payoff
),
Defect=dict(
Cooperate=Constants.betray_payoff, Defect=Constants.both_defect_payoff
),
)
player.payoff = payoff_matrix[player.decision][other_player(player).decision]
# PAGES
class Introduction(Page):
timeout_seconds = 100
class Decision(Page):
form_model = 'player'
form_fields = ['decision']
class ResultsWaitPage(WaitPage):
after_all_players_arrive = set_payoffs
class Results(Page):
@staticmethod
def vars_for_template(player: Player):
me = player
opponent = other_player(me)
return dict(
my_decision=me.decision,
opponent_decision=opponent.decision,
same_choice=me.decision == opponent.decision,
)
page_sequence = [Introduction, Decision, ResultsWaitPage, Results]
| 2,076 |
transformer/Layers.py
|
text-machine-lab/HierarchicalTransformer
| 7 |
2170991
|
''' Define the Layers '''
import torch.nn as nn
import torch
from transformer.SubLayers import MultiHeadAttention, PositionwiseFeedForward
__author__ = "<NAME>"
class EncoderLayer(nn.Module):
''' Compose with two layers '''
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(
n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)
def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None):
enc_output, enc_slf_attn = self.slf_attn(
enc_input, enc_input, enc_input, mask=slf_attn_mask)
enc_output *= non_pad_mask
enc_output = self.pos_ffn(enc_output)
enc_output *= non_pad_mask
return enc_output, enc_slf_attn
class UNetEncoderLayer(nn.Module):
def __init__(self, d_out, d_inner, n_head, d_k, d_v, dropout=0.1, type_='same', skip_connect=False, d_in=None):
super(UNetEncoderLayer, self).__init__()
d_in = d_in if d_in is not None else d_out # size of input to unet layer
self.slf_attn = MultiHeadAttention(
n_head, d_out, d_k, d_v, dropout=dropout, d_in=d_in)
self.pos_ffn = PositionwiseFeedForward(d_out, d_inner, dropout=dropout)
self.norm = nn.LayerNorm(d_out)
self.skip_connect = skip_connect
# TODO add depthwise-separable convolutions
self.maxpool = None
self.type = type_
if type_ == 'down':
# half size of output
self.conv = nn.Conv1d(d_in, d_in, kernel_size=3, padding=1, groups=d_in)
self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
elif type_ == 'same':
# keep size of output the same
self.conv = nn.Conv1d(d_in, d_in, kernel_size=3, padding=1, groups=d_in)
elif type_ == 'up':
# double size of output
self.conv = nn.ConvTranspose1d(d_in, d_in, kernel_size=3, stride=2, padding=1, groups=d_in)
elif type_== 'none':
self.conv = None
else:
raise RuntimeError('Did not specify appropriate convolution type')
self.conv_out = nn.Linear(d_in, d_out)
def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None):
if self.conv is not None:
conv_input = enc_input.transpose(1, 2) # (batch_size, emb_size, n_steps)
if self.type != 'up':
conv_output_t = self.conv(conv_input)
else:
# we are doing a transpose - we need to specify output size in order to recover the correct size
output_size = None if non_pad_mask is None else \
(non_pad_mask.shape[0], conv_input.shape[2], non_pad_mask.shape[1])
conv_output_t = self.conv(conv_input, output_size=output_size)
conv_output_t = self.conv_out(conv_output_t.transpose(1,2)).transpose(1,2)
# if this is a down layer, we use maxpool similar to the true U-Net
if self.maxpool is not None:
# in: (batch_size, emb_size, n_steps)
# out: (batch_size, emb_size, n_steps//2)
conv_output_t = self.maxpool(conv_output_t)
conv_output = conv_output_t.transpose(1, 2) # (batch_size, n_steps, emb_size)
# we may not need an activation here, as layer norm can act as an activation
# if same, we use skip connections from input to output to allow for more efficient gradient propagation
norm_input = enc_input + conv_output if self.skip_connect else conv_output
norm_output = self.norm(norm_input)
else:
norm_output = enc_input
# here the output of the convolution performs attention over the input
#TODO see if using norm output helps
#TODO see if attention over input helps more
enc_output, enc_slf_attn = self.slf_attn(
norm_output, enc_input, enc_input, mask=slf_attn_mask)
enc_output *= non_pad_mask
enc_output = self.pos_ffn(enc_output)
enc_output *= non_pad_mask
return enc_output, enc_slf_attn
class DecoderLayer(nn.Module):
''' Compose with three layers '''
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1, d_enc=None):
super(DecoderLayer, self).__init__()
d_enc = d_model if d_enc is None else d_enc
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout, d_in=d_enc)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)
def forward(self, dec_input, enc_output, non_pad_mask=None, slf_attn_mask=None, dec_enc_attn_mask=None):
dec_output, dec_slf_attn = self.slf_attn(
dec_input, dec_input, dec_input, mask=slf_attn_mask)
dec_output *= non_pad_mask
dec_output, dec_enc_attn = self.enc_attn(
dec_output, enc_output, enc_output, mask=dec_enc_attn_mask)
dec_output *= non_pad_mask
dec_output = self.pos_ffn(dec_output)
dec_output *= non_pad_mask
return dec_output, dec_slf_attn, dec_enc_attn
| 5,342 |
zig_zag.py
|
soham2109/algo_trading_blog
| 1 |
2171413
|
from datetime import date, timedelta
import pandas as pd
import numpy as np
from scipy import signal
from pandas_datareader import data
from pandas_datareader.nasdaq_trader import get_nasdaq_symbols
from pandas_datareader.yahoo.headers import DEFAULT_HEADERS
import requests_cache
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import streamlit as st
np.random.seed(0)
stock_key_dict={"Netflix": "NFLX",
"Microsoft": "MSFT",
"Google": "GOOG",
"Apple": "AAPL",
"Tesla": "TSLA",
"Ball Corp.": "BLL"}
plt.style.use("bmh")
def max_width(prcnt_width:int = 75):
max_width_str = f"max-width: {prcnt_width}%;"
st.markdown(f"""
<style>
.reportview-container .main .block-container{{{max_width_str}}}
</style>
""",
unsafe_allow_html=True,
)
def filter(values, percentage):
previous = values[0]
mask = [True]
for value in values[1:]:
relative_difference = np.abs(value - previous)/previous
if relative_difference > percentage:
previous = value
mask.append(True)
else:
mask.append(False)
return mask
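# Worked example (hypothetical prices): with a 10% threshold, only the
# 100 -> 115 and 115 -> 90 swings are kept relative to the last kept value:
#   filter([100, 101, 115, 114, 90], 0.1) == [True, False, True, False, True]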
def get_data(stock=None, start_date=None, end_date=None):
expire_after = timedelta(days=3)
session = requests_cache.CachedSession(cache_name='cache',
backend='sqlite',
expire_after=expire_after)
session.headers = DEFAULT_HEADERS
df = data.DataReader(
stock, 'yahoo',
start=start_date,
end=end_date,
#data_source='yahoo'
session=session
)
return df
def plot(df_peaks_valleys, filtered, data_x, data_y, label):
# Instantiate axes.
(fig, ax) = plt.subplots(1,1, figsize=(10,5), \
constrained_layout=True)
# Plot zigzag trendline.
ax.plot(df_peaks_valleys['date'].values,
df_peaks_valleys['zigzag_y'].values,
color='red', label="Extrema", lw=2)
# Plot zigzag trendline.
ax.plot(filtered['date'].values,
filtered['zigzag_y'].values,
color='blue', label="ZigZag", lw=2)
# Plot original line.
ax.plot(data_x, data_y,
linestyle='dashed',
color='black',
label="Original Line", linewidth=2)
ax.set_title("Stock: {}".format(label))
plt.legend(loc="best")
return fig
def track_valleys(stock):
today = '{}'.format(date.today())
cont = 0
series = get_data(stock=stock, start_date='2018-1-1', end_date=today)
series.insert(loc=0, column='Date', value=series.index)
series = series.reset_index(drop=True)
data_x = series.index.values
data_y = series['Close'].values
peak_indexes = signal.argrelextrema(data_y, np.greater)
peak_indexes = peak_indexes[0]
# Find valleys(min).
valley_indexes = signal.argrelextrema(data_y, np.less)
valley_indexes = valley_indexes[0]
# Merge peaks and valleys data points using pandas.
df_peaks = pd.DataFrame({'date': data_x[peak_indexes],
'zigzag_y': data_y[peak_indexes]})
df_valleys = pd.DataFrame({'date': data_x[valley_indexes],
'zigzag_y': data_y[valley_indexes]})
df_peaks_valleys = pd.concat([df_peaks, df_valleys],
axis=0, ignore_index=True, sort=True)
# Sort peak and valley datapoints by date.
df_peaks_valleys = df_peaks_valleys.sort_values(by=['date'])
    p = 0.1  # 10% swing threshold
filter_mask = filter(df_peaks_valleys.zigzag_y, p)
filtered = df_peaks_valleys[filter_mask]
return (df_peaks_valleys, filtered, data_x, data_y)
def app():
max_width(80)
st.title("ZIG ZAG")
st.markdown("---")
markdown="""
The Zig Zag indicator visualizes past price trends by connecting local peaks and troughs, which also makes drawing tools easier to apply. Instead of reacting to every random price fluctuation, it tries to flag genuine trend changes: a new zig-zag line is drawn only when the move between a swing high and a swing low exceeds a specified percentage (**often 5 percent**). By filtering out minor price movements, the indicator makes trends easier to spot on any time frame.
Note that the Zig Zag indicator does not predict future trends; rather, it highlights potential support and resistance zones between the plotted swing highs and swing lows, and it can reveal reversal patterns such as double bottoms and head-and-shoulders tops. When the Zig Zag line changes direction, traders often turn to oscillators such as the *relative strength index* (**RSI**) and the stochastics oscillator to judge whether a security's price is overbought or oversold.
"""
st.markdown(markdown)
stocks = st.selectbox(label="Select stock:",
options=['Netflix', 'Microsoft', 'Google', 'Apple', 'Tesla', 'Ball Corp.']
)
stock_name = stock_key_dict[stocks]
(df_peaks_valleys, filtered, data_x, data_y) = track_valleys(stock_name)
st.pyplot(plot(df_peaks_valleys, filtered, \
data_x, data_y, label=stocks))
if __name__=="__main__":
app()
| 5,106 |
tests/test_customer_operations.py
|
justmytwospence/banking-oop
| 0 |
2171604
|
from banking.customer_operations import change_address, onboard
from banking.models import Customer
from sqlalchemy import and_, select
from . import Session
def test_onboard(Session):
onboard.callback('<NAME>', 'USA', Session)
with Session() as session:
new_customer = session.execute(select(Customer).where(and_(
Customer.firstname == 'Greg',
Customer.lastname == 'Foobar')
)).scalar_one()
assert new_customer
def test_change_address(Session):
new_address = 'Fairfax Avenue'
change_address.callback('<NAME>', new_address, Session)
with Session() as session:
actual_address = session.execute(select(Customer.address).where(and_(
Customer.firstname == 'Spencer',
Customer.lastname == 'Foobar')
)).scalar_one()
assert actual_address == new_address
| 860 |
invenio_records/validators.py
|
rekt-hard/invenio-records
| 0 |
2170693
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
# Copyright (C) 2021 <NAME>.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Record validators."""
from jsonschema.validators import Draft4Validator, extend, validator_for
PartialDraft4Validator = extend(Draft4Validator, {'required': None})
"""Partial JSON Schema (draft 4) validator.
Special validator that contains the same validation rules of Draft4Validator,
except for required fields.
"""
def _generate_legacy_type_checks(types):
"""Generate new-style type mappings from old-style type mappings.
Based on the function `jsonschema.validators._generate_legacy_type_checks`
from jsonschema 3.x.
:param types: A mapping of type names to their Python types
:returns: A dictionary of definitions that can be passed to `TypeChecker`s
"""
def gen_type_check(pytypes):
def type_check(_checker, instance):
return isinstance(instance, pytypes)
return type_check
return {
typename: gen_type_check(pytypes)
for (typename, pytypes) in types.items()
}
def _create_validator(schema, base_validator_cls=None, custom_checks=None):
"""Create a fitting jsonschema validator class.
:param schema: The schema for which to create a fitting validator
:param base_validator_cls: The base :class:`jsonschema.protocols.Validator`
class to base the new validator class on -- if not specified, the base
class will be determined by the given schema.
:param custom_checks: A dictionary with type names and Python types
to check against, e.g. {"string": str, "object": dict}
:returns: An fitting :class:`jsonschema.protocols.Validator` class
"""
validator_cls = base_validator_cls or validator_for(schema)
if custom_checks:
type_checker = validator_cls.TYPE_CHECKER.redefine_many(
_generate_legacy_type_checks(custom_checks)
)
validator_cls = extend(
validator_cls,
type_checker=type_checker,
)
return validator_cls
| 2,208 |
map_/hash_map.py
|
night-cruise/DataStructure-py
| 4 |
2170867
|
#!/usr/bin/python
# -*-coding:UTF-8-*-
"""实现哈希表(散列表)"""
class HashMap:
def __init__(self):
        self._size = 11  # length of the hash table
        self._slots = [None] * self._size  # list holding the keys
        self._data = [None] * self._size  # list holding the values
def put(self, key, data):
hash_value = self._hash(key)
if self._slots[hash_value] is None:
self._slots[hash_value] = key
self._data[hash_value] = data
else:
if self._slots[hash_value] == key:
self._data[hash_value] = data
else:
next_slot = self._rehash(hash_value)
while self._slots[next_slot] is not None and self._slots[next_slot] != key:
next_slot = self._rehash(next_slot)
if self._slots[next_slot] is None:
self._slots[next_slot] = key
self._data[next_slot] = data
else:
                    self._data[next_slot] = data  # replace the existing value
def get(self, key):
start_slot = self._hash(key)
if self._slots[start_slot] == key:
return self._data[start_slot]
else:
next_slot = self._rehash(start_slot)
            while next_slot != start_slot:  # back at the start slot means every slot was probed without finding the key
if self._slots[next_slot] == key:
return self._data[next_slot]
next_slot = self._rehash(next_slot)
return None
def _hash(self, key):
if isinstance(key, str):
return self._hash_str(key)
elif isinstance(key, int):
return key % self._size
        else:
            raise TypeError("unsupported key type: {!r}".format(type(key)))
def _hash_int(self, key):
return key % self._size
def _hash_str(self, key):
all_weight = (1 + len(key)) * len(key) // 2
sum_ = 0
for pos in range(1, len(key) + 1):
            sum_ += int(ord(key[pos - 1]) * (pos / all_weight))  # position-weighted char sum
return sum_ % self._size
def _rehash(self, oldhash):
return (oldhash + 3) % self._size
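    # Open addressing: collisions are probed in steps of 3; since
    # gcd(3, 11) == 1, the probe sequence visits every slot before repeating.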
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.put(key, value)
def __contains__(self, key):
return self.get(key) is not None
def __str__(self):
str_lst = ["{ "]
for i in range(len(self._slots)):
if self._slots[i] is not None:
if i == len(self._slots) - 1:
str_lst.append(f"{self._slots[i]}: {self._data[i]}")
else:
str_lst.append(f"{self._slots[i]}: {self._data[i]}, ")
str_lst.append(" }")
return "".join(str_lst)
if __name__ == '__main__':
hm = HashMap()
hm[54] = "cat"
hm[26] = "dog"
hm[93] = "lion"
hm[17] = "tiger"
hm[77] = "bird"
hm[31] = "cow"
hm[44] = "goat"
hm[55] = "pig"
hm[20] = "chichen"
print(hm)
# { 77: bird, 55: pig, 44: goat, 26: dog, 93: lion, 17: tiger, 20: chichen, 31: cow, 54: cat }
print(hm[54], hm[93], hm[31], hm[32]) # cat lion cow None
print(32 in hm) # False
print(31 in hm) # True
| 3,112 |
Examples/Garch_example.py
|
NemesiP/volatiltiy-forecasting
| 6 |
2170161
|
import numpy as np
from scipy.optimize import minimize
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('C:/Users/peter/Desktop/volatility-forecasting/data/Stocks/AMD.csv')
df = df.iloc[-1500:, :]
df['Chg'] = np.log(df.close).diff().fillna(0)
returns = df.Chg[1:].values
df['Date'] = pd.to_datetime(df.iloc[:, 0])
def garch_filter(alpha0, alpha1, beta1, omega, eps):
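    # GJR-GARCH(1,1) recursion:
    #   sigma2_t = alpha0 + alpha1*eps_{t-1}**2 + beta1*sigma2_{t-1}
    #              + omega*eps_{t-1}**2 * (eps_{t-1} < 0)
    # with sigma2_0 set to the unconditional variance alpha0/(1 - alpha1 - beta1).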
iT = len(eps)
sigma_2 = np.zeros(iT)
for i in range(iT):
if i == 0:
sigma_2[i] = alpha0/(1 - alpha1 - beta1)
else:
sigma_2[i] = alpha0 + alpha1*eps[i - 1]**2 + beta1*sigma_2[i - 1] + omega * eps[i - 1]**2 * (eps[i - 1] < 0)
return sigma_2
def garch_loglike(vP, eps):
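    # Gaussian log-likelihood up to additive constants, negated so that
    # scipy.optimize.minimize effectively maximises the likelihood.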
alpha0 = vP[0]
alpha1 = vP[1]
beta1 = vP[2]
omega = vP[3]
sigma_2 = garch_filter(alpha0, alpha1, beta1, omega, eps)
logL = -np.sum(-np.log(sigma_2) - eps**2/sigma_2)
return logL
cons = ({'type': 'ineq', 'fun': lambda x: np.array(x)})
vP0 = (0.1, 0.05, 0.92, 0.2)
res = minimize(garch_loglike, vP0, args = (returns,),
bounds = ((0.0001, None), (0.0001, None), (0.0001, None), (0.0001, None)),
options = {'disp': True})
alpha0_est = res.x[0]
alpha1_est = res.x[1]
beta1_est = res.x[2]
omega_est = res.x[3]
sigma2 = garch_filter(alpha0_est, alpha1_est, beta1_est, omega_est, returns)
plt.plot(df.Date[1:], sigma2, label = 'GJR-GARCH')
plt.legend(loc = 'best')
plt.show()
def garch_filter2(alpha0, alpha1, beta1, eps):
iT = len(eps)
sigma_2 = np.zeros(iT)
for i in range(iT):
if i == 0:
sigma_2[i] = alpha0/(1 - alpha1 - beta1)
else:
sigma_2[i] = alpha0 + alpha1*eps[i - 1]**2 + beta1*sigma_2[i - 1]
return sigma_2
def garch_loglike2(vP, eps):
alpha0 = vP[0]
alpha1 = vP[1]
beta1 = vP[2]
sigma_2 = garch_filter2(alpha0, alpha1, beta1, eps)
logL = -np.sum(-np.log(sigma_2) - eps**2/sigma_2)
return logL
cons = ({'type': 'ineq', 'fun': lambda x: np.array(x)})
vP0 = (0.1, 0.05, 0.92)
res2 = minimize(garch_loglike2, vP0, args = (returns,),
bounds = ((0.0001, None), (0.0001, None), (0.0001, None)),
options = {'disp': True})
alpha0_est2 = res2.x[0]
alpha1_est2 = res2.x[1]
beta1_est2 = res2.x[2]
sigma22 = garch_filter2(alpha0_est2, alpha1_est2, beta1_est2, returns)
plt.plot(df.Date[1:], sigma22, label = 'GARCH')
plt.legend(loc = 'best')
plt.show()
plt.plot(df.Date[1:], sigma22, label = 'GARCH')
plt.plot(df.Date[1:], sigma2, label = 'GJR-GARCH')
plt.legend(loc = 'best')
plt.show()
plt.scatter(sigma2, sigma22)
plt.show()
| 2,683 |
samples/Python/ClosePosition.py
|
holgafx/gehtsoft
| 0 |
2170852
|
# Copyright 2018 Gehtsoft USA LLC
# Licensed under the license derived from the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://fxcodebase.com/licenses/open-source/license.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import threading
from time import sleep
from forexconnect import fxcorepy, ForexConnect, Common
import common_samples
def parse_args():
parser = argparse.ArgumentParser(description='Process command parameters.')
common_samples.add_main_arguments(parser)
common_samples.add_instrument_timeframe_arguments(parser, timeframe=False)
common_samples.add_account_arguments(parser)
args = parser.parse_args()
return args
class ClosedTradesMonitor:
def __init__(self):
self.__close_order_id = None
self.__closed_trades = {}
self.__event = threading.Event()
def on_added_closed_trade(self, _, __, closed_trade_row):
close_order_id = closed_trade_row.close_order_id
self.__closed_trades[close_order_id] = closed_trade_row
if self.__close_order_id == close_order_id:
self.__event.set()
def wait(self, time, close_order_id):
self.__close_order_id = close_order_id
closed_trade_row = self.find_closed_trade(close_order_id)
if closed_trade_row is not None:
return closed_trade_row
self.__event.wait(time)
return self.find_closed_trade(close_order_id)
def find_closed_trade(self, close_order_id):
if close_order_id in self.__closed_trades:
return self.__closed_trades[close_order_id]
return None
def reset(self):
self.__close_order_id = None
self.__closed_trades.clear()
self.__event.clear()
class OrdersMonitor:
def __init__(self):
self.__order_id = None
self.__added_orders = {}
self.__deleted_orders = {}
self.__added_order_event = threading.Event()
self.__deleted_order_event = threading.Event()
def on_added_order(self, _, __, order_row):
order_id = order_row.order_id
self.__added_orders[order_id] = order_row
if self.__order_id == order_id:
self.__added_order_event.set()
def on_deleted_order(self, _, __, order_row):
order_id = order_row.order_id
self.__deleted_orders[order_id] = order_row
if self.__order_id == order_id:
self.__deleted_order_event.set()
def wait(self, time, order_id):
self.__order_id = order_id
is_order_added = True
is_order_deleted = True
# looking for an added order
if order_id not in self.__added_orders:
is_order_added = self.__added_order_event.wait(time)
if is_order_added:
order_row = self.__added_orders[order_id]
print("The order has been added. Order ID: {0:s}, Rate: {1:.5f}, Time In Force: {2:s}".format(
order_row.order_id, order_row.rate, order_row.time_in_force))
# looking for a deleted order
if order_id not in self.__deleted_orders:
is_order_deleted = self.__deleted_order_event.wait(time)
if is_order_deleted:
order_row = self.__deleted_orders[order_id]
print("The order has been deleted. Order ID: {0}".format(order_row.order_id))
return is_order_added and is_order_deleted
def reset(self):
self.__order_id = None
self.__added_orders.clear()
self.__deleted_orders.clear()
self.__added_order_event.clear()
self.__deleted_order_event.clear()
def main():
args = parse_args()
user_id = args.l
password = args.p
str_url = args.u
connection = args.c
session_id = args.session
pin = args.pin
instrument = args.i
str_account = args.account
with ForexConnect() as fx:
fx.login(user_id, password, str_url, connection, session_id,
pin, common_samples.session_status_changed)
account = Common.get_account(fx, str_account)
if not account:
raise Exception(
"The account '{0}' is not valid".format(account))
else:
str_account = account.account_id
print("AccountID='{0}'".format(str_account))
offer = Common.get_offer(fx, instrument)
if not offer:
raise Exception(
"The instrument '{0}' is not valid".format(instrument))
trade = Common.get_trade(fx, str_account, offer.offer_id)
if not trade:
raise Exception("There are no opened positions for instrument '{0}'".format(instrument))
amount = trade.amount
buy = fxcorepy.Constants.BUY
sell = fxcorepy.Constants.SELL
buy_sell = sell if trade.buy_sell == buy else buy
request = fx.create_order_request(
order_type=fxcorepy.Constants.Orders.TRUE_MARKET_CLOSE,
OFFER_ID=offer.offer_id,
ACCOUNT_ID=str_account,
BUY_SELL=buy_sell,
AMOUNT=amount,
TRADE_ID=trade.trade_id
)
if request is None:
raise Exception("Cannot create request")
orders_monitor = OrdersMonitor()
closed_trades_monitor = ClosedTradesMonitor()
closed_trades_table = fx.get_table(ForexConnect.CLOSED_TRADES)
orders_table = fx.get_table(ForexConnect.ORDERS)
trades_listener = Common.subscribe_table_updates(closed_trades_table,
on_add_callback=closed_trades_monitor.on_added_closed_trade)
orders_listener = Common.subscribe_table_updates(orders_table, on_add_callback=orders_monitor.on_added_order,
on_delete_callback=orders_monitor.on_deleted_order)
try:
resp = fx.send_request(request)
order_id = resp.order_id
except Exception as e:
common_samples.print_exception(e)
trades_listener.unsubscribe()
orders_listener.unsubscribe()
else:
# Waiting for an order to appear/delete or timeout (default 30)
is_success = orders_monitor.wait(30, order_id)
closed_trade_row = None
if is_success:
# Waiting for a closed trade to appear or timeout (default 30)
closed_trade_row = closed_trades_monitor.wait(30, order_id)
if closed_trade_row is None:
print("Response waiting timeout expired.\n")
else:
print("For the order: OrderID = {0} the following positions have been closed: ".format(order_id))
print("Closed Trade ID: {0:s}; Amount: {1:d}; Closed Rate: {2:.5f}".format(closed_trade_row.trade_id,
closed_trade_row.amount,
closed_trade_row.close_rate))
sleep(1)
trades_listener.unsubscribe()
orders_listener.unsubscribe()
try:
fx.logout()
except Exception as e:
common_samples.print_exception(e)
if __name__ == "__main__":
main()
input("Done! Press enter key to exit\n")
| 7,739 |
trackeval/utils.py
|
ATrackerLearner/TrackEval-custom
| 0 |
2171543
|
import os
import csv
from collections import OrderedDict
from statistics import mean
from typing import List
def print_hota(d : dict):
"""Print HOTA fancy way
Args:
d (dict): HOTA dictionnari in extract_dict list
"""
upper_line : str = "||"
down_line : str = "||"
for i,key in enumerate(d.keys()):
upper_line += "{:^10}".format(key) + "||"
down_line += "{:^10.2f}".format(d[key]) + "||"
if (i+1) % 7 == 0:
print(upper_line + '\n' + down_line)
upper_line : str = "||"
down_line : str = "||"
if (i+1) % 7 != 0:
print(upper_line + '\n' + down_line)
def extract_dict(trackeval_dict : dict) -> List[dict]:
# Initialize hota/count score list
hota_score_list : List[dict] = []
# Extract informations of interest
hota_score : dict = trackeval_dict['MotChallenge2DBox']['dataset_train']
del hota_score['COMBINED_SEQ']
# Browse for every sequence
for i in range(len(hota_score)):
# Format score
hota_score_temp : dict= hota_score['seq_{!s}'.format(i+1)]['pedestrian']['HOTA']
# Remove unrelevent keys
del hota_score_temp['HOTA(0)']
del hota_score_temp['LocA(0)']
del hota_score_temp['HOTALocA(0)']
del hota_score_temp['RHOTA']
for key in hota_score_temp.keys():
hota_score_temp[key] = mean(hota_score_temp[key])
# Add count keys to HOTA dict
count_score_temp : dict = hota_score['seq_{!s}'.format(i+1)]['pedestrian']['Count']
for key in count_score_temp.keys():
hota_score_temp[key] = count_score_temp[key]
# Append score hota
hota_score_list.append(hota_score_temp)
return hota_score_list
def init_config(config, default_config, name=None):
"""Initialise non-given config values with defaults"""
if config is None:
config = default_config
else:
for k in default_config.keys():
if k not in config.keys():
config[k] = default_config[k]
if name and config['PRINT_CONFIG']:
print('\n%s Config:' % name)
for c in config.keys():
print('%-20s : %-30s' % (c, config[c]))
return config
def get_code_path():
"""Get base path where code is"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def validate_metrics_list(metrics_list):
"""Get names of metric class and ensures they are unique, further checks that the fields within each metric class
do not have overlapping names.
"""
metric_names = [metric.get_name() for metric in metrics_list]
# check metric names are unique
if len(metric_names) != len(set(metric_names)):
raise TrackEvalException('Code being run with multiple metrics of the same name')
fields = []
for m in metrics_list:
fields += m.fields
# check metric fields are unique
if len(fields) != len(set(fields)):
raise TrackEvalException('Code being run with multiple metrics with fields of the same name')
return metric_names
def write_summary_results(summaries, cls, output_folder):
"""Write summary results to file"""
fields = sum([list(s.keys()) for s in summaries], [])
values = sum([list(s.values()) for s in summaries], [])
# In order to remain consistent upon new fields being adding, for each of the following fields if they are present
# they will be output in the summary first in the order below. Any further fields will be output in the order each
# metric family is called, and within each family either in the order they were added to the dict (python >= 3.6) or
# randomly (python < 3.6).
default_order = ['HOTA', 'DetA', 'AssA', 'DetRe', 'DetPr', 'AssRe', 'AssPr', 'LocA', 'RHOTA', 'HOTA(0)', 'LocA(0)',
'HOTALocA(0)', 'MOTA', 'MOTP', 'MODA', 'CLR_Re', 'CLR_Pr', 'MTR', 'PTR', 'MLR', 'CLR_TP', 'CLR_FN',
'CLR_FP', 'IDSW', 'MT', 'PT', 'ML', 'Frag', 'sMOTA', 'IDF1', 'IDR', 'IDP', 'IDTP', 'IDFN', 'IDFP',
'Dets', 'GT_Dets', 'IDs', 'GT_IDs']
default_ordered_dict = OrderedDict(zip(default_order, [None for _ in default_order]))
for f, v in zip(fields, values):
default_ordered_dict[f] = v
for df in default_order:
if default_ordered_dict[df] is None:
del default_ordered_dict[df]
fields = list(default_ordered_dict.keys())
values = list(default_ordered_dict.values())
out_file = os.path.join(output_folder, cls + '_summary.txt')
os.makedirs(os.path.dirname(out_file), exist_ok=True)
with open(out_file, 'w', newline='') as f:
writer = csv.writer(f, delimiter=' ')
writer.writerow(fields)
writer.writerow(values)
def write_detailed_results(details, cls, output_folder):
"""Write detailed results to file"""
sequences = details[0].keys()
fields = ['seq'] + sum([list(s['COMBINED_SEQ'].keys()) for s in details], [])
out_file = os.path.join(output_folder, cls + '_detailed.csv')
os.makedirs(os.path.dirname(out_file), exist_ok=True)
with open(out_file, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(fields)
for seq in sorted(sequences):
if seq == 'COMBINED_SEQ':
continue
writer.writerow([seq] + sum([list(s[seq].values()) for s in details], []))
writer.writerow(['COMBINED'] + sum([list(s['COMBINED_SEQ'].values()) for s in details], []))
def load_detail(file):
"""Loads detailed data for a tracker."""
data = {}
with open(file) as f:
for i, row_text in enumerate(f):
row = row_text.replace('\r', '').replace('\n', '').split(',')
if i == 0:
keys = row[1:]
continue
current_values = row[1:]
seq = row[0]
if seq == 'COMBINED':
seq = 'COMBINED_SEQ'
            if (len(current_values) == len(keys)) and seq != '':
data[seq] = {}
for key, value in zip(keys, current_values):
data[seq][key] = float(value)
return data
class TrackEvalException(Exception):
"""Custom exception for catching expected errors."""
...
| 6,284 |
backend/WikiContrib/result/helper.py
|
kostajh/WikiContrib
| 17 |
2171548
|
import pandas as pd
from django.utils.text import slugify
from hashlib import sha256
from django.utils.crypto import get_random_string
from query.models import Query
from WikiContrib.settings import API_TOKEN, GITHUB_ACCESS_TOKEN
ORGS = [
"wikimedia",
"wmde",
"DataValues",
"commons-app",
"wikidata",
"openzim",
"mediawiki-utilities",
"wiki-ai",
"wikimedia-research",
"toollabs",
"toolforge",
"counterVandalism"
]
API_ENDPOINTS = [
["""https://phabricator.wikimedia.org/api/maniphest.search""",
"""https://phabricator.wikimedia.org/api/user.search"""],
["""https://gerrit.wikimedia.org/r/changes/?q=owner:{gerrit_username}&o=DETAILED_ACCOUNTS""",
"""https://gerrit.wikimedia.org/r/accounts/?q=name:{gerrit_username}&o=DETAILS"""],
["""https://api.github.com/search/commits?per_page=100&q=author:{github_username}""",
"""https://api.github.com/search/issues?per_page=100&q=is:pr+is:merged+author:{github_username}"""]
]
REQUEST_DATA = [
{
'constraints[authorPHIDs][0]': '',
'api.token': API_TOKEN,
'constraints[createdStart]': 0,
'constraints[createdEnd]': 0
},
{
'constraints[assigned][0]': '',
'api.token': API_TOKEN,
'constraints[createdStart]': 0,
'constraints[createdEnd]': 0
},
{
'constraints[usernames][0]':'',
'api.token': API_TOKEN
},
{
'github_username':'',
'github_access_token':GITHUB_ACCESS_TOKEN,
'createdStart':0,
'createdEnd':0
}
]
def get_prev_user(file, ind):
prev_user = None
while True:
if ind != 0:
temp = file.iloc[ind - 1, :]
if pd.isnull(temp['fullname']) or (pd.isnull(temp['Gerrit']) and pd.isnull(temp['Phabricator'])):
ind -= 1
else:
prev_user = temp['fullname']
break
else:
break
return prev_user
def get_next_user(file, ind):
next_user = None
while True:
if ind != len(file) - 1:
temp = file.iloc[ind+1, :]
if pd.isnull(temp['fullname']) or (pd.isnull(temp['Gerrit']) and pd.isnull(temp['Phabricator'])):
ind += 1
else:
next_user = temp['fullname']
break
else:
break
return next_user
def create_hash(usersArr=None):
"""
:return: hash code to create the Query.
"""
hash_code = ""
    if usersArr is None:
hash_code = get_random_string(64)
while Query.objects.filter(hash_code=hash_code).exists():
hash_code = get_random_string(64)
else:
fullname_slug = ""
        for user in usersArr:
            hash_code = hash_code + user["fullname"].lower() + user["gerrit_username"].lower()\
                + user["phabricator_username"].lower() + user["github_username"].lower()
if len(usersArr) == 1:
fullname_slug = slugify(usersArr[0]["fullname"].lower())
hash_code = fullname_slug +"-"+ sha256(hash_code.encode("utf-8")).hexdigest()[:9]
else:
hash_code = sha256(hash_code.encode("utf-8")).hexdigest()
return hash_code
| 3,192 |
metabot/metabot/NormalizedDict.py
|
CarlQLange/sophox
| 24 |
2171339
|
from .utils import sitelink_normalizer
# See also
# https://stackoverflow.com/questions/2390827/how-to-properly-subclass-dict-and-override-getitem-setitem/2390997
class NormalizedDict(dict):
def __init__(self, *args, **kwargs):
super().__init__()
self.update(*args, **kwargs)
def __getitem__(self, key):
val = dict.__getitem__(self, sitelink_normalizer(key))
return val
def __setitem__(self, key, val):
dict.__setitem__(self, sitelink_normalizer(key), val)
def update(self, *args, **kwargs):
for k, v in dict(*args, **kwargs).items():
self[k] = v
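# Note: every key passes through sitelink_normalizer on both read and write,
# so any two keys that normalise to the same string share a single entry.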
| 628 |
tests/FakeOs.py
|
kyan001/ConsoleCmdTool
| 0 |
2170351
|
# -*- coding: utf-8 -*-
import queue
class FakeOs:
    '''Stub that redirects os.system calls for tests.'''
def __init__(self):
self.call_q = queue.LifoQueue() # (stack)
def system(self, cmd):
self.call_q.put(cmd)
return 0
def readline(self):
if self.call_q.empty():
return None
else:
return self.call_q.get()
def clean(self):
self.call_q = queue.LifoQueue()
| 422 |
main.py
|
Saivivek-Peddi/ml_horizontal_partitioning
| 0 |
2170730
|
from preprocessing import Preporcess
from training import Training
from postprocessing import Postprocess
from build_partitions import Build_partitions
prp = Preporcess('StateNames.csv','queries.csv')
t = Training(prp.training_data,3)
pop = Postprocess(prp.training_data,t.model)
b = Build_partitions(prp.data_path)
| 317 |
AES_py/AES_demo/aes_128.py
|
R3DDY97/crypto-py
| 3 |
2171231
|
#!/usr/bin/env python3
import copy
from aes_constants import *
from get_info import (get_message, get_key)
class Aes_128(object):
def __init__(self):
# def __init__(self, key):
self.key = get_key()
key = self.key
# encryption round keys
Ke = [[0] * 4 for i in range(11)]
# decryption round keys
Kd = [[0] * 4 for i in range(11)]
# copy user material bytes into temporary ints
tk = []
for i in range(0, 4):
tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
(ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))
# copy values into round key arrays
t = j = tt = 0
while j < 4 and t < 44:
Ke[int(t / 4)][t % 4] = tk[j]
Kd[10 - (int(t / 4))][t % 4] = tk[j]
j += 1
t += 1
rconpointer = 0
while t < 44:
# extrapolate using phi (the round key evolution function)
tt = tk[4 - 1]
tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \
(S[tt & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) ^ \
(rcon[rconpointer] & 0xFF) << 24
rconpointer += 1
for i in range(1, 4):
tk[i] ^= tk[i - 1]
# copy values into round key arrays
j = 0
while j < 4 and t < 44:
Ke[int(t / 4)][t % 4] = tk[j]
Kd[10 - (int(t / 4))][t % 4] = tk[j]
j += 1
t += 1
# inverse MixColumn where needed
for r in range(1, 10):
for i in range(4):
tt = Kd[r][i]
Kd[r][i] = U1[(tt >> 24) & 0xFF] ^ U2[(tt >> 16) & 0xFF] ^ \
U3[(tt >> 8) & 0xFF] ^ U4[tt & 0xFF]
self.Ke = Ke
self.Kd = Kd
def aes_encrypt(self, message):
block_list = get_message(message)
ciphertext = ""
for block in block_list:
ciphertext += self.encrypt(block)
print(ciphertext)
return ciphertext
def encrypt(self, plaintext):
if len(plaintext) != 16:
raise ValueError('wrong block length, expected {} got {}'.format(16, str(len(plaintext))))
Ke = self.Ke
s1 = shifts[1][0]
s2 = shifts[2][0]
s3 = shifts[3][0]
        # temporary work arrays (allocated separately to avoid aliasing)
        a = [0] * 4
        t = [0] * 4
# plaintext to ints + key
for i in range(4):
t[i] = (ord(plaintext[i * 4]) << 24 |
ord(plaintext[i * 4 + 1]) << 16 |
ord(plaintext[i * 4 + 2]) << 8 |
ord(plaintext[i * 4 + 3])) ^ Ke[0][i]
# apply round transforms
for r in range(1, 10):
for i in range(4):
a[i] = (T1[(t[i] >> 24) & 0xFF] ^
T2[(t[(i + s1) % 4] >> 16) & 0xFF] ^
T3[(t[(i + s2) % 4] >> 8) & 0xFF] ^
T4[t[(i + s3) % 4] & 0xFF]) ^ Ke[r][i]
t = copy.deepcopy(a)
# last round is special
result = []
for i in range(4):
tt = Ke[10][i]
result.append((S[(t[i] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((S[(t[(i + s1) % 4] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((S[(t[(i + s2) % 4] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((S[t[(i + s3) % 4] & 0xFF] ^ tt) & 0xFF)
return ''.join(list(map(chr, result)))
def aes_decrypt(self, cipher):
cipher_list = [cipher[16 * i:(i + 1) * 16] for i in range(len(cipher) // 16)]
plaintext = ""
for block in cipher_list:
plaintext += self.decrypt(block)
print(plaintext)
return plaintext
def decrypt(self, ciphertext):
if len(ciphertext) != 16:
            raise ValueError('wrong block length, expected {} got {}'.format(16, str(len(ciphertext))))
Kd = self.Kd
s1 = shifts[1][1]
s2 = shifts[2][1]
s3 = shifts[3][1]
a = [0] * 4
t = [0] * 4
# ciphertext to ints + key
for i in range(4):
t[i] = (ord(ciphertext[i * 4]) << 24 |
ord(ciphertext[i * 4 + 1]) << 16 |
ord(ciphertext[i * 4 + 2]) << 8 |
ord(ciphertext[i * 4 + 3])) ^ Kd[0][i]
# apply round transforms
for r in range(1, 10):
for i in range(4):
a[i] = (T5[(t[i] >> 24) & 0xFF] ^
T6[(t[(i + s1) % 4] >> 16) & 0xFF] ^
T7[(t[(i + s2) % 4] >> 8) & 0xFF] ^
T8[t[(i + s3) % 4] & 0xFF]) ^ Kd[r][i]
t = copy.deepcopy(a)
# last round is special
result = []
for i in range(4):
tt = Kd[10][i]
result.append((Si[(t[i] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((Si[(t[(i + s1) % 4] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((Si[(t[(i + s2) % 4] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((Si[t[(i + s3) % 4] & 0xFF] ^ tt) & 0xFF)
return ''.join(list(map(chr, result)))
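# A minimal usage sketch (assumes get_key()/get_message() from get_info prompt
# for a 16-character key and split the message into 16-byte blocks):
#
#   aes = Aes_128()
#   ct = aes.aes_encrypt("a secret message")
#   pt = aes.aes_decrypt(ct)   # should round-trip back to the original blocks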
| 5,343 |
core/migrations/0026_auto_20180824_1440.py
|
raheemazeezabiodun/art-backend
| 4 |
2170795
|
# Generated by Django 2.0.1 on 2018-08-24 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0025_auto_20180824_1227'),
]
operations = [
migrations.AlterField(
model_name='assetstatus',
name='current_status',
field=models.CharField(choices=[('Available', 'Available'), ('Allocated', 'Allocated'), ('Lost', 'Lost'), ('Damaged', 'Damaged')], default='Available', max_length=50),
),
migrations.AlterUniqueTogether(
name='asset',
unique_together={('asset_code', 'serial_number')},
),
]
| 665 |
flask-webserver/website.py
|
DaCryptoSpartan/r6teambuilder
| 0 |
2171597
|
from helper import backend_functions
from helper.custom_forms import InputForm, SubmitForm, HiddenForm
from helper.get_player_rank_data import get_player_rank_data
from flask import Flask, render_template, url_for, redirect, flash, request
import os, sys
import json
from operator import itemgetter
SCRIMDATE = 'Fri 10 July @ 7pm EDT'
ALLOW_SUBMISSIONS = True
# start of flask app
app = Flask(__name__)
SECRET_KEY = os.urandom(64)
app.config['SECRET_KEY'] = SECRET_KEY
@app.template_filter('tojsonobj')
def tojsonobj(string):
if isinstance(string, str):
string = string.replace("None", "''")
string = string.replace("\'", "\"")
string = json.loads(string)
return string
else:
return string
# main webpages
@app.route('/', methods=["GET", "POST"])
@app.route('/home', methods=["GET", "POST"])
def home():
global SCRIMDATE
global ALLOW_SUBMISSIONS
if ALLOW_SUBMISSIONS:
input_form = InputForm()
if request.method == 'POST' and input_form.validate_on_submit():
form_data = {'discord':input_form.discord.data, 'uplay':input_form.uplay.data}
player_rank_data = get_player_rank_data(form_data['uplay'])
if player_rank_data == 'NOT FOUND':
return redirect(url_for('not_found', player_entry=form_data))
form_data.update(player_rank_data)
return redirect(url_for('confirmation_check', player_entry=form_data))
else:
return render_template('submissions.html', scrimdate=SCRIMDATE, form=input_form)
else:
return render_template('deny_submissions.html')
@app.route('/success')
def success():
return render_template('success.html', player_entry=request.args.get('player_entry'))
@app.route('/confirmation_check', methods=["GET", "POST"])
def confirmation_check():
if request.method == 'POST':
submit_form = HiddenForm()
form_data = {'discord':submit_form.discord.data, 'uplay':submit_form.uplay.data, 'rank_w_mmr':submit_form.rank_w_mmr.data, 'avg_mmr':submit_form.avg_mmr.data,
'level':submit_form.level.data, 'season':submit_form.season.data, 'picture':submit_form.picture.data, 'rank_img':submit_form.rank_img.data}
#player_csv_entry = dict((itemgetter(*['discord', 'uplay', 'rank_w_mmr', 'avg_mmr', 'level'])(form_data)))
#backend_functions.player_to_csv(player_entry)
return redirect(url_for('success', player_entry=form_data))
else:
player_entry = tojsonobj(request.args.get('player_entry'))
#submit_form = SubmitForm(player_entry)
        hidden_form = HiddenForm(discord=player_entry['discord'], uplay=player_entry['uplay'], rank_w_mmr=player_entry['rank_w_mmr'], avg_mmr=player_entry['avg_mmr'],
                                 level=player_entry['level'], season=player_entry['season'], picture=player_entry['picture'], rank_img=player_entry['rank_img'])
return render_template('confirmation_check.html', player_entry=player_entry, form=hidden_form)
@app.route('/not_found')
def not_found():
return render_template('not_found.html', player_entry=request.args.get('player_entry'))
@app.route('/test', methods=["GET", "POST"])
def test():
global SCRIMDATE
global ALLOW_SUBMISSIONS
if ALLOW_SUBMISSIONS:
input_form = InputForm()
if request.method == 'POST' and input_form.validate_on_submit():
form_data = {'discord':input_form.discord.data, 'uplay':input_form.uplay.data}
player_rank_data = get_player_rank_data(form_data['uplay'])
if player_rank_data == 'NOT FOUND':
return redirect(url_for('not_found', player_entry=form_data))
form_data.update(player_rank_data)
return redirect(url_for('confirmation_check', player_entry=form_data))
else:
return render_template('submissions_test.html', scrimdate=SCRIMDATE, form=input_form)
else:
return render_template('deny_submissions.html')
# run the webserver on all interfaces (0.0.0.0), port 8080
if __name__ == '__main__':
app.run('0.0.0.0', 8080, debug=True)
| 3,907 |
status/statusapp/templatetags/displayoptions_filters.py
|
dcalderon/TorStatus
| 1 |
2171153
|
"""
Custom filters for the columnpreferences page.
"""
from django import template
register = template.Library()
COLUMN_VALUE_NAME = {'Country Code': 'country',
'Router Name': 'nickname',
'Bandwidth': 'bandwidthobserved',
'Uptime': 'uptime',
'IP': 'address',
'Hostname': 'hostname',
'Icons': 'icons',
'ORPort': 'orport',
'DirPort': 'dirport',
'BadExit': 'isbadexit',
'Named': 'isnamed',
'Exit': 'isexit',
'Authority': 'isauthority',
'Fast': 'isfast',
'Guard': 'isguard',
'Stable': 'isstable',
'Running': 'isrunning',
'Valid': 'isvalid',
'Directory': 'isv2dir',
'Platform': 'platform',
'Fingerprint': 'fingerprint',
'LastDescriptorPublished': 'published',
'Contact': 'contact',
'BadDir': 'isbaddirectory',
}
@register.filter
def movable(column_name):
"""
Checks whether or not the passed column can be moved on the list.
@rtype: C{boolean}
"""
not_movable_columns = set(("Named", "Exit", "Authority", "Fast",
"Guard", "Stable", "Running", "Valid",
"V2Dir", "Platform", "Hibernating"))
    if column_name in not_movable_columns:
        return False
    else:
        return True
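# A hedged usage sketch in a Django template (hypothetical template code):
#
#   {% load displayoptions_filters %}
#   {% if column|movable %} ...render a drag handle... {% endif %}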
| 1,669 |
migrations/versions/9f92f1e1de67_.py
|
realm520/flask_base
| 0 |
2169463
|
"""empty message
Revision ID: <PASSWORD>
Revises:
Create Date: 2018-06-29 16:40:41.191761
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<PASSWORD>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('t_users', sa.Column('password', sa.String(length=16), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('t_users', 'password')
# ### end Alembic commands ###
| 640 |
helpers.py
|
tyoc213-contrib/reformer-fastai-old
| 3 |
2170335
|
"""
Helper functions, including profiling
"""
import torch
from torch import nn
import torch.autograd.profiler as profiler
from fastai.text.all import *
from fastai.basics import *
def do_cuda_timing(f, inp, context=None, n_loops=100):
'''
Get timings of cuda modules. Note `self_cpu_time_total` is returned, but
from experiments this appears to be similar/same to the total CUDA time
f : function to profile, typically an nn.Module
inp : required input to f
context : optional additional input into f, used for Decoder-style modules
'''
f.cuda()
inp = inp.cuda()
if context is not None: context = context.cuda()
with profiler.profile(record_shapes=False, use_cuda=True) as prof:
with profiler.record_function("model_inference"):
with torch.no_grad():
for _ in range(n_loops):
if context is None: f(inp)
else: f(inp, context)
torch.cuda.synchronize()
res = round((prof.key_averages().self_cpu_time_total / 1000) / n_loops, 3)
print(f'{res}ms')
return res
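# A quick usage sketch (hypothetical module and input; needs a CUDA device):
#
#   layer = nn.Linear(512, 512)
#   x = torch.randn(8, 512)
#   avg_ms = do_cuda_timing(layer, x, n_loops=100)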
def model_performance(n_loops=5, model='arto', dls=None, n_epochs=1, lr=5e-4):
"""
DEMO CODE ONLY!
Run training loop to measure timings. Note that the models internally
should be changed depending on the model you would like to use.
You should also adjust the metrics you are monitoring
"""
    acc_ls, ppl_ls = [], []
for i in range(n_loops):
# ADD YOUR MODEL(S) INIT HERE
# if model == 'arto': m = artoTransformerLM(vocab_sz, 512)
# elif model == 'pt': m = ptTransformerLM(vocab_sz, 512)
# else: print('model name not correct')
learn = Learner(dls, m,
loss_func=CrossEntropyLossFlat(),
metrics=[accuracy, Perplexity()]).to_native_fp16()
learn.fit_one_cycle(n_epochs, lr, wd=0.05)
acc_ls.append(learn.recorder.final_record[2])
ppl_ls.append(learn.recorder.final_record[3])
print(f'Avg Accuracy: {round(sum(acc_ls)/len(acc_ls),3)}, std: {np.std(acc_ls)}')
print(f'Avg Perplexity: {round(sum(ppl_ls)/len(ppl_ls),3)}, std: {np.std(ppl_ls)}')
print()
return learn, acc_ls, ppl_ls
def total_params(m):
"""
Give the number of parameters of a module and if it's trainable or not
    - Taken from fastai.callback.hook
"""
params = sum([p.numel() for p in m.parameters()])
trains = [p.requires_grad for p in m.parameters()]
return params, (False if len(trains)==0 else trains[0])
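# A quick usage sketch (hypothetical module):
#
#   layer = nn.Linear(10, 10)
#   params, trainable = total_params(layer)   # -> (110, True)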
| 2,657 |
ufora/cumulus/distributed/CumulusGatewayRemote.py
|
ufora/ufora
| 571 |
2170467
|
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
import uuid
import time
import ufora.util.ManagedThread as ManagedThread
import ufora
import ufora.native.Hash as HashNative
import ufora.native.Cumulus as CumulusNative
import ufora.cumulus.distributed.CumulusGateway as CumulusGateway
import ufora.cumulus.distributed.CumulusActiveMachines as CumulusActiveMachines
import ufora.FORA.python.ModuleImporter as ModuleImporter
HANDSHAKE_TIMEOUT = 10.0
class RemoteGateway(CumulusGateway.CumulusGateway,
CumulusActiveMachines.CumulusActiveMachinesListener):
def __init__(self,
callbackScheduler,
vdm,
channelFactory,
activeMachines,
viewFactory):
CumulusGateway.CumulusGateway.__init__(self,
callbackScheduler,
vdm,
viewFactory)
ModuleImporter.initialize()
self.channelFactory_ = channelFactory
self.connectedMachines_ = set()
self.disconnectedMachines_ = set()
self.desiredMachines_ = set()
self.connectingThreads_ = []
self.isTornDown_ = False
self.activeMachines = activeMachines
self.activeMachines.addListener(self)
self.activeMachines.startService()
def getClusterStatus(self):
return {
"workerCount": len(self.activeMachines.activeMachineIds)
}
def teardown(self):
with self.lock_:
self.isTornDown_ = True
for thread in self.connectingThreads_:
thread.join()
self.stop()
self.activeMachines.dropListener(self)
self.activeMachines.stopService()
CumulusGateway.CumulusGateway.teardown(self)
def isConnected(self):
return self.activeMachines.isConnected()
def onWorkerAdd(self, ip, ports, machineIdAsString):
machineId = CumulusNative.MachineId(HashNative.Hash.stringToHash(machineIdAsString))
with self.lock_:
if self.isTornDown_:
return
logging.info("CumulusClient %s preparing to connect to %s", self.cumulusClientId, machineId)
self.desiredMachines_.add(machineId)
newThread = ManagedThread.ManagedThread(
target=self.addDesiredMachine,
args=(machineId, ip, ports)
)
self.connectingThreads_.append(newThread)
self.connectingThreads_ = [x for x in self.connectingThreads_ if x.isAlive()]
newThread.start()
def addDesiredMachine(self, machineId, ip, ports):
tries = 0
while not self.tryOnWorkerAdd(machineId, ip, ports):
with self.lock_:
if machineId not in self.desiredMachines_:
return
if self.isTornDown_:
return
tries += 1
if tries > 4:
logging.critical("Failed to connect to worker %s %s times. Bailing", machineId, tries)
return
else:
time.sleep(1.0)
def tryOnWorkerAdd(self, machineId, ip, ports):
if self.isTornDown_:
return False
guid = HashNative.Hash.sha1(str(uuid.uuid4()))
try:
# TODO: get the number of cumulus ports from config
assert len(ports) == 2
channels = []
for i in range(2):
channel = self.connectToWorker(machineId, ip, ports[i], guid)
assert channel is not None
channels.append(channel)
logging.info("CumulusClient %s successfully connected to both channels of %s",
self.cumulusClientId,
machineId
)
with self.lock_:
if machineId not in self.desiredMachines_:
return False
with self.lock_:
if machineId in self.disconnectedMachines_:
return True
self.cumulusClient.addMachine(
machineId,
channels,
ModuleImporter.builtinModuleImplVal(),
self.callbackScheduler
)
self.connectedMachines_.add(machineId)
self.desiredMachines_.discard(machineId)
return True
except:
logging.error("Failed: %s", traceback.format_exc())
return False
def connectToWorker(self, machineId, ip, port, guid):
with self.lock_:
stringChannel = self.channelFactory_.createChannel((ip, port))
builtinsHash = ModuleImporter.builtinModuleImplVal().hash
clientId = self.cumulusClientId
callbackScheduler = self.callbackScheduler
logging.info(
"Client %s writing version message '%s' to %s",
clientId,
ufora.version,
machineId
)
stringChannel.write(ufora.version)
logging.info("Client %s writing client ID message to %s", clientId, machineId)
stringChannel.write(machineId.__getstate__())
logging.info("Client %s writing expected machineId message to %s", clientId, machineId)
stringChannel.write(
CumulusNative.CumulusClientOrMachine.Client(
clientId
).__getstate__()
)
logging.info("Client %s writing guid %s to %s", clientId, guid, machineId)
stringChannel.write(guid.__getstate__())
channelAsQueue = stringChannel.makeQueuelike(callbackScheduler)
msg = channelAsQueue.getTimeout(HANDSHAKE_TIMEOUT)
if msg is None:
logging.error(
"While attempting to add worker %s, CumulusClient %s did not " +
"receive a builtin hash message during handshake",
machineId,
clientId
)
assert msg is not None
logging.info("Client %s received serialized worker's builtin hash", clientId)
try:
workersBuiltinHash = HashNative.Hash(0)
workersBuiltinHash.__setstate__(msg)
except:
logging.info("Client received a bad worker hash: %s of size %s", repr(msg), len(msg))
raise
builtinsAgree = workersBuiltinHash == builtinsHash
if not builtinsAgree:
logging.critical("Could not connect CumulusClient %s to CumulusWorker %s as they " + \
"have different builtins; client's builtin hash: %s, worker's " + \
"builtin hash: %s. Disconnecting channel",
clientId,
machineId,
builtinsHash,
workersBuiltinHash
)
channelAsQueue.disconnect()
return None
return channelAsQueue
def onReconnectedToSharedState(self):
pass
def onWorkerDrop(self, machineIdAsString):
with self.lock_:
machineId = CumulusNative.MachineId(HashNative.Hash.stringToHash(machineIdAsString))
self.disconnectedMachines_.add(machineId)
if machineId in self.desiredMachines_:
self.desiredMachines_.discard(machineId)
if machineId not in self.connectedMachines_:
return
self.connectedMachines_.discard(machineId)
if len(self.connectedMachines_) == 0:
self.onMachineCountWentToZero()
self.cumulusClient.dropMachine(machineId)
| 8,356 |
code/align.py
|
ChenSh1ne/LoReAn
| 68 |
2171621
|
#!/usr/bin/env python3
"""
Copyright 2017 <NAME> (<EMAIL>)
https://github.com/rrwick/Porechop
Porechop makes use of C++ functions which are compiled in cpp_functions.so. This module uses ctypes
to wrap them in similarly named Python functions.
This file is part of Porechop. Porechop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by the Free Software Foundation,
either version 3 of the License, or (at your option) any later version. Porechop is distributed in
the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details. You should have received a copy of the GNU General Public License along with Porechop. If
not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
from ctypes import CDLL, cast, c_char_p, c_int, c_void_p
from multiprocessing.dummy import Pool as ThreadPool
import numpy as np
import tqdm
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
SO_FILE = 'cpp_functions.so'
SO_FILE_FULL = os.path.join(os.path.dirname(os.path.realpath(__file__)), SO_FILE)
if not os.path.isfile(SO_FILE_FULL):
sys.exit('could not find ' + SO_FILE + ' - please reinstall')
C_LIB = CDLL(SO_FILE_FULL)
C_LIB.adapterAlignment.argtypes = [c_char_p, # Read sequence
c_char_p, # Adapter sequence
c_int, # Match score
c_int, # Mismatch score
c_int, # Gap open score
c_int] # Gap extension score
C_LIB.adapterAlignment.restype = c_void_p # String describing alignment
# This function cleans up the heap memory for the C strings returned by the other C functions. It
# must be called after them.
C_LIB.freeCString.argtypes = [c_void_p]
C_LIB.freeCString.restype = None
def adapter_alignment(read_sequence, adapter_sequence, scoring_scheme_vals, alignm_score_value, out_filename, threads, min_length):
#print(read_sequence, adapter_sequence, scoring_scheme_vals, alignm_score_value, out_filename, threads,
# min_length)
"""
Python wrapper for adapterAlignment C++ function.
"""
alignm_score_value = int(alignm_score_value)
sys.stdout.write("### STARTING ADAPTER ALIGNMENT AND READS ORIENTATION ###\n")
list_adapter = []
list_run = []
for adapter in SeqIO.parse(adapter_sequence, "fasta"):
list_adapter.append(adapter)
record = SeqRecord(adapter.seq.reverse_complement(), id=adapter.id + "_rev")
list_adapter.append(record)
dict_aln = {}
for sequence in SeqIO.parse(read_sequence, "fasta"):
dict_aln[sequence.id] = ""
for adapter in list_adapter:
match_score = scoring_scheme_vals[0]
mismatch_score = scoring_scheme_vals[1]
gap_open_score = scoring_scheme_vals[2]
gap_extend_score = scoring_scheme_vals[3]
list_run.append([str(sequence.seq).encode('utf-8'), str(adapter.seq).encode('utf-8'), match_score,
mismatch_score, gap_open_score, gap_extend_score, sequence.id, adapter.id])
#print(dict_aln)
with ThreadPool(int(threads)) as pool:
for out in pool.imap(align, tqdm.tqdm(list_run)):
out_list = out.split(",")
#print(out_list)
if dict_aln[out_list[0]] != "":
if (float(out.split(",")[9])) > float(dict_aln[out_list[0]].split(",")[9]):
dict_aln[out.split(",")[0]] = out
else:
dict_aln[out.split(",")[0]] = out
good_reads = [float(dict_aln[key].split(",")[9]) for key in dict_aln if float(dict_aln[key].split(",")[9]) > 80]
if len(good_reads)/len(dict_aln) < 0.1:
sys.stdout.write("### THERE ARE FEW READS (<10%) THAT MATCH WITH THE ADAPTER SEQUENCE WITH A GOOD IDENITTY (>80%). SWITCHING TO NON-STRANDED MODE ###\n")
stranded_value = False
return (len(dict_aln), read_sequence, stranded_value)
else:
sys.stdout.write("### ABOUT " + str((len(dict_aln)/len(list_run))*100) + " MATCH TO AN ADAPTER ###\n")
stranded_value = True
if alignm_score_value == 0:
alignm_score_mean = np.mean([float(dict_aln[key].split(",")[9]) for key in dict_aln])
alignm_score_std = np.std([float(dict_aln[key].split(",")[9]) for key in dict_aln])
alignm_score_value = alignm_score_mean - (alignm_score_std/10)
#print(alignm_score_mean, alignm_score_std, alignm_score_value)
seq_to_keep = {}
for key in dict_aln:
if (float(dict_aln[key].split(",")[9])) > alignm_score_value:
seq_to_keep[key] = dict_aln[key]
#print (seq_to_keep[key])
with open(out_filename, "w") as output_handle:
for sequence in tqdm.tqdm(SeqIO.parse(read_sequence, "fasta")):
count = 0
if sequence.id in seq_to_keep:
if seq_to_keep[sequence.id].split(",")[1].endswith("rev"):
position = [seq_to_keep[sequence.id].split(",")[2], seq_to_keep[sequence.id].split(",")[3]]
seq = str(sequence.seq)
sequence_match = seq[int(position[0]):int(position[1])]
multiple_seq = seq.split(sequence_match)
full_multiple_seq_all = [seq_full for seq_full in multiple_seq if seq_full != ""]
full_multiple_seq = [seq_full for seq_full in full_multiple_seq_all if len(seq_full) > int(min_length)]
if len(full_multiple_seq) > 1:
for split_seq in full_multiple_seq:
count += 1
sequence_new = SeqRecord(Seq(split_seq), id=sequence.id, description="REV")
rev_seq = SeqRecord(sequence_new.seq.reverse_complement(), id=sequence.id + "_rev." + str(count))
SeqIO.write(rev_seq, output_handle, "fasta")
elif len(full_multiple_seq) == 1:
sequence_new = SeqRecord(Seq(full_multiple_seq[0]), id=sequence.id)
rev_seq = SeqRecord(sequence_new.seq.reverse_complement(), id=sequence.id + "_rev")
SeqIO.write(rev_seq, output_handle, "fasta")
else:
continue
else:
position = [seq_to_keep[sequence.id].split(",")[2], seq_to_keep[sequence.id].split(",")[3]]
seq = str(sequence.seq)
sequence_match = seq[int(position[0]):int(position[1])]
multiple_seq = seq.split(sequence_match)
full_multiple_seq_all = [seq_full for seq_full in multiple_seq if seq_full != ""]
full_multiple_seq = [seq_full for seq_full in full_multiple_seq_all if len(seq_full) > int(min_length)]
if len(full_multiple_seq) > 1:
for split_seq in full_multiple_seq:
count += 1
sequence_new = SeqRecord(Seq(split_seq), id=sequence.id + "." + str(count))
SeqIO.write(sequence_new, output_handle, "fasta")
elif len(full_multiple_seq) == 1:
sequence_new = SeqRecord(Seq(full_multiple_seq[0]), id=sequence.id)
SeqIO.write(sequence_new, output_handle, "fasta")
else:
continue
return (len(seq_to_keep), out_filename, stranded_value)
def align(command_in):
ptr = C_LIB.adapterAlignment(str(command_in[0]).encode('utf-8'), str(command_in[1]).encode('utf-8'),
command_in[2], command_in[3], command_in[4], command_in[5])
result_string = c_string_to_python_string(ptr)
single_result_string = result_string.split(",")
average_score = (float(single_result_string[5]) + float(single_result_string[6])) / 2
result_string_name = ",".join([command_in[6], command_in[7], result_string, str(average_score)])
return result_string_name
def c_string_to_python_string(c_string):
"""
This function casts a C string to a Python string and then calls a function to delete the C
string from the heap.
"""
python_string = cast(c_string, c_char_p).value.decode()
C_LIB.freeCString(c_string)
return python_string
#if __name__ == '__main__':
# scoring = [3, -6, -5, -2]
# alignm_score_value = ""
# adapter_alignment(*sys.argv[1:], scoring, alignm_score_value)
| 8,747 |
popol/jobs/saq/__init__.py
|
aprilahijriyan/popol
| 0 |
2171365
|
from typing import Any, Dict
from fastapi import FastAPI
from ...utils import get_settings
from .config import parse_config
from .queue import Queue
def setup(app: FastAPI, settings: Any = None) -> Dict[str, Queue]:
"""
Install the saq plugin to the app.
This will install 2 new attributes to `app.state` which are:
* `queue` - SAQ Queue (Default, if available)
* `queues` - All SAQ queues (dict type)
Args:
app: FastAPI app.
settings: The settings (can be pydantic.BaseSettings).
Returns:
Dict[str, Queue]: The SAQ queues.
"""
settings = get_settings(app, settings)
queue_maps, _ = parse_config(settings)
app.state.queue = queue_maps.get("default")
app.state.queues = queue_maps
return queue_maps
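# A brief usage sketch (hypothetical settings object; parse_config defines the
# exact shape it expects):
#
#   app = FastAPI()
#   queues = setup(app, settings=my_settings)
#   default_queue = app.state.queue   # same as queues.get("default")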
| 782 |
linkedin_api/settings.py
|
gazer99/linkedin-api-1
| 655 |
2171165
|
import os
from pathlib import Path
HOME_DIR = str(Path.home())
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
LINKEDIN_API_USER_DIR = os.path.join(HOME_DIR, ".linkedin_api/")
COOKIE_PATH = os.path.join(LINKEDIN_API_USER_DIR, "cookies/")
| 245 |
awe/users/models.py
|
Awesomebug95/aweum
| 0 |
2170165
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.db.models import *
class User(AbstractUser):
"""
Model User.
"""
phone = CharField(
max_length=15,
unique=True,
)
telegram = CharField(
max_length=50,
unique=True
)
country = CharField(
max_length=50
)
photo = ImageField(
blank=True,
upload_to='users/'
)
# TODO: create post, departament, groups.
class Meta:
        verbose_name = 'User'
        verbose_name_plural = "Users"
ordering = ('last_name',)
def __str__(self):
return f'{self.last_name} {self.first_name}'
| 708 |
graphPartitioning.py
|
khushhallchandra/Simulated-Annealing
| 9 |
2171367
|
import matplotlib.pyplot as plt
import random
import math
import numpy as np
import time
import copy
INF = 10000
alpha = 0.999
threshold = 0.01
# we want to initialize the structure,
# i.e. split the k nodes into two starting partitions
def initialize(k):
v1=[i for i in range(1,int(k/2))]
v2=[i for i in range(int(k/2),k+1)]
return v1,v2
# Suppose you are in any state say S, then you can go to
# some other state S'
def findNeighbourState(v1,v2):
# We will follow these two protocols, each 50% of time
# 1. swap positions of any two elements
# 2. Move any element fom v1/v2 to v2/v1
l1 = len(v1)
l2 = len(v2)
tempv1 = copy.deepcopy(v1)
tempv2 = copy.deepcopy(v2)
if(l1==0):
j = random.randint(0,l2-1)
temp = tempv2[j]
tempv2.remove(temp)
tempv1.append(temp)
return tempv1, tempv2
if(l2 == 0):
i = random.randint(0,l1-1)
temp = tempv1[i]
tempv1.remove(temp)
tempv2.append(temp)
return tempv1, tempv2
if(np.random.random()<0.5):
i = random.randint(0,l1-1)
j = random.randint(0,l2-1)
temp = tempv1[i]
tempv1[i] = tempv2[j]
tempv2[j] = temp
else:
choose = random.randint(1,2)
if(choose ==1 ):
i = random.randint(0,l1-1)
temp = tempv1[i]
tempv1.remove(temp)
tempv2.append(temp)
else:
j = random.randint(0,l2-1)
temp = tempv2[j]
tempv2.remove(temp)
tempv1.append(temp)
return tempv1, tempv2
# This will return the updated temperature
# according to some annealing schedule
def updateTemp(T):
return alpha*T
# This function finds the total wirelength
# V1 and V2 is list
def cost(v1, v2, connections):
distance = 0
l1 = len(v1)
l2 = len(v2)
for i in v1:
for j in v2:
if(i<j):
if(connections[i,j] == 1):
distance+=1
#print i,j
else:
if(connections[j,i] == 1):
distance+=1
#print i,j
c = distance + (0.5*(l1-l2)**2)
    print(v1, v2, c)
return (c)
def annealing(k, connections):
T = INF
# We initialize the the two partitions
v1,v2 = initialize(k)
minCost = cost(v1,v2,connections)
print "Initial Cost",minCost
    tic = time.perf_counter()  # time.clock() was removed in Python 3.8
# No. of interation at each temperature
# No. of temperature points to try
while(T > threshold):
tempv1,tempv2 = findNeighbourState(v1,v2)
tempCost = cost(tempv1, tempv2, connections)
delta = tempCost - minCost
if (delta<0):
v1 = tempv1
v2 = tempv2
minCost = tempCost
else:
p = np.exp(-delta / T)
if(np.random.random()<p):
v1 = tempv1
v2 = tempv2
minCost = tempCost
T = updateTemp(T)
return v1,v2,minCost, tic
def create(k) :
#we want k nodes to be connected to each other
connections = np.zeros([k+1,k+1])
ii = 0
while ii < k+1:
i = int(random.randint(1,k))
j = int(random.randint(1,k))
        if (i > j) and (connections[j, i] == 0):
connections[j,i] = 1
ii = ii+1
print (j,i)
        if (i < j) and (connections[i, j] == 0):
connections[i,j] = 1
ii = ii+1
print (i,j)
return connections
def mainrun(auto):
if auto == 1:
k = 6
else :
k = auto
# connections = create(k)
# we can create random connections
connections = np.zeros([k+1,k+1])
connections[1,3] = 1
connections[2,3] = 1
connections[4,5] = 1
connections[1,2] = 1
connections[3,4] = 1
connections[4,6] = 1
connections[5,6] = 1
if auto != 1:
connections = create(k)
v1,v2,minCost, tic = annealing(k, connections)
    toc = time.perf_counter()
tim = toc - tic
print "time taken ", tim
print "Final Cost", minCost
print v1
print v2
| 3,816 |
examples/template.py
|
ph4r05/LL-Smartcard
| 0 |
2171029
|
"""
This a nice simple reference implementation when creating new smartcard
programs using the LL-Smartcard API
"""
# Native
import logging
import optparse
# LL Smartcard
from llsmartcard.card import SmartCard, VisaCard, CAC
def process_card(connection, options):
"""
Implement your function here
"""
# Open card
card = SmartCard(connection)
#
# DO SOMETHING HERE
#
if __name__ == "__main__":
# Import our command line parser
from llsmartcard import parser
opts = optparse.OptionParser()
# Add any options we want here
opts.add_option("-s", "--sample", action="store_true",
dest="sample", default=False,
help="Sample")
# parse user arguments
parser.command_line(opts, process_card)
| 787 |
Ekeopara_Praise/Phase 2/STRINGS/Day33 Tasks/Task1.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
| 6 |
2171487
|
'''1. Write a Python program to strip a set of characters from a string.'''
def strip_chars(text, chars):
    return "".join(c for c in text if c not in chars)
print("\nOriginal String: ")
print("The quick brown fox jumps over the lazy dog.")
print("After stripping a,e,i,o,u")
print(strip_chars("The quick brown fox jumps over the lazy dog.", "aeiou"))
print()
#Reference: w3resource
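# An equivalent one-liner (a brief sketch) using str.translate, which is usually
# faster for long strings:
#
#   "The quick brown fox jumps over the lazy dog.".translate(str.maketrans("", "", "aeiou"))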
| 391 |
2015/2/sol.py
|
spencerteiknsmith/advent-of-code
| 0 |
2171523
|
from sys import stdin
def solve():
s = 0
r = 0
for l in lines():
l, w, h = map(int, l.split('x'))
ars = [l * w, w * h, h * l]
s += (2 * sum(ars) + min(ars))
r += (sum([l, w, h]) - max([l, w, h])) * 2 + l * w * h
print(s)
print(r)
def lines():
return stdin.read().strip().split('\n')
if __name__ == '__main__':
solve()
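# Worked example from the puzzle statement: a "2x3x4" present needs
# 2*(6+12+8) + 6 = 58 square feet of paper and 2+2+3+3 + 2*3*4 = 34 feet of ribbon.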
| 393 |
desafio 020.py
|
KelvinAraujo/Python
| 0 |
2170448
|
import random
n1 = str(input('\033[35m First name: '))
n2 = str(input('Second name: '))
n3 = str(input('Third name: '))
n4 = str(input('Fourth name: '))
lista = [n1,n2,n3,n4]
random.shuffle(lista)
print('\033[31m The presentation order will be')
print(lista)
| 264 |
beanstool/csmunews.py
|
oxygen-TW/Beans-LineBot
| 0 |
2170922
|
from bs4 import BeautifulSoup
import requests
import re
import datetime
class CSMUNews():
def __MessageRegex(self, part):
result = re.findall(r">(.+)<\/td>", str(part))
result[0] = re.findall(r">(.+)<", result[0])[0]
result[2] = re.findall(r"href=\"(.+)\">(.+)<", result[2])[0]
return result
def __GenerateFormatTime(self):
datetimeYesterday = datetime.date.today() + datetime.timedelta(days=-1)
year = str(int(datetimeYesterday.strftime("%Y")) - 1911)
date = year + datetimeYesterday.strftime("/%m/%d")
return date
def getMsg(self):
url = "http://message.csmu.edu.tw/main2List.asp"
baseurl = "https://message.csmu.edu.tw/"
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
rr = soup.find_all("tr",class_="whitetablebg")
date = self.__GenerateFormatTime()
msg = date+" 中山醫大校園公告 \n"
# ^m ^x ^p ^|^i ^v ^h ^a ^z^dflag
haveNews = False
for item in rr:
            # keep only announcements posted on the target date
if(date in str(item)):
haveNews = True
news = self.__MessageRegex(item)
msg += ("\n" + news[2][1]+"\n")
msg += ("\n類別:" + news[0])
msg += ("\n單位:" + news[3])
msg += ("\n網址:" + baseurl + news[2][0])
msg += "\n\n-------------------------------------\n"
if(haveNews):
return msg
else:
return "今日無最新消息"
if __name__ == "__main__":
pass
| 1,611 |
zksync/sdk/musig/schnorr_musig.py
|
zksync-sdk/schnorr-musig-sdk-python
| 1 |
2170567
|
from typing import List
from typing import Union
from _functools import reduce
from zksync.sdk.musig.schnorr_musig_error import SchnorrMusigError
from zksync.sdk.musig.schnorr_musig_native import SchnorrMusigLoader
from zksync.sdk.musig.schnorr_musig_signer import AggregatedPublicKey
from zksync.sdk.musig.schnorr_musig_signer import AggregatedPublicKeyPointer
from zksync.sdk.musig.schnorr_musig_signer import MusigRes
from zksync.sdk.musig.schnorr_musig_signer import SchnorrMusigSigner
class SchnorrMusig:
def __init__(self) -> None:
self.native = SchnorrMusigLoader.load()
def create_signer(self, public_keys: List[bytes], position: int) -> SchnorrMusigSigner:
encoded_public_keys = reduce(lambda x, y: x + y, public_keys)
signer = self.native.schnorr_musig_new_signer(encoded_public_keys, len(encoded_public_keys), position)
return SchnorrMusigSigner(self, signer, public_keys)
def verify(self, message: bytes, signature: bytes, public_keys: Union[bytes, List[bytes]]) -> bool:
        if isinstance(public_keys, list):
encoded_public_keys = reduce(lambda x, y: x + y, public_keys)
else:
encoded_public_keys = public_keys
code = self.native.schnorr_musig_verify(message, len(message), encoded_public_keys, len(encoded_public_keys),
signature, len(signature))
if code == MusigRes.OK:
return True
elif code == MusigRes.SIGNATURE_VERIFICATION_FAILED:
return False
else:
raise SchnorrMusigError(code)
def aggregate_public_keys(self, *public_keys: bytes) -> bytes:
encoded_public_keys = reduce(lambda x, y: x + y, public_keys)
aggregated_public_key = AggregatedPublicKey()
code = self.native.schnorr_musig_aggregate_pubkeys(encoded_public_keys, len(encoded_public_keys),
AggregatedPublicKeyPointer(aggregated_public_key))
if code != MusigRes.OK:
raise SchnorrMusigError(code)
return bytes(aggregated_public_key.data)
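# A hedged usage sketch (hypothetical packed public keys; their exact byte
# encoding is defined by the native library loaded above):
#
#   musig = SchnorrMusig()
#   signer = musig.create_signer([pubkey0, pubkey1], position=0)
#   ok = musig.verify(message, signature, [pubkey0, pubkey1])
#   combined = musig.aggregate_public_keys(pubkey0, pubkey1)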
| 2,133 |
build/Glyph Nanny.roboFontExt/lib/glyphNanny/defaultsWindow.py
|
typesupply/glyph-nanny
| 36 |
2171180
|
import ezui
from mojo.events import postEvent
from . import defaults
from .testTabs import makeTestsTableDescription
class GlyphNannyDefaultsWindow(ezui.WindowController):
def build(self):
# Live Report
liveReportCheckboxDescription = dict(
identifier="liveReport",
type="Checkbox",
text="Show Live Report",
value=defaults.getDisplayLiveReport()
)
# Test During Drag
testDuringDragCheckboxDescription = dict(
identifier="testDuringDrag",
type="Checkbox",
text="Test During Drag",
value=defaults.getTestDuringDrag()
)
# Tests
testsTableDescription = makeTestsTableDescription()
# Colors
informationColorWell = dict(
identifier="informationColor",
type="ColorWell",
width=70,
height=25,
color=tuple(defaults.getColorInform())
)
reviewColorWell = dict(
identifier="reviewColor",
type="ColorWell",
width=70,
height=25,
color=tuple(defaults.getColorReview())
)
insertColorWell = dict(
identifier="insertColor",
type="ColorWell",
width=70,
height=25,
color=tuple(defaults.getColorInsert())
)
removeColorWell = dict(
identifier="removeColor",
type="ColorWell",
width=70,
height=25,
color=tuple(defaults.getColorRemove())
)
rowDescriptions = [
dict(
itemDescriptions=[
informationColorWell,
dict(
type="Label",
text="Information"
)
]
),
dict(
itemDescriptions=[
reviewColorWell,
dict(
type="Label",
text="Review Something"
)
]
),
dict(
itemDescriptions=[
insertColorWell,
dict(
type="Label",
text="Insert Something"
)
]
),
dict(
itemDescriptions=[
removeColorWell,
dict(
type="Label",
text="Remove Something"
)
]
),
]
columnDescriptions = [
dict(
width=70
),
{}
]
colorsGridDescription = dict(
identifier="colors",
type="Grid",
rowDescriptions=rowDescriptions,
columnPlacement="leading",
rowPlacement="center"
)
# Titles
reportTitlesCheckboxDescription = dict(
identifier="reportTitles",
type="Checkbox",
text="Show Titles",
value=defaults.getDisplayTitles()
)
windowContent = dict(
identifier="defaultsStack",
type="VerticalStack",
contentDescriptions=[
liveReportCheckboxDescription,
testDuringDragCheckboxDescription,
testsTableDescription,
colorsGridDescription,
reportTitlesCheckboxDescription,
],
spacing=15
)
windowDescription = dict(
type="Window",
size=(270, "auto"),
title="Glyph Nanny Preferences",
contentDescription=windowContent
)
self.w = ezui.makeItem(
windowDescription,
controller=self
)
def started(self):
self.w.open()
def defaultsStackCallback(self, sender):
values = sender.get()
defaults.setColorInform(values["colors"]["informationColor"])
defaults.setColorReview(values["colors"]["reviewColor"])
defaults.setColorInsert(values["colors"]["insertColor"])
defaults.setColorRemove(values["colors"]["removeColor"])
defaults.setDisplayLiveReport(values["liveReport"])
defaults.setTestDuringDrag(values["testDuringDrag"])
defaults.setDisplayTitles(values["reportTitles"])
for testItem in values["testStates"]:
if isinstance(testItem, ezui.TableGroupRow):
continue
defaults.setTestState(
testItem["identifier"],
testItem["state"]
)
postEvent(
defaults.defaultKeyStub + ".defaultsChanged"
)
haveShownTestDuringDragNote = False
def testDuringDragCallback(self, sender):
if not self.haveShownTestDuringDragNote:
self.showMessage(
"This change will take effect after RoboFont is restarted.",
"You'll have to restart RoboFont yourself."
)
self.haveShownTestDuringDragNote = True
stack = self.w.findItem("defaultsStack")
self.defaultsStackCallback(stack)
| 5,293 |
quest/game/services/mouse_service.py
|
KrazyLama789/cse210-6
| 0 |
2170941
|
class MouseService:
"""A mouse service inteface."""
def get_coordinates(self):
"""Gets the current mouse coordinates as a Point.
Returns:
Point: An instance of the quest.casting.Point class.
"""
raise NotImplementedError("not implemented in base class")
def has_mouse_moved(self):
"""Whether or not the mouse has moved since the last frame.
Returns:
True if the mouse moved; false if otherwise.
"""
raise NotImplementedError("not implemented in base class")
def is_button_down(self, button):
"""Detects if the given button is pressed.
Args:
button: A string containing the button value, e.g. 'left', 'right' or 'middle'.
Returns:
True if the button is pressed; false if otherwise.
"""
raise NotImplementedError("not implemented in base class")
def is_button_pressed(self, button):
"""Detects if the given button was pressed once.
Args:
button: A string containing the button value, e.g. 'left', 'right' or 'middle'.
Returns:
True if the button was pressed once; false if otherwise.
"""
raise NotImplementedError("not implemented in base class")
def is_button_released(self, button):
"""Detects if the given button was released once.
Args:
button: A string containing the button value, e.g. 'left', 'right' or 'middle'.
Returns:
True if the button was released once; false if otherwise.
"""
raise NotImplementedError("not implemented in base class")
def is_button_up(self, button):
"""Detects if the given button is released.
Args:
button: A string containing the button value, e.g. 'left', 'right' or 'middle'.
Returns:
True if the button is released; false if otherwise.
"""
raise NotImplementedError("not implemented in base class")
| 2,082 |
libs/python/AStar.py
|
vandersonmr/A_Star_Algorithm
| 52 |
2169223
|
#!/usr/bin/python
# Created by <NAME>. <<EMAIL>>
# <NAME>
from collections import deque
class AStar:
def distBetween(self,current,neighbor):
pass
def heuristicEstimate(self,start,goal):
pass
def neighborNodes(self,current):
pass
def reconstructPath(self,cameFrom,goal):
path = deque()
node = goal
path.appendleft(node)
while node in cameFrom:
node = cameFrom[node]
path.appendleft(node)
return path
def getLowest(self,openSet,fScore):
lowest = float("inf")
lowestNode = None
for node in openSet:
if fScore[node] < lowest:
lowest = fScore[node]
lowestNode = node
return lowestNode
def aStar(self,start,goal):
cameFrom = {}
openSet = set([start])
closedSet = set()
gScore = {}
fScore = {}
gScore[start] = 0
fScore[start] = gScore[start] + self.heuristicEstimate(start,goal)
while len(openSet) != 0:
current = self.getLowest(openSet,fScore)
if current == goal:
return self.reconstructPath(cameFrom,goal)
openSet.remove(current)
closedSet.add(current)
for neighbor in self.neighborNodes(current):
tentative_gScore = gScore[current] + self.distBetween(current,neighbor)
if neighbor in closedSet and tentative_gScore >= gScore[neighbor]:
continue
if neighbor not in closedSet or tentative_gScore < gScore[neighbor]:
cameFrom[neighbor] = current
gScore[neighbor] = tentative_gScore
fScore[neighbor] = gScore[neighbor] + self.heuristicEstimate(neighbor,goal)
if neighbor not in openSet:
openSet.add(neighbor)
return 0
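# A minimal concrete subclass sketch (hypothetical 4-connected grid with unit
# step cost and a Manhattan-distance heuristic):
#
#   class GridAStar(AStar):
#       def __init__(self, walls):
#           self.walls = walls   # set of blocked (x, y) cells
#       def distBetween(self, current, neighbor):
#           return 1
#       def heuristicEstimate(self, start, goal):
#           return abs(start[0] - goal[0]) + abs(start[1] - goal[1])
#       def neighborNodes(self, current):
#           x, y = current
#           cells = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
#           return [c for c in cells if c not in self.walls]
#
#   path = GridAStar(walls=set()).aStar((0, 0), (2, 2))   # deque of cells, or 0 if unreachable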
| 1,944 |