max_stars_repo_path (string, 4-182 chars) | max_stars_repo_name (string, 6-116 chars) | max_stars_count (int64, 0-191k) | id (string, 7 chars) | content (string, 100-10k chars) | size (int64, 100-10k) |
---|---|---|---|---|---|
python/frontend/cirrus/tests/test_handler.py | FTWH/cirrus | 116 | 2023556 |
"""Tests that the Lambda request handler works as expected, at least in the
local environment.
"""
import textwrap
import os
import tempfile
import logging
import sys
from cirrus import handler
PS_IP = "127.0.0.1"
PS_PORT = 1543
def test_registration_success():
"""Test that the request handler runs when registration succeeds.
"""
assert _run_test(True)["body"] == "Success."
def test_registration_failure():
"""Test that the request handler exits when registration fails.
"""
assert _run_test(False)["body"] == "Registration failure."
def _run_test(registration_result):
old_config_path = handler.CONFIG_PATH
old_register_task_id = handler.register_task_id
old_executable_name = handler.EXECUTABLE_NAME
old_task_root = os.environ.get("LAMBDA_TASK_ROOT")
def register(*args):
return registration_result
handler.register_task_id = register
with tempfile.NamedTemporaryFile() as config_file:
with tempfile.NamedTemporaryFile() as executable:
with tempfile.NamedTemporaryFile() as flag:
handler.CONFIG_PATH = config_file.name
handler.EXECUTABLE_NAME = os.path.basename(executable.name)
os.environ["LAMBDA_TASK_ROOT"] = os.path.dirname(executable.name)
# TODO: This doesn't work unless using "sh".
executable.write(textwrap.dedent("""
#!/usr/bin/env python2
import time
time.sleep(2)"""
))
executable.flush()  # make sure the script body is actually on disk before it runs
os.chmod(executable.name, 0o777)  # octal permissions; 0x777 set the wrong mode bits
event = {
"log_level": "DEBUG",
"worker_id": 5,
"ps_ip": PS_IP,
"ps_port": PS_PORT,
"num_workers": 10,
"config": "config bla"
}
class Context:
function_name = "foo"
function_version = "2"
log_stream_name = "bla stream"
log_group_name = "bla group"
aws_request_id = "request bla"
memory_limit_in_mb = "512"
def get_remaining_time_in_millis(self):
return 10000
result = handler.run(event, Context())
handler.CONFIG_PATH = old_config_path
handler.register_task_id = old_register_task_id
handler.EXECUTABLE_NAME = old_executable_name
if old_task_root is not None:
os.environ["LAMBDA_TASK_ROOT"] = old_task_root
return result
if __name__ == "__main__":
log = logging.getLogger("cirrus")
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
print("===== REGISTRATION SUCCESS =====")
test_registration_success()
print("")
print("===== REGISTRATION FAILURE =====")
test_registration_failure()
| 2,915 |
test/test_credential_handler.py | olasirtep/ahjo | 0 | 2023391 |
from base64 import b64encode
from os import remove
import pytest
import ahjo.credential_handler as ahjo
def obfuscate(stng):
return b64encode(stng.encode()).decode()
@pytest.fixture(scope="function")
def execute_get_credentials_with_varying_input(tmp_path, monkeypatch):
"""First, reset global variable of Ahjo's credential_handler module.
If this is not done, tests will affect one another.
Second, create username/password files with given content and store
file paths to list created_records.
Third, set username/password input with monkeypatch.
Fourth, execute ahjo.get_credentials with created username/password file
paths as parameters (None if no file created).
Fifth, reset global variable of Ahjo's credential_handler module.
If this is not done, tests will affect one another.
Finally, delete created username/password files.
"""
# reset global variable
ahjo.cred_dict = {}
created_records = []
def get_credentials(usrn_file_name, usrn_file_content, pw_file_name, pw_file_content, usrn_input, pw_input, ask_pw=True):
# create username file if file name and content given
usrn_file_path = None
if usrn_file_name and usrn_file_content is not None:
usrn_file_path = tmp_path / usrn_file_name
usrn_file_path.write_text(
f"cred={usrn_file_content}", encoding="utf-8")
created_records.append(str(usrn_file_path))
# create password file if file name and content given
pw_file_path = None
if pw_file_name and pw_file_content is not None:
pw_file_path = tmp_path / pw_file_name
pw_file_path.write_text(
f"cred={pw_file_content}", encoding="utf-8")
created_records.append(str(pw_file_path))
# monkeypatch username and password input
monkeypatch.setattr('builtins.input', lambda x: usrn_input)
monkeypatch.setattr('getpass.getpass', lambda x: pw_input)
if ask_pw:
return ahjo.get_credentials(usrn_file_path=usrn_file_path, pw_file_path=pw_file_path)
else:
return ahjo.get_credentials(usrn_file_path=usrn_file_path, pw_file_path=pw_file_path, pw_prompt=None)
yield get_credentials
# reset global variable
ahjo.cred_dict = {}
# executed regardless of the test result
for record in created_records:
remove(record)
def test_credentials_should_be_read_from_file_when_both_paths_given(execute_get_credentials_with_varying_input):
"""Both files given - read credentials from files.
input: file_path, "cred=USER1", file_path, "cred=PASSWORD1", None, None
output: ("USER1", "<PASSWORD>")
"""
testinput = ("usrn1.txt", "USER1", "pw1.txt",
obfuscate("PASSWORD1"), None, None)
assert ("USER1", "<PASSWORD>") == execute_get_credentials_with_varying_input(
*testinput)
def test_credentials_should_be_asked_when_username_file_not_given(execute_get_credentials_with_varying_input):
"""Username file not given - ask credentials from user.
input: None, None, file_path, "cred=PASSWORD", "USER2", "<PASSWORD>"
output: ("USER2", "<PASSWORD>")
"""
testinput = (None, None, "pw2.txt", obfuscate(
"PASSWORD"), "USER2", "<PASSWORD>")
assert ("USER2", "<PASSWORD>") == execute_get_credentials_with_varying_input(
*testinput)
def test_credentials_should_be_asked_when_password_file_not_given(execute_get_credentials_with_varying_input):
"""Password file not given - ask credentials from user.
input: file_path, "cred=USER", None, None, "USER3", "<PASSWORD>"
output: ("USER3", "<PASSWORD>")
"""
testinput = ("usrn3.txt", "USER", None, None, "USER3", "<PASSWORD>")
assert ("USER3", "<PASSWORD>") == execute_get_credentials_with_varying_input(
*testinput)
def test_credentials_should_be_asked_when_both_files_not_given(execute_get_credentials_with_varying_input):
"""Username and password files not given - ask credentials from user.
input: None, None, None, None, "USER4", "<PASSWORD>"
output: ("USER4", "PASSWORD4")
"""
testinput = (None, None, None, None, "USER4", "PASSWORD4")
assert ("USER4", "PASSWORD4") == execute_get_credentials_with_varying_input(
*testinput)
def test_windows_authentication_from_files_should_return_empty_strings(execute_get_credentials_with_varying_input):
"""Windows authentication from files - return tuple of empty strings.
input: file_path, "cred=", file_path, "cred=", None, None
output: ("", "")
"""
testinput = ("usrn4.txt", "", "pw4.txt", obfuscate(""), None, None)
assert ("", "") == execute_get_credentials_with_varying_input(*testinput)
def test_windows_authentication_from_input_should_return_empty_strings(execute_get_credentials_with_varying_input):
"""Windows authentication from user input - return tuple of empty strings.
input: None, None, None, None, "", ""
output: ("", "")
"""
testinput = (None, None, None, None, "", "")
assert ("", "") == execute_get_credentials_with_varying_input(*testinput)
def test_input_should_be_asked_when_file_missing(execute_get_credentials_with_varying_input):
"""File paths given but files do not exist - ask credentials from user.
input: file_path, None, file_path, None, "USER5", "PASSWORD5"
output: ("USER5", "PASSWORD5")
"""
testinput = ("usrn5.txt", None, "pw5.txt", None, "USER5", "<PASSWORD>")
assert ("USER5", "<PASSWORD>") == execute_get_credentials_with_varying_input(
*testinput)
def test_password_should_not_be_asked_if_pw_prompt_is_none(execute_get_credentials_with_varying_input):
"""Parameter value pw_prompt=None -> do not ask for password -> password is empty string.
input: file_path, "cred=USER", None, None, "USER3", "<PASSWORD>"
output: ("USER3", "")
"""
testinput = ("usrn3.txt", "USER", None, None, "USER3", "<PASSWORD>", False)
assert ("USER3", "") == execute_get_credentials_with_varying_input(*testinput)
| 6,077 |
common/bot_wrapper.py | namuan/trading-utils | 36 | 2024304 |
from telegram import Update
def start(update: Update, _) -> None:
update.message.reply_text("👋 Enter a stock ticker with a $ sign. Eg: $TSLA")
def help_command(update: Update, _) -> None:
update.message.reply_text("Help!")
| 235 |
elmoformanylangs/modules/classify_layer.py | Sy-Zhang/ELMoForManyLangs | 1,414 | 2024300 |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class SoftmaxLayer(nn.Module):
""" Naive softmax-layer """
def __init__(self, output_dim, n_class):
"""
:param output_dim: int
:param n_class: int
"""
super(SoftmaxLayer, self).__init__()
self.hidden2tag = nn.Linear(output_dim, n_class)
self.criterion = nn.CrossEntropyLoss(size_average=False)
def forward(self, x, y):
"""
:param x: torch.Tensor
:param y: torch.Tensor
:return:
"""
tag_scores = self.hidden2tag(x)
return self.criterion(tag_scores, y)
class SampledSoftmaxLayer(nn.Module):
"""
"""
def __init__(self, output_dim, n_class, n_samples, use_cuda):
"""
:param output_dim:
:param n_class:
:param n_samples:
:param use_cuda:
"""
super(SampledSoftmaxLayer, self).__init__()
self.n_samples = n_samples
self.n_class = n_class
self.use_cuda = use_cuda
self.criterion = nn.CrossEntropyLoss(size_average=False)
self.negative_samples = []
self.word_to_column = {0: 0}
self.all_word = []
self.all_word_to_column = {0: 0}
self.column_emb = nn.Embedding(n_class, output_dim)
self.column_emb.weight.data.uniform_(-0.25, 0.25)
self.column_bias = nn.Embedding(n_class, 1)
self.column_bias.weight.data.uniform_(-0.25, 0.25)
self.oov_column = nn.Parameter(torch.Tensor(output_dim, 1))
self.oov_column.data.uniform_(-0.25, 0.25)
def forward(self, x, y):
if self.training:
for i in range(y.size(0)):
y[i] = self.word_to_column.get(y[i].tolist())
samples = torch.LongTensor(len(self.word_to_column)).fill_(0)
for word in self.negative_samples:
samples[self.word_to_column[word]] = word
else:
for i in range(y.size(0)):
y[i] = self.all_word_to_column.get(y[i].tolist(), 0)
samples = torch.LongTensor(len(self.all_word_to_column)).fill_(0)
for word in self.all_word:
samples[self.all_word_to_column[word]] = word
if self.use_cuda:
samples = samples.cuda()
tag_scores = (x.matmul(self.embedding_matrix)).view(y.size(0), -1) + \
(self.column_bias.forward(samples)).view(1, -1)
return self.criterion(tag_scores, y)
def update_embedding_matrix(self):
word_inp, chars_inp = [], []
if self.training:
columns = torch.LongTensor(len(self.negative_samples) + 1)
samples = self.negative_samples
for i, word in enumerate(samples):
columns[self.word_to_column[word]] = word
columns[0] = 0
else:
columns = torch.LongTensor(len(self.all_word) + 1)
samples = self.all_word
for i, word in enumerate(samples):
columns[self.all_word_to_column[word]] = word
columns[0] = 0
if self.use_cuda:
columns = columns.cuda()
self.embedding_matrix = self.column_emb.forward(columns).transpose(0, 1)
def update_negative_samples(self, word_inp, chars_inp, mask):
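# (added note) First pass collects the word ids present in this batch; second
# pass registers unseen words in the full vocabulary maps and keeps a bounded
# pool of up to n_samples negative samples, evicting the oldest pooled word
# that is not in the current batch once the pool is full.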
batch_size, seq_len = word_inp.size(0), word_inp.size(1)
in_batch = set()
for i in range(batch_size):
for j in range(seq_len):
if mask[i][j] == 0:
continue
word = word_inp[i][j].tolist()
in_batch.add(word)
for i in range(batch_size):
for j in range(seq_len):
if mask[i][j] == 0:
continue
word = word_inp[i][j].tolist()
if word not in self.all_word_to_column:
self.all_word.append(word)
self.all_word_to_column[word] = len(self.all_word_to_column)
if word not in self.word_to_column:
if len(self.negative_samples) < self.n_samples:
self.negative_samples.append(word)
self.word_to_column[word] = len(self.word_to_column)
else:
while self.negative_samples[0] in in_batch:
self.negative_samples = self.negative_samples[1:] + [self.negative_samples[0]]
self.word_to_column[word] = self.word_to_column.pop(self.negative_samples[0])
self.negative_samples = self.negative_samples[1:] + [word]
class CNNSoftmaxLayer(nn.Module):
def __init__(self, token_embedder, output_dim, n_class, n_samples, corr_dim, use_cuda):
super(CNNSoftmaxLayer, self).__init__()
self.token_embedder = token_embedder
self.n_samples = n_samples
self.use_cuda = use_cuda
self.criterion = nn.CrossEntropyLoss(size_average=False)
self.negative_samples = []
self.word_to_column = {0: 0}
self.all_word = []
self.all_word_to_column = {0: 0}
self.M = nn.Parameter(torch.Tensor(output_dim, corr_dim))
stdv = 1. / math.sqrt(self.M.size(1))
self.M.data.uniform_(-stdv, stdv)
self.corr = nn.Embedding(n_class, corr_dim)
self.corr.weight.data.uniform_(-0.25, 0.25)
self.oov_column = nn.Parameter(torch.Tensor(output_dim, 1))
self.oov_column.data.uniform_(-0.25, 0.25)
def forward(self, x, y):
if self.training:
for i in range(y.size(0)):
y[i] = self.word_to_column.get(y[i].tolist())
samples = torch.LongTensor(len(self.word_to_column)).fill_(0)
for package in self.negative_samples:
samples[self.word_to_column[package[0]]] = package[0]
else:
for i in range(y.size(0)):
y[i] = self.all_word_to_column.get(y[i].tolist(), 0)
samples = torch.LongTensor(len(self.all_word_to_column)).fill_(0)
for package in self.all_word:
samples[self.all_word_to_column[package[0]]] = package[0]
if self.use_cuda:
samples = samples.cuda()
tag_scores = (x.matmul(self.embedding_matrix)).view(y.size(0), -1) + \
(x.matmul(self.M).matmul(self.corr.forward(samples).transpose(0, 1))).view(y.size(0), -1)
return self.criterion(tag_scores, y)
def update_embedding_matrix(self):
batch_size = 2048
word_inp, chars_inp = [], []
if self.training:
sub_matrices = [self.oov_column]
samples = self.negative_samples
id2pack = {}
for i, package in enumerate(samples):
id2pack[self.word_to_column[package[0]]] = i
else:
sub_matrices = [self.oov_column]
samples = self.all_word
id2pack = {}
for i, package in enumerate(samples):
id2pack[self.all_word_to_column[package[0]]] = i
for i in range(len(samples)):
# [n_samples, 1], [n_samples, 1, x], [n_samples, 1]
word_inp.append(samples[id2pack[i + 1]][0])
chars_inp.append(samples[id2pack[i + 1]][1])
if len(word_inp) == batch_size or i == len(samples) - 1:
sub_matrices.append(self.token_embedder.forward(torch.LongTensor(word_inp).view(len(word_inp), 1),
None if chars_inp[0] is None else torch.LongTensor(chars_inp).view(len(word_inp), 1, len(package[1])),
(len(word_inp), 1)).squeeze(1).transpose(0, 1))
if not self.training:
sub_matrices[-1] = sub_matrices[-1].detach()
word_inp, chars_inp = [], []
sum = 0
for mat in sub_matrices:
sum += mat.size(1)
#print(sum, len(self.word_to_column))
self.embedding_matrix = torch.cat(sub_matrices, dim=1)
def update_negative_samples(self, word_inp, chars_inp, mask):
batch_size, seq_len = word_inp.size(0), word_inp.size(1)
in_batch = set()
for i in range(batch_size):
for j in range(seq_len):
if mask[i][j] == 0:
continue
word = word_inp[i][j].tolist()
in_batch.add(word)
for i in range(batch_size):
for j in range(seq_len):
if mask[i][j] == 0:
continue
package = (word_inp[i][j].tolist(), None if chars_inp is None else chars_inp[i][j].tolist())
if package[0] not in self.all_word_to_column:
self.all_word.append(package)
self.all_word_to_column[package[0]] = len(self.all_word_to_column)
if package[0] not in self.word_to_column:
if len(self.negative_samples) < self.n_samples:
self.negative_samples.append(package)
self.word_to_column[package[0]] = len(self.word_to_column)
else:
while self.negative_samples[0][0] in in_batch:
self.negative_samples = self.negative_samples[1:] + [self.negative_samples[0]]
self.word_to_column[package[0]] = self.word_to_column.pop(self.negative_samples[0][0])
self.negative_samples = self.negative_samples[1:] + [package]
| 8,504 |
google/home/graph/v1/home-graph-v1-py/google/home/graph_v1/__init__.py | googleapis/googleapis-gen | 7 | 2023877 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.home_graph_api_service import HomeGraphApiServiceClient
from .services.home_graph_api_service import HomeGraphApiServiceAsyncClient
from .types.device import AgentOtherDeviceId
from .types.device import Device
from .types.device import DeviceInfo
from .types.device import DeviceNames
from .types.homegraph import AgentDeviceId
from .types.homegraph import DeleteAgentUserRequest
from .types.homegraph import QueryRequest
from .types.homegraph import QueryRequestInput
from .types.homegraph import QueryRequestPayload
from .types.homegraph import QueryResponse
from .types.homegraph import QueryResponsePayload
from .types.homegraph import ReportStateAndNotificationDevice
from .types.homegraph import ReportStateAndNotificationRequest
from .types.homegraph import ReportStateAndNotificationResponse
from .types.homegraph import RequestSyncDevicesRequest
from .types.homegraph import RequestSyncDevicesResponse
from .types.homegraph import StateAndNotificationPayload
from .types.homegraph import SyncRequest
from .types.homegraph import SyncResponse
from .types.homegraph import SyncResponsePayload
__all__ = (
'HomeGraphApiServiceAsyncClient',
'AgentDeviceId',
'AgentOtherDeviceId',
'DeleteAgentUserRequest',
'Device',
'DeviceInfo',
'DeviceNames',
'HomeGraphApiServiceClient',
'QueryRequest',
'QueryRequestInput',
'QueryRequestPayload',
'QueryResponse',
'QueryResponsePayload',
'ReportStateAndNotificationDevice',
'ReportStateAndNotificationRequest',
'ReportStateAndNotificationResponse',
'RequestSyncDevicesRequest',
'RequestSyncDevicesResponse',
'StateAndNotificationPayload',
'SyncRequest',
'SyncResponse',
'SyncResponsePayload',
)
| 2,259 |
cd/checks/is_author_connected.py | Axelware/CD-bot | 2 | 2024207 |
# Future
from __future__ import annotations
# Standard Library
from collections.abc import Callable
from typing import Literal, TypeVar
# Packages
import discord
from discord.ext import commands
# Local
from cd import custom, exceptions
__all__ = (
"is_author_connected",
)
T = TypeVar("T")
def is_author_connected() -> Callable[[T], T]:
async def predicate(ctx: custom.Context) -> Literal[True]:
assert isinstance(ctx.author, discord.Member)
author_channel = ctx.author.voice and ctx.author.voice.channel
voice_client_channel = ctx.voice_client and ctx.voice_client.voice_channel
if voice_client_channel != author_channel:
raise exceptions.EmbedError(
description=f"You must be connected to {getattr(voice_client_channel, 'mention', None)} to use this "
f"command."
)
return True
return commands.check(predicate)
| 949 |
libs/ddbb.py | efrenbg1/rmote.app | 0 | 2023151 |
import pymysql as mysql
from DBUtils.PooledDB import PooledDB
from flask import request
from libs import password, mqtls
from libs.flask import app
import json
class SettingsDB:
def __init__(self, host, user, pw, db, broker, proxy):
self.host = host
self.user = user
self.pw = pw
self.db = db
self.broker = broker
self.proxy = proxy
class SettingsEmail:
def __init__(self, host, port, user, pw, url):
self.host = host
self.port = port
self.user = user
self.pw = pw
self.url = url
settings = SettingsDB("127.0.0.1", "root", "", "rmote",
"127.0.0.1", "127.0.0.1")
email = SettingsEmail("smtp.gmail.com", 25,
"<EMAIL>", "password", "http://localhost")
broker = None
with open('settings.json', "r") as f:
param = json.load(f)
settings.host = param['mysql']['host']
settings.user = param['mysql']['user']
settings.pw = param['mysql']['pw']
settings.db = param['mysql']['db']
settings.broker = param['broker']
broker = mqtls.mqtls(host=settings.broker)
settings.proxy = param['proxy']
email.host = param['email']['host']
email.port = param['email']['port']
email.user = param['email']['user']
email.pw = param['email']['pw']
email.url = param['email']['url']
def checkPW(user, pw):
hash = query("SELECT pw FROM user WHERE username=%s LIMIT 1", user)
if len(hash) != 1:
return False
if password.checkHash(hash[0][0], pw):
return True
return False
def inAcls(user, mac):
acls = query("SELECT a.mac FROM acls AS a LEFT JOIN share AS s ON a.mac=s.mac WHERE a.user=(SELECT id FROM user WHERE username=%s) OR s.user=(SELECT id FROM user WHERE username=%s)", user, user)
for r in acls:
if r[0] == mac:
return True
return False
def get_db():
if not hasattr(app, 'db'):
app.db = PooledDB(creator=mysql, user=settings.user,
password=<PASSWORD>, host=settings.host, database=settings.db)
return app.db.connection()
def query(sql, *param):
if not hasattr(request, 'conn'):
request.conn = get_db()
cursor = request.conn.cursor()
cursor.execute(sql, param)
result = cursor.fetchall()
request.conn.commit()
if result is None:
raise Exception("Error fetching query result: is of None type")
return result
def insert(sql, *param):
if not hasattr(request, 'conn'):
request.conn = get_db()
cursor = request.conn.cursor()
cursor.execute(sql, param)
id = cursor.lastrowid
request.conn.commit()
if id is None:
raise Exception("Insert id returned None")
return id
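# Example usage (added for illustration; mirrors the queries used above, the
# concrete values are placeholders):
#   rows = query("SELECT pw FROM user WHERE username=%s LIMIT 1", "alice")
#   new_id = insert("INSERT INTO user (username, pw) VALUES (%s, %s)", "alice", hashed_pw)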
| 2,738 |
STOFleetAnalysis.py | GlowingCrystallineEntity/STOFleetAnalysis | 1 | 2024310 |
from datetime import datetime
from sys import argv
import shutil
import glob
import re
import csv
import statistics as stats
AccountName = ""
# Tacitly assuming these all exist
Root = ""
Pattern = "*.csv"
Copy_Destination = ""
FILE_DATE_PATTERN = "%Y%m%d-%H%M%S"
Field_Names = \
"Character Name, Account Handle, Level, Class, Guild Rank, Contribution Total, Join Date, Rank Change Date, Last Active Date, Status, Public Comment, Public Comment Last Edit Date"
ROUND_FACTOR = 100
# If true, only display stats for one difference (depending on FROM_START);
# if false, display stats between every two files (and FROM_START is ignored)
ONE_ONLY = False
# If true, display stats from first file to last file; If false, display stats between every two files; if ONE_ONLY is also
# true, display stats only between last two files
FROM_START = True
# If false, don't print the "first" of the diff calculation -- either the first file (if FROM_START is True),
# or the penultimate file (if FROM_START is False). Ignored if ONE_ONLY is False.
PRINT_FIRST = True
fleetFiles = []
def round(x):
return int(ROUND_FACTOR * x + 0.5) / ROUND_FACTOR
def roundInt(x):
return int(x + 0.5)
# From http://stackoverflow.com/questions/715417/converting-from-a-string-to-boolean-in-python
def str2bool(value):
"""
Converts 'something' to boolean. Raises exception for invalid formats
Possible True values: 1, True, "1", "TRue", "yes", "y", "t"
Possible False values: 0, False, None, [], {}, "", "0", "faLse", "no", "n", "f", 0.0, ...
"""
if str(value).lower() in ("yes", "y", "true", "t", "1"): return True
if str(value).lower() in ("no", "n", "false", "f", "0", "0.0", "", "none", "[]", "{}"): return False
raise Exception('Invalid value for boolean conversion: ' + str(value))
def getVal(dict, key, default):
if key in dict:
return dict[key]
else:
return default
# Read from config file
filename = "./config.txt"
if len(argv) == 2:
script, filename = argv
config = {}
with open(filename) as file:
for line in file:
stripLine = line.strip()
if stripLine == "" or stripLine[0] == "#":
continue
if stripLine.count("=") != 1:
print("Must have exactly one \"=\" on non-comment lines")
exit(1)
(key, val) = line.strip().split("=")
config[key.strip()] = val.strip()
ptrn = re.compile("^<.*>$")
AccountName = getVal(config, "AccountName", AccountName)
if AccountName == "" or ptrn.match(AccountName):
raise Exception("Must define AccountName in file config.txt (or specify config file on command line)")
Root = getVal(config, "Root", Root)
if ptrn.match(Root):
raise Exception("Must define Root in file config.txt (or specify config file on command line)")
Pattern = getVal(config, "Pattern", Pattern)
Copy_Destination = getVal(config, "Copy_Destination", Copy_Destination)
if ptrn.match(Copy_Destination):
raise Exception("Must define Copy_Destination in file config.txt (or specify config file on command line)")
FILE_DATE_PATTERN = getVal(config, "FILE_DATE_PATTERN", FILE_DATE_PATTERN)
ROUND_FACTOR = int(getVal(config, "ROUND_FACTOR", ROUND_FACTOR))
ONE_ONLY = str2bool(getVal(config, "ONE_ONLY", ONE_ONLY))
FROM_START = str2bool(getVal(config, "FROM_START", FROM_START))
PRINT_FIRST = str2bool(getVal(config, "PRINT_FIRST", PRINT_FIRST))
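# Example config.txt (added for illustration; all values are placeholders):
#   AccountName = @MyHandle
#   Root = C:/Games/StarTrekOnline/Live
#   Pattern = *.csv
#   Copy_Destination = C:/Users/me/fleet_exports
#   ONE_ONLY = true
#   FROM_START = yes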
# Move CSV Exports out of STO install dir, into a working dir
moveCount = 0
for file in glob.iglob(Root + "/" + Pattern):
print("Moving:", file, "to:", Copy_Destination)
shutil.move(file, Copy_Destination)
moveCount += 1
print("")
print("moved ", moveCount, " files", sep="")
print("")
# Read the filenames, and extract fleet name and export date from them
for file in glob.iglob(Copy_Destination + "/" + Pattern):
fleetFile = re.split("[/\\\\_.]+", file)
dt = datetime.strptime(fleetFile[-2], FILE_DATE_PATTERN)
splitNames = fleetFile[-3:-1]
splitNames.append(dt)
splitNames.append(file)
fleetFiles.append(splitNames)
# sorting by filename implicitly sorts by date
fleetFiles.sort(key=lambda p: p[-1])
shortFleetFiles = []
if ONE_ONLY:
# Sentinel value in case there's only one file for the last fleet in the list
fleetFiles.append([""] * len(fleetFiles[0]))
first = fleetFiles[0]
last = ["", first]
for current in fleetFiles:
# if first[0] != current[0] or current[0] == fleetFiles[-1][0]:
if first[0] != current[0]:
if FROM_START:
shortFleetFiles.append(first)
elif last[1] != "" and last[1][0] != "":
shortFleetFiles.append(last[1])
if last[0] != "" and last[0][0] != "":
shortFleetFiles.append(last[0])
else:
last[0] = current
first = current
last = ["", first]
else:
last[1] = last[0]
last[0] = current
else:
shortFleetFiles = fleetFiles
displaySummary = []
# Read the files, and calculate and print file data
lastFleetName = ""
lastContribTotal = 0
lastFileTime = datetime.min
charName = ""
lastCharContrib = 0
charContrib = 0
for i in range(len(shortFleetFiles)):
fleetName = shortFleetFiles[i]
with open(fleetName[-1]) as csvfile:
reader = csv.DictReader(csvfile)
contribTotal = 0
data = []
for row in reader:
contrib = int(row["Contribution Total"])
contribTotal += contrib
data.append(contrib)
if AccountName == row["Account Handle"]:
charName = row["Character Name"]
charContrib = int(row["Contribution Total"])
members = len(data)
mu = stats.mean(data)
median = stats.median(data)
stdev = stats.pstdev(data, mu)
zeroCount = data.count(0)
membersZeroCount = members - zeroCount
fractionNonZero = membersZeroCount / members
dataNonZero = [x for x in data if x != 0]
muNZ = stats.mean(dataNonZero)
medianNZ = stats.median(dataNonZero)
stdevNZ = stats.pstdev(dataNonZero, muNZ)
contribDiff = 0
contribPerHourPerMemberNZ = 0.
if lastFleetName != fleetName[0]:
# print("---")
# if "" != lastFleetName:
# print("")
# if "" != charName:
# print(charName + ":")
lastContribTotal = contribTotal
lastFileTime = fleetName[2]
lastCharContrib = charContrib
if PRINT_FIRST or lastFleetName == fleetName[0] or \
(i < len(shortFleetFiles) - 1 and shortFleetFiles[i + 1][0] != fleetName[0]) or i == len(shortFleetFiles) - 1:
# print(fleetName[0], " ", fleetName[2], ": ", charName + ": ", "{:,}".format(int(charContrib)),
# ", Contrib Total: ", "{:,}".format(contribTotal), ", Members: ", members,
# ", NonZero: ", "{:,}".format(membersZeroCount), " ({:,.2f}".format(round(fractionNonZero * 100)), "%)",
# sep="")
# print(" mean : ", "{:,}".format(roundInt(mu)), ", median : ", "{:,}".format(roundInt(median)),
# ", StdDev : ", "{:,}".format(roundInt(stdev)), " ({:,.2f}".format(round(stdev / contribTotal * 100)), "%)",
# sep="")
# print(" meanNZ: ", "{:,}".format(roundInt(muNZ)), ", medianNZ: ", "{:,}".format(roundInt(medianNZ)),
# ", StdDevNZ: ", "{:,}".format(roundInt(stdevNZ)), " ({:,.2f}".format(round(stdevNZ / contribTotal * 100)),"%)",
# sep="")
contribDiff = contribTotal - lastContribTotal
charContribDiff = charContrib - lastCharContrib
timeDiff = 0
contribPerHour = 0
contribPerHourPerMember = 0
contribPerHourPerMemberNZ = 0
charContribPerHour = 0
if fleetName[2] > lastFileTime:
timeDiff = fleetName[2] - lastFileTime
contribPerHour = contribDiff / (timeDiff.total_seconds() / 3600)
contribPerHourPerMember = contribPerHour / members
contribPerHourPerMemberNZ = contribPerHour / membersZeroCount
charContribPerHour = charContribDiff / (timeDiff.total_seconds() / 3600)
# print("Char Contrib: ", "{:,}".format(charContribDiff), ", Fleet Total Contrib: ", "{:,}".format(contribDiff), " / ",
# "Time Diff: ", timeDiff, sep="")
# print("Char Contrib Per Hour: ", "{:,.2f}".format(round(charContribPerHour))) # Shouldn't have negative numbers anyway...
# print("Fleet Contrib Per Hour: ", "{:,.2f}".format(round(contribPerHour)),", Per Member: ", round(contribPerHourPerMember),
# ", Per MemberNZ: ", round(contribPerHourPerMemberNZ),
# sep="") # Shouldn't have negative numbers anyway...
# print("")
displaySummary.append((charName, fleetName[0], fleetName[2], charContrib, charContribDiff, contribTotal, contribDiff, members, 100 * fractionNonZero, int((100 * contribPerHourPerMemberNZ + 0.5)) / 100))
lastFleetName = fleetName[0]
lastFileTime = fleetName[2]
lastContribTotal = contribTotal
lastCharContrib = charContrib
print("\n===\n")
print("{:<27}{:<23}{:<9}{:<9}{:<14}{:<11}{:<6}{:<8}{:<8}".format("Name", "Date", "Char", "Diff", "Total", "Diff", "#",
"% NZ", "Per Hour Per Non-Zero Member"))
lastFleetName = displaySummary[0][1]
print(displaySummary[0][0] + ":")
for summary in displaySummary:
if lastFleetName != summary[1]:
print("")
print(summary[0] + ":")
print("{:<27}{:<23}{:<9}{:<9}{:<14}{:<11}{:<6}{:<8}{:<8}".format("Name", "Date", "Char", "Diff", "Total", "Diff", "#",
"% NZ", "Per Hour Per Non-Zero Member"))
lastFleetName = summary[1]
print("{:27}{:%Y-%m-%d %H:%M:%S}{:10,}{:10,}{:13,}{: 12,}{:5d}{: 8.2f}{: 9.2f}".format(*(summary[1:])))
| 9,549 |
imread/tests/test_pvrtc.py | fish2000/imread | 0 | 2023797 |
from nose.tools import raises
import numpy as np
from imread import imread
@raises(RuntimeError)
def test_error():
imread('./imread/tests/data/error.webp')
def test_AI88():
im = imread('./imread/tests/data/pvrsamples/AI88.pvr')
#assert np.any(im) FAIL WTF
def test_I8():
im = imread('./imread/tests/data/pvrsamples/I8.pvr')
assert np.any(im)
def test_RGB565():
im = imread('./imread/tests/data/pvrsamples/RGB565.pvr')
assert np.any(im)
def test_RGB888():
im = imread('./imread/tests/data/pvrsamples/RGB888.pvr')
assert np.any(im)
def test_RGBA4444():
im = imread('./imread/tests/data/pvrsamples/RGBA4444.pvr')
assert np.any(im)
def test_RGBA5551():
im = imread('./imread/tests/data/pvrsamples/RGBA5551.pvr')
assert np.any(im)
def test_RGBA8888():
im = imread('./imread/tests/data/pvrsamples/RGBA8888.pvr')
assert np.any(im)
def test_apple_2bpp():
im = imread('./imread/tests/data/pvrsamples/apple_2bpp.pvr')
assert np.any(im)
def test_apple_4bpp():
im = imread('./imread/tests/data/pvrsamples/apple_4bpp.pvr')
assert np.any(im)
def test_pvrtc2bpp():
im = imread('./imread/tests/data/pvrsamples/pvrtc2bpp.pvr')
assert np.any(im)
def test_pvrtc4bpp():
im = imread('./imread/tests/data/pvrsamples/pvrtc4bpp.pvr')
assert np.any(im)
# pngreference.png ...?
| 1,368 |
forecasting.py | cyrusmaher/EpiScore | 0 | 2024353 |
"""
Usage:
forecasting.py <infile> <outfolder> [--from_meta|--from_lineage] [--n_days_for_forecast=<n>]
Options:
--from_meta Read from metadata input. Inferred to be true if the input filename contains "metadata" but not "lineage"
--from_lineage Read from metadata_lineage file. Inferred to be true if the input filename contains "lineage"
--n_days_for_forecast=<n> Number of trailing days of data to use for the forecast [default: 90]
"""
import pandas as pd
import numpy as np
from docopt import docopt
import var_classification_helper as varclass
import var_ranking_helper as helper
import utils
import parse_gisaid as gisaid
import os
today = utils.today
def read_input(in_file, from_meta, from_lineage, filter_last_n_days):
if from_lineage:
print(f"Reading gisaid lineage summary: {in_file}")
df = read_lineage_table(in_file, filter_last_n_days=filter_last_n_days)
print(f"Reading {len(df)} rows from lineage file")
return df
elif from_meta:
print(f"Reading gisaid metadata summary: {in_file}")
return gisaid.read_gisaid_assummary(fname=in_file, filter_last_n_days=filter_last_n_days)
else:
if filter_last_n_days is not None:
raise ValueError("Cannot filter by granular date if reading from summary file")
print("Reading directly from summary table")
return pd.read_csv(in_file, sep="\t")
def format_lineage_table(lineage_table):
df_tmp = lineage_table[
["AA_Substitution", "country", "pango_lineage", "GISAID_clade", "date"]
].copy()
df_tmp["monthdate"] = (
df_tmp["date"].str.split("-").str[:2].str.join("-")
)
df_tmp = df_tmp.rename(
columns={"AA_Substitution": "haplotype", "country": "location"}
)
df_tmp["haplotype"] = (
df_tmp["haplotype"]
.str.replace(r"(", "", regex=False)
.str.replace(r")", "", regex=False)
)
collected_counts = df_tmp.groupby(["location", "monthdate"]).size()
haplotype_counts = (
df_tmp.groupby(
["haplotype", "location", "monthdate", "pango_lineage", "GISAID_clade"]
)
.size()
.rename("haplotype_counts")
)
final = (
haplotype_counts.reset_index()
.set_index(["location", "monthdate"])
.join(collected_counts.rename("collected_counts"))
.reset_index()
)
final = final[final["haplotype"].str.len() > 0]
return final
def read_lineage_table(path, filter_last_n_days=None):
lineage_table = pd.read_table(path)
lineage_table = gisaid.filter_by_date(lineage_table, filter_last_n_days)
return format_lineage_table(lineage_table)
def df2score(df, months, keepall=False):
"""
Return mutation scores in the specified window
"""
if months is not None:
df_mo = df[df["monthdate"].isin(months)]
else:
df_mo = df
feature_df = varclass.calculate_features(df_mo)
if keepall:
return feature_df
return feature_df["EpiScore"]
def df2topscores(df, months):
return (
df2score(df, months)
.loc[lambda x: x > x.quantile(0.95)]
.sort_values(ascending=False)
)
def df2pred(df, months):
"""
Turn mutation scores into a set of predicted mutations (ordered by score)
"""
return df2topscores(df, months).index
def retrieve_hap_w_vars(mutations, df):
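# (added note) Pick the observed haplotype covering the largest fraction of the
# requested mutations; ties are broken in favour of the haplotype with the
# fewest extra mutations beyond the requested set.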
these_haplos = pd.Series(np.unique(df["haplotype"]))
matches, haps = zip(
*sorted(
zip(
[
len(set(xx.split(", ")).intersection(mutations)) / len(mutations)
for xx in these_haplos
],
these_haplos,
),
reverse=True,
)
)
haps = [hh for mm, hh in zip(matches, haps) if mm == np.max(matches)]
_, best_hap = sorted(
zip([len(set(hh.split(", ")).difference(mutations)) for hh in haps], haps)
)[0]
return best_hap
def write_summaries(in_file, out_folder, from_meta=False, from_lineage=False, n_days_for_forecast=None):
df_updatepred = read_input(
in_file, from_meta, from_lineage, filter_last_n_days=n_days_for_forecast)
haps_bymonth = (
df_updatepred.groupby("monthdate")["haplotype_counts"].sum().iloc[-8:]
)
months_updatepred = haps_bymonth.iloc[-4:].index
scores_updatepred = df2score(df_updatepred, months_updatepred, keepall=True)
# Write out predicted mutations with scores
print("Writing out scores...")
scores_updatepred.to_csv(f"{out_folder}/scores_{today()}.csv")
def var2site_df(mutations, input_df):
sites = helper.var2site(mutations)
new_df = input_df.reindex(sites).copy()
new_df.index = mutations
return new_df
if __name__ == "__main__":
print("Entered main")
arguments = docopt(__doc__)
if "lineage" in arguments["<infile>"]:
arguments["--from_lineage"] = True
elif "metadata" in arguments["<infile>"]:
arguments["--from_meta"] = True
print("Arguments are:")
print(arguments)
if not os.path.exists(arguments["<outfolder>"]):
os.mkdir(arguments["<outfolder>"])
write_summaries(
arguments["<infile>"],
arguments["<outfolder>"],
from_meta=arguments["--from_meta"],
from_lineage=arguments["--from_lineage"],
n_days_for_forecast=int(arguments["--n_days_for_forecast"]),
)
| 5,398 |
pasket/decode/singleton.py | plum-umd/pasket | 18 | 2023452 |
import re
import logging
import lib.const as C
import lib.visit as v
from .. import util
from ..meta import methods, classes, class_lookup
from ..meta.template import Template
from ..meta.clazz import Clazz
from ..meta.method import Method
from ..meta.field import Field
from ..meta.statement import Statement, to_statements
from ..meta.expression import Expression
class Singleton(object):
## hole assignments for roles
## glblInit_role_????,StmtAssign,role_???? = n
regex_role = r"((({})_\S+)_{}).* = (\d+)$".format('|'.join(C.sng_roles), C.SNG.AUX)
@staticmethod
def simple_role_of_interest(msg):
return re.match(Singleton.regex_role, msg)
# add a mapping from role variable to its value chosen by sketch
def add_simple_role(self, msg):
m = re.match(Singleton.regex_role, msg)
v, n = m.group(1), m.group(4)
self._role[v] = n
# initializer
def __init__(self, output_path):
self._output = output_path
self._demo = util.pure_base(output_path)
self._cur_mtd = None
self._role = {} # { v : n }
# class roles
self._sngs = []
# method roles
self._gttrs = []
# interpret the synthesis result
with open(self._output, 'r') as f:
for line in f:
line = line.strip()
try:
items = line.split(',')
func, kind, msg = items[0], items[1], ','.join(items[2:])
if Singleton.simple_role_of_interest(msg): self.add_simple_role(msg)
except IndexError: # not a line generated by custom codegen
pass # if "Total time" in line: logging.info(line)
@property
def demo(self):
return self._demo
@v.on("node")
def visit(self, node):
"""
This is the generic method to initialize the dynamic dispatcher
"""
@v.when(Template)
def visit(self, node):
def find_role(lst, role):
ids = []
for v in self._role:
if v.startswith(role): ids.append(int(self._role[v]))
return [ lst[_id] for _id in ids ]
# find and store class roles
self._sngs = find_role(classes(), C.SNG.SNG)
logging.debug("singleton(s): {}".format(self._sngs))
# find and store method roles
self._gttrs = find_role(methods(), C.SNG.GET)
logging.debug("getter(s): {}".format(self._gttrs))
@v.when(Clazz)
def visit(self, node):
if node in self._sngs:
# make the constructor(s) *private*
inits = node.inits
if not inits: inits = [node.add_default_init()]
for init in inits:
if C.mod.PR not in init.mods: init.mods.append(C.mod.PR)
# rip off *public* modifier, if exists
try: init.mods.remove(C.mod.PB)
except ValueError: pass
# add a static field to hold the singleton instance
cname = node.name
fname = C.SNG.INS
holder = Field(clazz=node, mods=[C.mod.PR, C.mod.ST], typ=cname, name=fname)
logging.debug("adding field {0}.{1} of type {0}".format(cname, fname))
node.add_flds([holder])
@v.when(Field)
def visit(self, node): pass
@v.when(Method)
def visit(self, node):
self._cur_mtd = node
if node in self._gttrs:
cname = node.clazz.name
mname = node.name
fname = C.SNG.INS
body = u"""
if ({fname} == null) {{
{fname} = new {cname}();
}}
return {fname};
""".format(**locals())
logging.debug("filling getter {}.{}".format(cname, mname))
node.body = to_statements(node, body)
@v.when(Statement)
def visit(self, node):
if node.kind == C.S.RETURN:
call = unicode(node)
## Aux...getterInOne(...);
if call.startswith(u"return " + C.SNG.AUX):
return to_statements(self._cur_mtd, u"return null;")
return [node]
@v.when(Expression)
def visit(self, node): return node
| 3,768 |
src/dcar/raw.py | andreas19/dcar | 1 | 2024057 |
"""Raw message data."""
import io
from contextlib import contextmanager
from .const import MAX_MESSAGE_LEN, MAX_MSG_UNIX_FDS, MAX_VARIANT_NESTING_DEPTH
from .errors import MessageError, TooLongError
__all__ = ['RawData']
class RawData(io.BytesIO):
"""Raw messge data."""
def __init__(self, initial_bytes=b''):
super().__init__(initial_bytes)
self.byteorder = None
self._unix_fds = []
self._nesting_depth = 0
@property
def unix_fds(self):
"""Return list with unix file descriptors."""
return self._unix_fds
@unix_fds.setter
def unix_fds(self, fds):
"""Set list with unix file descriptors."""
if len(fds) > MAX_MSG_UNIX_FDS:
raise TooLongError('too many unix fds: %d' % len(fds))
self._unix_fds = fds
def write(self, b):
"""Write bytes."""
n = super().write(b)
if self.tell() > MAX_MESSAGE_LEN:
raise TooLongError('message too long: %d bytes' % self.tell())
return n
def write_nul_bytes(self, n):
"""Write n NUL bytes."""
self.write(b'\x00' * n)
def write_padding(self, alignment):
"""Write padding bytes."""
self.write_nul_bytes(self._padding_len(alignment))
def skip_padding(self, alignment):
"""Skip padding bytes."""
b = self.read(self._padding_len(alignment))
if any(list(b)):
raise MessageError('none-NUL byte in padding: %s' % b)
def set_value(self, pos, fixed_type, value):
"""Set value at position pos."""
self.seek(pos)
fixed_type.marshal(self, value)
self.seek(0, io.SEEK_END)
def add_unix_fd(self, fd):
"""Add unix file descriptor."""
if fd in self._unix_fds:
return self._unix_fds.index(fd)
else:
self._unix_fds.append(fd)
fd_cnt = len(self._unix_fds)
if fd_cnt > MAX_MSG_UNIX_FDS:
raise TooLongError('too many unix fds: %d' % fd_cnt)
return fd_cnt - 1
def _padding_len(self, alignment):
x = self.tell() % alignment
if x:
return alignment - x
return 0
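# (added note) _padding_len example: at stream offset 5 with 8-byte alignment it
# returns 3, so three NUL bytes pad the next value to offset 8.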
def __repr__(self):
return '<%s: byteorder=%s>' % (self.__class__.__name__,
self.byteorder.name
if self.byteorder is not None else None)
@contextmanager
def nesting_depth(self):
"""Context manager for checking the nesting depth of variants."""
self._nesting_depth += 1
if self._nesting_depth > MAX_VARIANT_NESTING_DEPTH:
raise MessageError('nesting depth > %d' % MAX_VARIANT_NESTING_DEPTH)
try:
yield
finally:
self._nesting_depth -= 1
| 2,805 |
spam-api/controllers/egg_controller.py | Alex-Noll/Beginner-Flask-Zappa-API | 4 | 2023873 |
from resources.egg_resource import *
def egg_add(api):
api.add_resource(EggResource,'/api/egg')
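# Illustrative wiring (added; assumes Flask + Flask-RESTful, names are placeholders):
#   from flask import Flask
#   from flask_restful import Api
#   app = Flask(__name__)
#   api = Api(app)
#   egg_add(api)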
| 102 |
nlp2go/model.py | voidful/nlp2go | 25 | 2022726 |
import inspect
import nlp2
import tfkit
from transformers import pipeline, pipelines, BertTokenizer, cached_path, AutoTokenizer
from nlp2go.modelhub import MODELMAP
from nlp2go.parser import Parser
class Model:
def __init__(self, model_path, panel=False, **param):
self.model = None
self.enable_panel = panel
if nlp2.is_file_exist(model_path) or "tfkit_" in model_path: # tfkit models
self.lib = 'tfkit'
self.model, self.predict_parameter, self.model_task = self.load_tfkit_model(model_path, **param)
self.predict_func = self.model.predict
else: # huggingface's transformers model - local model saved in dir, online model name without tfkit tag
self.lib = 'hf'
self.model, self.predict_parameter, self.model_task = self.load_huggingface_model(model_path, **param)
self.predict_func = self.model
self.parser = Parser(self.model_task, self.predict_func, self.model.tokenizer)
predict_parameter, _ = nlp2.function_sep_suit_arg(self.parser.get_input_parser(), param)
self.predict_parameter.update(predict_parameter)
print("loaded model predict_parameter", predict_parameter)
def load_huggingface_model(self, model_path, **param):
supported_type = list(pipelines.SUPPORTED_TASKS.keys())
if 'task' not in param or param['task'] not in supported_type:
panel = nlp2.Panel()
panel.add_element('task', supported_type, "Select model task: ", default={})
model_task = panel.get_result_dict()['task']
else:
model_task = param['task']
param['model'] = model_path
param['tokenizer'] = BertTokenizer.from_pretrained(model_path) if 'voidful/albert_chinese' in model_path \
else AutoTokenizer.from_pretrained(model_path)
pipeline_param, _ = nlp2.function_sep_suit_arg(pipeline, param)
nlp = pipeline(**pipeline_param)
predict_parameter, _ = nlp2.function_sep_suit_arg(nlp, param)
return nlp, predict_parameter, model_task
def load_tfkit_model(self, model_path, **param):
model_task = param['task'] if 'task' in param else None
model_path = MODELMAP[model_path] if model_path in MODELMAP else model_path
model, model_task, model_class, model_info = tfkit.utility.model.load_trained_model(cached_path(model_path),
tag=model_task)
predict_parameter, _ = nlp2.function_sep_suit_arg(model.predict, param)
model.eval()
return model, predict_parameter, model_task
def predict(self, pred_json=None, **argument):
pred_param = self.predict_parameter.copy()
pred_param.update(argument)
if pred_json:
if isinstance(pred_json, str):
pred_param.update({'input': pred_json})
else:
pred_param.update(pred_json)
input_argument = self.parser.input_parser(pred_param, enable_arg_panel=self.enable_panel)
try:
if isinstance(input_argument, tuple):
pred = self.predict_func(input_argument[0], **input_argument[1])
else:
pred = self.predict_func(**input_argument)
pred = self.parser.output_parser(pred)
return pred
except Exception as e:
return {
"input": input_argument,
"error": str(e)
}
| 3,520 |
tests/integration_tests/testLegs.py | neoyung/IrLib | 1 | 2023868 |
import unittest
from datetime import date
from irLib.helpers.schedule import period, schedule, floatingSchedule
from irLib.marketConvention.roll import following
from irLib.marketConvention.dayCount import ACT_ACT
from irLib.marketConvention.compounding import annually_k_Spot
import numpy as np
from irLib.helpers.yieldCurve import discountCurve, forwardCurve
from irLib.instruments.legs import fixLeg, floatLeg
# set schedule
startDate = tradeDate = date(2020, 7, 1)
terminationDate = date(2022, 7, 1) # a 2 year bond
howOften = period(6, 'month') # semi-annually
howToAdjust = following('HongKong')
fixedDateLag = period(1, 'day')
s = schedule(startDate, terminationDate, howOften, howToAdjust)
fS = floatingSchedule(startDate, terminationDate,
howOften, howToAdjust, fixedDateLag)
# set synthetic data
timeIndex = [1, 2, 3, 4, 5]
flatR = 0.03
dF = ((flatR + 1) ** -np.arange(1, 6)).tolist()
forwardRates = (flatR * np.ones(5)).tolist()
# set discountCurve and LiborCurve (an instance of forward rate curve)
alias_disC = 'disC'
alias_forC = 'forC'
referenceDate = date(2018, 1, 1)
dayCount = ACT_ACT()
compounding = annually_k_Spot()
allowExtrapolation = True # trigger warning if false and extrapolation is needed
disC = discountCurve(alias_disC, referenceDate, dayCount, compounding, allowExtrapolation)
disC.values = dF
disC.timeIndex = timeIndex
forwardC = forwardCurve(alias_forC, referenceDate, dayCount, compounding, allowExtrapolation)
forwardC.values = forwardRates
forwardC.timeIndex = timeIndex
# fix leg parameter
fixedRate = 0.03
fixL = fixLeg(tradeDate, fixedRate, s)
floatL = floatLeg(tradeDate, forwardC, fS)
class testLegs(unittest.TestCase):
""" Testing its NPV in the first 3 years """
def testFixLeg(self):
fixL.setPricingEngine(disC)
self.assertCountEqual(np.round([fixL.calculateNPV(date(2020 + d, 7, 1)) for d in range(3)], 8),
[0.05793798, 0.02957842, 0.00024652])
def testFloatLeg(self):
floatL.setPricingEngine(disC)
# the difference from the fix leg's cash flow is due to the interpolation methodology of forward rates,
# as forward rate is defined by (fixDate, paymentPeriodStartDate, paymentPeriodEndDate)
self.assertCountEqual(np.round([floatL.calculateNPV(date(2020 + d, 7, 1)) for d in range(3)], 8),
[0.05751394, 0.02936204, 0.00024290])
| 2,425 |
demos/demo_spark.py | grayed/dfxml | 2 | 2022955 |
#!/usr/bin/env python3
#
# Shows how DFXML works with spark.
# This program runs spark if it is not already running
import sys
import os
sys.path.append("../python")
from dfxml_writer import DFXMLWriter
def spark_demo():
"""A small spark program. Must be run under spark"""
from pyspark import SparkConf
from pyspark import SparkContext
import operator
conf = SparkConf()
sc = SparkContext(conf=conf)
m = 1000000
result = sc.parallelize(range(0, m+1)).reduce(operator.add)
print(f"The sum of the numbers 0 to {m} is {result}")
assert result == 500000500000
def run_spark():
# If we are running under spark, just call check_spark.
# Otherwise, run recursively under spark-submit
import os
if "SPARK_ENV_LOADED" in os.environ:
return # yea! Spark is running
#
# Re-run this script under spark, and then exit.
#
import subprocess
r = subprocess.run(['spark-submit',__file__] + sys.argv[1:])
assert r.returncode==0
exit(0)
if __name__=="__main__":
import argparse
import time
parser = argparse.ArgumentParser()
args = parser.parse_args()
run_spark()
dfxml = DFXMLWriter(filename=f'demo_spark_{int(time.time())}.dfxml',prettyprint=True)
spark_demo()
# DFXML file gets written automatically when program exits.
exit(0)
| 1,388 |
HypothesisTest.py | FouL06/Statistics-Tools | 0 | 2024345 |
import scipy as sp
import math
#Hypothesis Testing Variables
Null_Mean = 3
Sample_Mean = 3.38
Sample_Variance = 1.769
Sample_Standard_Deviation = 5.237
Sample_Size = 50
#Finds the T value for a specific significance level for the data
def FindTValue(x, u, s, n):
t = (x-u)/(s / math.sqrt(n))
return abs(t)
#T-Test using sample variance
print(FindTValue(Sample_Mean, Null_Mean, Sample_Variance, Sample_Size))
#T-Test using sample deviation
print(FindTValue(Sample_Mean, Null_Mean, Sample_Standard_Deviation, Sample_Size))
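# Added sketch (not in the original script): converting the t statistic into a
# two-sided p-value; assumes scipy.stats is available (scipy is imported above
# as sp, but the stats submodule is imported explicitly here).
from scipy import stats

t_stat = FindTValue(Sample_Mean, Null_Mean, Sample_Standard_Deviation, Sample_Size)
p_value = 2 * stats.t.sf(t_stat, df=Sample_Size - 1)  # survival function of Student's t
print("Two-sided p-value (using sample standard deviation):", p_value)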
| 534 |
apertium/management/commands/import_mono_dix.py | mikahama/verdd | 5 | 2023980 |
import os
import io
from django.core.management.base import BaseCommand, CommandError
from manageXML.models import *
from django.conf import settings
from ._dix_common import *
ignore_affiliations = False
def create(itm: Item, lemma, pos_g, homoId, lang, datafile, stem, contlex):
# find the lexeme or create the instance and return it
# Handling importing Proper nouns
prop = False
if pos_g == 'Prop':
prop = True
pos_g = 'N'
try:
_l = Lexeme.objects.get(lexeme=lemma, pos=pos_g, homoId=homoId, language=lang)
except:
_l = Lexeme.objects.create(
lexeme=lemma, pos=pos_g, homoId=homoId, language=lang,
imported_from=datafile)
add_metadata_to_lexeme(_l, itm)
# Handling importing Proper nouns
if prop:
md, created = LexemeMetadata.objects.get_or_create(lexeme=_l, type=LEXEME_TYPE, text='Prop')
if not ignore_affiliations:
title = _l.find_akusanat_affiliation()
# link it
if title:
a, created = Affiliation.objects.get_or_create(lexeme=_l, title=title, type=AKUSANAT,
link="{}{}".format(settings.WIKI_URL, title))
if stem:
s, created = Stem.objects.get_or_create(lexeme=_l, text=stem, homoId=homoId, contlex=contlex)
def add_element(e: DixElement, lang, datafile):
lemma, homoId, pos, pos_g = e.pair.right.lemma_homoId_POS()
if not lemma:
lemma = e.attributes['lm'] if 'lm' in e.attributes else '' # is also in <r>
stem, stem_homoId, stem_pos, stem_pos_g = e.pair.left.lemma_homoId_POS()
if not stem and e.i:
stem, stem_homoId, stem_pos, stem_pos_g = e.i.lemma_homoId_POS()
contlex = e.par.attributes['n'] if e.par and 'n' in e.par.attributes else ''
if not pos:
for pos_key in CONTLEX_TO_POS.keys():
if pos_key in contlex:
pos = CONTLEX_TO_POS[pos_key]
break
if type(pos) is list:
for _p in pos:
create(e.pair.right, lemma, _p, homoId, lang, datafile, stem, contlex)
else:
create(e.pair.right, lemma, pos_g, homoId, lang, datafile, stem, contlex)
else:
create(e.pair.right, lemma, pos_g, homoId, lang, datafile, stem, contlex)
class Command(BaseCommand):
'''
Example: python manage.py import_mono_dix -f ../apertium-fin -l fin
'''
help = 'This command imports the content of a monolingual .dix file.'
def add_arguments(self, parser):
parser.add_argument('-f', '--file', type=str, help='The .DIX file containing the translations.', )
parser.add_argument('-l', '--language', type=str, help='The language of the monolingual file.', )
parser.add_argument('--ignore-affiliations', dest='ignore_affiliations', action='store_true')
parser.set_defaults(ignore_affiliations=False)
def handle(self, *args, **options):
global ignore_affiliations
file_path = options['file'] # the directory containing the XML files
ignore_affiliations = options['ignore_affiliations']
lang = Language.objects.get(id=options['language'])
if not os.path.isfile(file_path):
raise CommandError('File "%s" does not exist.' % file_path)
with io.open(file_path, 'r', encoding='utf-8') as fp:
dix = parse_dix(fp)
filename = os.path.splitext(os.path.basename(file_path))[0]
df = DataFile(lang_source=lang, lang_target=None, name=filename)
df.save()
for sdef, comment in dix.sdefs.items():
try:
Symbol.objects.get_or_create(name=sdef, comment=comment)
except: # exists but with different comment
pass
for e in dix.sections['main'].elements:
add_element(e, lang, df)
self.stdout.write(self.style.SUCCESS('Successfully imported the file.'))
| 3,942 |
golf/visual_tests.py | OMG-ICFP-FTW/icfp2021 | 0 | 2022971 |
#!/usr/bin/env python3
# visual_tests.py - Visually test some of the solver multiplication
'''
Cases to test:
The hole points are H1, H2, forming line segment HH
The edge points are A, B, forming line segment AB
Test AB HH are identical
Test AB HH are collinear and disjoint
Test AB HH are collinear and touching
Test AB HH are collinear and intersect
Test AB HH are parallel
Test AB HH are intersecting but do not overlap
Test AB HH are intersecting and overlap at an end (like a L)
Test AB HH are intersecting and overlap at a point (like a T)
Test AB HH are intersecting and overlap in the center (like a X)
Whole hole tests
H1, H2, H3 form a triangle
Test AB inside the triangle
Test AB along each of the edges
Test AB crosses from inside to outside
Test AB starts at point and goes inside
Test AB starts at point and goes outside
Test AB starts at midpoint and goes outside
Concave hole tests
Hole forms a U shape
Test AB inside the hole
Test AB along each of the edges
Test AB along the top of the U, point to point
Test AB across the middle of the U, midpoint to midpoint
Test AB across the U from midpoint to point, diagonally
Test AB across the U from point to midpoint, diagonally
Test AB from inside the U across to the opposite midpoint
Test AB from inside the U across to the opposite point
J shaped hole
..
.. ..
.. ..
......
Test that AB from far top point across to
'''
# %%
from typing import List
from dataclasses import dataclass
from itertools import product
from collections import namedtuple
import matplotlib.pyplot as plt
from andtools import Problem
# %%
# https://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/
Coord = namedtuple('Coord', ['x', 'y'])
def onSegment(p: Coord, q: Coord, r: Coord) -> bool:
''' Return True if point q lies on segment pr (assumes p, q, r are collinear) '''
return (q.x <= max(p.x, r.x) and q.x >= min(p.x, r.x) and
q.y <= max(p.y, r.y) and q.y >= min(p.y, r.y))
def orient(p: Coord, q: Coord, r: Coord):
''' Get orientation of triangle pqr (Collinear, Clockwise, Counterclockwise) '''
val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)
return 0 if val == 0 else (1 if val > 0 else 2)
'''// The main function that returns true if line segment 'p1q1'
// and 'p2q2' intersect.
bool doIntersect(Point p1, Point q1, Point p2, Point q2)
{
// Find the four orientations needed for general and
// special cases
int o1 = orientation(p1, q1, p2);
int o2 = orientation(p1, q1, q2);
int o3 = orientation(p2, q2, p1);
int o4 = orientation(p2, q2, q1);
// General case
if (o1 != o2 && o3 != o4)
return true;
// Special Cases
// p1, q1 and p2 are colinear and p2 lies on segment p1q1
if (o1 == 0 && onSegment(p1, p2, q1)) return true;
// p1, q1 and q2 are colinear and q2 lies on segment p1q1
if (o2 == 0 && onSegment(p1, q2, q1)) return true;
// p2, q2 and p1 are colinear and p1 lies on segment p2q2
if (o3 == 0 && onSegment(p2, p1, q2)) return true;
// p2, q2 and q1 are colinear and q1 lies on segment p2q2
if (o4 == 0 && onSegment(p2, q1, q2)) return true;
return false; // Doesn't fall in any of the above cases
}
'''
def intersecting(p1: Coord, q1: Coord, p2: Coord, q2: Coord) -> bool:
''' Return True if line segments p1q1 and p2q2 intersect '''
o1 = orient(p1, q1, p2)
o2 = orient(p1, q1, q2)
o3 = orient(p2, q2, p1)
o4 = orient(p2, q2, q1)
# General case
if o1 != o2 and o3 != o4:
return True
# Special Cases
# p1, q1 and p2 are colinear and p2 lies on segment p1q1
if o1 == 0 and onSegment(p1, p2, q1):
return True
# p1, q1 and q2 are colinear and q2 lies on segment p1q1
if o2 == 0 and onSegment(p1, q2, q1):
return True
# p2, q2 and p1 are colinear and p1 lies on segment p2q2
if o3 == 0 and onSegment(p2, p1, q2):
return True
# p2, q2 and q1 are colinear and q1 lies on segment p2q2
if o4 == 0 and onSegment(p2, q1, q2):
return True
return False # Doesn't fall in any of the above cases
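# Added sanity check (illustrative): two unit diagonals crossing at the origin
# should be reported as intersecting.
assert intersecting(Coord(-1, -1), Coord(1, 1), Coord(-1, 1), Coord(1, -1))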
def valid(a: Coord, b: Coord, c: Coord, d: Coord) -> bool:
''' Return True if segments a-b and hole c-d are valid in combination '''
if (a == c and b == d) or (a == d and b == c):
return True
o1 = orient(a, b, c)
o2 = orient(a, b, d)
o3 = orient(c, d, a)
o4 = orient(c, d, b)
# General case
if o1 != o2 and o3 != o4:
return False
# Special Cases
if o1 == 0 or o2 == 0 or o3 == 0 or o4 == 0:
return False
return (o1 != o2 and o3 != o4) # crossing intersection or not
@dataclass
class TestCase:
name: str
a: Coord
b: Coord
hole: List[Coord]
expected: bool
def valid(self):
is_valid = True
for i in range(len(self.hole) - 1):
if not intersecting(self.a, self.b, self.hole[i], self.hole[i + 1]):
is_valid = False
break
return is_valid
def plot(self):
fig, ax = plt.subplots(figsize=(2,2))
for i in range(len(self.hole)):
h1, h2 = self.hole[i], self.hole[(i + 1) % len(self.hole)]
ax.plot([h1.x, h2.x], [h1.y, h2.y], 'co-', alpha=.5)
ax.plot([self.a.x, self.b.x], [self.a.y, self.b.y], 'mo-', alpha=.5)
# Show gridlines
ax.grid(True)
xmin = min(0, self.a.x, self.b.x, min(c.x for c in self.hole))
xmax = max(1, self.a.x, self.b.x, max(c.x for c in self.hole))
ymin = min(0, self.a.y, self.b.y, min(c.y for c in self.hole))
ymax = max(1, self.a.y, self.b.y, max(c.y for c in self.hole))
# Set x and y limits
ax.set_xlim([xmin - 1, xmax + 1])
ax.set_ylim([ymin - 1, ymax + 1])
ax.set_xticks(range(xmin, xmax + 1))
ax.set_yticks(range(ymin, ymax + 1))
# set title
ax.set_title(self.name)
# set labels
valid = self.valid()
ax.set_xlabel(f'Got: {valid}')
ax.set_ylabel(f'Expected: {self.expected}')
if valid != self.expected:
ax.set_facecolor((1, .7, .7))
# Set coloring
plt.show()
# %% Test AB HH are identical
pairs = [
(Coord(0, 0), Coord(0, 0)),
(Coord(0, 0), Coord(0, 1)),
(Coord(0, 0), Coord(1, 0)),
(Coord(0, 0), Coord(1, 1)),
(Coord(0, 1), Coord(1, 0)),
]
for a, b in pairs:
# TestCase(a, b, [a, b]).assert_valid()
TestCase('identity', a, b, [a, b], True).plot()
# %% Test AB HH are collinear and disjoint
abhole = [
(Coord(0, 0), Coord(0, 1), [Coord(0, 2), Coord(0, 3)]),
(Coord(0, 0), Coord(1, 0), [Coord(2, 0), Coord(3, 0)]),
(Coord(0, 0), Coord(1, 1), [Coord(2, 2), Coord(3, 3)]),
(Coord(0, 3), Coord(1, 2), [Coord(2, 1), Coord(3, 0)]),
(Coord(0, 2), Coord(0, 3), [Coord(0, 0), Coord(0, 1)]),
(Coord(2, 0), Coord(3, 0), [Coord(0, 0), Coord(1, 0)]),
(Coord(2, 2), Coord(3, 3), [Coord(0, 0), Coord(1, 1)]),
(Coord(2, 1), Coord(3, 0), [Coord(0, 3), Coord(1, 2)]),
]
for a, b, hole in abhole:
TestCase('collinear disjoint', a, b, hole, True).plot()
# %% Test AB HH are collinear and intersecting
abhole = [
(Coord(0, 0), Coord(0, 2), [Coord(0, 1), Coord(0, 3)]),
(Coord(0, 0), Coord(2, 0), [Coord(1, 0), Coord(3, 0)]),
(Coord(0, 0), Coord(2, 2), [Coord(1, 1), Coord(3, 3)]),
(Coord(0, 3), Coord(2, 1), [Coord(1, 2), Coord(3, 0)]),
(Coord(0, 1), Coord(0, 3), [Coord(0, 0), Coord(0, 2)]),
(Coord(1, 0), Coord(3, 0), [Coord(0, 0), Coord(2, 0)]),
(Coord(1, 1), Coord(3, 3), [Coord(0, 0), Coord(2, 2)]),
(Coord(1, 2), Coord(3, 0), [Coord(0, 3), Coord(2, 1)]),
]
for a, b, hole in abhole:
TestCase('collinear intersect', a, b, hole, False).plot()
# %% Test AB HH are collinear and abut
abhole = [
(Coord(0, 0), Coord(0, 2), [Coord(0, 1), Coord(0, 2)]),
(Coord(0, 0), Coord(2, 0), [Coord(1, 0), Coord(2, 0)]),
(Coord(0, 0), Coord(2, 2), [Coord(1, 1), Coord(2, 2)]),
(Coord(0, 0), Coord(0, 2), [Coord(0, 1), Coord(0, 2)]),
(Coord(0, 0), Coord(0, 2), [Coord(0, 0), Coord(0, 1)]),
(Coord(0, 0), Coord(2, 0), [Coord(0, 0), Coord(1, 0)]),
(Coord(0, 0), Coord(2, 2), [Coord(0, 0), Coord(1, 1)]),
(Coord(0, 0), Coord(0, 2), [Coord(0, 0), Coord(0, 1)]),
]
for a, b, hole in abhole:
TestCase('collinear abut', a, b, hole, True).plot()
# %% Test AB HH are collinear and overlap
abhole = [
(Coord(0, 0), Coord(0, 3), [Coord(0, 1), Coord(0, 2)]),
(Coord(0, 0), Coord(3, 0), [Coord(1, 0), Coord(2, 0)]),
(Coord(0, 0), Coord(3, 3), [Coord(1, 1), Coord(2, 2)]),
(Coord(0, 3), Coord(3, 0), [Coord(1, 2), Coord(2, 1)]),
]
for a, b, hole in abhole:
TestCase('collinear overlap', a, b, hole, True).plot()
| 8,751 |
solver.py
|
benb10/nonogram_solver
| 0 |
2022710
|
from copy import deepcopy
from itertools import chain
from time import time
def get_new_boards(board):
# return a list of boards with the next cell clicked
new_cell_location = None
for i, row in enumerate(board):
if new_cell_location:
break
for j, cell in enumerate(row):
if new_cell_location:
break
if cell is None:
new_cell_location = (i, j)
nc_i, nc_j = new_cell_location
nb1 = deepcopy(board)
nb1[nc_i][nc_j] = True
nb2 = deepcopy(board)
nb2[nc_i][nc_j] = False
return [nb1, nb2]
def get_row_groups(row):
"""
input: [True, True, None, True, None]
return: [2, 1]
"""
row_groups = []
for prev_cell, cell in zip([None] + row, row):
if cell is not True:
continue
if prev_cell is True:
# add to the last num
row_groups[-1] += 1
else:
# append a new group
row_groups.append(1)
return row_groups
def row_groups_can_fit_in_nums(row_groups, nums):
"""
input: [1, 1], [2, 1]
output: True
input: [3], [2, 1]
output: False
"""
if len(row_groups) > len(nums):
return False
    # if row_groups is shorter than nums,
# we can try "fitting it in" in multiple positions
start_positions_to_try = range(len(nums) - len(row_groups) + 1)
for start_pos in start_positions_to_try:
pairs_to_compare = list(zip(row_groups, nums[start_pos:]))
# print(pairs_to_compare)
can_fit = all(
row_g_num <= constraint_num
for row_g_num, constraint_num in pairs_to_compare
)
if can_fit:
return True
# we haven't been able to fit it anywhere
return False
def row_is_complete(row):
no_nones = all(cell is not None for cell in row)
return no_nones
def board_is_complete(board):
all_cells = chain(*board)
no_nones = all(cell is not None for cell in all_cells)
return no_nones
def row_is_valid(row, nums):
# isn't perfect. Errs on the side of being valid
row_groups = get_row_groups(row)
if row_is_complete(row):
return row_groups == nums
return row_groups_can_fit_in_nums(row_groups, nums)
def is_valid(board, top_nums, side_nums):
rows = board
cols = list(list(x) for x in zip(*board))
assert len(rows) == len(top_nums) == len(side_nums) == len(cols)
for row, nums in zip(rows, side_nums):
if not row_is_valid(row, nums):
# print(f"bad row! {row}")
return False
for col, nums in zip(cols, top_nums):
if not row_is_valid(col, nums):
# print(f"bad col! {col}, {nums}")
return False
return True
def solve(top_nums, side_nums, board_size=5):
"""Return a list of lists representing the solution to the puzzle.
puzzles are board_size x board_size
each board element will be either:
- None (we don't know yet)
- True (click it)
- False (don't click it)
"""
start_time = time()
print(f"Starting solve.")
empty_board = [[None for _ in range(board_size)] for _ in range(board_size)]
queue = [empty_board]
while True:
if not queue:
raise ValueError(f"Unable to find a solution.")
board = queue.pop(0)
# check if we have a solution:
if board_is_complete(board):
run_time = round(time() - start_time, 3)
print(f"Finsihed solve in {run_time} seconds.")
return board
new_boards = get_new_boards(board)
new_valid_boards = [b for b in new_boards if is_valid(b, top_nums, side_nums)]
queue.extend(new_valid_boards)
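# Illustrative usage (not part of the original file): a trivial 2x2 puzzle in
# which every row and every column is a single block of length 2, so the only
# valid board is completely filled.
if __name__ == "__main__":
    demo_top_nums = [[2], [2]]   # column constraints, left to right
    demo_side_nums = [[2], [2]]  # row constraints, top to bottom
    demo_solution = solve(demo_top_nums, demo_side_nums, board_size=2)
    assert demo_solution == [[True, True], [True, True]]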
| 3,748 |
modules/convert_db.py
|
icreator/icreatorSite
| 0 |
2023143
|
#!/usr/bin/env python
# coding: utf8
#import common
#common.not_is_local(None, True)
#from gluon import *
from gluon.dal import DAL, Field, Table
reserveds = ['MIN', 'MAX', 'ref', 'template', 'count', 'sum', 'counter', 'average', 'connect', 'desc', 'page', 'level' ]
#print db.tables
def to_mysql(db, db_old=None):
return 'action commented - modules/convert_db - to_mysql'
if not db_old:
db_old = DAL("sqlite://storage.sqlite",
            # this keyword builds the model on the fly on load
auto_import=True,
#folder="../../ipay_db/databases",
#folder="/databases",
#folder="../../ipay8/databases",
folder="applications/ipay8/databases",
)
    # tables that should not be copied
exept_copy = ['auth_user', 'auth_cas']
exept_truncate = ['auth_user', 'auth_group', 'auth_membership', 'auth_permission', 'auth_event', 'auth_cas', 'cat']
for table in db:
tn = table._tablename
if tn in exept_copy:
            print 'EXCEPTED:', table
continue
if tn not in exept_truncate:
print 'NOT TRUNCATE:', table
db[table].truncate()
print 'CONVERTing:', table
for r in db_old(db_old[table]).select():
#if tn == 'deals': print r
            # replace the reserved words
r_ = dict()
for f in r:
#print f
if f in reserveds:
r_[f] = r[f]
for f in r_:
r[f + '_'] = r_[f]
try:
db[table][0] = r
except Exception as e:
print e
    # this is run from the tools, so the commit happens automatically
def to7(db):
return
'''
db_old = DAL("sqlite://storage.sqlite",
#pool_size=1,
#check_reserved=['all'],
        # this keyword builds the model on the fly on load
auto_import=True,
folder="../../ipay_db/databases")
'''
db_old = db
#import json
print '\nconvert 6 to 7'
for r in db(db.deals).select():
r.used = True
r.update_record()
dealer = db.dealers[1]
print dealer.name
for deal in db(db.deals).select():
db.dealer_deals.insert(
dealer_id = dealer.id,
deal_id = deal.id,
used = True,
scid = deal.scid,
template = deal.template,
grab_form = deal.grab_form,
p2p = deal.p2p,
tax = deal.dealer_tax,
)
db.commit()
print 'end'
def from5(db):
return
db_old = DAL("sqlite://storage.sqlite",
#pool_size=1,
#check_reserved=['all'],
        # this keyword builds the model on the fly on load
auto_import=True,
folder="../../ipay5-m/databases")
db_new = DAL("sqlite://storage.sqlite",
#pool_size=1,
#check_reserved=['all'],
        # this keyword builds the model on the fly on load
auto_import=True,
folder="../../ipay6-a/databases")
import json
print '\nimport 5 to 6'
for xcurr in db(db.xcurrs).select():
pass
deal = db(db.deals.name=='to phone +7 RUBs').select().first()
if not deal: return 'not deal "to phone +7 RUBs"'
print "for deal:", deal
for rec in db_old(db_old.to_phone).select():
        # find the records that have not been imported yet
#
acc = db((db.deal_accs.deal_id==deal.id)
& ( db.deal_accs.acc==rec.phone)).select().first()
#print acc
#continue
if acc:
acc_id = acc.id
else:
print 'insert deal_acc', rec.phone
acc_id = db.deal_accs.insert( deal_id = deal.id, acc = rec.phone )
acc_addr = db((db.deal_acc_addrs.deal_acc_id==acc_id)
& (db.deal_acc_addrs.addr==rec.wallet)
& (db.deal_acc_addrs.xcurr_id==rec.xcurr_id)).select().first()
if acc_addr: continue
print 'insert acc_addr ',rec.xcurr_id, rec.wallet
db.deal_acc_addrs.insert(deal_acc_id=acc_id,
addr=rec.wallet, xcurr_id=rec.xcurr_id,
incomed=rec.unspent, converted=rec.unspent)
    ####### now the payments
#for p_in in db_old(db_old.payments).select():
db_old.close()
if __name__ == "__main__":
#to7(db)
db.commit()
db.close()
| 4,495 |
yggdrasil/drivers/tests/test_RModelDriver.py
|
Xyzic/yggdrasil
| 0 |
2023601
|
import numpy as np
import pandas as pd
from collections import OrderedDict
import yggdrasil.drivers.tests.test_InterpretedModelDriver as parent
class TestRModelParam(parent.TestInterpretedModelParam):
r"""Test parameters for RModelDriver."""
driver = "RModelDriver"
@property
def inst_kwargs(self):
r"""dict: Keyword arguments for creating a class instance."""
out = super(TestRModelParam, self).inst_kwargs
out.setdefault('interpreter_flags', ['--vanilla'])
return out
class TestRModelDriverNoInit(TestRModelParam,
parent.TestInterpretedModelDriverNoInit):
r"""Test runner for RModelDriver without init."""
def test_is_library_installed(self):
r"""Test is_library_installed for invalid library."""
self.assert_equal(
self.import_cls.is_library_installed('invalid_unicorn'),
False)
def test_python2language(self):
r"""Test python2language."""
test_vars = [(np.string_('hello'), 'hello'),
((np.string_('hello'), ), ('hello', )),
([np.string_('hello')], ['hello']),
({np.string_('hello'): np.string_('hello')},
{'hello': 'hello'}),
(OrderedDict([(np.string_('hello'), np.string_('hello'))]),
OrderedDict([('hello', 'hello')]))]
test_vars.append((
pd.DataFrame.from_dict({'a': np.zeros(5, dtype='int64')}),
pd.DataFrame.from_dict({'a': np.zeros(5, dtype='int32')})))
for a, b in test_vars:
self.assert_equal(self.import_cls.python2language(a), b)
class TestRModelDriverNoStart(TestRModelParam,
parent.TestInterpretedModelDriverNoStart):
r"""Test runner for RModelDriver without start."""
pass
class TestRModelDriver(TestRModelParam,
parent.TestInterpretedModelDriver):
r"""Test runner for RModelDriver."""
pass
| 2,014 |
test.py
|
ethinallen/supremeShopper
| 4 |
2023857
|
## this is where i test all of my shit code
## before I put it in my other shit code
# from selenium import webdriver
#
# PROXY = '172.16.31.10:19006' # IP:PORT
#
# chrome_options = webdriver.Options()
# chrome_options.add_argument('--proxy-server=http://%s' % PROXY)
#
# chrome = webdriver.Chrome(chrome_options=chrome_options, executable_path='/users/drew/Projects/drivers/chromedriver73/chromedriver' )
# chrome.get("http://whatismyipaddress.com")
#################
# I am an idiot hahahahahahaha
# import json
#
# with open('drew.drew') as data:
# data = json.load(data)
#
# print(data['productType'])
#################
# import checkStock
#
# destinations = checkStock.main()
#################
import csv
import threading
proxies = []
with open('/users/drew/Projects/proxies/proxies.csv') as f:
f = csv.reader(f)
for elem in f:
for proxy in elem:
proxies.append(proxy)
# give us our proxies
list = [proxy for proxy in proxies]
def getThread(list):
| 988 |
source/autoSettingsUtils/autoSettings.py
|
XLTechie/nvdaTests
| 2 |
2022883
|
# -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2019 NV Access Limited
"""autoSettings for add-ons"""
from abc import abstractmethod
from copy import deepcopy
from typing import Dict, Type, Any, Iterable
import config
from autoSettingsUtils.utils import paramToPercent, percentToParam, UnsupportedConfigParameterError
from baseObject import AutoPropertyObject
from logHandler import log
from .driverSetting import DriverSetting
SupportedSettingType: Type = Iterable[DriverSetting]
class AutoSettings(AutoPropertyObject):
""" An AutoSettings instance is used to simplify the load/save of user config for NVDA extensions
(Synth drivers, braille drivers, vision providers) and make it possible to automatically provide a
standard GUI for these settings.
Derived classes must implement:
- getId
- getDisplayName
- _get_supportedSettings
"""
def __init__(self):
"""Perform any initialisation
@note: registers with the config save action extension point
"""
super().__init__()
self._registerConfigSaveAction()
def __del__(self):
self._unregisterConfigSaveAction()
def _registerConfigSaveAction(self):
""" Overrideable pre_configSave registration
"""
log.debug(f"registering pre_configSave action: {self.__class__!r}")
config.pre_configSave.register(self.saveSettings)
def _unregisterConfigSaveAction(self):
""" Overrideable pre_configSave de-registration
"""
config.pre_configSave.unregister(self.saveSettings)
@classmethod
@abstractmethod
def getId(cls) -> str:
"""
@return: Application friendly name, should be globally unique, however since this is used in the config file
human readable is also beneficial.
"""
...
@classmethod
@abstractmethod
def getDisplayName(cls) -> str:
"""
@return: The translated name for this collection of settings. This is for use in the GUI to represent the
group of these settings.
"""
...
@classmethod
@abstractmethod
def _getConfigSection(cls) -> str:
"""
@return: The section of the config that these settings belong in.
"""
...
@classmethod
def _initSpecificSettings(
cls,
clsOrInst: Any,
settings: SupportedSettingType
) -> None:
section = cls._getConfigSection()
settingsId = cls.getId()
firstLoad = not config.conf[section].isSet(settingsId)
if firstLoad:
# Create the new section.
config.conf[section][settingsId] = {}
# Make sure the config spec is up to date, so the config validator does its work.
config.conf[section][settingsId].spec.update(
cls._getConfigSpecForSettings(settings)
)
# Make sure the clsOrInst has attributes for every setting
for setting in settings:
if not hasattr(clsOrInst, setting.id):
setattr(clsOrInst, setting.id, setting.defaultVal)
if firstLoad:
cls._saveSpecificSettings(clsOrInst, settings) # save defaults
else:
cls._loadSpecificSettings(clsOrInst, settings)
def initSettings(self):
"""Initializes the configuration for this AutoSettings instance.
This method is called when initializing the AutoSettings instance.
"""
self._initSpecificSettings(self, self.supportedSettings)
#: Typing for auto property L{_get_supportedSettings}
supportedSettings: SupportedSettingType
# make supportedSettings an abstract property
_abstract_supportedSettings = True
def _get_supportedSettings(self) -> SupportedSettingType:
"""The settings supported by the AutoSettings instance. Abstract.
"""
return []
def isSupported(self, settingID) -> bool:
"""Checks whether given setting is supported by the AutoSettings instance.
"""
for s in self.supportedSettings:
if s.id == settingID:
return True
return False
@classmethod
def _getConfigSpecForSettings(
cls,
settings: SupportedSettingType
) -> Dict:
section = cls._getConfigSection()
spec = deepcopy(config.confspec[section]["__many__"])
for setting in settings:
if not setting.useConfig:
continue
spec[setting.id] = setting.configSpec
return spec
def getConfigSpec(self):
return self._getConfigSpecForSettings(self.supportedSettings)
@classmethod
def _saveSpecificSettings(
cls,
clsOrInst: Any,
settings: SupportedSettingType
) -> None:
"""
Save values for settings to config.
The values from the attributes of `clsOrInst` that match the `id` of each setting are saved to config.
@param clsOrInst: Destination for the values.
@param settings: The settings to load.
"""
section = cls._getConfigSection()
settingsId = cls.getId()
conf = config.conf[section][settingsId]
for setting in settings:
if not setting.useConfig:
continue
try:
conf[setting.id] = getattr(clsOrInst, setting.id)
except UnsupportedConfigParameterError:
log.debugWarning(
f"Unsupported setting {setting.id!r}; ignoring",
exc_info=True
)
continue
if settings:
log.debug(f"Saved settings for {cls.__qualname__}")
def saveSettings(self):
"""
Saves the current settings for the AutoSettings instance to the configuration.
This method is also executed when the AutoSettings instance is loaded for the first time,
		in order to populate the configuration with the initial settings.
"""
self._saveSpecificSettings(self, self.supportedSettings)
@classmethod
def _loadSpecificSettings(
cls,
clsOrInst: Any,
settings: SupportedSettingType,
onlyChanged: bool = False
) -> None:
"""
Load settings from config, set them on `clsOrInst`.
@param clsOrInst: Destination for the values.
@param settings: The settings to load.
@param onlyChanged: When True, only settings that no longer match the config are set.
@note: attributes are set on clsOrInst using setattr.
The id of each setting in `settings` is used as the attribute name.
"""
section = cls._getConfigSection()
settingsID = cls.getId()
log.debug(f"loading {section} {settingsID}")
conf = config.conf[section][settingsID]
for setting in settings:
if not setting.useConfig or conf.get(setting.id) is None:
continue
val = conf[setting.id]
if onlyChanged and getattr(clsOrInst, setting.id) == val:
continue
try:
setattr(clsOrInst, setting.id, val)
except UnsupportedConfigParameterError:
log.debugWarning(
f"Unsupported setting {setting.id!r}; ignoring",
exc_info=True
)
continue
if settings:
log.debug(
f"Loaded changed settings for {cls.__qualname__}"
if onlyChanged else
f"Loaded settings for {cls.__qualname__}"
)
def loadSettings(self, onlyChanged: bool = False):
"""
Loads settings for this AutoSettings instance from the configuration.
		This method assumes that the instance has attributes or properties
corresponding with the name of every setting in L{supportedSettings}.
@param onlyChanged: When loading settings, only apply those for which
the value in the configuration differs from the current value.
"""
self._loadSpecificSettings(self, self.supportedSettings, onlyChanged)
@classmethod
def _paramToPercent(cls, current: int, min: int, max: int) -> int:
"""Convert a raw parameter value to a percentage given the current, minimum and maximum raw values.
@param current: The current value.
@param min: The minimum value.
@param max: The maximum value.
"""
return paramToPercent(current, min, max)
@classmethod
def _percentToParam(cls, percent: int, min: int, max: int) -> int:
"""Convert a percentage to a raw parameter value given the current percentage and the minimum and maximum
raw parameter values.
@param percent: The current percentage.
@param min: The minimum raw parameter value.
@param max: The maximum raw parameter value.
"""
return percentToParam(percent, min, max)
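# Illustrative subclass sketch (not part of the original file); it only fills in
# the abstract members documented in the class docstring above and keeps the
# settings list empty, where a real add-on would return DriverSetting instances.
# class ExampleSettings(AutoSettings):
# 	@classmethod
# 	def getId(cls) -> str:
# 		return "exampleSettings"
# 	@classmethod
# 	def getDisplayName(cls) -> str:
# 		return "Example settings"
# 	@classmethod
# 	def _getConfigSection(cls) -> str:
# 		return "exampleSection"
# 	def _get_supportedSettings(self) -> SupportedSettingType:
# 		return []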
| 8,054 |
datasette/cli.py
|
macropin/datasette
| 0 |
2023392
|
import click
from click_default_group import DefaultGroup
import json
import shutil
from subprocess import call
import sys
from .app import Datasette
from .utils import (
temporary_docker_directory,
)
@click.group(cls=DefaultGroup, default='serve', default_if_no_args=True)
def cli():
"""
Datasette!
"""
@cli.command()
@click.argument('files', type=click.Path(exists=True), nargs=-1)
@click.option('--inspect-file', default='inspect-data.json')
def build(files, inspect_file):
app = Datasette(files)
open(inspect_file, 'w').write(json.dumps(app.inspect(), indent=2))
@cli.command()
@click.argument('publisher', type=click.Choice(['now']))
@click.argument('files', type=click.Path(exists=True), nargs=-1)
@click.option(
'-n', '--name', default='datasette',
help='Application name to use when deploying to Now'
)
@click.option(
'-m', '--metadata', type=click.File(mode='r'),
help='Path to JSON file containing metadata to publish'
)
@click.option('--extra-options', help='Extra options to pass to datasette serve')
@click.option('--force', is_flag=True, help='Pass --force option to now')
def publish(publisher, files, name, metadata, extra_options, force):
"""
Publish specified SQLite database files to the internet along with a datasette API.
Only current option for PUBLISHER is 'now'. You must have Zeit Now installed:
https://zeit.co/now
Example usage: datasette publish now my-database.db
"""
if not shutil.which('now'):
click.secho(
' The publish command requires "now" to be installed and configured ',
bg='red',
fg='white',
bold=True,
err=True,
)
click.echo('Follow the instructions at https://zeit.co/now#whats-now', err=True)
sys.exit(1)
with temporary_docker_directory(files, name, metadata, extra_options):
if force:
call(['now', '--force'])
else:
call('now')
@cli.command()
@click.argument('files', type=click.Path(exists=True), nargs=-1, required=True)
@click.option(
'-t', '--tag',
help='Name for the resulting Docker container, can optionally use name:tag format'
)
@click.option(
'-m', '--metadata', type=click.File(mode='r'),
help='Path to JSON file containing metadata to publish'
)
@click.option('--extra-options', help='Extra options to pass to datasette serve')
def package(files, tag, metadata, extra_options):
"Package specified SQLite files into a new datasette Docker container"
if not shutil.which('docker'):
click.secho(
' The package command requires "docker" to be installed and configured ',
bg='red',
fg='white',
bold=True,
err=True,
)
sys.exit(1)
with temporary_docker_directory(files, 'datasette', metadata, extra_options):
args = ['docker', 'build']
if tag:
args.append('-t')
args.append(tag)
args.append('.')
call(args)
@cli.command()
@click.argument('files', type=click.Path(exists=True), nargs=-1)
@click.option('-h', '--host', default='0.0.0.0', help='host for server, defaults to 0.0.0.0')
@click.option('-p', '--port', default=8001, help='port for server, defaults to 8001')
@click.option('--debug', is_flag=True, help='Enable debug mode - useful for development')
@click.option('--reload', is_flag=True, help='Automatically reload if code change detected - useful for development')
@click.option('--cors', is_flag=True, help='Enable CORS by serving Access-Control-Allow-Origin: *')
@click.option('--page_size', default=100, help='Page size - default is 100')
@click.option('--max_returned_rows', default=1000, help='Max allowed rows to return at once - default is 1000. Set to 0 to disable check entirely.')
@click.option('--sql_time_limit_ms', default=1000, help='Max time allowed for SQL queries in ms')
@click.option('--inspect-file', help='Path to JSON file created using "datasette build"')
@click.option('-m', '--metadata', type=click.File(mode='r'), help='Path to JSON file containing license/source metadata')
def serve(files, host, port, debug, reload, cors, page_size, max_returned_rows, sql_time_limit_ms, inspect_file, metadata):
"""Serve up specified SQLite database files with a web UI"""
if reload:
import hupper
hupper.start_reloader('datasette.cli.serve')
inspect_data = None
if inspect_file:
inspect_data = json.load(open(inspect_file))
metadata_data = None
if metadata:
metadata_data = json.loads(metadata.read())
click.echo('Serve! files={} on port {}'.format(files, port))
ds = Datasette(
files,
cache_headers=not debug and not reload,
cors=cors,
page_size=page_size,
max_returned_rows=max_returned_rows,
sql_time_limit_ms=sql_time_limit_ms,
inspect_data=inspect_data,
metadata=metadata_data,
)
# Force initial hashing/table counting
ds.inspect()
ds.app().run(host=host, port=port, debug=debug)
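# Illustrative invocations (not part of the original file), using only the
# options defined above:
#   datasette serve mydb.db --port 8001 --cors
#   datasette package mydb.db -t mydb:latest
#   datasette publish now mydb.db -n my-datasette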
| 5,090 |
zxtaputils/tapify.py
|
weiju/zxtaputils
| 1 |
2022803
|
#!/usr/bin/env python3
import struct
from .util import BT_PROGRAM, BT_NUM_ARRAY, BT_CHAR_ARRAY, BT_BINARY, compute_checksum
from .tapinfo import ZXHeader, ZXData
"""
tapify.py - Put the specified file into a TAP file
"""
def type_byte(objtype):
if objtype == 'program':
return BT_PROGRAM
elif objtype == 'code':
return BT_BINARY
elif objtype == 'nums':
return BT_NUM_ARRAY
elif objtype == 'chars':
return BT_CHAR_ARRAY
return None
def make_block_parameters(args, data_bytes):
if args.objtype in ["nums", "chars"]: # array data
return [args.varname[0], 0x8000]
elif args.objtype == "program":
return [args.autostart_line, len(data_bytes)]
elif args.objtype == 'code':
return [args.startaddr, 0x8000]
def tapify(args):
with open(args.infile, "rb") as infile:
data_bytes = infile.read()
filename = args.filename
data_size = len(data_bytes)
parameters = make_block_parameters(args, data_bytes)
zxheader = ZXHeader(type_byte(args.objtype), args.filename, data_size, parameters)
zxdata = ZXData(data_bytes)
header_bytes = zxheader.bytes()
dblock_bytes = zxdata.bytes()
with open(args.outfile, "wb") as outfile:
# write header (2 + 19 bytes)
outfile.write(struct.pack('<H', len(header_bytes))) # size word
outfile.write(header_bytes)
        # write the data block (2 + len(data_bytes) + 2 bytes)
outfile.write(struct.pack('<H', len(dblock_bytes))) # size word
outfile.write(dblock_bytes)
print("done")
| 1,586 |
question_6#alt/question_6.py
|
imApoorva36/12thPracticals
| 3 |
2022785
|
<<<<<<< HEAD
# Create random numbers between any two values and add them to a list of fixed size (say 5)
import random
#generate list of random numbers
def random_list(size, min, max):
lst = []
for i in range(size):
lst.append(random.randint(min, max))
return lst
x = random_list(5, 1, 100)
# the list of random numbers
print("The list of random numbers is:")
print(x)
l = int(input("Enter a number you would like to insert"))
# enter the index to be inserted in x
i = int(input("Enter the index to be inserted"))
# insert the number in the list
x.insert(i, l)
print("The list after insertion is:")
print(x)
# Would you like to delete a number from the list?
y = input("Would you like to delete a number from the list? (y/n)")
if y == "y":
# enter the index to be deleted
j = int(input("Enter the index to be deleted"))
# delete the number from the list
x.pop(j)
print("The list after deletion is:")
print(x)
else:
print("Thank you for using the program")
=======
def generate(n=5):
import random
a=int(input("Enter base number : "))
b=int(input("Enter ceiling number : "))
for i in range(0,n) :
x=round(a+(b-a)*random.random(),2)
list1.append(x)
print(list1)
global val
val=float(input("Enter value to be removed: "))
temp(val)
def update(pos,num) :
list1.insert(pos-1,num)
print(list1)
def temp(val) :
list1.remove(val)
print(list1)
global num
global pos
num=int(input("Enter value to be inserted : "))
pos=int(input("Enter position from start of previous value (1 onward) : "))
update(pos,num)
n=int(input("Enter length of list : "))
list1=[]
generate(n)
>>>>>>> 964a130e5215f229fac07a4e9133df869309fe82
| 1,755 |
tests/equipment/shot702_controller.py
|
MSLNZ/pr-single-photons
| 0 |
2022830
|
"""
Test that photons/equipment/shot702_controller.py is working properly.
"""
from time import sleep
import connect
app, dev = connect.device('wheel-cv', 'Is it safe to control the ND filter wheel?')
assert dev.is_moving() is False
dev.home(wait=False)
while True:
position, is_moving = dev.status()
if not is_moving:
break
degrees = dev.position_to_degrees(position)
app.logger.info(f'at {degrees} degrees [Encoder: {position}]')
sleep(0.1)
dev.set_angle(5.12)
assert dev.get_angle() == 5.12
two_pi = dev.NUM_PULSES_PER_360_DEGREES
assert dev.position_to_degrees(0, bound=True) == 0.
assert dev.position_to_degrees(-0, bound=True) == 0.
assert dev.position_to_degrees(two_pi / 16., bound=True) == 22.5
assert dev.position_to_degrees(-two_pi / 16., bound=True) == 337.5
assert dev.position_to_degrees(two_pi / 8., bound=True) == 45.
assert dev.position_to_degrees(-two_pi / 8., bound=True) == 315.
assert dev.position_to_degrees(two_pi / 6., bound=True) == 60.
assert dev.position_to_degrees(-two_pi / 6., bound=True) == 300.
assert dev.position_to_degrees(two_pi / 4., bound=True) == 90.
assert dev.position_to_degrees(-two_pi / 4., bound=True) == 270.
assert dev.position_to_degrees(two_pi / 2., bound=True) == 180.
assert dev.position_to_degrees(-two_pi / 2., bound=True) == 180.
assert dev.position_to_degrees(two_pi, bound=True) == 0.
assert dev.position_to_degrees(-two_pi, bound=True) == 0.
assert dev.position_to_degrees(3 * two_pi / 2., bound=True) == 180.
assert dev.position_to_degrees(-3 * two_pi / 2., bound=True) == 180.
assert dev.position_to_degrees(3 * two_pi / 2.) == 540.
assert dev.position_to_degrees(-3 * two_pi / 2.) == -540.
assert dev.position_to_degrees(10 * two_pi, bound=True) == 0.
assert dev.position_to_degrees(two_pi - 1, bound=True) == 359.9975
assert dev.position_to_degrees(-two_pi + 1, bound=True) == 0.0025
assert dev.position_to_degrees(two_pi + 1, bound=False) == 360.0025
app.disconnect_equipment()
| 1,976 |
tests/async_client.py
|
seaeast/momoko
| 1 |
2024024
|
#!/usr/bin/env python
import sys
import unittest
import tornado.ioloop
import tornado.testing
import momoko
import settings
class AsyncClientTest(tornado.testing.AsyncTestCase):
"""``AsyncClient`` tests.
"""
def setUp(self):
super(AsyncClientTest, self).setUp()
self.db = momoko.AsyncClient({
'host': settings.host,
'port': settings.port,
'database': settings.database,
'user': settings.user,
'password': <PASSWORD>,
'min_conn': settings.min_conn,
'max_conn': settings.max_conn,
'cleanup_timeout': settings.cleanup_timeout,
'ioloop': self.io_loop
})
def tearDown(self):
super(AsyncClientTest, self).tearDown()
def test_single_query(self):
"""Test executing a single SQL query.
"""
self.db.execute('SELECT 42, 12, 40, 11;', callback=self.stop)
cursor = self.wait()
self.assertEqual(cursor.fetchall(), [(42, 12, 40, 11)])
def test_batch_query(self):
"""Test executing a batch query.
"""
input = {
'query1': ['SELECT 42, 12, %s, %s;', (23, 56)],
'query2': 'SELECT 1, 2, 3, 4, 5;',
'query3': 'SELECT 465767, 4567, 3454;'
}
expected = {
'query1': [(42, 12, 23, 56)],
'query2': [(1, 2, 3, 4, 5)],
'query3': [(465767, 4567, 3454)]
}
self.db.batch(input, callback=self.stop)
cursors = self.wait()
for key, cursor in cursors.items():
self.assertEqual(cursor.fetchall(), expected[key])
def test_chain_query(self):
"""Test executing a chain query.
"""
input = (
['SELECT 42, 12, %s, 11;', (23,)],
'SELECT 1, 2, 3, 4, 5;'
)
expected = (
[(42, 12, 23, 11)],
[(1, 2, 3, 4, 5)]
)
self.db.chain(input, callback=self.stop)
cursors = self.wait()
for index, cursor in enumerate(cursors):
self.assertEqual(cursor.fetchall(), expected[index])
if __name__ == '__main__':
unittest.main()
| 2,180 |
06_calc_derivatives.py
|
stevenshave/microdialysis
| 1 |
2024176
|
"""
Differentiate redconc, whiteconc, pt
Simulate the rate of change of redconc, whiteconc and pts, also KD with
respect to these values. Requires the autograd package. We redefine all the
equations present in microdialysis_equations.py so that sqrt is supplied by the
autograd package.
"""
import sys
from autograd import grad
from autograd.numpy import sqrt
t0 = 80.0
l0 = 50.0
redvol = 100.0
whitevol = 300.0
pc = 1.0
KDS_to_simulate = [1., 100., 200., 300., 400., 500.]
def qud_lred(t0: float, l0: float, kdtl: float, redvol: float, whitevol: float, pc: float):
"""Calculate the compound concentration in the red chamber in a partially equlibrated system
Args:
t0 (float): Target concentration (in the red chamber)
l0 (float): Ligand concentration, over the entire volume of red and white chambers when fully equilibrated.
kdtl (float): Kd of target-ligand interaction
redvol (float): Volume of the red chamber
whitevol (float): Volume of the white chamber
pc (float): Pc - Ligand partition coefficient in the absence of protein (control)
Returns:
float: Ligand concentration in the red chamber
"""
return (2*l0*pc**2*redvol**2 + l0*pc*redvol*whitevol +
kdtl*pc*redvol*whitevol + 2*l0*pc**2*redvol*whitevol + pc*t0*redvol*whitevol +
kdtl*whitevol**2 + l0*pc*whitevol**2 -
sqrt((-2*l0*pc**2*redvol**2 - l0*pc*redvol*whitevol - kdtl*pc*redvol*whitevol -
2*l0*pc**2*redvol*whitevol - pc*t0*redvol*whitevol - kdtl*whitevol**2 -
l0*pc*whitevol**2)**2 -
4*(pc**2*redvol**2 + pc*redvol*whitevol) *
(l0**2*pc**2*redvol**2 + l0*kdtl*pc*redvol*whitevol +
2*l0**2*pc**2*redvol*whitevol + l0*pc*t0*redvol*whitevol +
l0*kdtl*pc*whitevol**2 + l0**2*pc**2*whitevol**2 + l0*pc*t0*whitevol**2)))/(2.*(pc**2*redvol**2 + pc*redvol*whitevol))
def qud_lwhite(t0: float, l0: float, kdtl: float, redvol: float, whitevol: float, pc: float):
"""Calculate the compound concentration in the white chamber in a partially equlibrated system
Args:
t0 (float): Target concentration (in the red chamber)
l0 (float): Ligand concentration, over the entire volume of red and white chambers when fully equilibrated.
kdtl (float): Kd of target-ligand interaction
redvol (float): Volume of the red chamber
whitevol (float): Volume of the white chamber
pc (float): Pc - Ligand partition coefficient in the absence of protein (control)
Returns:
float: Ligand concentration in the white chamber
"""
return (l0*pc*redvol - kdtl*pc*redvol - pc*t0*redvol -
kdtl*whitevol + l0*pc*whitevol +
sqrt(-4*(-(l0*kdtl*redvol) - l0*kdtl*whitevol)*(pc**2*redvol + pc*whitevol) +
(-(l0*pc*redvol) + kdtl*pc*redvol + pc*t0*redvol + kdtl*whitevol -
l0*pc*whitevol)**2))/(2.*(pc**2*redvol + pc*whitevol))
def qud_pt(t0: float, l0: float, kdtl: float, redvol: float, whitevol: float, pc: float):
"""Calculate the pt value in a partially equlibrated system
Args:
t0 (float): Target concentration (in the red chamber)
l0 (float): Ligand concentration, over the entire volume of red and white chambers when fully equilibrated.
kdtl (float): Kd of target-ligand interaction
redvol (float): Volume of the red chamber
whitevol (float): Volume of the white chamber
pc (float): Pc - Ligand partition coefficient in the absence of protein (control)
Returns:
float: pt value
"""
return (kdtl*pc*redvol - l0*pc*redvol + pc*redvol*t0 - kdtl*whitevol - l0*pc*whitevol + sqrt((-(kdtl*pc*redvol) + l0*pc*redvol - pc*redvol*t0 + kdtl*whitevol + l0*pc*whitevol)**2 - 4*kdtl*redvol*(-(l0*pc**2*redvol) - kdtl*pc*whitevol - l0*pc**2*whitevol - pc*t0*whitevol)))/(2.*kdtl*redvol)
def qud_Kd_from_pt(pt: float, t0: float, l0: float, redvol: float, whitevol: float, pc: float):
"""Calculate the protein-ligand interaction Kd from Pt in a partially equilibrated system
Args:
        pt (float): Pt value (lred/lwhite) - ligand partition coefficient in the presence of protein
t0 (float): Target concentration (in the red chamber)
l0 (float): Ligand concentration, over the entire volume of red and white chambers when fully equilibrated.
redvol (float): Volume of the red chamber
whitevol (float): Volume of the white chamber
pc (float): Pc - Ligand partition coefficient in the absence of protein (control)
Returns:
float: Kd of the target-ligand interaction
"""
return (-(l0*pc**2*redvol) + l0*pc*pt*redvol - pc*t0*pt*redvol -
l0*pc**2*whitevol - pc*t0*whitevol + l0*pc*pt*whitevol)/((pc - pt)*(pt*redvol + whitevol))
def qud_Kd_from_lred(lred: float, t0: float, l0: float, redvol: float, whitevol: float, pc: float):
"""Calculate the protein-ligand interaction Kd from ligand in red chamber in a partially equilibrated system
Args:
lred (float): Ligand concentration in the red chamber
t0 (float): Target concentration (in the red chamber)
l0 (float): Ligand concentration, over the entire volume of red and white chambers when fully equilibrated.
redvol (float): Volume of the red chamber
whitevol (float): Volume of the white chamber
pc (float): Pc - Ligand partition coefficient in the absence of protein (control)
Returns:
float: Kd of the target-ligand interaction
"""
return ((-l0 ** 2)*pc ** 2*redvol ** 2 + 2*l0*lred*pc ** 2*redvol ** 2 - lred ** 2*pc ** 2*redvol ** 2 +
l0*lred*pc*redvol*whitevol - lred ** 2*pc*redvol*whitevol - 2*l0 ** 2*pc ** 2*redvol*whitevol +
2*l0*lred*pc ** 2*redvol*whitevol - l0*pc*redvol*t0*whitevol + lred*pc*redvol*t0*whitevol + l0*lred*pc*whitevol ** 2 -
l0 ** 2*pc ** 2*whitevol ** 2 - l0*pc*t0*whitevol ** 2)/(whitevol*(l0*pc*redvol - lred*pc*redvol -
lred*whitevol + l0*pc*whitevol))
def qud_Kd_from_lwhite(lwhite: float, t0: float, l0: float, redvol: float, whitevol: float, pc: float):
"""Calculate the protein-ligand interaction Kd from ligand in white chamber in a partially equilibrated system
Args:
lwhite (float): Ligand concentration in the white chamber
t0 (float): Target concentration (in the red chamber)
l0 (float): Ligand concentration, over the entire volume of red and white chambers when fully equilibrated.
redvol (float): Volume of the red chamber
whitevol (float): Volume of the white chamber
pc (float): Pc - Ligand partition coefficient in the absence of protein (control)
Returns:
float: Kd of the target-ligand interaction
"""
return -((lwhite*(l0*pc*redvol - lwhite*pc**2*redvol - pc*redvol*t0 + l0*pc*whitevol - lwhite*pc*whitevol))/(l0*redvol - lwhite*pc*redvol + l0*whitevol - lwhite*whitevol))
g_lred_wrt_kd = grad(lambda kd: qud_lred(t0,l0,kd,redvol,whitevol,1.0))
g_lwhite_wrt_kd = grad(lambda kd: qud_lwhite(t0,l0,kd,redvol,whitevol,1.0))
g_pt_wrt_kd = grad(lambda kd: qud_pt(t0,l0,kd,redvol,whitevol,1.0))
print(f"{'KD':>10},{'lred':>10},{'dlreddKD':>10},{'lwhite':>10},{'dlwhitedKD':>10},{'pt':>10},{'dlptdKD':>10}")
for kd in KDS_to_simulate:
lwhite = qud_lwhite(t0, l0, kd, redvol, whitevol, 1.0)
lred = qud_lred(t0, l0, kd, redvol, whitevol, 1.0)
pt = qud_pt(t0, l0, kd, redvol, whitevol, 1.0)
print(f"{kd:>10.0f},{lred:>10.4f},{g_lred_wrt_kd(kd):>10.4f},{lwhite:>10.4f},{g_lwhite_wrt_kd(kd):>10.4f},{pt:>10.4f},{g_pt_wrt_kd(kd):>10.4f}")
print()
g_kd_wrt_lred = grad(lambda lred: qud_Kd_from_lred(lred,t0,l0,redvol,whitevol,1.0))
g_kd_wrt_lwhite = grad(lambda lwhite: qud_Kd_from_lwhite(lwhite,t0,l0,redvol,whitevol,1.0))
g_kd_wrt_ptval = grad(lambda pt: qud_Kd_from_pt(pt,t0,l0,redvol,whitevol,1.0))
print(f"{'KD':>10},{'lred':>10},{'dKDdlred':>10},{'lwhite':>10},{'dlKDdlwhite':>10},{'pt':>10},{'dKDdlpt':>10}")
for kd in KDS_to_simulate:
lred = qud_lred(t0, l0, kd, redvol, whitevol, 1.0)
lwhite = qud_lwhite(t0, l0, kd, redvol, whitevol, 1.0)
pt = qud_pt(t0, l0, kd, redvol, whitevol, 1.0)
print(f"{kd:>10.0f},{lred:>10.4f},{g_kd_wrt_lred(lred):>10.4f},{lwhite:>10.4f},{g_kd_wrt_lwhite(lwhite):>10.4f},{pt:>10.4f},{g_kd_wrt_ptval(pt):>10.4f}")
| 8,532 |
reconstruction/filters/normals.py
|
mickare/Robust-Reconstruction-of-Watertight-3D-Models
| 6 |
2023150
|
from typing import Optional, Tuple
import numpy as np
from reconstruction.data.chunks import ChunkGrid
from reconstruction.mathlib import Vec3f
def make_normal_kernel(shape: Tuple[int, int, int] = (3, 3, 3)) -> np.ndarray:
assert len(shape) == 3
center = np.asanyarray(shape) // 2
normals = np.full((*shape, 3), 0, dtype=np.float32)
for i in np.ndindex(shape):
normals[i] = i - center
norm = np.linalg.norm(normals[i])
if norm > 0:
normals[i] /= norm
return normals
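# Illustrative check (not part of the original file): with the default 3x3x3
# shape every entry is a unit vector pointing away from the centre, and the
# centre entry itself stays the zero vector.
# kernel = make_normal_kernel()
# assert kernel.shape == (3, 3, 3, 3) and np.allclose(kernel[1, 1, 1], 0.0)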
def detect_normals(surface: ChunkGrid[np.bool8], outer: ChunkGrid[np.bool8],
normal_kernel: Optional[np.ndarray] = None):
if normal_kernel is None:
normal_kernel = make_normal_kernel()
# Method cache
__np_sum = np.sum
normal_pos = np.array(list(surface.where()))
normal_val = np.full((len(normal_pos), 3), 0.0, dtype=np.float32)
for n, p in enumerate(normal_pos):
x, y, z = p
mask: np.ndarray = outer[x - 1:x + 2, y - 1:y + 2, z - 1:z + 2]
normal_val[n] = __np_sum(normal_kernel[mask], axis=0)
normal_val = (normal_val.T / np.linalg.norm(normal_val, axis=1)).T
return normal_pos, normal_val
def grid_normals(surface: ChunkGrid[np.bool8], outer: ChunkGrid[np.bool8], normal_kernel: Optional[np.ndarray] = None) \
-> ChunkGrid[Vec3f]:
normal_pos, normal_val = detect_normals(surface, outer, normal_kernel)
normals: ChunkGrid[np.float32] = ChunkGrid(surface.chunk_size, np.dtype((np.float32, (3,))), 0.0)
normals[normal_pos] = normal_val
return normals
| 1,588 |
miradar_node/miradar_node/ppi_visualizer.py
|
QibiTechInc/miradar_ros2_pkgs
| 0 |
2022642
|
#!/usr/bin/env python
import rclpy
from rclpy.node import Node
from rclpy.qos import QoSProfile
from rcl_interfaces.srv import GetParameters
#import rospy
from miradar_msgs.msg import PPI, PPIData
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Point
#import dynamic_reconfigure.client
class PPIVisualizer(Node):
def __init__(self):
super().__init__("ppi_visualizer")
self.count = 0
qos_profile = QoSProfile ( depth = 10 )
self.pub = self.create_publisher(MarkerArray, "/miradar/markers", qos_profile)
self.sub = self.create_subscription(PPIData, "/miradar/ppidata", self.visualizePPI, qos_profile)
#self.pub = rospy.Publisher("/miradar/markers", MarkerArray, queue_size=20)
#self.sub = rospy.Subscriber("/miradar/ppidata", PPIData, self.visualizePPI)
#self.client = self.create_client(
# GetParameters,
# '{node_name}/get_parameters'.format_map(locals()))
# call as soon as ready
#ready = self.client.wait_for_service(timeout_sec=5.0)
#if not ready:
# raise RuntimeError('Wait for service timed out')
#self.future = None
#self.dynparam = None
#self.maxdb = -20
#self.mindb = -40
def sendRequest(self, node_name, parameter_names):
# create client
request = GetParameters.Request()
request.names = parameter_names
self.future = self.client.call_async(request)
def visualizePPI(self, data):
markerArraydel = MarkerArray()
marker = Marker()
marker.header.frame_id = "miradar"
marker.action = marker.DELETEALL
markerArraydel.markers.append(marker)
self.pub.publish(markerArraydel)
#cli = dynamic_reconfigure.client.Client("miradar_node")
#dynparam = cli.get_configuration()
"""
self.sendRequest("miradar_node", ["min_dB", "max_dB"])
if self.future.done():
try:
self.dynparam = self.future.result()
except:
self.client.get_logger().info("failed to get value")
else:
self.mindb = self.dynparam["min_dB"]
self.maxdb = self.dynparam["max_db"]
"""
mindb = -40
maxdb = -20
#self.mindb = self.dynparam["min_dB"]
#self.maxdb = self.dynparam["max_db"]
markerArray = MarkerArray()
#mindb = dynparam["min_dB"]
#maxdb = dynparam["max_dB"]
for i in range(len(data.data)):
marker = Marker()
marker.header.frame_id = "miradar"
marker.type = marker.SPHERE
marker.action = marker.ADD
marker.scale.x = 0.2
marker.scale.y = 0.2
marker.scale.z = 0.2
marker.color.a = 1.0
a = 1.0/(float(maxdb) - float(mindb))
b = - (float(mindb)/(float(maxdb) - float(mindb)))
print("a : {0}, b : {1}".format(a, b))
marker.color.r = data.data[i].db * a + b
marker.color.b = 1.0 - marker.color.r
marker.color.g = 0.0
marker.pose.orientation.w = 1.0
marker.pose.position = data.data[i].position
marker.id = i
markerArray.markers.append(marker)
self.pub.publish(markerArray)
def main():
rclpy.init()
node = PPIVisualizer()
#rospy.init_node("ppi_visualizer")
#ppiVisualizer = PPIVisualizer()
rclpy.spin(node)
node.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
| 3,651 |
setup.py
|
douglatornell/raspi_x10
| 0 |
2023746
|
"""
RaspberryPi X10 Home Automation
Copyright 2013 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from setuptools import setup
import __version__
python_classifiers = [
'Programming Language :: Python :: {0}'.format(py_version)
for py_version in ['3', '3.2', '3.3']]
other_classifiers = [
'Development Status :: ' + __version__.dev_status,
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: Implementation :: CPython',
'Operating System :: POSIX :: Linux',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Natural Language :: English',
'Topic :: Home Automation',
]
with open('README.rst', 'rt') as f:
long_description = f.read()
install_requires = [
# see requirements.txt for versions most recently used in development
'pyramid==1.5a3',
'pyramid_mako',
]
tests_require = [
'coverage',
'pytest',
'tox',
]
setup(
name='raspi_x10',
version=__version__.number + __version__.release,
description='Raspberry Pi X10 Home Automation',
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='https://bitbucket.org/douglatornell/raspi_x10',
license='Apache License, Version 2.0',
classifiers=python_classifiers + other_classifiers,
platforms=['Linux'],
packages=['raspi_x10'],
install_requires=install_requires,
tests_require=tests_require,
entry_points="""\
[paste.app_factory]
web_remote = raspi_x10.web_remote:main
""",
)
| 2,029 |
colight-master/baseline/network_agent_bk.py
|
utkachenko/Con-MATSCo
| 1 |
2023703
|
import numpy as np
from keras.layers import Input, Dense, Conv2D, Flatten, BatchNormalization, Activation, Multiply, Add
from keras.models import Model, model_from_json, load_model
from keras.optimizers import RMSprop
from keras.layers.core import Dropout
from keras.layers.pooling import MaxPooling2D
from keras import backend as K
import random
from keras.engine.topology import Layer
import os
from agent import Agent
class Selector(Layer):
def __init__(self, select, **kwargs):
super(Selector, self).__init__(**kwargs)
self.select = select
self.select_neuron = K.constant(value=self.select)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
super(Selector, self).build(input_shape) # Be sure to call this somewhere!
def call(self, x):
return K.cast(K.equal(x, self.select_neuron), dtype="float64")
def get_config(self):
config = {"select": self.select}
base_config = super(Selector, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
def conv2d_bn(input_layer, index_layer,
filters=16,
kernel_size=(3, 3),
strides=(1, 1)):
"""Utility function to apply conv + BN.
# Arguments
x: input tensor.
filters: filters in `Conv2D`.
num_row: height of the convolution kernel.
num_col: width of the convolution kernel.
padding: padding mode in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_conv'`
for the convolution and `name + '_bn'` for the
batch norm layer.
# Returns
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
if K.image_data_format() == 'channels_first':
bn_axis = 1
else:
bn_axis = 3
conv = Conv2D(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
use_bias=False,
name="conv{0}".format(index_layer))(input_layer)
bn = BatchNormalization(axis=bn_axis, scale=False, name="bn{0}".format(index_layer))(conv)
act = Activation('relu', name="act{0}".format(index_layer))(bn)
pooling = MaxPooling2D(pool_size=2)(act)
x = Dropout(0.3)(pooling)
return x
class NetworkAgent(Agent):
@staticmethod
def _unison_shuffled_copies(Xs, Y, sample_weight):
p = np.random.permutation(len(Y))
new_Xs = []
for x in Xs:
assert len(x) == len(Y)
new_Xs.append(x[p])
return new_Xs, Y[p], sample_weight[p]
@staticmethod
def _cnn_network_structure(img_features):
conv1 = conv2d_bn(img_features, 1, filters=32, kernel_size=(8, 8), strides=(4, 4))
conv2 = conv2d_bn(conv1, 2, filters=16, kernel_size=(4, 4), strides=(2, 2))
img_flatten = Flatten()(conv2)
return img_flatten
@staticmethod
def _shared_network_structure(state_features, dense_d):
hidden_1 = Dense(dense_d, activation="sigmoid", name="hidden_shared_1")(state_features)
return hidden_1
@staticmethod
def _separate_network_structure(state_features, dense_d, num_actions, memo=""):
hidden_1 = Dense(dense_d, activation="sigmoid", name="hidden_separate_branch_{0}_1".format(memo))(state_features)
q_values = Dense(num_actions, activation="linear", name="q_values_separate_branch_{0}".format(memo))(hidden_1)
return q_values
def load_network(self, file_name):
self.q_network = load_model(os.path.join(os.getcwd(), self.dic_path["PATH_TO_MODEL"], "{0}.h5".format(file_name)), custom_objects={"Selector": Selector})
print("succeed in loading model %s"%file_name)
def load_network_bar(self, file_name):
self.q_network_bar = load_model(os.path.join(os.getcwd(), self.dic_path["PATH_TO_MODEL"], "%s.h5" % file_name), custom_objects={"Selector": Selector})
print("succeed in loading model %s"%file_name)
def save_network(self, file_name):
self.q_network.save(os.path.join(self.dic_path["PATH_TO_MODEL"], "%s.h5" % file_name))
def save_network_bar(self, file_name):
self.q_network_bar.save(os.path.join(self.dic_path["PATH_TO_MODEL"], "%s.h5" % file_name))
def prepare_Xs_Y(self, sample_set):
NORMALIZATION_FACTOR = 20
# forget
ind_end = len(sample_set)
ind_sta = max(0, ind_end - self.dic_agent_conf["MAX_MEMORY_LEN"])
sample_set = sample_set[ind_sta: ind_end]
sample_size = min(self.dic_agent_conf["SAMPLE_SIZE"], len(sample_set))
print("memory samples number:", sample_size)
sample_slice = random.sample(sample_set, sample_size)
dic_state_feature_arrays = {}
for feature_name in self.dic_traffic_env_conf["LIST_STATE_FEATURE"]:
dic_state_feature_arrays[feature_name] = []
Y = []
for i in range(len(sample_slice)):
state, action, next_state, reward, instant_reward, _ = sample_slice[i]
for feature_name in self.dic_traffic_env_conf["LIST_STATE_FEATURE"]:
dic_state_feature_arrays[feature_name].append(state[feature_name])
_state = []
_next_state = []
for feature_name in self.dic_traffic_env_conf["LIST_STATE_FEATURE"]:
_state.append([state[feature_name]])
_next_state.append([next_state[feature_name]])
target = self.q_network.predict(_state)
next_state_qvalues = self.q_network_bar.predict(_next_state)
if self.dic_agent_conf["LOSS_FUNCTION"] == "mean_squared_error":
final_target = np.copy(target[0])
final_target[action] = reward/NORMALIZATION_FACTOR + self.dic_agent_conf["GAMMA"] * next_state_qvalues[0][action]
elif self.dic_agent_conf["LOSS_FUNCTION"] == "categorical_crossentropy":
raise NotImplementedError
Y.append(final_target)
self.Xs = [np.array(dic_state_feature_arrays[feature_name]) for feature_name in
self.dic_traffic_env_conf["LIST_STATE_FEATURE"]]
self.Y = np.array(Y)
def choose(self, count, if_pretrain):
''' choose the best action for current state '''
q_values = self.q_network.predict(self.convert_state_to_input(self.state))
# print(q_values)
if if_pretrain:
self.action = np.argmax(q_values[0])
else:
if random.random() <= self.dic_agent_conf["EPSILON"]: # continue explore new Random Action
self.action = random.randrange(len(q_values[0]))
print("##Explore")
else: # exploitation
self.action = np.argmax(q_values[0])
if self.dic_agent_conf["EPSILON"] > 0.001 and count >= 20000:
self.dic_agent_conf["EPSILON"] = self.dic_agent_conf["EPSILON"] * 0.9999
return self.action, q_values
def choose_action(self, count, state):
''' choose the best action for current state '''
#q_values = self.q_network.predict(self.convert_state_to_input(state))
state = [[state[feature]] for feature in self.dic_traffic_env_conf["LIST_STATE_FEATURE"]]
q_values = self.q_network.predict(state)
if random.random() <= self.dic_agent_conf["EPSILON"]: # continue explore new Random Action
self.action = random.randrange(len(q_values[0]))
else: # exploitation
self.action = np.argmax(q_values[0])
#if self.dic_agent_conf["EPSILON"] > 0.001 and count >= 600:
# self.dic_agent_conf["EPSILON"] = self.dic_agent_conf["EPSILON"] * 0.99
return self.action
def build_memory(self):
return []
def build_network_from_copy(self, network_copy):
'''Initialize a Q network from a copy'''
network_structure = network_copy.to_json()
network_weights = network_copy.get_weights()
network = model_from_json(network_structure, custom_objects={"Selector": Selector})
network.set_weights(network_weights)
network.compile(optimizer=RMSprop(lr=self.dic_agent_conf["LEARNING_RATE"]),
loss=self.dic_agent_conf["LOSS_FUNCTION"])
return network
| 8,441 |
exchanges/opportunity_kraken.py
|
Humantrashcan/prices
| 0 |
2023919
|
from exchanges import helpers
from exchanges import kraken
from decimal import Decimal
### Kraken opportunities
#### ARBITRAGE OPPORTUNITY 1
def opportunity_1():
sellLTCbuyEUR = kraken.get_current_bid_LTCEUR()
sellEURbuyXBT = kraken.get_current_ask_XBTEUR()
sellXBTbuyLTC = kraken.get_current_ask_XBTLTC()
    opport = 1-((sellLTCbuyEUR/sellEURbuyXBT)*sellXBTbuyLTC)
return Decimal(opport)
def opportunity_2():
sellEURbuyLTC = kraken.get_current_ask_LTCEUR()
sellLTCbuyXBT = kraken.get_current_ask_XBTLTC()
sellXBTbuyEUR = kraken.get_current_bid_XBTEUR()
opport = 1-(((1/sellEURbuyLTC)/sellLTCbuyXBT)*sellXBTbuyEUR)
return Decimal(opport)
| 658 |
NN.py
|
hmhamza/autonomous-task-oriented-machine-bs-final-year-project
| 2 |
2023853
|
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
import numpy as np
def bitMapRowGenerator(Map):
ObstacleLimit=250
currentNumber=0;
for j in range(0,17):
currentNumber=(currentNumber<<1)|(Map[j]<ObstacleLimit)
return currentNumber
def bitMapGenerator():
Map=np.loadtxt('ATOM_Map.txt')
MapDimensions=Map.shape
BitMap=np.zeros(MapDimensions[0])
for i in range(0,MapDimensions[0]):
BitMap[i]=bitMapRowGenerator(Map[i])
np.savetxt("bitMap.txt",BitMap,"%.0f")
def initialNeuralNetworkMapGenerator():
Map=np.loadtxt('ATOM_Map.txt')
MapDimensions=Map.shape
NNMap=np.zeros((MapDimensions[0],MapDimensions[1]*2))
for i in range(0,MapDimensions[0]):
for j in range(0,MapDimensions[1]):
NNMap[i][j*2]=-1
np.savetxt("neuralNetworkMap.txt",NNMap,'%.0f')
def main():
#Loading All Map files
BitMap=np.loadtxt("bitMap.txt")
NNMap=np.loadtxt("neuralNetworkMap.txt")
ActualMap=np.loadtxt("ATOM_Map.txt")
    #Performing Localization Multi-Step Process
MapDimensions=BitMap.shape
indices=[i for i in range(MapDimensions[0]+1)]
indices[MapDimensions[0]]=-1
totalReps=3
marginForBitError=totalReps-1
index=0 #Only for readings array
bitReading=np.zeros(totalReps)
readings=np.zeros((totalReps,17))
readings[0]=[255.60975075,255.59353828,256.91890717,259.19675827,7.46178627,313.41123581,311.11717224,311.27119064,3540.16566277,7.14159012,296.71645164,297.05691338,298.49982262,3542.66643524,7.48205185,3527.05788612,7.12537766]
readings[1]=[254.18305397,3526.08919144,7.58337975,3525.46095848,7.137537,316.49160385,315.43779373,316.13087654,315.74177742,303.24602127,300.08459091,299.28612709,299.492836,300.56285858,303.95936966,7.53068924,3551.38468742]
readings[2]=[231.55856133,251.90925598,251.76739693,257.22694397,3617.24376678,307.00325966,306.34260178,306.28991127,307.31129646,306.93840981,299.62658882,297.87564278,296.58269882,296.61512375,297.9080677,3604.46023941,6.69980049]
while (marginForBitError>=0):
ComparedBitValues=np.zeros(MapDimensions[0])
bitReading[index]=bitMapRowGenerator(readings[index])
counter=0
i=indices[counter]
while (i!=-1):
if (i<MapDimensions[0]):
ComparedBitValues[i]=17-(bin(int(bitReading[index])^int(BitMap[i])).count('1'))
counter=counter+1
i=indices[counter]
print ComparedBitValues
counter=0
localmax=np.max(ComparedBitValues)
for i in range(0,MapDimensions[0]):
if (ComparedBitValues[i]+marginForBitError>=localmax):
indices[counter]=i+1
counter=counter+1
indices[counter]=-1
marginForBitError=marginForBitError-1
index=index+1
indices[0]-=1
print indices[0]
#Learning part of the Map through NN
resultIndex=indices[0]-2
noObstacleValue=1010
obstacleValue=10
for i in range(0,totalReps):
bitMapValue=int(BitMap[resultIndex+i])
value=int(bitReading[i])^bitMapValue
for j in range(16,-1,-1):
remainder=value%2
if (remainder==0):
NNMap[resultIndex+i][j*2]-=1
NNMap[resultIndex+i][j*2+1]=0
else:
addVal=pow(2,NNMap[resultIndex+i][j*2+1])
if (NNMap[resultIndex+i][j*2]+addVal>0):
NNMap[resultIndex+i][j*2]=-1
NNMap[resultIndex+i][j*2+1]=0
if (bitMapValue%2==0):
ActualMap[resultIndex+i][j]=obstacleValue
else:
ActualMap[resultIndex+i][j]=noObstacleValue
BitMap[resultIndex+i]=bitMapRowGenerator(ActualMap[resultIndex+i])
else:
NNMap[resultIndex+i][j*2]+=addVal
NNMap[resultIndex+i][j*2+1]+=1
value=value>>1
bitMapValue=bitMapValue>>1
#Saving files back
np.savetxt("mapT.txt",ActualMap,'%5.5f')
np.savetxt("bitMapT.txt",BitMap,'%.0f')
np.savetxt("neuralNetworkMapT.txt",NNMap,'%.0f')
main()
print "Done"
| 4,379 |
data_sources/sqs.py
|
Hobsons/hippo
| 1 |
2022789
|
import boto3
import logging
from data_sources.hippo_base import HippoDataSource
class SqsQueue(HippoDataSource):
namespace = 'sqs'
label = 'SQS Queue'
inputs = {
'awskey': {'input':'text','label':'AWS Key Id'},
'awssecret': {'input':'text','label':'AWS Secret'},
'awsregion': {'input':'text','label':'AWS Region','default':'us-east-1'},
'queuename': {'input':'text','label':'SQS Queue Name'}
}
def __init__(self, *args):
super().__init__(*args, namespace=SqsQueue.namespace, inputs=SqsQueue.inputs)
def process(self):
if not self.awskey or not self.queuename or not self.awssecret:
return
session = boto3.Session(aws_access_key_id = self.awskey,
aws_secret_access_key = self.awssecret,
region_name = self.awsregion)
sqs = session.resource('sqs')
queue = sqs.get_queue_by_name(QueueName=self.queuename)
max_msg = min(self.new_task_limit,10)
if max_msg > 0:
messages = queue.receive_messages(MaxNumberOfMessages=max_msg)
if messages:
self.create_tasks([m.body for m in messages])
for m in messages:
m.delete()
| 1,261 |
gdprcheck/gdprcheck.py
|
bwghughes/gdprcheck
| 0 |
2023028
|
# -*- coding: utf-8 -*-
"""Main module."""
import os
import logging
from commonregex import CommonRegex
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def scantree(path):
"""Recursively yield DirEntry objects for given directory."""
for entry in os.scandir(path):
if entry.is_dir(follow_symlinks=False):
yield from scantree(entry.path) # see below for Python 2.x
else:
yield entry
class PiiAnalyzer(object):
def __init__(self, data):
self.data = data
self.parser = CommonRegex()
def analyse(self):
people = []
organizations = []
locations = []
emails = []
phone_numbers = []
street_addresses = []
credit_cards = []
ips = []
results = []
for text in self.data:
emails.extend(self.parser.emails(text))
phone_numbers.extend(self.parser.phones("".join(text.split())))
street_addresses.extend(self.parser.street_addresses(text))
credit_cards.extend(self.parser.credit_cards(text))
ips.extend(self.parser.ips(text))
return {'people': people, 'locations': locations, 'organizations': organizations,
'emails': emails, 'phone_numbers': phone_numbers, 'street_addresses': street_addresses,
'credit_cards': credit_cards, 'ips': ips
}
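# Usage sketch (illustrative, not part of the original module): walk a
# directory with scantree() and feed the readable file contents to
# PiiAnalyzer; the directory name is a placeholder.
if __name__ == "__main__":
    texts = []
    for entry in scantree("."):
        try:
            with open(entry.path, "r", errors="ignore") as fh:
                texts.append(fh.read())
        except OSError:
            log.info("Skipping unreadable file: %s", entry.path)
    results = PiiAnalyzer(texts).analyse()
    log.info("Found %d emails, %d phone numbers, %d credit cards",
             len(results["emails"]), len(results["phone_numbers"]),
             len(results["credit_cards"]))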
| 1,426 |
posts/forms.py
|
barbaramootian/insta-clone
| 0 |
2024234
|
from django import forms
from posts.models import Post
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ('profile', 'name', 'caption','photo')
| 167 |
group06/operadores/CurrentToRand.py
|
Luisets/HeuristicOp
| 0 |
2023836
|
from ..MutationOperator import MutationOperator
from ..Genome import Genome as Genome
class CurrentToRand(MutationOperator):
def __init__(self, f_fitnes):
self.f_fitnes = f_fitnes
self.F = 0.8
pass
def apply(self, genomes):
x_i = genomes[0].getSolution()
v_1 = genomes[1].getSolution()
v_2 = genomes[2].getSolution()
v_3 = genomes[3].getSolution()
mutant = x_i + self.F * (v_1 - x_i) + self.F * (v_2 - v_3)
return Genome(mutant, self.f_fitnes(mutant))
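# Usage sketch (illustrative): the Genome signature assumed below follows the
# calls visible above; the fitness function is a placeholder.
#
#   import numpy as np
#   fitness = lambda x: float(np.sum(x ** 2))
#   op = CurrentToRand(fitness)
#   genomes = [Genome(v, fitness(v)) for v in (np.random.rand(5) for _ in range(4))]
#   mutant = op.apply(genomes)  # x_i + F*(v1 - x_i) + F*(v2 - v3), re-evaluated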
| 562 |
lib/gii/DeviceManager/Device.py
|
tommo/gii
| 7 |
2023358
|
import logging
##----------------------------------------------------------------##
class DeviceItem():
def getName( self ):
return 'device'
def getType( self ):
return 'device '
def getId( self ):
return 0
def isConnected( self ):
return False
def setActive( self, act ):
self.active = act
def isActive( self ):
return self.active
def deploy( self, deployContext ):
pass
def clearData( self ):
pass
def startDebug( self ):
pass
def stopDebug( self ):
pass
def disconnect( self ):
pass
def __repr__( self ):
# return self.getType()
return ( u'{0}({1})'.format( self.getName(), self.getType() ) ).encode( 'utf-8' )
| 667 |
shared/utils/debugging.py
|
sha-red/django-shared-utils
| 0 |
2023308
|
import logging
from django.template import TemplateDoesNotExist
from django.template.loader import select_template
logger = logging.getLogger(__name__)
def log_select_template(template_names):
logger.info("\nPossible template names:")
logger.info("\n".join(template_names))
try:
logger.info("Chosen: %s" % select_template(template_names).template.name)
except TemplateDoesNotExist:
        logger.warning(" Could not find a matching template file.")
| 478 |
docs/_ext/local_toctree.py
|
gh-oss-contributor/graphql-engine-1
| 27,416 |
2022932
|
# -*- coding: utf-8 -*-
from sphinx import addnodes
"""
``local_toctree``: A callable yielding the global TOC tree that contains
a list of all the content below the specified page. ``local_toctree`` needs
the pagename specified, as in ``{{ local_toctree(pagename) }}``, and
optional keyword arguments are available:
* maxdepth (defaults to the max depth selected in the toctree directive):
the maximum depth of the tree; set it to -1 to allow unlimited depth
"""
def init_local_toctree(app):
def _get_local_toctree(docname, **kwds):
doctree = app.env.get_doctree(docname)
if 'maxdepth' not in kwds:
kwds['maxdepth'] = 0
toctrees = []
for toctreenode in doctree.traverse(addnodes.toctree):
toctree = app.env.resolve_toctree(
docname, app.builder, toctreenode, **kwds)
toctrees.append(toctree)
if not toctrees:
return None
result = toctrees[0]
for toctree in toctrees[1:]:
result.extend(toctree.children)
return app.builder.render_partial(result)['fragment']
ctx = app.env.config['html_context']
if 'local_toctree' not in ctx:
ctx['local_toctree'] = _get_local_toctree
def setup(app):
app.connect('builder-inited', init_local_toctree)
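# Usage sketch (illustrative): with docs/_ext on sys.path, register the
# extension in conf.py and call it from a Jinja template, as described in the
# docstring above.
#
#   # conf.py
#   extensions = ['local_toctree']
#   # in a template
#   {{ local_toctree(pagename, maxdepth=2) }}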
| 1,303 |
awx/main/management/commands/list_custom_venvs.py
|
DamoR25/awxnew
| 11,396 |
2024267
|
# Copyright (c) 2021 Ansible, Inc.
# All Rights Reserved
import sys
from awx.main.utils.common import get_custom_venv_choices
from django.core.management.base import BaseCommand
from django.conf import settings
class Command(BaseCommand):
"""Returns a list of custom venv paths from the path passed in the argument"""
def add_arguments(self, parser):
parser.add_argument('-q', action='store_true', help='run with -q to output only the results of the query.')
def handle(self, *args, **options):
super(Command, self).__init__()
venvs = get_custom_venv_choices()
if venvs:
if not options.get('q'):
msg = [
'# Discovered Virtual Environments:',
'\n'.join(venvs),
'',
'- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
'awx-manage export_custom_venv /path/to/venv',
'',
'- To view the connections a (deprecated) virtual environment had in the database, run the following command while supplying the path as an argument:',
'awx-manage custom_venv_associations /path/to/venv',
'',
'- Run these commands with `-q` to remove tool tips.',
'',
]
print('\n'.join(msg))
else:
print('\n'.join(venvs), '\n')
else:
msg = ["No custom virtual environments detected in:", settings.BASE_VENV_PATH]
for path in settings.CUSTOM_VENV_PATHS:
msg.append(path)
print('\n'.join(msg), file=sys.stderr)
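# Usage sketch (illustrative): this runs as a Django management command, e.g.
#
#   awx-manage list_custom_venvs        # paths plus the tool tips above
#   awx-manage list_custom_venvs -q     # only the discovered venv paths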
| 1,780 |
forg.py
|
andreaswachs/forg
| 0 |
2023875
|
import argparse, sys, re, shutil, datetime, os, urllib, locale, shelve
from pathlib import Path
from tqdm import tqdm
class Forg:
def __init__(self, source_dir, dest_dir):
self.source_dir = Path(source_dir)
self.dest_dir = Path(dest_dir)
# Check to see if the directories exist
if self.__source_directory_exists():
            # First, check whether there are eligible files to move from the source to the destination folder
if self.__inspect_source_directory():
# Make sure the destination folder exists
self.__prepare_destination_dir()
def organize(self):
translated = {}
for file in tqdm(self.files, total=len(self.files), desc='Moving files', unit='file'):
filename = str(file).split('/')[-1][0:8]
file_date = {'year': filename[0:4],
'month': filename[4:6]}
# Change the month number to the month name
file_date['month'] = datetime.date(int(file_date['year']), int(file_date['month']), 1).strftime('%B')
# Construct the full path with the date
dest_full_path = Path(self.dest_dir / file_date['year'] / file_date['month'].capitalize())
            # Check to see if it exists
if not dest_full_path.exists():
os.makedirs(dest_full_path)
shutil.move(str(file), str(dest_full_path))
def __inspect_source_directory(self):
        # Create a regex pattern to match the eligible files.
        # We are going to match the whole path to each file, so that is taken into account too
pattern = re.compile('.+\d{8}.+')
# List all files that have a date as the first thing in their filename
files = [file for file in self.source_dir.glob('*') if pattern.match(str(file))]
if not files:
print("There were no elgible files to move in the source directory.", file=sys.stderr)
exit(2)
else:
self.files = files
return True
def __source_directory_exists(self):
if not self.source_dir.exists():
print("The source directory does not exist - exiting program.", file=sys.stderr)
exit(1)
else:
return True
def __prepare_destination_dir(self):
if not self.dest_dir.exists():
os.makedirs(self.dest_dir)
if __name__ == '__main__':
# This block is at the top because it needs to work beyond argparse
# Handle single flag assignments, such as setting default settings
if len(sys.argv) >= 2:
# Open up the saved variables file
forgdata = shelve.open('forgdata')
# Handle the request of setting the default locale, if the user ran the program with this argument
if sys.argv[1] == '--set-default-locale':
# Make sure that there is the expected amount of arguments passed to the program.
if len(sys.argv) == 3:
forgdata['default-locale'] = sys.argv[2]
print(f"Default locale set to {sys.argv[2]}. The program will now exit.")
else:
print("Illegal number of arguments passed. Expected flag and locale only.")
forgdata.close()
exit(0)
# Handle asking for the default locale
elif sys.argv[1] == '--get-default-locale':
try:
print(f"The default locale for this program is: {forgdata['default-locale']}.")
except:
print("There was no default locale set. You can set it with the flag --set-default-locale <locale-code>")
forgdata.close()
exit(0)
# Set up the parser. Require source and destination arguments
parser = argparse.ArgumentParser(
description='Put files in order by date, if the filename starts with a date '\
'of the format YYYYmmdd, like 20200101.'
)
parser.add_argument('source', nargs=1, default=None, help='Source directory for files to organize.')
parser.add_argument('destination', nargs=1, default=None, help='Destination directory for files to get organized in.')
parser.add_argument('--locale', dest='locale', help='Sets the locale for month names if system is not english.\n'\
+ 'Remember that you need to pass the country code as well as the encoding, ex: "da_DK.UTF-8".')
    parser.add_argument('--set-default-locale', dest="setdefaultlocale", help='Set the default locale so that you '\
                        + 'don\'t have to set the flag every time you use the program. If setting a default locale, '\
                        + 'put the flag first followed by the locale code and nothing else.')
parser.add_argument('--get-default-locale', dest='getdefaultlocale', help='Tells the user what the default locale is set to.')
# Interpret the arguments passed to the script
args = parser.parse_args()
# Check to see if the forgdata shelve file exists, if so load the default locale
    default_locale = None
    try:
forgdata = shelve.open('forgdata')
if 'default-locale' in forgdata.keys():
default_locale = forgdata['default-locale']
except Exception as e:
print(e)
exit(1)
if args.locale or default_locale:
try:
# Set a buffer to either assigned locale, from the save file or the args
if args.locale:
locale_buffer = args.locale
elif default_locale:
locale_buffer = default_locale
locale.setlocale(locale.LC_ALL, locale_buffer)
except Exception as e:
print("Something went wrong with assigning the custom locale.")
print("Error message:\n", e)
print()
print("Do you want to continue anyhow? Y/N")
answer = input()
            if answer.lower() == 'n':
exit(3)
# Make sure the arguments are there, or else explain how the user needs to use the program
if args.source and args.destination:
# Create the organizer object
forg_obj = Forg(
            source_dir=args.source[0],
            dest_dir=args.destination[0]
)
# Organize!
forg_obj.organize()
else:
print(parser.usage)
exit(1)
| 6,452 |
linguistic_style_transfer_model/config/model_config.py
|
yonglin-wang/disentagled-style-rep
| 0 |
2023834
|
class ModelConfig():
def __init__(self):
# batch settings
self.batch_size = 128
# layer sizes
self.encoder_rnn_size = 256
self.decoder_rnn_size = 256
self.style_embedding_size = 8
self.content_embedding_size = 128
# dropout
self.sequence_word_keep_prob = 0.8
self.recurrent_state_keep_prob = 0.8
self.fully_connected_keep_prob = 0.8
# learning rates
self.autoencoder_learning_rate = 0.001
self.style_adversary_learning_rate = 0.001
self.content_adversary_learning_rate = 0.001
# loss weights
# self.style_multitask_loss_weight = 10
# self.content_multitask_loss_weight = 3
# self.style_adversary_loss_weight = 1
# self.content_adversary_loss_weight = 0.03
# self.style_kl_lambda = 0.03
# self.content_kl_lambda = 0.03
self.style_multitask_loss_weight = 10
self.content_multitask_loss_weight = 1
self.style_adversary_loss_weight = 1
self.content_adversary_loss_weight = 0.01
self.style_kl_lambda = 0.03
self.content_kl_lambda = 0.03
# training iterations
self.kl_anneal_iterations = 20000
# noise
self.epsilon = 1e-8
def init_from_dict(self, previous_config):
for key in previous_config:
setattr(self, key, previous_config[key])
mconf = ModelConfig()
| 1,444 |
gpvolve/__init__.py
|
harmsm/gpvolve
| 0 |
2023716
|
# Import the main module in this package
from gpvolve.__version__ import __version__
from gpvolve.base import to_greedy
from gpvolve import simulate
from gpvolve import utils
from gpvolve import pyplot
from gpvolve import cluster
from gpvolve import markov
from gpvolve import analysis
| 286 |
dataworkspace/dataworkspace/apps/core/charts/constants.py
|
uktrade/jupyterhub-data-auth-admin
| 1 |
2023135
|
CHART_BUILDER_SCHEMA = "_data_explorer_charts"
CHART_BUILDER_AXIS_MAP = {
"scatter": {
"xsrc": "xsrc",
"ysrc": "ysrc",
},
"line": {
"xsrc": "xsrc",
"ysrc": "ysrc",
},
"bar": {
"xsrc": "xsrc",
"ysrc": "ysrc",
},
"pie": {
"xsrc": "labelsrc",
"ysrc": "valuesrc",
},
"scattermapbox": {
"x": "lat",
"y": "lon",
"xsrc": "latsrc",
"ysrc": "lonsrc",
},
}
| 484 |
subfunctions/model_handler.py
|
toothless93/fashion-mnist
| 1 |
2023196
|
import os
import tensorflow.keras.optimizers as opt
import tensorflow as tf
from tensorflow.keras import layers
def generate_cnn_model():
# Index Layer Channels H*W
# Layer 0 Input 1 28*28
# Layer 1 Conv (ReLU) 16 14*14
# Layer 2 Conv (ReLU) 32 7*7
# Layer 3 Conv (ReLU) 64 4*4
# Layer 4 Max-Pool 64 1*1
# Layer 5 Output 10 1*1
model = tf.keras.Sequential(name='Model')
model.add(
layers.Conv2D(16, kernel_size=3, strides=2, padding='same', data_format='channels_last', activation='relu',
input_shape=(28, 28, 1), name='Conv01', ))
model.add(
layers.Conv2D(32, kernel_size=3, strides=2, padding='same', data_format='channels_last', activation='relu',
name='Conv02'))
model.add(
layers.Conv2D(64, kernel_size=3, strides=2, padding='same', data_format='channels_last', activation='relu',
name='Conv03'))
model.add(layers.GlobalMaxPool2D(data_format='channels_last', name='max_pool'))
model.add(layers.Dense(10, activation='softmax', name='Output'))
model.summary()
return model
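# Usage sketch (illustrative): compile the CNN with one of the optimizers
# below; the loss and metric choices here are assumptions, not part of the
# original script.
#
#   model = generate_cnn_model()
#   model.compile(optimizer=get_model_optimizer("Adam", 1e-3),
#                 loss="sparse_categorical_crossentropy",
#                 metrics=["accuracy"])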
def get_model_optimizer(optimizer_str, learning_rate):
if optimizer_str == "SGD":
optimizer = opt.SGD(lr=learning_rate, momentum=0.0, nesterov=False, name=optimizer_str)
elif optimizer_str == "SGD_with_momentum":
optimizer = opt.SGD(lr=learning_rate * 5, momentum=0.8, decay=learning_rate / 10, nesterov=False, name=optimizer_str)
elif optimizer_str == "RMSprop":
optimizer = opt.RMSprop(
lr=learning_rate,
rho=0.9,
momentum=0.0,
epsilon=1e-07,
centered=False,
name=optimizer_str
)
elif optimizer_str == "Adam":
optimizer = opt.Adam(lr=learning_rate, name=optimizer_str)
return optimizer
def save_model(model, model_name):
model_folder = 'content/savedModels/models/'
# model_name = 'model00'
if not os.path.exists(model_folder):
os.mkdir(model_folder)
# model.save_weights(model_folder + model_name)
model.save_weights(model_folder + model_name + '.h5', save_format='h5')
| 2,306 |
sound.py
|
tydyus/HardZone_enjam2017_exterieur
| 0 |
2023077
|
import pygame
from pygame.locals import *
import time
class sound:
pygame.init()
volume = 0.3 #0.3
pygame.mixer.music.set_volume(volume)
def __init__(self, path, type = "event"):
self.type = type
if type == "event":
self.son = pygame.mixer.Sound(path)
elif type == "fond":
pygame.mixer.music.load(path)
def run(self):
if self.type == "event":
self.son.play()
elif self.type == "fond":
pygame.mixer.music.play(-1)
def stop(self):
if self.type == "event":
self.son.stop()
elif self.type == "fond":
pygame.mixer.music.stop()
    @staticmethod
    def set_volume():
        pygame.mixer.music.set_volume(sound.volume)
| 762 |
nlp/bert4tf2/classifier.py
|
mikuh/models-tf2
| 0 |
2023227
|
"""BERT classification finetuning runner in tf2.0."""
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
class Classifier(object):
def __init__(self):
pass
| 230 |
utils/channels.py
|
LazaroHurtado/ToxicBot
| 11 |
2024209
|
def system_or_first(guild, bot):
if guild.system_channel is not None:
return guild.system_channel
else:
for channel in guild.text_channels:
if not dict(channel.permissions_for(guild.get_member(bot.user.id)))["send_messages"]:
continue
return channel
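# Usage sketch (illustrative), assuming a discord.py-style bot where
# TextChannel.send is a coroutine:
#
#   channel = system_or_first(guild, bot)
#   if channel is not None:
#       await channel.send("Hello!")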
| 314 |
source_codes/memoir/batch_preprocessing/train_test_split.py
|
andy6975/Memoir
| 1 |
2023074
|
from random import shuffle
from typing import Union
import cv2
def train_test_val(
image_names: list,
test_fraction: Union[float, int] = 0.25,
val_data: bool = True,
val_fraction: float = 0.1,
):
'''
Function to split the dataset into train, test, and validation sets.
Args:
image_names (list): Dataset to be used for training.
test_fraction (int, float): Size of the test set. Default: 0.25
If **float**, takes it as the fraction of the whole dataset.
If **integer**, takes only that many images in the test set from the whole dataset.
val_data (bool): Whether to make validation set or not.
val_fraction (float, int): Size of the validation set. Default: 0.1
If **float**, takes it as the fraction of the train dataset.
If **integer**, takes only that many images in the validation set from the train set.
Returns:
Train, test and validation (if asked) datasets.
'''
total_samples = len(image_names)
shuffle(image_names)
test = image_names[: int(test_fraction * total_samples)]
train = image_names[int(test_fraction * total_samples) :]
if val_data:
total_train = len(train)
shuffle(train)
validation = train[: int(val_fraction * total_train)]
train = train[int(val_fraction * total_train) :]
return train, test, validation
else:
return train, test
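# Usage sketch (illustrative); the file names below are placeholders.
#
#   images = ["img_{}.png".format(i) for i in range(1000)]
#   train, test, val = train_test_val(images, test_fraction=0.25,
#                                     val_data=True, val_fraction=0.1)
#   train, test = train_test_val(images, test_fraction=0.25, val_data=False)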
| 1,458 |
setup.py
|
Advanced-Computer-Science/FutureNode
| 0 |
2023815
|
from setuptools import setup
setup(
name='FutureNode',
packages=['futurenode', 'analyze', 'api', 'predict', 'tests'],
include_package_data=True,
install_requires=[
'flask',
],
)
| 207 |
proteus/tests/POD/test_deim.py
|
acatwithacomputer/proteus
| 0 |
2023701
|
#!/usr/bin/env python
"""
some tests for initial DEIM implementation in proteus
"""
from __future__ import division
from builtins import range
from past.utils import old_div
import numpy as np
import numpy.testing as npt
from nose.tools import ok_ as ok
from nose.tools import eq_ as eq
from proteus import deim_utils
def get_burgers_ns(name,T=0.1,nDTout=10,archive_pod_res=False):
import burgers_init as bu
bu.physics.name=name
bu.so.name = bu.physics.name
#adjust default end time and number of output steps
bu.T=T
bu.nDTout=nDTout
bu.DT=old_div(bu.T,float(bu.nDTout))
bu.so.tnList = [i*bu.DT for i in range(bu.nDTout+1)]
#request archiving of spatial residuals ...
simFlagsList=None
if archive_pod_res:
simFlagsList=[{}]
simFlagsList[0]['storeQuantities']=['pod_residuals']
ns = bu.NumericalSolution.NS_base(bu.so,[bu.physics],[bu.numerics],bu.so.sList,bu.opts,simFlagsList=simFlagsList)
return ns
def test_burger_run():
"""
Aron's favority smoke test to see if burgers runs
"""
ns = get_burgers_ns("test_run",T=0.1,nDTout=10)
failed = ns.calculateSolution("run_smoke_test")
assert not failed
def test_residual_split():
"""
just tests that R=R_s+R_t for random dof vector in [0,1]
Here R_s and R_t are the spatial and mass residuals
"""
ns = get_burgers_ns("test_res_split",T=0.05,nDTout=5)
failed = ns.calculateSolution("run_res_test")
assert not failed
#storage for residuals
model = ns.modelList[0].levelModelList[-1]
u_tmp = np.random.random(model.u[0].dof.shape)
res = np.zeros(model.u[0].dof.shape,'d')
res_s = res.copy(); res_t=res.copy()
model.getResidual(u_tmp,res)
model.getSpatialResidual(u_tmp,res_s)
model.getMassResidual(u_tmp,res_t)
res_t += res_s
npt.assert_almost_equal(res,res_t)
def test_res_archive():
"""
smoke test if numerical solution can archive 'spatial residuals' to xdmf
"""
ns = get_burgers_ns("test_space_res_archive",T=0.1,nDTout=10,archive_pod_res=True)
failed = ns.calculateSolution("run_space_res_smoke_test")
assert not failed
def test_svd_space_res(file_prefix='F_s'):
"""
test SVD decomposition of spatial residuals by generating SVD, saving to file and reloading
"""
from proteus.deim_utils import read_snapshots,generate_svd_decomposition
ns = get_burgers_ns("test_svd_space_res",T=0.1,nDTout=10,archive_pod_res=True)
failed = ns.calculateSolution("run_svd_space_res")
assert not failed
from proteus import Archiver
archive = Archiver.XdmfArchive(".","test_svd_space_res",readOnly=True)
U,s,V=generate_svd_decomposition(archive,len(ns.tnList),'spatial_residual0',file_prefix)
S_svd = np.dot(U,np.dot(np.diag(s),V))
#now load back in and test
S = read_snapshots(archive,len(ns.tnList),'spatial_residual0')
npt.assert_almost_equal(S,S_svd)
def test_svd_mass_res(file_prefix='F_m'):
"""
test SVD decomposition of mass residuals by generating SVD, saving to file and reloading
"""
from proteus.deim_utils import read_snapshots,generate_svd_decomposition
ns = get_burgers_ns("test_svd_mass_res",T=0.1,nDTout=10,archive_pod_res=True)
failed = ns.calculateSolution("run_svd_mass_res")
assert not failed
from proteus import Archiver
archive = Archiver.XdmfArchive(".","test_svd_mass_res",readOnly=True)
U,s,V=generate_svd_decomposition(archive,len(ns.tnList),'mass_residual0',file_prefix)
S_svd = np.dot(U,np.dot(np.diag(s),V))
#now load back in and test
S = read_snapshots(archive,len(ns.tnList),'mass_residual0')
npt.assert_almost_equal(S,S_svd)
def test_svd_soln():
"""
test SVD decomposition of solution by generating SVD, saving to file and reloading
"""
from proteus.deim_utils import read_snapshots,generate_svd_decomposition
ns = get_burgers_ns("test_svd_soln",T=0.1,nDTout=10,archive_pod_res=True)
failed = ns.calculateSolution("run_svd_soln")
assert not failed
from proteus import Archiver
archive = Archiver.XdmfArchive(".","test_svd_soln",readOnly=True)
U,s,V=generate_svd_decomposition(archive,len(ns.tnList),'u','soln')
S_svd = np.dot(U,np.dot(np.diag(s),V))
#now load back in and test
S = read_snapshots(archive,len(ns.tnList),'u')
npt.assert_almost_equal(S,S_svd)
def test_deim_indices():
"""
    Takes a basis generated from snapshots
    and tests that the DEIM algorithm returns all of the indices
"""
import os
basis_file='F_s_SVD_basis'
if not os.path.isfile(basis_file):
test_svd_space_res(file_prefix='F_s')
U = np.loadtxt(basis_file)
from proteus.deim_utils import calculate_deim_indices
rho_half = calculate_deim_indices(U[:,:old_div(U.shape[1],2)])
assert rho_half.shape[0] == old_div(U.shape[1],2)
rho = calculate_deim_indices(U)
assert rho.shape[0] == U.shape[1]
rho_uni = np.unique(rho)
assert rho_uni.shape[0] == rho.shape[0]
def deim_approx(T=0.1,nDTout=10,m=5,m_mass=5):
"""
Follow basic setup for DEIM approximation
- generate a burgers solution, saving spatial and 'mass' residuals
- generate SVDs for snapshots
- for both residuals F_s and F_m
- pick $m$, dimension for snapshot reduced basis $\mathbf{U}_m$
- call DEIM algorithm to determine $\vec \rho$ and compute projection matrix
$\mathbf{P}_F=\mathbf{U}_m(\mathbf{P}^T\mathbf{U}_m)^{-1}$
For selected timesteps
- extract fine grid solution from archive, $\vec y$
- for both residuals F=F_s and F_m
- evaluate $\vec F(\vec y)$ at indices in $\vec \rho \rightarrow \vec c$
- apply DEIM interpolant $\tilde{\vec F} = \mathbf{P}_F\vec c$
- compute error $\|F-\tilde{\vec F}\|
- visualize
"""
from proteus.deim_utils import read_snapshots,generate_svd_decomposition
##run fine grid problem
ns = get_burgers_ns("test_deim_approx",T=T,nDTout=nDTout,archive_pod_res=True)
failed = ns.calculateSolution("run_deim_approx")
assert not failed
from proteus import Archiver
archive = Archiver.XdmfArchive(".","test_deim_approx",readOnly=True)
##perform SVD on spatial residual
U,s,V=generate_svd_decomposition(archive,len(ns.tnList),'spatial_residual0','F_s')
U_m,s_m,V_m=generate_svd_decomposition(archive,len(ns.tnList),'mass_residual0','F_m')
from proteus.deim_utils import deim_alg
##calculate DEIM indices and projection matrix
rho,PF = deim_alg(U,m)
#also 'mass' term
rho_m,PF_m = deim_alg(U_m,m_mass)
##for comparison, grab snapshots of solution and residual
Su = read_snapshots(archive,len(ns.tnList),'u')
Sf = read_snapshots(archive,len(ns.tnList),'spatial_residual0')
Sm = read_snapshots(archive,len(ns.tnList),'mass_residual0')
steps_to_test = np.arange(len(ns.tnList))
errors = np.zeros(len(steps_to_test),'d'); errors_mass = errors.copy()
F_deim = np.zeros((Sf.shape[0],len(steps_to_test)),'d')
Fm_deim = np.zeros((Sf.shape[0],len(steps_to_test)),'d')
for i,istep in enumerate(steps_to_test):
#solution on the fine grid
u = Su[:,istep]
#spatial residual evaluated from fine grid
F = Sf[:,istep]
#deim approximation on the fine grid
F_deim[:,istep] = np.dot(PF,F[rho])
errors[i] = np.linalg.norm(F-F_deim[:,istep])
#repeat for 'mass residual'
Fm= Sm[:,istep]
#deim approximation on the fine grid
        Fm_deim[:,istep] = np.dot(PF_m,Fm[rho_m])
errors_mass[i] = np.linalg.norm(Fm-Fm_deim[:,istep])
#
np.savetxt("deim_approx_errors_space_test_T={0}_nDT={1}_m={2}.dat".format(T,nDTout,m),errors)
np.savetxt("deim_approx_errors_mass_test_T={0}_nDT={1}_m={2}.dat".format(T,nDTout,m_mass),errors_mass)
return errors,errors_mass,F_deim,Fm_deim
def test_deim_approx_full(tol=1.0e-12):
"""
check that get very small error if use full basis
"""
T = 0.1; nDTout=10; m=nDTout+1
errors,errors_mass,F_deim,Fm_deim = deim_approx(T=T,nDTout=nDTout,m=m,m_mass=m)
assert errors.min() < tol
assert errors_mass.min() < tol
if __name__ == "__main__":
from proteus import Comm
comm = Comm.init()
import nose
nose.main(defaultTest='test_deim:test_deim_approx_full')
| 8,396 |
main.py
|
mohitchaniyal/Forensics
| 0 |
2024321
|
from tkinter import *
from tkinter import ttk
from PIL import Image ,ImageTk
import winreg
from codecs import decode
import sys
from tkinter import filedialog
import webbrowser
class Forensics(object) :
def __init__(self,window):
self.window=window
self.imgg=Image.open("IMG/new.png")
self.imgre=self.imgg.resize((195,50),Image.ANTIALIAS)
self.logo=ImageTk.PhotoImage(self.imgre)
# save_ico=Image.open('IMG/filesave.png')
# save_ico_re=imgg.resize((20,30),Image.ANTIALIAS)
# self.save_ico=ImageTk.PhotoImage(save_ico_re)
self.mainframe=Frame(self.window)
self.mainframe.pack()
self.manubar=Menu(self.window)
self.window.config(menu=self.manubar)
self.file=Menu(self.manubar,tearoff=0)
self.About=Menu(self.manubar,tearoff=0)
self.manubar.add_cascade(label='File',menu=self.file)
self.file.add_command(label='New',command=main)
self.file.add_command(label='Save',command=self.save_file)
self.file.add_command(label='Exit',command=sys.exit)
self.manubar.add_cascade(label='About',menu=self.About)
self.About.add_command(label='Follow Us On Facebook',command=self.open_facebook)
self.About.add_command(label='Follow Us On Instagram',command=self.open_instagram)
self.About.add_command(label='Follow Us On Github',command=self.open_github)
self.About.add_command(label='Subscribe To Our Youtube Channel',command=self.open_youtube)
self.bottom_frame=Frame(self.mainframe,height='20',bg='#292929',pady=1,padx=1)
self.bottom_frame.pack(side=BOTTOM,fill=X)
self.bottom_frame_label=Label(self.bottom_frame,text='All Rights Are Reserved To <NAME> | <NAME> | <NAME> | <NAME> *** ',fg='White',bg='#292929')
self.bottom_frame_label.pack(side=RIGHT)
self.left_frame=Frame(self.mainframe,height='700',width='200',bg='#393939',borderwidth=2,relief=GROOVE,padx=10)
self.left_frame.pack(side=LEFT,fill=Y)
self.right_frame=Frame(self.mainframe,height='700',width='1000',bg='#393939',borderwidth=2,relief=GROOVE,pady=1,padx=1)
self.right_frame.pack(side=RIGHT,fill=Y)
self.tabs=ttk.Notebook(self.right_frame,width='900',height='700')
self.tabs.pack(side=TOP)
self.tab1=ttk.Frame(self.tabs)
self.tabs.add(self.tab1,text='Introduction')
self.intro_label=Text(self.tab1,width='800',height='700')
self.intro_label.pack()
        self.intro_label.insert(END,'''Welcome To Forensics\n> This tool is based on digital forensics. Digital forensics is a branch of forensic science encompassing the recovery and investigation of material found in digital devices, often in relation to computer crime. Using this tool we can perform Windows forensics at a beginner level.
        \n>This tool is built using Python 3 and the modules used in this tool are ->\n 1.Tkinter for GUI \n 2.Winreg for communicating with Windows Registry
        3.PIL for image resizing
        4.Decode for decoding the registry binary data to readable format.''')
try:
self.lable=Label(self.left_frame,image=self.logo,bg='#393939')
self.lable.pack(side=TOP)
except :
pass
self.lframe=LabelFrame(self.left_frame,text='Tools',height='650',width='200',bg="#393939",fg='white',padx=5,pady=5)
self.lframe.pack()
self.recent_D_B=Button(self.lframe,text="Recent Doc",height=3,width=25,bg='#292929',fg='white',command=self.recent_doc)
self.recent_D_B.pack()
        self.p_ran=Button(self.lframe,text="Previously Run",height=3,width=25,bg='#292929',fg='white',command=self.p_run_app)
self.p_ran.pack()
self.Run_search=Button(self.lframe,text="Run Searches",height=3,width=25,bg='#292929',fg='white',command=self.run_searches)
self.Run_search.pack()
def recent_doc(self) :
self.recent_doc_list=[]
net=r"Software\Microsoft\Windows\CurrentVersion\Explorer\RecentDocs"
with winreg.ConnectRegistry(None,winreg.HKEY_CURRENT_USER) as access_registry :
with winreg.OpenKey(access_registry,net,0,winreg.KEY_ALL_ACCESS) as access_key :
num_of_values = winreg.QueryInfoKey(access_key)[1]
self.recent_doc_list.append('Recent Doc\n\n')
for i in range(1,num_of_values):
try :
name,value,type = winreg.EnumValue(access_key,i)
a=value[::2][:value[::2].find(b'\x00')].decode()
self.recent_doc_list.append(a+'\n')
except :
continue
self.recent_doc_list.append('\n')
self.recent_doc_tab()
def recent_doc_tab(self):
self.rtab=ttk.Frame(self.tabs)
self.tabs.add(self.rtab,text='Recently Open Doc')
self.r_d_textbox=Text(self.rtab,width='800',height='700',wrap=WORD)
self.r_d_textbox.pack()
for i in self.recent_doc_list:
self.r_d_textbox.insert(END,i)
def p_run_app(self):
self.p_run_app_list=[]
def enum_key(hive, subkey):
with winreg.OpenKey(hive, subkey, 0, winreg.KEY_ALL_ACCESS) as key:
num_of_values = winreg.QueryInfoKey(key)[1]
                self.p_run_app_list.append('Previously Run Applications\n\n')
for i in range(num_of_values):
values=winreg.EnumValue(key, i)
if values[0] == "MRUList":
continue
val=str(values[1])
self.p_run_app_list.append(val+'\n')
self.p_run_app_list.append('\n')
with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as hkcu_hive:
enum_key(hkcu_hive, r"SOFTWARE\Classes\Local Settings\Software\Microsoft\Windows\Shell\MuiCache")
self.p_run_app_tab()
def p_run_app_tab(self):
self.p_run_tab=ttk.Frame(self.tabs)
        self.tabs.add(self.p_run_tab,text='Previously Run App')
self.p_r_textbox=Text(self.p_run_tab,width='800',height='700',wrap=WORD)
self.p_r_textbox.pack()
for i in self.p_run_app_list:
self.p_r_textbox.insert(END,i)
def run_searches(self):
self.run_searches=[]
def enum_key(hive, subkey):
with winreg.OpenKey(hive, subkey, 0, winreg.KEY_ALL_ACCESS) as key:
num_of_values = winreg.QueryInfoKey(key)[1]
self.run_searches.append('Run searches \n\n')
for i in range(num_of_values):
values=winreg.EnumValue(key, i)
if values[0] == "MRUList":
continue
val=str(values[1])
self.run_searches.append(val+'\n')
self.run_searches.append('\n')
with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as hkcu_hive:
enum_key(hkcu_hive, r"Software\Microsoft\Windows\CurrentVersion\Explorer\RunMRU")
self.run_searches_tab()
def run_searches_tab(self):
self.run_searches_tab=ttk.Frame(self.tabs)
self.tabs.add(self.run_searches_tab,text='Run Searches')
self.run_searches_textbox=Text(self.run_searches_tab,width='800',height='700',wrap=WORD)
self.run_searches_textbox.pack()
for i in self.run_searches:
self.run_searches_textbox.insert(END,i)
def save_file(self):
file=filedialog.asksaveasfile(mode='w', defaultextension='.txt')
if file== None:
return
try :
self.recent_doc_data=self.r_d_textbox.get(1.0, 'end-1c')
file.write(self.recent_doc_data)
except:
pass
try :
self.p_run_data=self.p_r_textbox.get(1.0, 'end-1c')
file.write(self.p_run_data)
except:
pass
try :
self.run_searches_data=self.run_searches_textbox.get(1.0, 'end-1c')
file.write(self.run_searches_data)
except:
pass
def open_youtube(self):
webbrowser.open('https://www.youtube.com/pythohacker')
def open_facebook(self):
webbrowser.open('https://www.facebook.com/pythohacker')
def open_instagram(self):
webbrowser.open('https://www.instagram.com/pythohacker')
def open_github(self):
webbrowser.open('https://github.com/mohitchaniyal')
def main():
root=Tk()
root.title('Forensics')
root.geometry('1132x770+100+50')
root.iconbitmap('IMG/Turbo.ico')
# root.maxsize(1080,720)
app=Forensics(root)
root.mainloop()
if __name__=="__main__":
main()
| 9,091 |
tools/change_things/change_ico.py
|
akalenuk/wordsandbuttons
| 367 |
2023929
|
import os
path = '../../pages/'
for file_name in os.listdir(path):
if os.path.isfile(path + file_name):
if file_name.endswith('.html'):
print (file_name)
html = open(path + file_name, 'r')
text = html.read()
html.close()
new_text = text.replace('favicon.ico', 'favicon.svg')
html = open(path + file_name, 'w')
html.write(new_text)
html.close()
| 372 |
ToBev/visiable_LIDAR.py
|
wjy199708/Visualization_KITTI_Waymo
| 6 |
2024494
|
import numpy as np
import mayavi
import mayavi.mlab as mlab
import argparse
from glob import glob
import os
import time
# from tvtk.tools import visual
def parse_args():
parse = argparse.ArgumentParser()
parse.add_argument('--index',
type=int,
help='the index file of bin that you wanna show ',
required=True)
parse.add_argument(
'--waymo',
help='if using waymo dataset',
action='store_true',
required=True,
)
parse.add_argument(
'--continuous',
action='store_true',
help=
'show much more frames lidar datas,if it not given then you will get one frame scence point cloud'
)
args = parse.parse_args()
return args
def get_lidar_xyzrd(base_dir, index):
ind = index
try:
pointcloud = np.fromfile('{}/{}.bin'.format(base_dir,
str('%06d' % (ind))),
dtype=np.float32,
count=-1).reshape([-1, 4])
except ValueError:
pointcloud = np.fromfile('{}/{}.bin'.format(base_dir,
str('%06d' % (ind))),
dtype=np.float64,
count=-1).reshape([-1, 4])
    print(pointcloud.shape)
x = pointcloud[:, 0] # x position of point
y = pointcloud[:, 1] # y position of point
z = pointcloud[:, 2] # z position of point
r = pointcloud[:, 3] # reflectance value of point
d = np.sqrt(x**2 + y**2) # Map Distance from sensor
return x, y, z, r, d
def show_one_frame_lidar(index, base_dir):
ind = index
# def LIDAR_show(binData):
x, y, z, r, d = get_lidar_xyzrd(base_dir, index)
vals = 'height'
if vals == "height":
        col = z  # the Velodyne-style lidar on the capture vehicle has x forward, y left, z up
else:
col = d
scalars = [1.5, 1.5]
    # Plot the data collected at this moment as one frame; since it is 3D data, use mayavi for the 3D view
fig = mayavi.mlab.figure(bgcolor=(0, 0, 0), size=(640, 500))
# visual.set_viewer(fig)
mayavi.mlab.points3d(
x,
y,
z,
col, # Values used for Color
mode="point", #sphere point
)
# colormap='spectral', # 'bone', 'copper', 'gnuplot','spectral'
# # color=(0, 1, 0), # Used a fixed (r,g,b) instead
# figure=fig,
# scale_factor=0.05)
# mayavi.mlab.plot3d()
x = np.linspace(5, 5, 50)
y = np.linspace(0, 0, 50)
z = np.linspace(0, 5, 50)
mayavi.mlab.plot3d(x, y, z)
mayavi.mlab.show()
def show_more_frames_lidar(continuous_lidar_dir):
index_lists = [
int(os.path.basename(x).replace('.bin', ''))
for x in glob(os.path.join(continuous_lidar_dir, '*.bin'))
]
# print(index_lists)
first_frame = (x, y, z, r, d) = get_lidar_xyzrd(continuous_lidar_dir,
index_lists[0])
vals = 'height'
if vals == "height":
        col = z  # the Velodyne-style lidar on the capture vehicle has x forward, y left, z up
else:
col = d
fig = mlab.figure(bgcolor=(0, 0, 0), size=(640, 500))
frame_lidar = mlab.points3d(
first_frame[0],
first_frame[1],
first_frame[2],
# col, # Values used for Color
mode="point", #sphere point
# colormap='spectral', # 'bone', 'copper', 'gnuplot','spectral'
# color=(0, 1, 0), # Used a fixed (r,g,b) instead
# figure=fig,
# scale_factor=0.05)
)
@mlab.animate(delay=20)
def lidar_animate():
f = mlab.gcf()
while True:
for index in index_lists[1:]:
# time.sleep(0.5)
print('-*' * 20)
                print('Current frame: {}'.format(index))
print('Updating scene...')
print('-*' * 20)
x1, y1, z1, r1, d1 = get_lidar_xyzrd(continuous_lidar_dir,
index)
frame_lidar.mlab_source.reset(x=x1, y=y1, z=z1)
f.scene.render()
yield
lidar_animate()
time.sleep(2)
mlab.show()
def main():
args = parse_args()
base_dir = '../data/object/training/velodyne' if not args.waymo else '../data/waymo/visualization'
if args.continuous:
continuous_lidar_dir = base_dir
show_more_frames_lidar(continuous_lidar_dir)
# mlab.show()
else:
show_one_frame_lidar(args.index, base_dir)
if __name__ == '__main__':
main()
| 4,803 |
Data Science With Python/02-intermediate-python-for-data-science/5-case-study-hacker-statistics/visualize-the-walk.py
|
aimanahmedmoin1997/DataCamp
| 3 |
2024475
|
'''
Visualize the walk
100xp
Let's visualize this random walk! Remember how you could use matplotlib to build a line plot?
import matplotlib.pyplot as plt
plt.plot(x, y)
plt.show()
The first list you pass is mapped onto the x axis and the second list is mapped onto the y axis.
If you pass only one argument, Python will know what to do and will use the index of the
list to map onto the x axis, and the values in the list onto the y axis.
Instructions
Add some lines of code after the for loop:
-Import matplotlib.pyplot as plt.
-Use plt.plot() to plot random_walk.
-Finish off with plt.show() to actually display the plot.
'''
# Initialization
import numpy as np
np.random.seed(123)
random_walk = [0]
for x in range(100) :
step = random_walk[-1]
dice = np.random.randint(1,7)
if dice <= 2:
step = max(0, step - 1)
elif dice <= 5:
step = step + 1
else:
step = step + np.random.randint(1,7)
random_walk.append(step)
# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Plot random_walk
plt.plot(random_walk)
# Show the plot
plt.show()
| 1,108 |
Apps/aulas/migrations/0014_alter_planejamento_data_envio_alter_resposta_data.py
|
arthur-asilva/rc_plataforma
| 0 |
2024022
|
# Generated by Django 4.0 on 2021-12-31 17:17
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('aulas', '0013_alter_planejamento_data_envio_alter_resposta_data'),
]
operations = [
migrations.AlterField(
model_name='planejamento',
name='data_envio',
field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 12, 31, 14, 17, 53, 532938), null=True),
),
migrations.AlterField(
model_name='resposta',
name='data',
field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 12, 31, 14, 17, 53, 533878), null=True),
),
]
| 738 |
src/molecule/test/unit/model/v2/test_platforms_section.py
|
prity-k/molecule
| 2 |
2024429
|
# Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import pytest
from molecule.model import schema_v3
@pytest.mark.parametrize(
"_config", ["_model_platforms_delegated_section_data"], indirect=True
)
def test_platforms_delegated(_config):
assert {} == schema_v3.validate(_config)
@pytest.mark.parametrize(
"_config", ["_model_platforms_delegated_section_data"], indirect=True
)
def test_platforms_unique_names(_config):
instance_name = _config["platforms"][0]["name"]
_config["platforms"] += [{"name": instance_name}] # duplicate platform name
expected_validation_errors = {
"platforms": [
{
0: [{"name": ["'{}' is not unique".format(instance_name)]}],
1: [{"name": ["'{}' is not unique".format(instance_name)]}],
}
]
}
assert expected_validation_errors == schema_v3.validate(_config)
def test_platforms_driver_name_required(_config):
if "platforms" in _config:
del _config["platforms"][0]["name"]
else:
_config["platforms"] = [{"foo": "bar"}]
x = {"platforms": [{0: [{"name": ["required field"]}]}]}
assert x == schema_v3.validate(_config)
| 2,257 |
classifier/modeltest_multilayer.py
|
GusSand/youarespecial
| 93 |
2023586
|
import common
import numpy as np
# this will take a LONG time the first time you run it (and cache features to disk for next time)
# it's also chatty. Parts of feature extraction require LIEF, and LIEF is quite chatty.
# the output you see below is *after* I've already run feature extraction, so that
# X and sample_index are being read from cache on disk
X, y, sha256list = common.extract_features_and_persist()
# split our features, labels and hashes into training and test sets
from sklearn.model_selection import train_test_split
np.random.seed(123)
X_train, X_test, y_train, y_test, sha256_train, sha256_test = train_test_split(
X, y, sha256list, test_size=1000)
# StandardScaling the data can be important to multilayer perceptron
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X_train)
###########
# sanity check: random forest classifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
rf = RandomForestClassifier(
n_estimators=40, n_jobs=-1, max_depth=30).fit(X_train, y_train)
y_pred = rf.predict_proba(X_test)[:,-1] # get probability of malicious (last class == last column )
common.summarize_performance(y_pred, y_test, "RF Classifier")
from sklearn.externals import joblib
joblib.dump(rf, 'random_forest.pkl')
# simple multilayer perceptron
X_train = scaler.transform(X_train) # scale for multilayer perceptron
X_test = scaler.transform(X_test)
import simple_multilayer
from keras.callbacks import LearningRateScheduler
model = simple_multilayer.create_model(input_shape=(
X_train.shape[1], ), input_dropout=0.1, hidden_dropout=0.1, hidden_layers=[4096, 2048, 1024, 512])
model.fit(X_train, y_train,
batch_size=128,
epochs=20,
verbose=1,
callbacks=[LearningRateScheduler(
lambda epoch: common.schedule(0.2, 0.5, 5))],
validation_data=(X_test, y_test))
y_pred = model.predict(X_test)
common.summarize_performance(y_pred, y_test, "Multilayer perceptron")
model.save('multilayer.h5')
| 2,045 |
phoneme_classifier/train.py
|
DennisMagnusson/voice2voice
| 0 |
2024467
|
import torch
from torch import nn, optim
from read_data import get_data, get_dict
from tqdm import tqdm
from model import PhonemeClassifier
from random import shuffle
"""
class PhonemeClassifier(nn.Module):
def __init__(self):
super(PhonemeClassifier, self).__init__()
self.linear0 = nn.Linear(80, 256)
self.lstm1 = nn.GRU(256, 256, 3, dropout=0.15, batch_first=True, bidirectional=True)
self.dropout = nn.Dropout(0.1)
self.linear1 = nn.Linear(512, 256)
self.linear2 = nn.Linear(256, 61)
self.softmax = nn.Softmax(dim=1)
def forward(self, x, hidden=None):
out = self.linear0(x)
out, hx = self.lstm1(out, hidden)
#print(out.shape)
out = self.dropout(out)
out = self.linear1(out)
out = self.linear2(out)
#out = self.softmax(self.linear1(out))
out = torch.transpose(out, 1, 2)
return out, hx
"""
def get_batches(mels, phones, batch_size=64, seq_len=64):
batch_x = []
batch_y = []
# Shuffle data
c = list(zip(mels, phones))
shuffle(c)
mels, phones = zip(*c)
for mel, phone in tqdm(zip(mels, phones), total=len(mels)):
for i in range(0, len(mel)-seq_len, seq_len):
batch_x.append(mel[i:i+seq_len])
batch_y.append(phone[i:i+seq_len])
            if len(batch_x) == batch_size:
                yield torch.Tensor(batch_x), torch.LongTensor(batch_y)
batch_x = []
batch_y = []
#yield torch.Tensor(batch_x), torch.LongTensor(batch_y)
if __name__ == '__main__':
mels, phones = get_data('TIMIT-data/data/TRAIN/')
#torch.set_num_threads(4)
#things = torch.load('./models/phoneme-classifier-model.pth')
things = torch.load('./models/phoneme-classifier--final64-model-20ep.pth')
model = things['model']
optimizer = things['optimizer']
#model = PhonemeClassifier()
#model.load_state_dict(torch.load('./models/phoneme-classifier-final64-state_dict-20ep.pth'))
#optimizer = optim.Adam(model.parameters(), lr=0.0003)
criterion = nn.CrossEntropyLoss()
torch.autograd.set_detect_anomaly(True)
seq_len = 64
#smooth_acc = 1.0/61
#smooth_loss = 0.0177
smooth_acc = 0.770
smooth_loss = 0.018
batch_size = 32
seq_len = 64
for ep in range(21, 100):
counter = 0
#for mel, phone in tqdm(zip(mels, phones), total=len(mels)):
for x, y in get_batches(mels, phones, batch_size=batch_size, seq_len=seq_len):
counter += 1
hx = None
retain_graph = True
optimizer.zero_grad()
#logits, hx = model.forward(x, hidden=hx)
logits, _ = model.forward(x, hidden=hx)
#print(logits.shape)
# TODO Might need a loop here
loss = criterion(logits, y)
# Get accuracy
correct = 0
for i in range(batch_size):
correct += (torch.argmax(logits[i], dim=0) == y[i]).sum().item()
#accuracy = float(correct) / len(mel)
accuracy = float(correct) / (batch_size*seq_len)
smooth_acc = smooth_acc*0.95 + accuracy*0.05
# TODO Might be wrong if last seq is shorter
loss /= batch_size
smooth_loss = smooth_loss*0.95 + loss.item()*0.05
loss.backward(retain_graph=retain_graph)
optimizer.step()
#if counter % 500 == 0 or counter == 1:
#print('smooth_accuracy: {}\tsmooth_loss: {}'.format(smooth_acc, smooth_loss))
#print('accuracy: {}\tloss: {}'.format(accuracy, loss.item()))
print('ep {} done'.format(ep))
print('smooth_accuracy: {}\tsmooth_loss: {}'.format(smooth_acc, smooth_loss))
print('accuracy: {}\tloss: {}'.format(accuracy, loss.item()))
if ep % 10 == 0:
saved_thing = {'model': model, 'optimizer': optimizer}
torch.save(model.state_dict(), './models/phoneme-classifier-final64-state_dict-{}ep.pth'.format(ep))
torch.save(saved_thing, './models/phoneme-classifier--final64-model-{}ep.pth'.format(ep))
print('checkpoint saved')
| 3,978 |
Python-Math.py
|
SkyLee310/Python-Math
| 0 |
2024468
|
import time
print('你好(*´▽`)ノノ我是Sky,你的机械助理')
time.sleep(1)
print('当前版本为1.04.30 BETA ,您可通过联机获取最新版本')
time.sleep(1)
while True:
xuanze=input('''请选择功能:
a.计算器
b.计算三角形面积
c.计算平方根(SquareRoot)
d.寻找因数(Factor)
e.计算最大公约数(HCF)
f.计算最小公倍数(LCM)
g.计算抛物线长度(Length of parabora)
h.计算圆外边长度(Length of Arc)
您的选择是: ''')
if xuanze=='a':
def add(x, y):
return x + y
def subtract(x, y):
return x - y
def multiply(x, y):
return x * y
def divide(x, y):
return x / y
print("选择运算:")
print("1、相加")
print("2、相减")
print("3、相乘")
print("4、相除")
choice = input("输入你的选择(1/2/3/4/输入其他退出):")
num1 = int(input("输入第一个数字: "))
num2 = int(input("输入第二个数字: "))
if choice == '1':
print(num1,"+",num2,"=", add(num1,num2))
time.sleep(2)
elif choice == '2':
print(num1,"-",num2,"=", subtract(num1,num2))
time.sleep(2)
elif choice == '3':
print(num1,"*",num2,"=", multiply(num1,num2))
time.sleep(2)
elif choice == '4':
print(num1,"/",num2,"=", divide(num1,num2))
time.sleep(2)
else:
print("非法输入")
time.sleep(2)
break
elif xuanze=='b':
a = float(input('输入三角形第一边长: '))
b = float(input('输入三角形第二边长: '))
c = float(input('输入三角形第三边长: '))
s = (a + b + c) / 2
#海尔公式
area = (s*(s-a)*(s-b)*(s-c)) ** 0.5
print('三角形面积为 %0.2f' %area)
time.sleep(2)
elif xuanze=='c':
num = float(input('请输入一个数字: '))
num_sqrt = num ** 0.5
print(' %0.3f 的平方根为 %0.3f'%(num ,num_sqrt))
time.sleep(2)
elif xuanze=='d':
x = int(input("Input a Number: "))
for i in range(1,x+1):
if x%i==0:
print(i)
time.sleep(3)
elif xuanze=='e':
# 定义一个函数
def hcf(x, y):
#"""该函数返回两个数的最大公约数"""
# 获取最小值
if x > y:
smaller = y
else:
smaller = x
for i in range(1,smaller + 1):
if((x % i == 0) and (y % i == 0)):
hcf = i
return hcf
# 用户输入两个数字
num1 = int(input("输入第一个数字: "))
num2 = int(input("输入第二个数字: "))
print( num1,"和", num2,"的最大公约数为", hcf(num1, num2))
time.sleep(2)
elif xuanze=='f':
# 定义函数
def lcm(x, y):
# 获取最大的数
if x > y:
greater = x
else:
greater = y
while(True):
if((greater % x == 0) and (greater % y == 0)):
lc = greater
break
greater += 1
            return lc
# 获取用户输入
num1 = int(input("输入第一个数字: "))
num2 = int(input("输入第二个数字: "))
        print(num1, "和", num2, "的最小公倍数为", lcm(num1, num2))
time.sleep(2)
elif xuanze == 'g':
i = int(input("抛物线水平长度: "))
h = int(input("为抛物线的矢高(抛物线对比地面的最高点): "))
L= i+(8*h**2)/(3*i**2)*i
print("抛物线的长度为:", L)
time.sleep(5)
elif xuanze == 'h':
pi = 3.1415926
r = int(input("Radius: "))
tht = int(input("θ:"))
arc = (tht/360)*(2*pi*r)
print(arc)
time.sleep(3)
#作者:Skylee
| 4,006 |
Lists.py
|
ChaitanyaJoshiX/Pirple-Python
| 0 |
2024473
|
"""
Creating a function which adds unique elements to a list when called upon.
Adding non-unique elements to a separate list.
Testing out the function with different elements and printing both the lists in the end.
GitHub : @ChaitanyaJoshiX
"""
myUniqueList = [] #Creating the list for unique elements
myLeftovers = [] #Creating the list for non-unique elements
def AddToList(value):
if value in myUniqueList:
myLeftovers.append(value) #Adding value to non-unique list
return False
else:
myUniqueList.append(value) #Adding value to unique list
return True
print(AddToList("Hello"))
print(AddToList(1))
print(AddToList("Hello"))
print(AddToList(3.2))
print(AddToList(1))
print(myUniqueList) #Printing the unique list created
print(myLeftovers) #Printing the remaining values which were rejected in myUniqueList
"""
GitHub : @ChaitanyaJoshiX
"""
| 909 |
mopidy_pidi/brainz.py
|
ykenneth/mopidy-pidi
| 7 |
2023693
|
"""
Musicbrainz related functions.
"""
import base64
import logging
import os
import time
from threading import Thread
import musicbrainzngs as mus
from .__init__ import __version__
logger = logging.getLogger(__name__)
class Brainz:
def __init__(self, cache_dir):
"""Initialize musicbrainz."""
mus.set_useragent(
"python-pidi: A cover art daemon.",
__version__,
"https://github.com/pimoroni/mopidy-pidi",
)
self._cache_dir = cache_dir
self._default_filename = os.path.join(self._cache_dir, "__default.jpg")
self.save_album_art(self.get_default_album_art(), self._default_filename)
def get_album_art(self, artist, album, callback=None):
if artist is None or album is None or artist == "" or album == "":
if callback is not None:
return callback(self._default_filename)
return self._default_filename
file_name = self.get_cache_file_name(f"{artist}_{album}")
if os.path.isfile(file_name):
# If a cached file already exists, use it!
if callback is not None:
return callback(file_name)
return file_name
if callback is not None:
def async_request_album_art(self, artist, album, file_name, callback):
album_art = self.request_album_art(artist, album)
if album_art is None:
# If the MusicBrainz request fails, cache the default
# art using this filename.
self.save_album_art(self.get_default_album_art(), file_name)
return callback(file_name)
self.save_album_art(album_art, file_name)
return callback(file_name)
t_album_art = Thread(
target=async_request_album_art,
args=(self, artist, album, file_name, callback),
)
t_album_art.start()
return t_album_art
else:
album_art = self.request_album_art(artist, album)
if album_art is None:
# If the MusicBrainz request fails, cache the default
# art using this filename.
self.save_album_art(self.get_default_album_art(), file_name)
return file_name
self.save_album_art(album_art, file_name)
return file_name
def save_album_art(self, data, output_file):
with open(output_file, "wb") as f:
f.write(data)
def request_album_art(self, artist, album, size=500, retry_delay=5, retries=5):
"""Download the cover art."""
try:
data = mus.search_releases(artist=artist, release=album, limit=1)
release_id = data["release-list"][0]["release-group"]["id"]
logger.info("mopidy-pidi: musicbrainz using release-id: {release_id}")
return mus.get_release_group_image_front(release_id, size=size)
except mus.NetworkError:
if retries == 0:
# raise mus.NetworkError("Failure connecting to MusicBrainz.org")
return None
logger.info(
f"mopidy-pidi: musicbrainz retrying download. {retries} retries left!"
)
time.sleep(retry_delay)
            return self.request_album_art(artist, album, size=size, retries=retries - 1)
except mus.ResponseError:
logger.info(
f"mopidy-pidi: musicbrainz couldn't find album art for {artist} - {album}"
)
return None
def get_cache_file_name(self, file_name):
file_name = file_name.encode("utf-8")
file_name = base64.b64encode(file_name)
if type(file_name) is bytes:
file_name = file_name.decode("utf-8")
# Ruh roh, / is a vaild Base64 character
# but also a valid UNIX path separator!
file_name = file_name.replace("/", "-")
file_name = f"{file_name}.jpg"
return os.path.join(self._cache_dir, file_name)
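    # Illustrative note (not in the original): get_cache_file_name base64-encodes
    # the "<artist>_<album>" string, swaps '/' for '-' so it stays a single path
    # component, and appends '.jpg', so repeated lookups for the same artist and
    # album resolve to the same cached file under self._cache_dir.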
def get_default_album_art(self):
"""Return binary version of default album art."""
return base64.b64decode(
"""
<KEY>"""
)
| 4,240 |
backport/fix_leaf.py
|
tailhook/pyzza
| 2 |
2024410
|
from lib2to3 import fixer_base, pytree, patcomp
from lib2to3.pgen2 import token
from lib2to3.fixer_util import Call, Name
"""
Fixes:
class Leaf:
...
def ...
self.value = value
into:
class Leaf:
...
def ...
        self.value = unicode(value)
"""
class FixLeaf(fixer_base.BaseFix):
PATTERN = """
classdef< 'class' 'Leaf' any* suite< any*
funcdef< any* suite< any*
simple_stmt< expr_stmt< power< 'self' trailer< '.' 'value' > >
'=' val='value' >
any* > any* > > any* > >
"""
def transform(self, node, results):
results['val'].replace(Call(Name('unicode'), [results['val'].clone()]))
return node
| 745 |
shiftval/validators/route.py
|
bitsbeats/shiftval
| 0 |
2023852
|
import click
from jsonpath_ng import parse as json_path
from shiftval.helpers import object_ident
from shiftval.errors import LintError
from shiftval.helpers import validator
@validator('Route')
def route_monitoring(yml: object):
"""
A route should always be monitored by ormon.
annotations:
thobits.com/ormon-valid-statuscodes: 200,300
thobits.com/ormon-body-regex: Thomann
note: ormon-valid-statuscodes has to be a string.
"""
matches = json_path(
'metadata.annotations."thobits.com/ormon-skip"'
).find(yml)
if (matches
            and matches[0].value in ['1', 't', 'T', 'TRUE', 'true', 'True']):
click.secho(
f'* resource with disabled monitoring found: {object_ident(yml)}',
fg='yellow'
)
return
    codes = json_path('metadata.annotations."thobits.com/ormon-valid-statuscodes"').find(yml)
if codes:
codes = codes.pop().value
if not all(code.isdigit() for code in codes.split(',')):
raise LintError(f'invalid statuscodes for {object_ident(yml)}')
return
body_regexes = json_path(
'metadata.annotations."thobits.com/ormon-body-regex"'
).find(yml)
if body_regexes:
body_regex = body_regexes.pop()
return
raise LintError(f'no monitoring specified for {object_ident(yml)}')
@validator('Route')
def route_warn_public(yml: object):
"""Warn for every public route."""
matches = json_path(
'metadata.annotations."acme.openshift.io/exposer"'
).find(yml)
if matches:
click.secho(f'* public route found: {object_ident(yml)}', fg='yellow')
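# Usage sketch (illustrative): a minimal Route-like dict that passes
# route_monitoring via the body-regex annotation described in the docstring.
#
#   route = {
#       "kind": "Route",
#       "metadata": {
#           "name": "example",
#           "annotations": {"thobits.com/ormon-body-regex": "Thomann"},
#       },
#   }
#   route_monitoring(route)  # returns; raises LintError if no monitoring is set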
| 1,634 |
COE/contents/building/watch_tower.py
|
Python-Project-Cheap-Empire/cheap-of-empire
| 0 |
2023487
|
from COE.contents.entity import Entity
from COE.contents.entity_types import EntityTypes
from COE.logic.Player import Player
from .building import Building
from .granary import Granary
class WatchTower(Building):
def __init__(self, position: tuple, player: Player):
self.damage = 3
self.range = 5
self.attack_speed = 1.5
super().__init__(
name="Watch Tower",
hp=125,
positions=position,
height=1,
width=1,
line_of_sight=6,
required_building={Granary.__name__},
required_age=2,
required_researches={}, # Watch tower
researches={},
stone_required=150,
wood_required=0,
construction_time=30,
melee_armor=0,
pierce_armor=0,
entity_type=EntityTypes.GROUND,
player=player,
)
def attack(
self,
target: Entity,
) -> str:
return "Attacking..."
| 1,029 |
conversion_scripts/ancor_to_json.py
|
WeiweiGu1998/incremental-coref
| 13 |
2023101
|
import sys, os
from collections import defaultdict
import json
text_dir = sys.argv[1]
chains_dir = sys.argv[2]
mentions_dir = sys.argv[3]
output = sys.argv[4]
books = os.listdir(chains_dir)
output_file = open(output, 'w+')
def fix_bounds(token_dict, index, go_up=True):
if index in token_dict:
return token_dict[index]
if go_up:
return fix_bounds(token_dict, index + 1, go_up=go_up)
else:
return fix_bounds(token_dict, index - 1, go_up=go_up)
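# For example, fix_bounds({12: 3}, 10, go_up=True) climbs 10 -> 11 -> 12 and
# returns 3; with go_up=False it would walk downward instead.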
num_real_mentions = 0
num_mentions = 0
num_chains = 0
for book in books:
try:
tokens_file = open(text_dir + "/" + book, 'r')
except:
print (f"Skipping {book}")
continue
tokens_list = tokens_file.readlines()
starts = {}
ends = {}
sent = []
curr_doc_id = 0
curr_doc = []
doc_len = 0
num_reals = 0
for i, tokstr in enumerate(tokens_list):
tokstr = tokstr.strip()
sent_end = tokstr == ""
if sent_end:
curr_doc.append(sent)
sent = []
else:
tok = tokstr.split("\t")
sent.append(tok[3])
starts[int(tok[1])] = doc_len
ends[int(tok[1]) + int(tok[2])] = doc_len
doc_len += 1
if sent:
curr_doc.append(sent)
cluster_doc = open(chains_dir + "/" + book, 'r')
clusters = defaultdict(list)
seen_mentions = set()
for line in cluster_doc:
(mid, start, length, chain_id) = tuple([int(x) for x in line.strip().split()])
left = fix_bounds(starts, start, go_up=True)
right = fix_bounds(ends, start + length, go_up=False)
if left > right:
print (f"Died on {left}, {right}")
right = left
clusters[chain_id].append([left, right])
if (left, right) not in seen_mentions:
seen_mentions.add((left, right))
else:
print ("dupe")
import pdb; pdb.set_trace()
mentions_doc = open(mentions_dir + "/" + book, 'r')
for i, line in enumerate(mentions_doc):
(mid, start, length) = tuple([int(x) for x in line.strip().split()])
num_real_mentions += 1
num_reals += 1
left = fix_bounds(starts, start, go_up=True)
right = fix_bounds(ends, start + length, go_up=False)
if left > right:
print (f"And died on {left}, {right}")
right = left
if (left, right) not in seen_mentions:
clusters[1000000 + i].append([left, right])
num_chains += len(clusters)
net_mentions = sum([len(c) for c in clusters.values()])
num_mentions += net_mentions
json_dict = {
"doc_key": "ancor_" + book,
"language": "russian",
"sentences": curr_doc,
"clusters": list(clusters.values()),
}
output_file.write(json.dumps(json_dict) + "\n")
print (num_real_mentions)
print (num_mentions)
print (num_chains)
| 2,906 |
uws_client/simple_webapp.py
|
ParisAstronomicalDataCentre/OPUS
| 7 |
2024387
|
#!/usr/bin/env python
#
# Copyright (C) 2013 <NAME> and others, see AUTHORS file.
# Released under LGPLv3+ license, see LICENSE.txt
#
# Cork example web application
#
# The following users are already available:
# admin/admin, demo/demo
import bottle
from beaker.middleware import SessionMiddleware
from cork import Cork
import logging
logging.basicConfig(format='localhost - - [%(asctime)s] %(message)s', level=logging.DEBUG)
log = logging.getLogger(__name__)
bottle.debug(True)
# Use users.json and roles.json in the local example_conf directory
aaa = Cork('example_conf', email_sender='<EMAIL>', smtp_url='smtp://smtp.magnet.ie')
app = bottle.app()
session_opts = {
'session.cookie_expires': True,
'session.encrypt_key': 'please use a random key and keep it secret!',
'session.httponly': True,
'session.timeout': 3600 * 24, # 1 day
'session.type': 'cookie',
'session.validate_key': True,
}
app = SessionMiddleware(app, session_opts)
# # Bottle methods # #
def postd():
return bottle.request.forms
def post_get(name, default=''):
return bottle.request.POST.get(name, default).strip()
@bottle.post('/login')
def login():
"""Authenticate users"""
username = post_get('username')
password = post_get('password')
aaa.login(username, password, success_redirect='/', fail_redirect='/login')
@bottle.route('/user_is_anonymous')
def user_is_anonymous():
if aaa.user_is_anonymous:
return 'True'
return 'False'
@bottle.route('/logout')
def logout():
aaa.logout(success_redirect='/login')
@bottle.post('/register')
def register():
"""Send out registration email"""
aaa.register(post_get('username'), post_get('password'), post_get('email_address'))
return 'Please check your mailbox.'
@bottle.route('/validate_registration/:registration_code')
def validate_registration(registration_code):
"""Validate registration, create user account"""
aaa.validate_registration(registration_code)
return 'Thanks. <a href="/login">Go to login</a>'
@bottle.post('/reset_password')
def send_password_reset_email():
"""Send out password reset email"""
aaa.send_password_reset_email(
username=post_get('username'),
email_addr=post_get('email_address')
)
return 'Please check your mailbox.'
@bottle.route('/change_password/:reset_code')
@bottle.view('password_change_form')
def change_password(reset_code):
"""Show password change form"""
return dict(reset_code=reset_code)
@bottle.post('/change_password')
def change_password():
"""Change password"""
aaa.reset_password(post_get('reset_code'), post_get('password'))
return 'Thanks. <a href="/login">Go to login</a>'
@bottle.route('/')
def index():
"""Only authenticated users can see this"""
aaa.require(fail_redirect='/login')
return 'Welcome! <a href="/admin">Admin page</a> <a href="/logout">Logout</a>'
@bottle.route('/restricted_download')
def restricted_download():
"""Only authenticated users can download this file"""
aaa.require(fail_redirect='/login')
return bottle.static_file('static_file', root='.')
@bottle.route('/my_role')
def show_current_user_role():
"""Show current user role"""
session = bottle.request.environ.get('beaker.session')
print "Session from simple_webapp", repr(session)
aaa.require(fail_redirect='/login')
return aaa.current_user.role
# Admin-only pages
@bottle.route('/admin')
@bottle.view('admin_page')
def admin():
"""Only admin users can see this"""
aaa.require(role='admin', fail_redirect='/sorry_page')
return dict(
current_user=aaa.current_user,
users=aaa.list_users(),
roles=aaa.list_roles()
)
@bottle.post('/create_user')
def create_user():
try:
aaa.create_user(postd().username, postd().role, postd().password)
return dict(ok=True, msg='')
except Exception, e:
return dict(ok=False, msg=e.message)
@bottle.post('/delete_user')
def delete_user():
try:
aaa.delete_user(post_get('username'))
return dict(ok=True, msg='')
except Exception, e:
print repr(e)
return dict(ok=False, msg=e.message)
@bottle.post('/create_role')
def create_role():
try:
aaa.create_role(post_get('role'), post_get('level'))
return dict(ok=True, msg='')
except Exception, e:
return dict(ok=False, msg=e.message)
@bottle.post('/delete_role')
def delete_role():
try:
aaa.delete_role(post_get('role'))
return dict(ok=True, msg='')
except Exception, e:
return dict(ok=False, msg=e.message)
# Static pages
@bottle.route('/login')
@bottle.view('login_form')
def login_form():
"""Serve login form"""
return {}
@bottle.route('/sorry_page')
def sorry_page():
"""Serve sorry page"""
return '<p>Sorry, you are not authorized to perform this action</p>'
# # Web application main # #
def main():
# Start the Bottle webapp
bottle.debug(True)
bottle.run(app=app, quiet=False, reloader=True)
if __name__ == "__main__":
main()
| 5,100 |
pinaxcon/registrasion/migrations/0004_attendeeprofile_agreement.py
|
lamby/website
| 4 |
2023090
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-04 13:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pinaxcon_registrasion', '0003_auto_20171002_1719'),
]
operations = [
migrations.AddField(
model_name='attendeeprofile',
name='agreement',
field=models.BooleanField(default=False, help_text=b"I agree to act according to the conference <a href='/code-of-conduct'>Code of Conduct</a>. I also agree with the North Bay Python <a href='/terms'>Terms and Conditions</a>.", verbose_name=b'Agreement'),
),
]
| 692 |
world/gen/mode/DebugOverWorldGenerator.py
|
uuk0/mcpython-4
| 2 |
2023342
|
"""mcpython - a minecraft clone written in python licenced under MIT-licence
authors: uuk, xkcdjerry
original game by forgleman licenced under MIT-licence
minecraft by Mojang
blocks based on 1.14.4.jar of minecraft, downloaded on 20th of July, 2019"""
import globals as G
import event.Registry
import math
import util.math
import mod.ModMcpython
class blockinfo:
TABLE = {} # {chunk: tuple<x, z> -> {position<x,z> -> blockname}}
@classmethod
def construct(cls):
BLOCKS: event.Registry.Registry = G.registry.get_by_name("block")
blocktable = BLOCKS.registered_objects
blocktable.sort(key=lambda x: x.get_name())
blocklist = []
for block in blocktable:
for state in block.get_all_model_states():
blocklist.append((block, state))
size = math.ceil(len(blocklist) ** 0.5)
hsize = size // 2 + 1
rx, ry = -hsize, -hsize
for block, state in blocklist:
x, y = rx * 4, ry * 4
chunk = util.math.sectorize((x, 0, y))
cls.TABLE.setdefault(chunk, {})[(x, y)] = (block.get_name(), state)
rx += 1
if x >= hsize:
ry += 1
rx = -hsize
def chunk_generate(cx, cz, chunk):
if (cx, cz) in blockinfo.TABLE:
heightmap = chunk.get_value("heightmap")
blockmap = blockinfo.TABLE[(cx, cz)]
for x, z in blockmap.keys():
block, state = blockmap[(x, z)]
chunk.add_add_block_gen_task((x, 10, z), block, kwargs={"state": state}, block_update=False)
heightmap[(x, z)] = [(0, 10)]
config = {"layers": [], "on_chunk_generate_pre": chunk_generate}
G.worldgenerationhandler.register_world_gen_config("debug_overworld", config)
mod.ModMcpython.mcpython.eventbus.subscribe("stage:post", blockinfo.construct, info="constructing debug world info")
| 1,891 |
watch/admin.py
|
thuojose/Neighbourhood
| 0 |
2023921
|
from django.contrib import admin
from .models import Neighbourhood, healthservices, notifications, Business, Health, Authorities, BlogPost, Profile
class HealthAdmin(admin.ModelAdmin):
filter_horizontal = ['healthservices']
# Register your models here.
admin.site.register(Neighbourhood)
admin.site.register(notifications)
admin.site.register(Health, HealthAdmin)
admin.site.register(Business)
admin.site.register(healthservices)
admin.site.register(Authorities)
admin.site.register(BlogPost)
admin.site.register(Profile)
| 527 |
patients/urls.py
|
friskycodeur/patient-backend
| 0 |
2024247
|
from django.urls import path, include
from django.contrib import admin
from django.views.generic import TemplateView
from patient.views import patient_list_view
urlpatterns = [
path("", patient_list_view, name="home"),
path("admin/", admin.site.urls),
path("patient/", include("patient.urls")),
]
| 310 |
lucy.py
|
justincely/lucy
| 14 |
2024255
|
""" I want to do lucy-richardson in python!!!
"""
import numpy as np
import scipy
from scipy import stats
from scipy.ndimage.filters import convolve
try:
from astropy.io import fits
except ImportError:
import pyfits as fits
import matplotlib.pyplot as plt
import pdb
def pad_psf(psf, spectrum):
out_psf = np.zeros( spectrum.shape )
start = len(spectrum)/2 - len(psf)/2
end = start + len(psf)
out_psf[start:end] = psf
return out_psf
def rl_fft(raw_image, psf, niter, k=1, con_var=None):
""" Implementing the one i got from Jerry
"""
calc_chisq = lambda a, b, c, d: np.sum((a - b)**2 / (a + c)**2 / (d-1))
conversion = raw_image.mean() / 10
raw_image /= conversion
lucy = np.ones(raw_image.shape)
ratio = k * np.ones(raw_image.shape)
fft_psf = np.fft.fft(psf)
con_var = sample_noise(raw_image)
print "using: ", con_var
norm = np.fft.ifft(np.fft.fft(ratio) * np.conj(fft_psf))
#index = np.where(norm <= 1E-3 * norm.max())
#norm[index] = 1
#raw_image[index] = 0
fft_conv = fft_psf * np.fft.fft(lucy)
lucy_conv = np.fft.ifft(fft_conv)
chisq = calc_chisq(lucy_conv, raw_image, con_var, raw_image.size)
print "initial Chisq: {}".format(chisq)
#plt.figure()
#plt.plot(raw_image)
for iteration in range(niter):
ratio = k * (raw_image + con_var) / (lucy_conv + con_var)
fft_srat = np.fft.fft(ratio) * np.conj(fft_psf)
lucy *= np.fft.ifft(fft_srat) / norm
print lucy.max(), lucy.mean(), lucy.min()
fft_conv = fft_psf * np.fft.fft(lucy)
lucy_conv = np.fft.ifft(fft_conv)
size = lucy.size
#plt.plot(lucy[range(size/2,size)+range(0,size/2)])
chisq = calc_chisq(lucy_conv, raw_image, con_var, raw_image.size)
print "Iteration {} Chisq: {}".format(iteration, chisq)
#pdb.set_trace()
#raw_input('done')
#--Why?!
lucy = lucy[range(size/2,size)+range(0,size/2)]
return lucy * conversion
def rl_standard(raw_image, psf, niter):
""" Standerd lucy-richardson convolution
arXiv 2002 Lauer
"""
psf /= psf.sum()
psf_inverse = psf[::-1]
lucy = np.ones( raw_image.shape ) * raw_image.mean()
for i in xrange( niter ):
estimate = convolve(lucy, psf, mode='mirror')
estimate[ np.isnan(estimate) ] = 0
correction = convolve(raw_image/estimate, psf_inverse, mode='mirror')
correction[ np.isnan(correction) ] = 0
print 'Correction:',correction.mean()
lucy *= correction
print 'Means:', raw_image.mean(), lucy.mean()
chisq = scipy.nansum((lucy - raw_image)**2 / (lucy)) / (raw_image.size-1)
print chisq
return lucy
def sample_noise( spectrum ):
samples = [spectrum[start:start+300].std() for start in range(0, len(spectrum), 300) ]
return np.median( samples )
def rl_damped(raw, psf, niter=2, damped=True, N=3, T=None, multiplier=1):
""" working on it"""
#psf /= psf.sum()
conversion = raw.mean() / 10
raw /= conversion
lucy = np.ones(raw.shape) * raw.mean()
#plt.ion()
#plt.figure()
#plt.plot(raw)
#plt.axhline(y=0, lw=2, color='black')
for i in xrange(niter):
if damped:
print "dampening"
lucy_temp = convolve( lucy, psf, mode='mirror')
ratio = dampen(lucy_temp, raw, N, T, multiplier)
else:
ratio = raw / convolve(lucy, psf, mode='mirror')
ratio[ np.isnan(ratio) ] = 0
top = convolve( ratio, psf, mode='mirror')
top[ np.isnan(top) ] = 0
lucy = lucy * (top / psf.sum())
#plt.plot( lucy )
print 'iteration', i, lucy.mean(), raw.mean()
print
#raw_input('Done')
return lucy * conversion
def u_factor(lucy, raw_image, T=None, multiplier=1):
""" Equation 7
http://spider.ipac.caltech.edu/staff/fmasci/home/astro_refs/DampledLR94.pdf
"""
assert np.all( lucy > 0 ), 'Negative values'
T = T or multiplier * sample_noise(raw_image)
print 'Using {} for T'.format(T)
first = (-2.0 / T**2)
ratio = lucy / raw_image
ratio[ np.isnan(ratio) ] = 0
logarithm = np.log( ratio )
logarithm[ np.isnan(logarithm) ] = 0
second = (raw_image * logarithm - lucy + raw_image)
factor = first * second
#factor = (-2.0 / T**2) * (raw_image * np.log( lucy / raw_image) - lucy + raw_image)
factor[ np.isnan(factor) ] = 0
print 'Factor_pre',np.median(factor[factor>0]), factor[factor>0].min()
factor = np.where(factor > 1, 1, factor)
print 'Factor=',np.median([factor>0]), factor[factor>0].min()
return factor
def dampen(lucy, raw, N=3, T=None, multiplier=1):
first = u_factor(lucy, raw, T, multiplier)**(N-1)
first[ np.isnan(first) ] = 0
print first.mean()
second = (N - (N-1) * u_factor(lucy, raw, T, multiplier))
second[ np.isnan(second) ] = 0
print second.mean()
third = (raw - lucy)/lucy
third[ np.isnan(third) ] = 0
print third.mean()
return 1 + first * second * third
| 5,108 |
ionyweb/authentication/forms.py
|
makinacorpus/ionyweb
| 4 |
2023883
|
# -*- coding: utf-8 -*-
import floppyforms as forms
from django.contrib import auth
from django.contrib.auth.models import User
from django.shortcuts import render_to_response
from django.utils.translation import ugettext_lazy as _
import dns.resolver
from ionyweb.authentication.models import UserProfile
class AutofocusInput(forms.TextInput):
template_name = 'floppyforms/autofocus.html'
def get_context_data(self):
self.attrs['autofocus'] = True
return super(AutofocusInput, self).get_context_data()
class EditCurrentUser(forms.Form):
email = forms.EmailField(label = _(u"Email Address"),
help_text="",
required=True)
def __init__(self, user, *args, **kwargs):
super(EditCurrentUser, self).__init__(*args, **kwargs)
self.fields['email'].initial = user.email
def clean_email(self):
"Check the email domain for MX DNS record"
email = self.cleaned_data['email']
user, domain = email.split('@')
# Checking if the domain contains a MX record
try:
answers = dns.resolver.query(domain, 'MX')
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
raise forms.ValidationError(_(u"Emails from this domain are not "
u"accepted"))
else:
return email
def save(self, user, *args, **kwargs):
user.email = self.cleaned_data['email']
user.save()
| 1,541 |
typhos/benchmark/device.py
|
pcdshub/typhon
| 9 |
2024222
|
"""
A generator of test devices.
These are meant to be used for targeted benchmarking and typically are
"extreme" in both size and in composition in order to make specific
loading issues more obvious.
Note that this currently only supports devices with uniform signals.
In the future it can be expanded to have Kind information, different
data types to test different widget types, etc.
"""
from ophyd.device import Component as Cpt
from ophyd.device import create_device_from_components as create_device
from ophyd.signal import Signal
def make_test_device_class(name='TestClass', signal_class=Signal,
include_prefix=False, num_signals=10,
subdevice_layers=0, subdevice_spread=0):
"""
Creates a test :class:`ophyd.Device` subclass.
Parameters
----------
name : str, optional
The name of the class.
Defaults to 'TestClass'.
signal_class : type, optional
Picks which type of signal to use in the Device.
Defaults to :class:`ophyd.Signal`.
include_prefix : bool, optional
If True, passes a string as a positional argument into the signal
components. This should be True for something like an
:class:`ophyd.EpicsSignal` and False for something like a base
:class:`ophyd.Signal`, depending on the required arguments.
Defaults to False.
num_signals : int, optional
The number of signals to use in the test class. Note that this is the
number of signals per bottom-level subdevice. Therefore, the actual
total number of signals can be some multiple of this number.
Defaults to 10.
subdevice_layers : int, optional
The number of subdevices we need to traverse down before seeing
signals. For example, putting this at 0 results in no subdevices and
all signals at the top level, while putting this at 2 gives us only
subdevices at the top level, only subdevices on each of these
subdevices, and only signals on these bottom-most subdevices.
Has no effect if subdevice_spread is 0.
Defaults to 0.
subdevice_spread : int, optional
The number of subdevices to include in each layer.
Has no effect if subdevice_layers is 0.
Defaults to 0.
"""
signals = {}
for nsig in range(num_signals):
if include_prefix:
sig_cpt = Cpt(signal_class, f'SIGPV{nsig}')
else:
sig_cpt = Cpt(signal_class)
signals[f'signum{nsig}'] = sig_cpt
SignalHolder = create_device('SignalHolder', **signals)
if all((subdevice_layers > 0, subdevice_spread > 0)):
PrevClass = SignalHolder
while subdevice_layers > 0:
subdevices = {}
for ndev in range(subdevice_spread):
subdevices[f'devnum{ndev}'] = Cpt(PrevClass, f'PREFIX{ndev}:')
ThisClass = create_device(f'Layer{subdevice_layers}', **subdevices)
PrevClass = ThisClass
subdevice_layers -= 1
else:
ThisClass = SignalHolder
return create_device(name, base_class=ThisClass)
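# A minimal usage sketch (not part of the original module). The attribute
# names come from the fixed "devnum"/"signum" component names used above;
# everything else here is illustrative.
if __name__ == '__main__':
    Bench = make_test_device_class('Bench', num_signals=2,
                                   subdevice_layers=1, subdevice_spread=2)
    dev = Bench(name='bench')
    # One layer of two subdevices, each holding two soft signals.
    print(dev.component_names)          # ('devnum0', 'devnum1')
    print(dev.devnum0.signum0.get())    # 0.0 for a fresh ophyd.Signal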
| 3,144 |
postgresqleu/newsevents/management/commands/twitter_post.py
|
dlangille/pgeu-system
| 0 |
2024293
|
#
# Script to post previously unposted news to twitter
#
#
from django.core.management.base import BaseCommand, CommandError
from django.template.defaultfilters import slugify
from django.db import connection
from django.conf import settings
from datetime import datetime, timedelta
import sys
import time
from postgresqleu.newsevents.models import News
from postgresqleu.confreg.models import Conference, ConferenceNews, ConferenceTweetQueue, ConferenceIncomingTweet
from postgresqleu.util.messaging.twitter import Twitter
def news_tweets_queryset():
return News.objects.filter(tweeted=False, datetime__gt=datetime.now() - timedelta(days=7), datetime__lt=datetime.now())
def conferences_with_tweets_queryset():
return Conference.objects.filter(twittersync_active=True).extra(where=[
"(EXISTS (SELECT 1 FROM confreg_conferencetweetqueue q WHERE q.conference_id=confreg_conference.id AND q.approved AND NOT q.sent) OR EXISTS (SELECT 1 FROM confreg_conferenceincomingtweet i WHERE i.conference_id=confreg_conference.id AND i.retweetstate=1))"
])
class Command(BaseCommand):
help = 'Post to twitter'
class ScheduledJob:
scheduled_interval = timedelta(minutes=5)
@classmethod
def should_run(self):
if settings.TWITTER_NEWS_TOKEN:
if news_tweets_queryset().exists():
return True
if conferences_with_tweets_queryset().exists():
return True
return False
def handle(self, *args, **options):
curs = connection.cursor()
curs.execute("SELECT pg_try_advisory_lock(981273)")
if not curs.fetchall()[0][0]:
raise CommandError("Failed to get advisory lock, existing twitter_post process stuck?")
err = False
if settings.TWITTER_NEWS_TOKEN:
tw = Twitter()
for a in news_tweets_queryset().order_by('datetime'):
# We hardcode 30 chars for the URL shortener. And then 10 to cover the intro and spacing.
statusstr = "{0} {1}/news/{2}-{3}/".format(a.title[:140 - 40],
settings.SITEBASE,
slugify(a.title),
a.id)
id, msg = tw.post_tweet(statusstr)
if id:
a.tweeted = True
a.save()
else:
err = True
self.stderr.write("Failed to post to twitter: %s" % msg)
# Don't post more often than once / 10 seconds, to not trigger flooding detection.
time.sleep(10)
# Send off the conference twitter queue (which should normally only be one or two tweets, due to the filtering
# on datetime).
for c in conferences_with_tweets_queryset():
tw = Twitter(c)
for t in ConferenceTweetQueue.objects.filter(conference=c, approved=True, sent=False, datetime__lte=datetime.now()).order_by('datetime'):
id, msg = tw.post_tweet(t.contents, t.image, t.replytotweetid)
if id:
t.sent = True
t.tweetid = id
t.save(update_fields=['sent', 'tweetid', ])
else:
err = True
self.stderr.write("Failed to post to twitter: %s" % msg)
# Don't post more often than once / 10 seconds, to not trigger flooding detection.
time.sleep(10)
for t in ConferenceIncomingTweet.objects.filter(conference=c, retweetstate=1):
ok, msg = tw.retweet(t.statusid)
if ok:
t.retweetstate = 2
t.save(update_fields=['retweetstate'])
else:
self.stderr.write("Failed to retweet: %s" % msg)
time.sleep(2)
if err:
# Error message printed earlier, but we need to exit with non-zero exitcode
# to flag the whole job as failed.
sys.exit(1)
| 4,166 |
optimization/rank_one.py
|
ahujaradhika/optimization
| 0 |
2023459
|
import numpy as np
from numpy import linalg as LA
from scipy import optimize
a = np.array([[1, 0], [0, 2]])
b = np.array([1, -1])
def f(x):
    return np.matmul(np.matmul(np.dot(0.5, x.transpose()), a), x) - np.matmul(x.transpose(), b) + 7
def g(x):
    return np.matmul(a, x) - b
# only pass np values - no internal checking
def quad_alpha(x, d):
return np.matmul(np.dot(-1, g(x)), d) / np.matmul(np.matmul(d.transpose(), a), d)
# rank one correction formula
def rank_one(x0, max, tol):
# step 1
k = 0
x0 = np.array(x0)
H = np.array([[1, 0], [0, 1]])
while k < max:
# step 2
print(LA.norm(g(x0)))
if LA.norm(g(x0)) < tol:
return x0
else:
d = np.matmul(np.dot(-1, H), g(x0))
# step 3
alpha = quad_alpha(x0, d)
delta_x = np.dot(alpha, d)
x1 = x0 + delta_x
# step 4
delta_x = np.dot(alpha, d)
delta_g = g(x1) - g(x0)
num = np.matmul((delta_x - np.matmul(H, delta_g)), (delta_x - np.matmul(H, delta_g)).transpose())
denom = np.matmul(delta_g.transpose(), (delta_x - np.matmul(H, delta_g)))
H1 = H + (num / denom)
# loop back
H = H1
x0 = x1
k = k + 1
# # SciPy offers BFGS as part of its library
# print(optimize.fmin_bfgs(f, np.array([0, 0])), g)
# # NOTE: x must be entered as a 2-D array
# # there is a ZeroDivisionError increasing the error range
# print(rank_one([0, 0], 10, 0.5))
| 1,531 |
src/mygrad/math/nondifferentiable.py
|
Zac-HD/MyGrad
| 0 |
2024252
|
import numpy as np
from mygrad.tensor_base import Tensor
__all__ = ["argmin", "argmax"]
def argmax(a, axis=None, out=None):
""" Returns the indices of the maximum values along an axis.
Parameters
----------
a: array_like
axis: int, optional
By default, the index is into the flattened array, otherwise along the specified axis.
out: numpy.array, optional
If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype.
Returns
-------
numpy.ndarray[int]"""
a = a.data if isinstance(a, Tensor) else a
return np.argmax(a, axis, out)
def argmin(a, axis=None, out=None):
""" Returns the indices of the minimum values along an axis.
Parameters
----------
a: array_like
axis: int, optional
By default, the index is into the flattened array, otherwise along the specified axis.
out: numpy.array, optional
If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype.
Returns
-------
numpy.ndarray[int]"""
a = a.data if isinstance(a, Tensor) else a
return np.argmin(a, axis, out)
def any(a, axis=None, out=None, keepdims=False):
""" Test whether any array or Tensor element along a given axis evaluates to True.
Returns single boolean if `axis` is ``None``
This documentation was adapted from ``numpy.any``
Parameters
----------
a : array_like
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (``axis=None``) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See `ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `any` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
any : bool or ndarray
A new boolean or `ndarray` is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
Tensor.any : equivalent method
"""
a = a.data if isinstance(a, Tensor) else a
return np.any(a, axis=axis, out=out, keepdims=keepdims)
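# A small usage sketch (not part of the original module): the wrappers accept
# either a mygrad Tensor or any array-like and return plain numpy results.
if __name__ == "__main__":
    t = Tensor([[3.0, 1.0], [0.0, 2.0]])
    print(argmax(t))              # 0 -- index into the flattened data
    print(argmin(t, axis=1))      # [1 0]
    print(any([[0, 0], [0, 1]]))  # True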
| 3,356 |
rlpyt/utils/buffer.py
|
alex-petrenko/rlpyt
| 1 |
2023292
|
import numpy as np
import multiprocessing as mp
import ctypes
import torch
from rlpyt.utils.collections import namedarraytuple_like
# from rlpyt.utils.misc import put
def buffer_from_example(example, leading_dims, share_memory=False):
"""Allocates memory and returns it in `namedarraytuple` with same
structure as ``examples``, which should be a `namedtuple` or
`namedarraytuple`. Applies the same leading dimensions ``leading_dims`` to
every entry, and otherwise matches their shapes and dtypes. The examples
should have no leading dimensions. ``None`` fields will stay ``None``.
Optionally allocate on OS shared memory. Uses ``build_array()``.
"""
if example is None:
return
try:
buffer_type = namedarraytuple_like(example)
except TypeError: # example was not a namedtuple or namedarraytuple
return build_array(example, leading_dims, share_memory)
return buffer_type(*(buffer_from_example(v, leading_dims, share_memory)
for v in example))
def build_array(example, leading_dims, share_memory=False):
"""Allocate a numpy array matchin the dtype and shape of example, possibly
with additional leading dimensions. Optionally allocate on OS shared
memory.
"""
a = np.asarray(example)
if a.dtype == "object":
raise TypeError("Buffer example value cannot cast as np.dtype==object.")
constructor = np_mp_array if share_memory else np.zeros
if not isinstance(leading_dims, (list, tuple)):
leading_dims = (leading_dims,)
return constructor(shape=leading_dims + a.shape, dtype=a.dtype)
def np_mp_array(shape, dtype):
"""Allocate a numpy array on OS shared memory."""
size = int(np.prod(shape))
nbytes = size * np.dtype(dtype).itemsize
mp_array = mp.RawArray(ctypes.c_char, nbytes)
return np.frombuffer(mp_array, dtype=dtype, count=size).reshape(shape)
def torchify_buffer(buffer_):
"""Convert contents of ``buffer_`` from numpy arrays to torch tensors.
``buffer_`` can be an arbitrary structure of tuples, namedtuples, and
namedarraytuples, and a new, matching structure will be returned.
``None`` fields remain ``None``, and torch tensors are left alone."""
if buffer_ is None:
return
if isinstance(buffer_, np.ndarray):
return torch.from_numpy(buffer_)
elif isinstance(buffer_, torch.Tensor):
return buffer_
contents = tuple(torchify_buffer(b) for b in buffer_)
if type(buffer_) is tuple: # tuple, namedtuple instantiate differently.
return contents
return type(buffer_)(*contents)
def numpify_buffer(buffer_):
"""Convert contents of ``buffer_`` from torch tensors to numpy arrays.
``buffer_`` can be an arbitrary structure of tuples, namedtuples, and
namedarraytuples, and a new, matching structure will be returned.
``None`` fields remain ``None``, and numpy arrays are left alone."""
if buffer_ is None:
return
if isinstance(buffer_, torch.Tensor):
return buffer_.cpu().numpy()
elif isinstance(buffer_, np.ndarray):
return buffer_
contents = tuple(numpify_buffer(b) for b in buffer_)
if type(buffer_) is tuple:
return contents
return type(buffer_)(*contents)
def buffer_to(buffer_, device=None):
"""Send contents of ``buffer_`` to specified device (contents must be
torch tensors.). ``buffer_`` can be an arbitrary structure of tuples,
namedtuples, and namedarraytuples, and a new, matching structure will be
returned."""
if buffer_ is None:
return
if isinstance(buffer_, torch.Tensor):
return buffer_.to(device)
elif isinstance(buffer_, np.ndarray):
raise TypeError("Cannot move numpy array to device.")
contents = tuple(buffer_to(b, device=device) for b in buffer_)
if type(buffer_) is tuple:
return contents
return type(buffer_)(*contents)
def buffer_method(buffer_, method_name, *args, **kwargs):
"""Call method ``method_name(*args, **kwargs)`` on all contents of
``buffer_``, and return the results. ``buffer_`` can be an arbitrary
structure of tuples, namedtuples, and namedarraytuples, and a new,
matching structure will be returned. ``None`` fields remain ``None``.
"""
if buffer_ is None:
return
if isinstance(buffer_, (torch.Tensor, np.ndarray)):
return getattr(buffer_, method_name)(*args, **kwargs)
contents = tuple(buffer_method(b, method_name, *args, **kwargs) for b in buffer_)
if type(buffer_) is tuple:
return contents
return type(buffer_)(*contents)
def buffer_func(buffer_, func, *args, **kwargs):
"""Call function ``func(buf, *args, **kwargs)`` on all contents of
``buffer_``, and return the results. ``buffer_`` can be an arbitrary
structure of tuples, namedtuples, and namedarraytuples, and a new,
matching structure will be returned. ``None`` fields remain ``None``.
"""
if buffer_ is None:
return
if isinstance(buffer_, (torch.Tensor, np.ndarray)):
return func(buffer_, *args, **kwargs)
contents = tuple(buffer_func(b, func, *args, **kwargs) for b in buffer_)
if type(buffer_) is tuple:
return contents
return type(buffer_)(*contents)
def get_leading_dims(buffer_, n_dim=1):
"""Return the ``n_dim`` number of leading dimensions of the contents of
``buffer_``. Checks to make sure the leading dimensions match for all
tensors/arrays, except ignores ``None`` fields.
"""
if buffer_ is None:
return
if isinstance(buffer_, (torch.Tensor, np.ndarray)):
return buffer_.shape[:n_dim]
contents = tuple(get_leading_dims(b, n_dim) for b in buffer_ if b is not None)
if not len(set(contents)) == 1:
raise ValueError(f"Found mismatched leading dimensions: {contents}")
return contents[0]
# def buffer_put(x, loc, y, axis=0, wrap=False):
# if isinstance(x, (np.ndarray, torch.Tensor)):
# put(x, loc, y, axis=axis, wrap=wrap)
# else:
# for vx, vy in zip(x, y):
# buffer_put(vx, loc, vy, axis=axis, wrap=wrap)
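# A short usage sketch (not part of the original module); the Obs namedtuple
# below is invented purely for illustration.
if __name__ == "__main__":
    from collections import namedtuple

    Obs = namedtuple("Obs", ["image", "reward"])
    example = Obs(image=np.zeros((3, 4), dtype=np.float32),
                  reward=np.zeros((), dtype=np.float32))
    buf = buffer_from_example(example, leading_dims=(5, 2))
    print(get_leading_dims(buf, n_dim=2))  # (5, 2)
    torch_buf = torchify_buffer(buf)       # same structure, torch tensors
    print(type(torch_buf.image))           # <class 'torch.Tensor'>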
| 6,132 |
code/scripts/deceptive_functions.py
|
TheoryInPractice/spiderdonuts
| 0 |
2023965
|
#
# This file is part of spiderdonuts, https://github.com/TheoryInPractice/spiderdonuts/,
# and is Copyright (C) North Carolina State University, 2017. It is licensed
# under the three-clause BSD license; see LICENSE.
#
"""Calculate deceptive functions for a set of graphs.
A deceptive function is formed as g(lambda) = (e^lambda) + p(lambda), where
p(lambda) = 0*lambda^0 + 0*lambda^1 + x_{1}lambda^2 + ... + x_{k-1}lambda^k
with x_1..x_{k-1} formed from the solutions to
`polygraph.nonnegative_linear_system_check` and lambda is an eigenvalue
of the graph.
Outputs
- Plot of (lambda, g(lambda)) for each graph.
- Table of (graph, min(g(lambda))) for all graphs.
"""
# Imports
from code import generators as gen, polygraph, SPIDERDONUTS, verbose
from functools import partial
from math import exp
from tabulate import tabulate
import io
import logging
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
# Intermediate points
NUM_POINTS = 100
MAX_POWER = 7
def deceptive(y, coefficients):
"""Deceptive function.
Parameters
----------
y : Number
Eigenvalue of a graph
coefficients : list
List of coefficients
Returns
-------
Number
e^y + p(y), where p has the given coefficients in ``np.poly1d`` order (highest-degree term first) and zero constant and linear terms
"""
poly = np.poly1d(np.concatenate((coefficients, [0, 0])))
return exp(y) + np.polyval(poly, y)
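# Worked example (hypothetical coefficients, for orientation only): with
# coefficients = [2.0, 1.0] the polynomial is 2*y**3 + 1*y**2, so
# deceptive(0.0, [2.0, 1.0]) == exp(0) == 1.0 and
# deceptive(1.0, [2.0, 1.0]) == exp(1) + 3.0, roughly 5.72.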
# Get logger
logger = logging.getLogger(SPIDERDONUTS)
verbose(True)
# A list of (graph_name, graph_generator) tuples.
# Each generator has its parameters already bound to it
graphs_generators = [
('chamfered_dodecahedron', gen.chamfered_dodecahedron),
('pyramid_prism(4,0)', partial(gen.pyramid_prism, 4, 0)),
('spider_torus(4,2,[5,3])', partial(gen.spider_torus, 4, 2, [5, 3])),
('spider_torus(4,3,[7,5,3])', partial(gen.spider_torus, 4, 3, [7, 5, 3])),
('snowflakecycle(5,5,3)', partial(gen.snowflakecycle, 5, 5, 3)),
('snowflakecycle(5,7,5)', partial(gen.snowflakecycle, 5, 7, 5)),
('snowflakecycle(5,7,3)', partial(gen.snowflakecycle, 5, 7, 3))
]
# Min lambda
min_lambda = [('Graph', 'Min g(lambda)', 'Number of Coefficients')]
# Analyze graphs
for name, generator in graphs_generators:
logger.info('Analyzing graph {}'.format(name))
# Construct graph
logger.info('Generating graph')
g = generator()
# Analyze walk classes
logger.info('Analyzing walk classes')
if type(g) is dict:
graph = g['graph']
w_obj = polygraph.spider_torus_walk_classes(g)
else:
graph = g
w_obj = polygraph.walk_classes(g, max_power=MAX_POWER)
# Generate solutions to nonnegative linear system
logger.info('Performing nonnegative system check')
res = polygraph.nonnegative_linear_system_check(w_obj)
# Check for success
if not res.success:
logger.warning('Failed nonnegative check on {} with {}'.format(
name,
res.message
))
else:
# Take solutions as coefficients
coefficients = res.x[::-1]
# Find graph eigenvalues
logger.info('Finding graph eigenvalues')
eigenvalues = np.linalg.eigvalsh(nx.adjacency_matrix(graph).todense())
# Key points
max_eig = max(eigenvalues)
min_eig = min(eigenvalues)
linspace = np.linspace(min_eig, max_eig, NUM_POINTS)
# Evaluate the deceptive function at each eigenvalue
logger.info('Evaluating deceptive function at eigenvalues')
eig_results = [deceptive(x, coefficients) for x in eigenvalues]
lin_results = [deceptive(x, coefficients) for x in linspace]
# Append min result to table output
min_result, min_idx = min(
(val, idx)
for (idx, val) in enumerate(eig_results)
)
min_lambda.append((name, min_result, len(coefficients)))
# Generate plot
logger.info('Generating (lambda, g(lambda)) plot')
plt.figure()
plt.suptitle(name)
plt.xlabel('lambda (Eigenvalue)')
plt.ylabel('g(lambda)')
plt.scatter(eigenvalues, eig_results, marker='*', s=150)
plt.scatter(linspace, lin_results, marker='.')
plt.axvline(x=eigenvalues[min_idx], ymin=0, ymax=max(eig_results))
plt.axhline(y=min_result, xmin=0, xmax=max_eig)
plt.savefig('docs/tables-and-figures/{}'.format(name))
plt.close()
logger.info('Finished graph {}\n'.format(name))
# Generate (graph, min(g(lambda))) table
table = tabulate(min_lambda, tablefmt='grid')
# Write file
file = io.open('docs/tables-and-figures/lambda-table.txt', 'w')
file.write(table)
file.close()
| 4,641 |
tests/test_day_11.py
|
Erik-vdg/adventofcode-2020
| 0 |
2022985
|
import pytest
from adventofcode_2020.day_11 import Coordinate
from adventofcode_2020.day_11 import Ruleset
from adventofcode_2020.day_11 import SeatMap
from adventofcode_2020.day_11 import SeatStatus
@pytest.fixture
def seat_maps():
return {
"initial_state": [
"L.LL.LL.LL",
"LLLLLLL.LL",
"L.L.L..L..",
"LLLL.LL.LL",
"L.LL.LL.LL",
"L.LLLLL.LL",
"..L.L.....",
"LLLLLLLLLL",
"L.LLLLLL.L",
"L.LLLLL.LL",
],
"expected_state_2_rules": [
"#.LL.L#.##",
"#LLLLLL.L#",
"L.L.L..L..",
"#LLL.LL.L#",
"#.LL.LL.LL",
"#.LLLL#.##",
"..L.L.....",
"#LLLLLLLL#",
"#.LLLLLL.L",
"#.#LLLL.##",
],
"test_visible_1": [
".......#.",
"...#.....",
".#.......",
".........",
"..#L....#",
"....#....",
".........",
"#........",
"...#.....",
],
"test_visible_2": [
".............",
".L.L.#.#.#.#.",
".............",
],
"test_visible_3": [
".##.##.",
"#.#.#.#",
"##...##",
"...L...",
"##...##",
"#.#.#.#",
".##.##.",
],
}
def test_seat_adjacent_neighbors():
raw_map = [".L#", ".L#", ".L#"]
seat_map = SeatMap.from_input(raw_map)
found_neighbors = seat_map.adjacent_neighbors(Coordinate(1, 1))
expected_neighbors = {
SeatStatus.VACANT: 2,
SeatStatus.OCCUPIED: 3,
SeatStatus.FLOOR: 3,
}
assert found_neighbors == expected_neighbors
def test_apply_rule_adjacent(seat_maps):
seat_map = SeatMap.from_input(seat_maps["initial_state"])
expected_seat_map = SeatMap.from_input(seat_maps["expected_state_2_rules"])
found_seat_map = Ruleset.apply_rule_adjacent(Ruleset.apply_rule_adjacent(seat_map))
assert expected_seat_map == found_seat_map
def test_steady_state_adjacent(seat_maps):
expected_occupancy_count_steady = 37
seat_map = SeatMap.from_input(seat_maps["initial_state"])
steady_seatmap = Ruleset().apply_until_steady(seat_map, "adjacent")
assert expected_occupancy_count_steady == steady_seatmap.count_occupied
def test_visible_neighbors_1(seat_maps):
seat_map = SeatMap.from_input(seat_maps["test_visible_1"])
visible_neighbors = seat_map.visible_neighbors(Coordinate(4, 3))
expected_vis_neighbors = {
SeatStatus.VACANT: 0,
SeatStatus.OCCUPIED: 8,
SeatStatus.FLOOR: 0,
}
assert expected_vis_neighbors == visible_neighbors
def test_visible_neighbors_2(seat_maps):
seat_map = SeatMap.from_input(seat_maps["test_visible_2"])
visible_neighbors = seat_map.visible_neighbors(Coordinate(1, 1))
expected_vis_neighbors = {
SeatStatus.VACANT: 8,
SeatStatus.OCCUPIED: 0,
SeatStatus.FLOOR: 0,
}
assert expected_vis_neighbors == visible_neighbors
def test_visible_neighbors_3(seat_maps):
seat_map = SeatMap.from_input(seat_maps["test_visible_3"])
visible_neighbors = seat_map.visible_neighbors(Coordinate(3, 3))
expected_vis_neighbors = {
SeatStatus.VACANT: 8,
SeatStatus.OCCUPIED: 0,
SeatStatus.FLOOR: 0,
}
assert expected_vis_neighbors == visible_neighbors
def test_steady_state_visible(seat_maps):
expected_occupancy_count_steady = 26
seat_map = SeatMap.from_input(seat_maps["initial_state"])
steady_seatmap = Ruleset().apply_until_steady(seat_map, "visible")
assert expected_occupancy_count_steady == steady_seatmap.count_occupied
| 3,803 |
pytorch_ranger/__init__.py
|
jettify/Ranger-Deep-Learning-Optimizer
| 9 |
2023178
|
from .ranger import Ranger
from .ranger913A import RangerVA
from .rangerqh import RangerQH
__version__ = '0.1.1'
| 114 |
packages/postgres-database/tests/test_utils_migration.py
|
elisabettai/osparc-simcore
| 25 |
2022679
|
import pytest
from alembic.script.revision import MultipleHeads
from simcore_postgres_database.utils_migration import get_current_head
def test_migration_has_no_branches():
try:
current_head = get_current_head()
assert current_head
assert isinstance(current_head, str)
except MultipleHeads as err:
pytest.fail(
f"This project migration expected a single head (i.e. no branches): {err}"
)
| 450 |
config.env.py
|
frybin/RideBoardAPI
| 0 |
2023551
|
import secrets
from os import environ as env
# Flask config
# DEBUG = True
IP = env.get('IP', '0.0.0.0')
PORT = env.get('PORT', 8080)
SERVER_NAME = env.get('SERVER_NAME', 'rideboard-api.csh.rit.edu')
# DB Info
SQLALCHEMY_DATABASE_URI = env.get('SQLALCHEMY_DATABASE_URI')
SQLALCHEMY_TRACK_MODIFICATIONS = 'False'
# Openshift secret
SECRET_KEY = env.get("SECRET_KEY", default=secrets.token_hex(16))
# OpenID Connect SSO config
OIDC_ISSUER = env.get('OIDC_ISSUER', 'https://sso.csh.rit.edu/auth/realms/csh')
OIDC_CLIENT_CONFIG = {
'client_id': env.get('OIDC_CLIENT_ID', 'rideboard-api'),
'client_secret': env.get('OIDC_CLIENT_SECRET', ''),
'post_logout_redirect_uris': [env.get('OIDC_LOGOUT_REDIRECT_URI', 'https://rideboard-api.csh.rit.edu/logout')]
}
| 775 |
2017/18_duet_test.py
|
pchudzik/adventofcode
| 0 |
2024384
|
import importlib
module = importlib.import_module("18_duet")
CPU = module.CPU
Register = module.Register
parser1 = module.parser1
execute_part1 = module.execute_part1
execute_part2 = module.execute_part2
empty_snd_card = None
def test_set_cmd():
cpu = CPU(parser1(empty_snd_card, ["set a 123"]))
cpu.tick()
assert cpu["a"].value == 123
def test_add_cmd():
cpu = CPU(parser1(empty_snd_card, [
"set a 10",
"set b 10",
"add a b"
]))
cpu.tick()
cpu.tick()
cpu.tick()
assert cpu["a"].value == 20
assert cpu["b"].value == 10
def test_mul_cmd():
cpu = CPU(parser1(empty_snd_card, [
"set a 10",
"set b 10",
"mul a b"
]))
cpu.tick()
cpu.tick()
cpu.tick()
assert cpu["a"].value == 100
assert cpu["b"].value == 10
def test_snd_cmd():
snd_card = Register(None)
cpu = CPU(parser1(snd_card, [
"set a 10",
"snd a"
]))
cpu.tick()
cpu.tick()
assert snd_card.value == 10
def test_jgz_cmd_negative():
cpu = CPU(parser1(empty_snd_card, [
"set a 10",
"jgz a -1"
]))
cpu.tick()
cpu.tick()
assert cpu.offset == 0
def test_jgz_cmd_positive():
cpu = CPU(parser1(empty_snd_card, [
"set a 10",
"jgz a 1",
"set a 11",
]))
cpu.tick()
cpu.tick()
assert cpu.offset == 2
def test_rcv_pass_through():
played = execute_part1([
"rcv a",
"set a -10",
"rcv a"
])
assert played is None
def test_stop_execution():
played = execute_part1([
"set a 10",
"snd a",
"rcv a"
])
assert played == 10
def test_example():
played = execute_part1([
"set a 1",
"add a 2",
"mul a a",
"mod a 5",
"snd a",
"set a 0",
"rcv a",
"jgz a -1",
"set a 1",
"jgz a -2"
])
assert played == 4
def test_part2():
send_values = execute_part2([
"snd 1",
"snd 2",
"snd p",
"rcv a",
"rcv b",
"rcv c",
"rcv d"
])
assert send_values == 3
| 2,168 |
CIS41B/Parallel/p7.py
|
jackh423/python
| 1 |
2023002
|
from multiprocessing import Process, Manager
def func(dct, m):
dct[1] = '11'
dct['2'] = 22
dct[0.25] = 0.33
m.reverse()
if __name__ == '__main__':
with Manager() as manager:
dct = manager.dict()
ml = manager.list(range(10))
p = Process(target=func, args=(dct, ml))
p.start()
p.join()
print(dct)
print(ml)
print("Exiting main")
| 443 |
ontospy/extras/mylocal_spqrglpy_test.py
|
tobby2002/Ontospy
| 0 |
2024006
|
from sparqlpy import *
ENDPOINT = "http://127.0.0.1:3030/neo/sparql"
s = SparqlEndpoint(ENDPOINT)
q = "select ?x where {?x a owl:Class}"
results = s.query(q)
| 161 |
src/anilius/utils/random.py
|
sadegh-moayedizadeh/Anilius
| 3 |
2023953
|
import string
import random
RANDOM_STRING_CHOICES = string.ascii_lowercase + string.digits
RANDOM_STRING_DIGITS_CHOICES = string.digits
def random_string(specific_len):
return "".join(random.choice(RANDOM_STRING_CHOICES) for _ in range(specific_len))
def random_string_digits(specific_len):
return "".join(
random.choice(RANDOM_STRING_DIGITS_CHOICES) for _ in range(specific_len)
)
| 407 |
empower/managers/lommmanager/lnsdp/lnsshandler.py
|
EstefaniaCC/empower-runtime
| 0 |
2024066
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 <NAME>
# Author(s): <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# TODO ADD REFERENCE TO ALLOWED DEVICES
"""LNS Handlers for the LNS Discovery Server."""
import json, traceback
import empower.managers.apimanager.apimanager as apimanager
from empower.managers.lommmanager.datatypes.eui64 import EUI64
class LNSsHandler(apimanager.EmpowerAPIHandler):
"""Handler for accessing LNS."""
URLS = [r"/api/v1/lnsd/lnss/?",
r"/api/v1/lnsd/lnss/([a-zA-Z0-9:]*)/?"] # TODO CHECK EUI64 FORMAT
@apimanager.validate(max_args=1)
def get(self, *args, **kwargs):
"""List devices.
Args:
[0]: the lns euid (optional)
Example URLs:
GET /api/v1/lnsd/lnss
[
{
"euid":"::1",
"uri":"ws://0.0.0.0:6038/router-",
"desc": "LNS XXX"
}
]
GET /api/v1/lnsd/lnss/::1
{
"euid":"::1",
"uri":"ws://0.0.0.0:6038/router-",
"desc": "LNS XXX"
}
"""
if not args:
out = []
desc = self.get_argument("desc",None)
state = self.get_argument("state",None)
lgtw_euid = self.get_argument("lgtw_euid",None)
for key in self.service.lnss:
""" only if string in description """
if desc and desc not in self.service.lnss[key].to_dict()["desc"]:
continue
""" only if state matches """
if state and state != self.service.lnss[key].to_dict()["state"]:
continue
""" only if manages lgtw_id """
if lgtw_euid and lgtw_euid not in self.service.lnss[key].to_dict()["lgtws"]:
continue
""" all """
out.append(self.service.lnss[key].to_dict())
return out
else:
try:
lnss = self.service.lnss[EUI64(args[0]).id6].to_dict()
except KeyError as err:
self.set_status(400)
self.finish({"status_code":400,"title":"LNS not found","detail":str(err)})
else:
return lnss
@apimanager.validate(returncode=201, min_args=1, max_args=1)
def post(self, *args, **kwargs):
"""Add a new LNS to the LNS Discovery Server Database.
Request:
version: protocol version (1.0)
euid: the lns id in eui64 or id6 format (mandatory)
uri: the lns uri template (mandatory)
desc: a human readable description of the device (optional)
Example URLs:
POST /api/v1/lnsd/lnss/"::1"
{
"version":"1.0",
"lgtws":["b827:ebff:fee7:7681"],
"uri":"ws://0.0.0.0:6038/router-",
"desc": "LNS XXX"
}
"""
try:
lnss = self.service.add_lns(args[0], **kwargs)
except ValueError as err:
self.set_status(400)
self.finish({"status_code":400,"title":"Value error","detail":str(err)})
else:
self.set_header("Location", "/api/v1/lnsd/lnss/%s" % lnss.euid)
@apimanager.validate(returncode=201, min_args=1, max_args=1)
def put(self, *args, **kwargs):
"""Add a new LNS to the LNS Discovery Server Database.
Args:
[0]: the lns euid (mandatory)
Request:
version: protocol version (1.0)
uri: the lns uri template (mandatory)
desc: a human readable description of the device (optional)
Example URLs:
PUT /api/v1/lnsd/lnss/::1
{
"version":"1.0",
"lgtws":["b827:ebff:fee7:7681"],
"uri":"ws://0.0.0.0:6038/router-",
"desc": "LNS XXX"
}
"""
try:
self.service.update_lns(args[0], **kwargs)
except ValueError as err:
self.set_status(400)
self.finish({"status_code":400,"title":"Value error","detail":str(err)})
else:
self.set_header("Location", "/api/v1/lnsd/lnss/%s" % args[0])
@apimanager.validate(returncode=204, min_args=0, max_args=1)
def delete(self, *args, **kwargs):
"""Delete one or all devices.
Args:
[0]: the lnss euid
Example URLs:
DELETE /api/v1/lnsd/lnss
DELETE /api/v1/lnsd/lnss/00-0D-B9-2F-56-64
"""
if args:
try:
self.service.remove_lns(EUI64(args[0]).id6)
except ValueError as err:
self.set_status(400)
self.finish({"status_code":400,"title":"Value error",
"detail":str(err)})
except:
self.set_status(500)
self.finish({"status_code":500,"title":"Server error",
"detail":"unknown internal error"})
else:
self.service.remove_all_lnss()
| 5,725 |
example/ad-hoc-demo.py
|
towithyou/simple_ansilbe_api
| 2 |
2023720
|
from simple_ansible_api.api import AnsiBleApi
from simple_ansible_api.task import Task
from simple_ansible_api.callback import ResultsResultCallBack
def v1():
cli = AnsiBleApi(hosts_list="/etc/ansible/hosts")
# set custom callback object
# cli.set_callback(callback=ResultsResultCallBack())
t1 = Task(cli, name="t1", register="root_dir")
t1.shell(cmd="ls /root", )
t2 = Task(cli, name="t2")
t2.debug("{{root_dir.stdout_lines}}")
t3 = Task(cli, name="t3")
t3.debug("{{src}}") # Var parameter definition
t4 = Task(cli, name="t4")
t4.debug("{{dest}}") # Var parameter definition
t5 = Task(cli, name="t5")
t5.yum(name="tree", state="latest")
t6 = Task(cli, name="t6")
t6.copy(src="{{src}}", dest="{{dest}}")
t7 = Task(cli, name="t7")
t7.file(path="/tmp/example_dir", state="directory")
cli.ansible(hosts=["web", "db", "mongo"], var={"src": "/root/install.log", "dest": "/tmp/"},
name="test ad-hoc task")
def v2():
cli = AnsiBleApi(hosts_list=["192.168.0.107", "192.168.0.108", "192.168.0.109"])
t1 = Task(cli, name="t1", register="root_dir")
t1.shell(cmd="ls /root", )
t2 = Task(cli, name="t2")
t2.debug("{{root_dir.stdout_lines}}")
cli.ansible(hosts="192.168.0.107", name="test ad-hoc task")
# cli.ansible(hosts=["192.168.0.108", "192.168.0.109"], name="test ad-hoc task")
if __name__ == '__main__':
# v1()
v2()
| 1,449 |
src/DateUtils.py
|
sebastianhaberey/ctax
| 10 |
2023543
|
from datetime import datetime
from dateutil.parser import parse
from dateutil.tz import tzoffset, UTC
from dateutil.utils import default_tzinfo
def parse_date(text):
return default_tzinfo(parse(text), tzoffset("UTC", 0))
def date_to_string(date):
return date.strftime("%d.%m.%Y")
def date_and_time_to_string(date):
return date.strftime("%d.%m.%Y %H:%M:%S %Z")
def get_start_of_year(year):
"""
Returns start of year.
"""
return datetime(year, 1, 1, tzinfo=UTC)
def get_start_of_year_after(year):
"""
Returns first instant of next year.
"""
return datetime(year + 1, 1, 1, tzinfo=UTC)
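# Quick usage sketch (not part of the original module):
if __name__ == "__main__":
    d = parse_date("2020-03-01T12:30:00")
    print(date_to_string(d))             # 01.03.2020
    print(date_and_time_to_string(d))    # 01.03.2020 12:30:00 UTC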
| 640 |
disease/serializers.py
|
PetHospital/PetHospitalBackend
| 0 |
2023729
|
from rest_framework import serializers
from .models import *
class DiseaseGroupSerializer(serializers.ModelSerializer):
class Meta:
model = DiseaseGroup
fields = '__all__'
class DiseaseSerializer(serializers.ModelSerializer):
class Meta:
model = Disease
fields = '__all__'
class DiseaseImageSerializer(serializers.ModelSerializer):
class Meta:
model = DiseaseImage
fields = '__all__'
class ProcessSerializer(serializers.ModelSerializer):
class Meta:
model = Process
fields = '__all__'
| 591 |
data/ChallengeLoader.py
|
royhershkovitz/Sudoku-Solver
| 0 |
2023751
|
import re
from data.DataTypes import Board, DiagonalBoard, GameType
class ConfigPlaces:
sizeX = 0
sizeY = 1
groupX = 2
groupY = 3
TYPE = 4
class ConfigChallenge:
type: GameType = GameType.REGULAR
sizeX = -1
sizeY = -1
groupSizeX = -1
groupSizeY = -1
LINESEP = "\n"
REGEX_WORDSEP = "[ |]"
class LevelParser:
def __init__(self, level: str):
with open(level, "r") as f:
self.content = f.read()
self._clean_content()
def _clean_content(self):
split_content = [line.strip() for line in self.content.split(LINESEP) if line.strip()]
for line_num in range(len(split_content)):
split_content[line_num] = [word for word in re.split(REGEX_WORDSEP, split_content[line_num]) if word]
self.config_txt = [int(config) for config in split_content[0]]
self.puzzle = split_content[1:]
def _parse_configurations(self):
self.config = ConfigChallenge()
self.config.sizeX = self.config_txt[ConfigPlaces.sizeX]
self.config.sizeY = self.config_txt[ConfigPlaces.sizeY]
self.config.groupSizeX = self.config_txt[ConfigPlaces.groupX]
self.config.groupSizeY = self.config_txt[ConfigPlaces.groupY]
self.config.type = GameType(self.config_txt[ConfigPlaces.TYPE])
if self.config.type == GameType.REGULAR:
self.board = Board(self.config.sizeX,
self.config.sizeY,
self.config.groupSizeX,
self.config.groupSizeY)
elif self.config.type == GameType.DIAGONAL:
self.board = DiagonalBoard(self.config.sizeX,
self.config.sizeY,
self.config.groupSizeX,
self.config.groupSizeY)
else:
raise Exception(f"Unsupported game type {self.config_txt[ConfigPlaces.TYPE]}")
def _parse_puzzle(self):
try:
for x in range(self.config.sizeX * self.config.groupSizeX):
for y in range(self.config.sizeY * self.config.groupSizeY):
if self.puzzle[x][y] != ".":
self.board.place_number(x, y, int(self.puzzle[x][y]) - 1)
except IndexError:
raise Exception(f"Puzzle length is not as config ({self.config.sizeX}x{self.config.sizeX})")
except ValueError:
raise Exception("Puzzle contain illegal value (not '.' or number)")
def parse_challenge(self) -> Board:
self.board = None
self._parse_configurations()
self._parse_puzzle()
return self.board
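# For orientation only -- a hypothetical level file, inferred from the parser
# above (not shipped with the project). The first line holds
# sizeX sizeY groupX groupY type (type being the integer value of a GameType
# member), followed by the puzzle rows with "." marking an empty cell, e.g.
# a 4x4 board made of 2x2 groups:
#
# 2 2 2 2 1
# 1 . . 4
# . . 1 .
# . 3 . .
# 2 . . 1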
| 2,661 |
notes/code/sfpd/catcloud.py
|
adamreevesman/msds692
| 1 |
2023959
|
from cloud import WordCloud
from csvcols import get_column
import matplotlib.pyplot as plt
import sys
categories = get_column(sys.argv[1], col=1)
wordcloud = WordCloud(width=1800,
height=1400,
max_words=10000,
random_state=1,
relative_scaling=0.25)
wordcloud.fit_words(categories.most_common(len(categories)))
plt.imshow(wordcloud)
plt.axis("off")
wordcloud.to_file("SFPD-wordcloud.png")
plt.show()
| 490 |
test/test_tf_dnn.py
|
nowindxdw/0AI_ears
| 0 |
2023738
|
from __future__ import absolute_import
from __future__ import print_function
import pytest
import os
from xears.data_utils import pre_dataset
from xears.models.tf import dnn_train
#xears_test
def test_DNN():
A_path = os.path.dirname(__file__)+os.path.sep+'xears'+os.path.sep+'mp3source'+os.path.sep+'model.wav'
B_path = os.path.dirname(__file__)+os.path.sep+'xears'+os.path.sep+'mp3source'+os.path.sep+'sad.wav'
#train_x, train_y, test_x, test_y= pre_dataset.pre_data(A_path,B_path)
data_set = pre_dataset.pre_wav_data(A_path,B_path)
dnn_train.train(data_set)
if __name__ == '__main__':
pytest.main([__file__])
raw_input('Press Enter to exit...')
| 677 |
mitutoyo.py
|
vifino/CircuitPython-mitutoyo
| 4 |
2022804
|
"""
`mitutoyo`: A library for the Mitutoyo Digimatic (SPC) protocol.
================================================================
This library is an implementation of the Mitutoyo Digimatic
protocol used to read data from gauges and scales.
It was written as a first project with CircuitPython.
Data used to implement this were Mitutoyo datasheets.
* Author(s): Adrian "vifino" Pistol
Implementation Notes
--------------------
**Hardware:**
- You need the 'data' and 'clock' pins configured as inputs with pullup.
They are pin 2 and 3 on a Digimatic 10-pin cable.
- Connect the 'req' pin to a NPN with a 10kΩ resistor and the open collector output to '!req'.
On a Digimatic 10-pin cable, '!req' is pin 5.
- Optionally, you can connect 'ready' as an input with a pullup to know when to read.
On a Digimatic 10-pin cable, 'ready' is pin 4.
**Software:**
- CircuitPython 5.0 tested, older versions should work.
MicroPython should also work, thanks to Adafruit Blinka.
"""
# TODO: vifino: acquire more Mitutoyo gear, it is beautiful.
__repo__ = "https://github.com/vifino/CircuitPython-mitutoyo"
__version__ = "1.0.0"
import digitalio
class Digimatic:
"""Mitutoyo Digimatic SPC implementation for CircuitPython.
Provide either 'req' or 'nreq'. 'req' takes precedence.
:param ~microcontroller.Pin data: data pin
:param ~microcontroller.Pin clock: clock pin
:param ~microcontroller.Pin req: non-inverted data request pin, alternative to 'nreq'
:param ~microcontroller.Pin nreq: inverted data request pin, alternative to 'req'
"""
UNITS = {0: "mm", 1: "in"}
def __init__(self, **args):
if "data" not in args:
raise "Missing `data` pin in arguments!"
if "clock" not in args:
raise "Missing `clock` pin in arguments!"
if "req" not in args and "nreq" not in args:
raise "Missing `req` or `nreq` in arguments!"
pins = {}
for name, pin in args.items():
if type(pin).__name__ == "Pin": # board pin
dio = digitalio.DigitalInOut(pin)
if name in ["req", "nreq"]:
dio.direction = digitalio.Direction.OUTPUT
else:
dio.direction = digitalio.Direction.INPUT
dio.pull = digitalio.Pull.UP
pins[name] = dio
else:
pins[name] = pin
self.pins = pins
# preallocate buffers
self.bits = bytearray(52)
self.nibbles = bytearray(13)
def _req(self, val):
if "req" in self.pins:
self.pins["req"].value = val
else:
self.pins["nreq"].value = not val
def read(self):
"""Attempt to read a value from the connected instrument.
:return: A reading or none if data is unparsable
:rtype: :class:`mitutoyo.Digimatic.Reading`
"""
clock = self.pins["clock"]
data = self.pins["data"]
# read bitstream
self._req(True)
for i in range(52):
# wait for clock to go low
while clock.value:
continue
self.bits[i] = data.value
if i == 0: # deassert req after first bit read, so we only get one response
self._req(False)
# wait for clock to go up again
while not clock.value:
continue
# assemble nibbles
for n in range(13): # iterate over each nibble
idx = n * 4
self.nibbles[n] = (
(self.bits[idx + 0] << 0)
+ (self.bits[idx + 1] << 1)
+ (self.bits[idx + 2] << 2)
+ (self.bits[idx + 3] << 3)
)
# parse preamble
# TODO: check if this contains useful data.
for n in range(4):
if self.nibbles[n] != 15:
return None # invalid data
# sign
if self.nibbles[4] != 0 and self.nibbles[4] != 8:
return None # invalid data
sign_pos = self.nibbles[4] == 0
# convert bcd sequence to integer
number = 0
bcd = self.nibbles[5:11]
for i in range(6):
number += bcd[i] * (10 ** (5 - i))
# decimal point
number = number / 10 ** self.nibbles[11]
# unit
unit = self.UNITS.get(self.nibbles[12])
value = number if sign_pos else -number
if number == 0:
value = 0.0 # don't like negative zeros.
return self.Reading(value, unit)
class Reading:
"""A Reading from a Mitutoyo Digimatic instrument."""
def __init__(self, value, unit):
self.value = value
"""The value returned by the instrument. (`float`)"""
self.unit = unit
"""The unit the reading's value is in. (`str`)"""
def __str__(self):
return "%s%s" % (self.value, self.unit)
def read_cm(self):
"""Attempt to read from a connected instrument, but always return the value in centimeters.
:return: centimeters
:rtype: float
"""
reading = self.read()
if not reading:
return None
if reading.unit == "mm":
return reading.value * 10
if reading.unit == "in":
return reading.value * 2.54
# Unlikely, but future proof.
raise "Reading has unknown unit: %s" % reading.unit
| 5,469 |
operations/localresponsenorm.py
|
Robust-Robots/onnx2pytorch
| 0 |
2023893
|
import warnings
from torch.nn.modules.normalization import LocalResponseNorm
class LocalResponseNormUnsafe(LocalResponseNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _check_input_dim(self, input):
    # Deliberately skip the parent class's input-dimension check so inputs of
    # any rank are accepted.
    return
| 268 |