seq_id (stringlengths 4-11) | text (stringlengths 113-2.92M) | repo_name (stringlengths 4-125, ⌀) | sub_path (stringlengths 3-214) | file_name (stringlengths 3-160) | file_ext (stringclasses, 18 values) | file_size_in_byte (int64 113-2.92M) | program_lang (stringclasses, 1 value) | lang (stringclasses, 93 values) | doc_type (stringclasses, 1 value) | stars (int64 0-179k, ⌀) | dataset (stringclasses, 3 values) | pt (stringclasses, 78 values) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
4313160628
|
from decouple import config
from etria_logger import Gladsheim
from src.domain.validator.webhook.validator import WebHookMessage
from src.infrastructure.mongo_db.infrastructure import MongoDBInfrastructure
class UserRepository:
infra = MongoDBInfrastructure
@classmethod
async def __get_collection(cls):
mongo_client = cls.infra.get_client()
try:
database = mongo_client[config("MONGODB_DATABASE_NAME")]
collection = database[config("MONGODB_USER_COLLECTION")]
return collection
except Exception as ex:
message = (
f"UserRepository::__get_collection::Error when trying to get collection"
)
Gladsheim.error(
error=ex,
message=message,
database=config("MONGODB_DATABASE_NAME"),
collection=config("MONGODB_USER_COLLECTION"),
)
raise ex
@classmethod
async def find_client_unique_id(cls, cpf: str) -> str:
user_filter = {"identifier_document.cpf": cpf}
collection = await cls.__get_collection()
user = await collection.find_one(user_filter) or {}
unique_id = user.get("unique_id")
return unique_id
@classmethod
async def update_exchange_account_status(cls, webhook_message: WebHookMessage) -> bool:
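# Set ouro_invest.status from the webhook message for the user matching the given CPF; returns True when exactly one document matched the filter.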
user_filter = {"identifier_document.cpf": webhook_message.cpf}
webhook_message_information = {
"$set": {"ouro_invest.status": webhook_message.status.value}
}
collection = await cls.__get_collection()
was_updated = await collection.update_one(
user_filter, webhook_message_information
)
return was_updated.matched_count == 1
|
sam-ve-m/webhook.onboarding
|
func/src/repositories/user/repository.py
|
repository.py
|
py
| 1,773 |
python
|
en
|
code
| 0 |
github-code
|
50
|
23287441547
|
__author__ = "Vanessa Sochat"
__copyright__ = "Copyright 2021-2023, Vanessa Sochat"
__license__ = "MPL 2.0"
import os
import contributor_ci.utils as utils
# Replacements can currently be made for the database_file and lmod_base
install_dir = utils.get_installdir()
reps = {"$install_dir": install_dir, "$root_dir": os.path.dirname(install_dir)}
# The default settings file in the current working directory
default_settings_file = os.path.join(os.getcwd(), "contributor-ci.yaml")
|
vsoch/contributor-ci
|
contributor_ci/defaults.py
|
defaults.py
|
py
| 470 |
python
|
en
|
code
| 3 |
github-code
|
50
|
44033324144
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
import os
import lib.Product as p
import lib.Productlist as l
# Write a prompt that asks for the file to be processed.
# The suffix .csv stands for CSV files and the suffix .json for JSON files.
# Write a prompt asking in which format the current data should be saved, e.g. JSON or CSV.
einkaufsliste = l.Productlist(p)
os.system('clear')
######### Task 1 ############
#einkaufsliste.read_csv_file('/home/tn/bin/einkaufliste.csv', ',')
#einkaufsliste.write_csv_file('/home/tn/bin/einkaufliste-1.csv', ';')
input_file = input('Geben Sie die Datei an die eingelesen werden soll: ')
if input_file[-3:] == 'csv':
delimiter = input('Geben Sie das Trennzeichen der CSV Datei ein: ')
print(delimiter)
einkaufsliste.read_csv_file(input_file, delimiter)
elif input_file[-4:] == 'json':
einkaufsliste.read_json_file(input_file)
else:
print('Die Datei hat kein gültiges Suffix!')
exit(1)
#################################
while 1:
# os.system('clear')
print( 50 * '-')
print('Wählen Sie eine Zahl für eine Aktion aus')
print('1 - neues Produkt anlegen')
print('2 - bestehende Daten in eine CSV Datei schreiben')
print('3 - bestehende Daten in eine JSON Datei schreiben')
print('4 - alle Produkte auflisten')
print('5 - Program beenden')
cmd = input('Wählen Sie eine Zahl für eine Aktion aus: ')
if cmd == '1':
name = input('Geben Sie den Produktnamen an: ')
preis = input('Geben Sie den Preis an: ')
gewicht = input('Geben Sie das Gewicht an: ')
anzahl = input('Geben Sie eine Menge an: ')
product = p.Product({'Name': name, 'Preis': preis, 'Gewicht': gewicht, 'Anzahl': anzahl})
einkaufsliste.add_product(product)
elif cmd == '2':
out_file = input('Geben Sie den Dateinamen an in die die Daten geschrieben werden sollen: ')
delimiter = input('Geben Sie das Trennzeichen an: ')
einkaufsliste.write_csv_file(out_file, delimiter)
elif cmd == '3':
out_file = input('Geben Sie den Dateinamen an in die die Daten geschrieben werden sollen: ')
einkaufsliste.write_json_file(out_file)
elif cmd == '4':
einkaufsliste.list_products()
input('weiter')
elif cmd == '5':
exit(0)
else:
print('Ihre Eingabe war ungültig')
time.sleep(10)
# exit(2)
######### Task 2 ############
#einkaufsliste.write_csv_file('/home/tn/bin/einkaufliste-1.csv', ';')
#einkaufsliste.write_json_file('/home/tn/bin/einkaufliste.json')
#################################
|
itadh-jz/ita3-2020-04
|
neue_einkaufsliste.py
|
neue_einkaufsliste.py
|
py
| 2,658 |
python
|
de
|
code
| 0 |
github-code
|
50
|
74727431835
|
from rest_framework import status
# internal
from .base import BaseTestCase
class ProteinViewsetTestCase(BaseTestCase):
def setUp(self) -> None:
super().setUp()
def test_list_proteins(self):
endpoint = f"{self.url_api_prefix}proteins/"
res = self.client.get(endpoint)
self.assertEqual(res.status_code, status.HTTP_200_OK)
res_data = res.json()
self.assertEqual(res_data.get("count"), 3)
self.assertEqual(res_data.get("current_page"), 1)
self.assertEqual(res_data.get("total_pages"), 1)
def test_list_proteins_filter_by_taxid(self):
endpoint = f"{self.url_api_prefix}proteins/?taxid={self.tax1.tax_id}"
res = self.client.get(endpoint)
self.assertEqual(res.status_code, status.HTTP_200_OK)
res_data = res.json()
self.assertEqual(res_data.get("count"), 2)
self.assertEqual(res_data.get("current_page"), 1)
self.assertEqual(res_data.get("total_pages"), 1)
def test_retrieve_protein(self):
endpoint = f"{self.url_api_prefix}proteins/{self.protein1.protein_id}/"
res = self.client.get(endpoint)
self.assertEqual(res.status_code, status.HTTP_200_OK)
res_data = res.json()
self.assertEqual(
res_data.get("length_of_sequence"), self.protein1.length_of_sequence
)
self.assertEqual(res_data.get("protein_id"), self.protein1.protein_id)
self.assertEqual(res_data.get("sequence"), self.protein1.sequence)
self.assertIsNotNone(res_data.get("organism"))
self.assertIsNotNone(res_data.get("domains"))
# for domain in res_data.get("domains"):
# self.assertTrue(self.protein1.domains.filter(domain_id__in=domain.get('domain_id')))
def test_post_protein(self):
protein_id = "A0A034W5F9"
length_of_sequence = 119
sequence = "MKPSITSVLFLLATLAGVAIAANSSWGSRNSTNILLLRENVVRSPLKNGYQSVNVDFPKSGQTNTRAISAIFVIDRFTNSSGAYSSLWSGGVGYRFVSLNLKSQYNRGINSTVEIYGKR"
payload = {
"length_of_sequence": length_of_sequence,
"protein_id": protein_id,
"sequence": sequence,
"organism": self.organism2.id,
}
endpoint = f"{self.url_api_prefix}proteins/"
res = self.client.post(endpoint, data=payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
res_data = res.json()
self.assertEqual(res_data.get("length_of_sequence"), length_of_sequence)
self.assertEqual(res_data.get("protein_id"), protein_id)
self.assertEqual(res_data.get("sequence"), sequence)
|
donscara/Bioscience-Application-Project
|
api/tests/test_protein.py
|
test_protein.py
|
py
| 2,620 |
python
|
en
|
code
| 0 |
github-code
|
50
|
17271641219
|
import numpy as np
import pandas as pd
def main():
df = pd.read_csv('raw_data/winemag.csv')
np.random.seed(555)
split_mask = np.random.rand(len(df)) < 0.8
train_df = df[split_mask]
test_df = df[~split_mask]
train_df.to_csv('processed_data/train.csv', index=False)
test_df.to_csv('processed_data/test.csv', index=False)
main()
|
mindsdb/mindsdb-examples
|
classics/wine_quality/data_processing.py
|
data_processing.py
|
py
| 359 |
python
|
en
|
code
| 29 |
github-code
|
50
|
10287857
|
# Import necessary libraries
import sys
from utils.file_utils import FileUtils
from utils.code_parser_interface import CodeParser
from stores.function_store import FunctionStore
from core.open_ai import OpenAI as AI
from utils.request_transformer import RequestTransformer
def generate_ai_unit_tests(file_path):
parser = CodeParser(file_path)
store = FunctionStore()
transformer = RequestTransformer()
open_ai = AI()
functions = parser.find_functions()
store.add_function_group(functions)
function_names = store.function_names
ai_response = open_ai.chat(transformer.get_code_string_from_functions(functions))
content = ai_response.choices[0].message.content
unit_test_content = transformer.extract_code_block(content)
path = FileUtils.create_directory("./ai-tests")
filename = FileUtils.get_file_name(file_path)
filename_without_extension = filename.split(".")[0]
# create test files
with open(FileUtils.convert_to_test("ai-tests/" + filename), "w", encoding="utf-8") as f:
functions = ", ".join(function_names)
f.write(f"import sys\n")
f.write(f"import os\n")
f.write(f"sys.path.insert(0, os.getcwd())\n")
f.write(f"from {filename_without_extension} import {functions}\n")
f.write(unit_test_content)
# Run the function if this script is called directly
if __name__ == "__main__":
filePath = sys.argv[1]
# filePath = "addition.py"
generate_ai_unit_tests(filePath)
|
killswitchh/ai-unit-test
|
.github/unit-test/add_unit_tests.py
|
add_unit_tests.py
|
py
| 1,490 |
python
|
en
|
code
| 0 |
github-code
|
50
|
27334403900
|
"""Specify all CLI-accessible modules and their configurations, the pipeline to run by default, and define special functions for the `config` and `pipeline` CLI option trees."""
import argparse
from typing import Callable, Final, Optional
import nhssynth.cli.module_arguments as ma
import nhssynth.modules as m
from nhssynth.cli.common_arguments import COMMON_PARSERS
class ModuleConfig:
"""
Represents a module's configuration, containing the following attributes:
Attributes:
func: A callable that executes the module's functionality.
add_args: A callable that populates the module's sub-parser arguments.
description: A description of the module's functionality.
help: A help message for the module's command-line interface.
common_parsers: A list of common parsers to add to the module's sub-parser, appending the 'dataset' and 'core' parsers to those passed.
"""
def __init__(
self,
func: Callable[..., argparse.Namespace],
add_args: Callable[..., None],
description: str,
help: str,
common_parsers: Optional[list[str]] = None,
no_seed: bool = False,
) -> None:
self.func = func
self.add_args = add_args
self.description = description
self.help = help
self.common_parsers = ["core", "seed"] if not no_seed else ["core"]
if common_parsers:
assert set(common_parsers) <= COMMON_PARSERS.keys(), "Invalid common parser(s) specified."
# merge the below two assert statements
assert (
"core" not in common_parsers and "seed" not in common_parsers
), "The 'seed' and 'core' parser groups are automatically added to all modules, remove the from `ModuleConfig`s."
self.common_parsers += common_parsers
def __call__(self, args: argparse.Namespace) -> argparse.Namespace:
return self.func(args)
def run_pipeline(args: argparse.Namespace) -> None:
"""Runs the specified pipeline of modules with the passed configuration `args`."""
print("Running full pipeline...")
args.modules_to_run = PIPELINE
for module_name in PIPELINE:
args = MODULE_MAP[module_name](args)
def add_pipeline_args(parser: argparse.ArgumentParser) -> None:
"""Adds arguments to `parser` for each module in the pipeline."""
for module_name in PIPELINE:
MODULE_MAP[module_name].add_args(parser, f"{module_name} options")
def add_config_args(parser: argparse.ArgumentParser) -> None:
"""Adds arguments to `parser` relating to configuration file handling and module-specific config overrides."""
parser.add_argument(
"-c",
"--input-config",
required=True,
help="specify the config file name",
)
parser.add_argument(
"-cp",
"--custom-pipeline",
action="store_true",
help="infer a custom pipeline running order of modules from the config",
)
for module_name in PIPELINE:
MODULE_MAP[module_name].add_args(parser, f"{module_name} option overrides", overrides=True)
for module_name in VALID_MODULES - set(PIPELINE):
MODULE_MAP[module_name].add_args(parser, f"{module_name} options overrides", overrides=True)
### EDIT BELOW HERE TO ADD MODULES / ALTER PIPELINE BEHAVIOUR
PIPELINE: Final = [
"dataloader",
"model",
"evaluation",
"dashboard",
] # NOTE this determines the order of a pipeline run
MODULE_MAP: Final = {
"dataloader": ModuleConfig(
func=m.dataloader.run,
add_args=ma.add_dataloader_args,
description="run the data loader module, to prepare the chosen dataset for use in other modules",
help="prepare the dataset",
common_parsers=["metadata", "typed", "transformed", "metatransformer", "sdv_metadata"],
),
"structure": ModuleConfig(
func=m.structure.run,
add_args=ma.add_structure_args,
description="run the structural discovery module, to learn a structural model for use in training and evaluation",
help="discover structure",
),
"model": ModuleConfig(
func=m.model.run,
add_args=ma.add_model_args,
description="run the model architecture module, to train a synthetic data generator",
help="train a model",
common_parsers=["transformed", "metatransformer", "experiments", "synthetic_datasets", "model"],
),
"evaluation": ModuleConfig(
func=m.evaluation.run,
add_args=ma.add_evaluation_args,
description="run the evaluation module, to evaluate an experiment",
help="evaluate an experiment",
common_parsers=["sdv_metadata", "typed", "experiments", "synthetic_datasets", "evaluations"],
),
"plotting": ModuleConfig(
func=m.plotting.run,
add_args=ma.add_plotting_args,
description="run the plotting module, to generate plots for a given model and / or evaluation",
help="generate plots",
common_parsers=["typed", "evaluations"],
),
"dashboard": ModuleConfig(
func=m.dashboard.run,
add_args=ma.add_dashboard_args,
description="run the dashboard module, to produce a streamlit dashboard",
help="start up a streamlit dashboard to view the results of an evaluation",
common_parsers=["typed", "experiments", "synthetic_datasets", "evaluations"],
no_seed=True,
),
"pipeline": ModuleConfig(
func=run_pipeline,
add_args=add_pipeline_args,
description="run the full pipeline.",
help="run the full pipeline",
),
"config": ModuleConfig(
func=None,
add_args=add_config_args,
description="run module(s) according to configuration specified by a file in `config/`; note that you can override parts of the configuration on the fly by using the usual CLI flags",
help="run module(s) in line with a passed configuration file",
),
}
### EDIT ABOVE HERE TO ADD MODULES / ALTER PIPELINE BEHAVIOUR
VALID_MODULES = {x for x in MODULE_MAP.keys() if x not in {"pipeline", "config"}}
assert (
set(PIPELINE) <= VALID_MODULES
), f"Invalid `PIPELINE` specification, must only contain valid modules from `MODULE_MAP`: {str(VALID_MODULES)}"
def get_parent_parsers(name: str, module_parsers: list[str]) -> list[argparse.ArgumentParser]:
"""Get a list of parent parsers for a given module, based on the module's `common_parsers` attribute."""
if name in {"pipeline", "config"}:
return [p(name == "config") for p in COMMON_PARSERS.values()]
elif name == "dashboard":
return [COMMON_PARSERS[pn](True) for pn in module_parsers]
else:
return [COMMON_PARSERS[pn]() for pn in module_parsers]
def add_subparser(
subparsers: argparse._SubParsersAction,
name: str,
module_config: ModuleConfig,
) -> argparse.ArgumentParser:
"""
Add a subparser to an argparse argument parser.
Args:
subparsers: The subparsers action to which the subparser will be added.
name: The name of the subparser.
module_config: A [`ModuleConfig`][nhssynth.cli.module_setup.ModuleConfig] object containing information about the subparser, including a function to execute and a function to add arguments.
Returns:
The newly created subparser.
"""
parent_parsers = get_parent_parsers(name, module_config.common_parsers)
parser = subparsers.add_parser(
name=name,
description=module_config.description,
help=module_config.help,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parent_parsers,
)
if name not in {"pipeline", "config"}:
module_config.add_args(parser, f"{name} options")
else:
module_config.add_args(parser)
parser.set_defaults(func=module_config.func)
return parser
|
nhsengland/NHSSynth
|
src/nhssynth/cli/module_setup.py
|
module_setup.py
|
py
| 7,892 |
python
|
en
|
code
| 2 |
github-code
|
50
|
4644450389
|
import os
from asr_model.audio import AudioFile
from asr_model.utils import extract_audio, convert_audio, write_to_file
from pydub import AudioSegment as am
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
import os
import torch
import torchaudio
from asr_model.variabels import *
VIDEO_EXT = ['mp4', 'ogg', 'm4v', 'm4a', 'webm', 'flv', 'amv', 'avi']
AUDIO_EXT = ['mp3', 'flac', 'wav', 'aac', 'm4a', 'weba', 'sdt']
class SubGenerator:
def __init__(self,
asr_model,
normalizer,
gector=None,
output_directory="./temp"):
super(SubGenerator, self).__init__()
self.max_words = 12
self.split_duration = 5000
self.split_threshold = 200
self.model = asr_model
self.itn = normalizer
self.gector = gector
if gector is not None:
self.max_len = 0
else:
self.max_len = 0
if not os.path.exists("temp"):
os.makedirs("temp")
# if self.file_ext in VIDEO_EXT:
# self.is_video = True
# extract_audio(self.file_path, self.temp_path)
# elif self.file_ext in AUDIO_EXT:
# self.is_video = False
# convert_audio(self.file_path, self.temp_path)
# else:
# raise ValueError("Extension mismatch")
self.output_directory = output_directory
self.output_file_handle_dict = {}
def format_audio(self, audio_path):
sound = am.from_file(audio_path, format='wav')
sound = sound.set_frame_rate(SAMPLE_RATE)
sound = sound.set_channels(1)
sound.export(audio_path, format='wav')
return sound
def transcript_split(self, audio_path):
sound = self.format_audio(audio_path)
last = 0
trans_dict = None
end_split = None
count_split = 0
last_index=0
last_s=''
audio_file = AudioFile(audio_path)  # assumed: wrap the formatted audio for splitting; AudioFile is imported above but was never instantiated in the original
for start, end, audio, s in audio_file.split_file():
if trans_dict is not None:
if start - trans_dict.get('end', 0) > self.split_threshold or len(trans_dict['tokens']) > self.max_len:
final_transcript = trans_dict['tokens']
# if len(final_transcript)>0 and final_transcript!= ' ':
# with open(speaker_path+".txt", "a") as myfile:
# myfile.write("audio/"+str(last_index)+'.wav|'+final_transcript.strip()+'|'+str(num_speaker)+'\n')
# with open(speaker_path+"/"+str(last_index)+".txt", "a") as myfile1:
# myfile1.write(final_transcript.strip())
# index+=1
trans_dict = None
end_split = True
# temppath = speaker_path+'/'+str(index)+'.wav'
# AudioSegment.from_raw(file=s, sample_width=2, frame_rate=16000, channels=1).export(temppath, format='wav')
signal, sr = torchaudio.load('./a.wav', channels_first=False)
tokens = self.model.transcribe(self.model.audio_normalizer(signal, SAMPLE_RATE).unsqueeze(0),torch.tensor([1.0]))[0]
if trans_dict is None:
trans_dict = {
'tokens': tokens,
'start': start,
'end': end,
}
print('tokens ', tokens)
last = end
count_split+=1
# last_index = index
last_s=s
# if trans_dict is not None:
# final_transcript = trans_dict['tokens']
# if len(final_transcript.strip())>0:
# with open(speaker_path+".txt", "a") as myfile:
# myfile.write("audio/"+str(last_index+1)+'.wav|'+final_transcript.strip()+'|'+str(num_speaker)+'\n')
# with open(speaker_path+"/"+str(last_index+1)+".txt", "a") as myfile1:
# myfile1.write(final_transcript.strip())
return 0
|
BarryZM/Dialog_generate_tool
|
STT/asr_model/transcript_dialog.py
|
transcript_dialog.py
|
py
| 4,076 |
python
|
en
|
code
| 0 |
github-code
|
50
|
20497572613
|
import os
from waflib import Logs
def build(bld):
platform = bld.env['PLATFORM']
spec = bld.options.project_spec
configuration = bld.env['CONFIGURATION']
if platform and not platform == 'project_generator' and not bld.cmd == 'generate_uber_files' and 'CryAudioImplWwise' in bld.spec_modules(spec, platform, configuration):
if not os.path.isdir('Code/SDKs/Audio/wwise/SDK'):
Logs.error('[ERROR] Attempting to build CryAudioImplWwise, but could not find the Wwise SDK at Code/SDKs/Audio/wwise/SDK, skipping.')
return
wwise_core_libs = [ 'AkMemoryMgr', 'AkMusicEngine', 'AkSoundEngine', 'AkStreamMgr' ]
wwise_plugins_libs = [
'AkAudioInputSource',
'AkCompressorFX',
'AkConvolutionReverbFX',
'AkDelayFX',
'AkExpanderFX',
'AkFlangerFX',
'AkGainFX',
'AkGuitarDistortionFX',
'AkHarmonizerFX',
'AkMatrixReverbFX',
'AkMeterFX',
'AkParametricEQFX',
'AkPeakLimiterFX',
'AkPitchShifterFX',
'AkRecorderFX',
'AkRoomVerbFX',
'AkSilenceSource',
'AkSineSource',
'AkSoundSeedImpactFX',
'AkSoundSeedWind',
'AkSoundSeedWoosh',
'AkStereoDelayFX',
'AkTimeStretchFX',
'AkToneSource',
'AkTremoloFX',
'AkVorbisDecoder',
'McDSPFutzBoxFX',
'McDSPLimiterFX',
'AkSynthOne'
]
wwise_extra_plugins_libs = [
'iZTrashBoxModelerFX',
'iZTrashDistortionFX',
'iZTrashDynamicsFX',
'iZTrashDelayFX',
'iZHybridReverbFX',
'CrankcaseAudioREVModelPlayerFX',
'iZTrashMultibandDistortionFX',
'iZTrashFiltersFX',
'AkMotionGenerator',
'AkRumble',
'IOSONOProximityMixer',
'AuroHeadphoneFX',
'AuroPannerMixer'
]
# CommunicationCentral must be the linked before other wwise libraries on gcc-based linkers
comm_central_libs = [ 'CommunicationCentral' ]
common_wwise_libs = wwise_core_libs + wwise_plugins_libs
win_libs = [ 'Ole32', 'dxguid', 'ws2_32', 'Dsound', 'XINPUT9_1_0', 'Msacm32' ]
win_release_libs = common_wwise_libs + wwise_extra_plugins_libs + win_libs
win_non_release_libs = comm_central_libs + win_release_libs + [ 'SFlib' ]
durango_libs = [ 'MMDevApi', 'Ws2_32', 'combase', 'kernelx', 'SmartGlassInterop', 'd3dcompiler', 'dxguid', 'xg_x', 'uuid', 'xaudio2', 'acphal' ]
durango_release_libs = common_wwise_libs + wwise_extra_plugins_libs + durango_libs
durango_non_release_libs = comm_central_libs + durango_release_libs
orbis_libs = [ 'SceAudioOut_stub_weak', 'SceAjm_stub_weak', 'SceAudio3d_stub_weak', 'SceMove_stub_weak']
orbis_wwise_libs = wwise_extra_plugins_libs + [ 'SceAudio3dEngine' ]
orbis_release_libs = common_wwise_libs + orbis_wwise_libs + orbis_libs
orbis_non_release_libs = comm_central_libs + orbis_release_libs
linux_module_extensions = [ 'sdl2' ],
linux_libs = [ 'pthread' ]
linux_release_libs = common_wwise_libs + linux_libs
linux_non_release_libs = comm_central_libs + linux_release_libs
darwin_wwise_libs = [ 'AkAACDecoder' ]
darwin_release_libs = common_wwise_libs + darwin_wwise_libs
darwin_non_release_libs = comm_central_libs + darwin_release_libs
android_release_libs = common_wwise_libs + [ 'CrankcaseAudioREVModelPlayerFX', 'AuroHeadphoneFX', 'AuroPannerMixer' ]
android_non_release_libs = comm_central_libs + android_release_libs
wwise_lib_subfolder = 'vc110'
try:
msvc_version = bld.env['MSVC_VERSION']
if msvc_version == '11.0':
wwise_lib_subfolder = 'vc110'
if msvc_version == '12.0':
wwise_lib_subfolder = 'vc120'
if msvc_version == '14.0':
wwise_lib_subfolder = 'vc140'
except:
pass
bld.CryEngineModule(
target = 'CryAudioImplWwise',
vs_filter = 'CryEngine/Audio/Implementations',
file_list = 'cryaudioimpl.waf_files',
pch = 'stdafx.cpp',
includes = [
'../../Common',
bld.CreateRootRelativePath('Code/SDKs/Audio/oculus/wwise/Include'),
bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/include')
],
debug_defines = 'WWISE_FOR_DEBUG',
profile_defines = 'WWISE_FOR_PROFILE',
performance_defines = 'WWISE_FOR_RELEASE',
release_defines = 'WWISE_FOR_RELEASE',
win_debug_lib = win_non_release_libs,
win_profile_lib = win_non_release_libs,
win_performance_lib = win_release_libs,
win_release_lib = win_release_libs,
win_x86_debug_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/Win32_' + wwise_lib_subfolder + '/Debug/lib'),
win_x86_profile_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/Win32_' + wwise_lib_subfolder + '/Profile/lib'),
win_x86_performance_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/Win32_' + wwise_lib_subfolder + '/Release/lib'),
win_x86_release_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/Win32_' + wwise_lib_subfolder + '/Release/lib'),
win_x64_debug_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/x64_' + wwise_lib_subfolder + '/Debug/lib'),
win_x64_profile_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/x64_' + wwise_lib_subfolder + '/Profile/lib'),
win_x64_performance_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/x64_' + wwise_lib_subfolder + '/Release/lib'),
win_x64_release_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/x64_' + wwise_lib_subfolder + '/Release/lib'),
durango_debug_lib = durango_non_release_libs,
durango_profile_lib = durango_non_release_libs,
durango_performance_lib = durango_release_libs,
durango_release_lib = durango_release_libs,
durango_debug_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/XboxOne_' + wwise_lib_subfolder + '/Debug/lib'),
durango_profile_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/XboxOne_' + wwise_lib_subfolder + '/Profile/lib'),
durango_performance_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/XboxOne_' + wwise_lib_subfolder + '/Release/lib'),
durango_release_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/XboxOne_' + wwise_lib_subfolder + '/Release/lib'),
orbis_debug_lib = orbis_non_release_libs,
orbis_profile_lib = orbis_non_release_libs,
orbis_performance_lib = orbis_release_libs,
orbis_release_lib = orbis_release_libs,
orbis_debug_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/PS4/Debug/lib'),
orbis_profile_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/PS4/Profile/lib'),
orbis_performance_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/PS4/Release/lib'),
orbis_release_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/PS4/Release/lib'),
linux_debug_lib = linux_non_release_libs,
linux_profile_lib = linux_non_release_libs,
linux_performance_lib = linux_release_libs,
linux_release_lib = linux_release_libs,
linux_x64_libpath = bld.CreateRootRelativePath('Code/SDKs/SDL2/lib/linux/'),
linux_x64_debug_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/Linux_x64/Debug/lib'),
linux_x64_profile_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/Linux_x64/Profile/lib'),
linux_x64_release_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/Linux_x64/Release/lib'),
linux_x64_performance_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/Linux_x64/Release/lib'),
darwin_debug_lib = darwin_non_release_libs,
darwin_profile_lib = darwin_non_release_libs,
darwin_performance_lib = darwin_release_libs,
darwin_release_lib = darwin_release_libs,
darwin_framework = [ 'CoreAudio', 'AudioUnit', 'AudioToolbox' ],
darwin_x64_debug_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/Mac/Debug/lib'),
darwin_x64_profile_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/Mac/Profile/lib'),
darwin_x64_release_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/Mac/Release/lib'),
darwin_x64_performance_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/Mac/Release/lib'),
android_debug_lib = android_non_release_libs,
android_profile_lib = android_non_release_libs,
android_performance_lib = android_release_libs,
android_release_lib = android_release_libs,
android_arm_debug_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/android-9_armeabi-v7a/Debug/lib'),
android_arm_profile_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/android-9_armeabi-v7a/Profile/lib'),
android_arm_performance_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/android-9_armeabi-v7a/Release/lib'),
android_arm_release_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/android-9_armeabi-v7a/Release/lib'),
android_arm64_debug_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/android-21_arm64-v8a/Debug/lib'),
android_arm64_profile_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/android-21_arm64-v8a/Profile/lib'),
android_arm64_performance_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/android-21_arm64-v8a/Release/lib'),
android_arm64_release_libpath = bld.CreateRootRelativePath('Code/SDKs/Audio/wwise/SDK/android-21_arm64-v8a/Release/lib'),
)
|
MibuWolf/CryGame
|
Code/CryEngine/CryAudioSystem/implementations/CryAudioImplWwise/wscript
|
wscript
| 9,786 |
python
|
en
|
code
| 2 |
github-code
|
50
|
|
11697945116
|
import operator
import random
import math
def calcShannonEnt(dataSet): # compute the Shannon entropy of the data set
numEntries = len(dataSet) # number of rows in the data set
labelCounts = {}
for featVec in dataSet:
currentLabel = featVec[-1] # the last element of each row is the class label
if currentLabel not in labelCounts.keys():
labelCounts[currentLabel] = 0
labelCounts[currentLabel] += 1 # count how many classes there are and how many samples each has
shannonEnt = 0
for key in labelCounts:
prob = float(labelCounts[key]) / numEntries # compute each class's entropy term
shannonEnt -= prob * math.log(prob, 2) # accumulate each class's entropy contribution
return shannonEnt
def splitDataSet(dataSet,axis,value): # data subset after splitting on a given feature value
retDataSet = []
for featVec in dataSet:
if featVec[axis] == value:
reducedFeatVec = featVec[:axis]
reducedFeatVec.extend(featVec[axis+1:])
retDataSet.append(reducedFeatVec)
return retDataSet
def chooseBestFeatureToSplit(dataSet): # choose the best feature to split on
numFeatures = len(dataSet[0])-1
baseEntropy = calcShannonEnt(dataSet) # entropy of the original data set
bestInfoGain = 0
bestFeature = -1
# ____________________________________________________________________________________
"""
Modified section
Purpose: randomly select d attributes (d = log2(number of attributes))
"""
d = math.ceil(math.log(numFeatures, 2))
feature_index = [i for i in range(numFeatures)] # build the attribute index list
random.shuffle(feature_index) # shuffle the index list randomly
feature_index_d = feature_index[:d] # take the first d indices
for i in feature_index_d:
# ____________________________________________________________________________________
featList = [example[i] for example in dataSet]
uniqueVals = set(featList)
newEntropy = 0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet, i, value)
prob = len(subDataSet) / float(len(dataSet))
newEntropy += prob * calcShannonEnt(subDataSet) # entropy after splitting on this feature
infoGain = baseEntropy - newEntropy # difference between the original entropy and the post-split entropy
if (infoGain>bestInfoGain): # the feature whose split reduces entropy the most is the best split feature
bestInfoGain = infoGain
bestFeature = i
return bestFeature
def majorityCnt(classList):
classCount = {}
for vote in classList:
if vote not in classCount.keys():
classCount[vote] = 0
classCount[vote] += 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
def createTree(dataSet, features):
labels = features.copy()
classList = [example[-1] for example in dataSet] # class labels: male or female
# 情况1
if classList.count(classList[0]) == len(classList):
return classList[0]
# ——————————————————————————————————————————————————————————————————————————————————
# Case 2
if len(dataSet[0]) == 1:
return majorityCnt(classList)
# ——————————————————————————————————————————————————————————————————————————————————
bestFeat = chooseBestFeatureToSplit(dataSet) # choose the best feature
bestFeatLabel = labels[bestFeat]
myTree = {bestFeatLabel: {}} # the classification result is stored as a nested dict
del(labels[bestFeat])
featValues = [example[bestFeat] for example in dataSet]
uniqueVals = set(featValues)
for value in uniqueVals:
subLabels = labels[:]
myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
return myTree
def classify(inputTree, featLabels, testVec):
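# Walk the decision tree: look up the root feature in featLabels, follow the branch matching the test vector's value, and recurse into sub-dicts until a leaf label is reached.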
classLabel = 'None'
firstStr = next(iter(inputTree))
secondDict = inputTree[firstStr]
featIndex = featLabels.index(firstStr)
for key in secondDict.keys():
if testVec[featIndex] == key:
if type(secondDict[key]).__name__ == 'dict':
classLabel = classify(secondDict[key], featLabels, testVec)
else:
classLabel = secondDict[key]
return classLabel
|
Carrot97/MachineLearning
|
EnsenbleLearning/RandomForest/TreeofForest.py
|
TreeofForest.py
|
py
| 4,509 |
python
|
en
|
code
| 1 |
github-code
|
50
|
25160036554
|
import numpy
from cost import hypothesis
def gradient(theta, X, Y, alpha):
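# One batch gradient-descent step: for each parameter theta[j], accumulate the derivative as the sum over samples of (hypothesis(theta, x) - y) * x[j], then update theta[j] by -(alpha/m) * derivative.
# Note that theta is updated in place, so later components use the already-updated earlier components.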
m, n = X.shape
for j in range(0, len(theta)):
derivative = 0
for x, y in zip(X, Y):
derivative += (hypothesis(theta, x) - y)*x[j]
theta[j] = theta[j] - (alpha/m) * derivative
return theta
|
andrelbol/IA
|
IC/LogicalRegression/gradient.py
|
gradient.py
|
py
| 295 |
python
|
en
|
code
| 0 |
github-code
|
50
|
26182680397
|
from PIL import Image
import os
import imageio
import datetime
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
plt.rcParams['font.size']=16
import os
pd = os.path.dirname(os.getcwd())
inDir = '/fits'
outDir = inDir
os.chdir(pd + inDir)
images = np.load('image_array.npy')
numImages = len(images)
print('{} images found'.format(numImages))
myParams={'colour':0,'slice':110}
sliceArray=[]
for i in range(numImages):
sliceArray.append(images[i][:,myParams['slice'],myParams['colour']])
fig,ax=plt.subplots(1,1,figsize=[5,4])
xVals = [100+i for i in range(300)]
def myGaussian(x,a,b,c,d,m):
return a * np.exp(-(x-b)**2/2/c**2)+d+np.multiply(m,x)
popt = []
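# Fit myGaussian (a Gaussian plus a linear background) to each slice over pixels 100-399; on a failed fit, store a zero placeholder so the downstream loops still index cleanly.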
for i,myslice in enumerate(sliceArray):
try:
popt.append(curve_fit(myGaussian,xVals,sliceArray[i][100:400],p0=[-100,250,50,200,-0.1])[0])
except:
popt.append(np.array([0,0,0,0,0]))  # placeholder with the same length as the 5-parameter model
print('fit exception at',i)
for i,myslice in enumerate(sliceArray[:50:5]):
if popt[i*5][0] !=0:
ax.plot(xVals,myslice[100:400],'.',color='b',alpha=0.25)
ax.plot(xVals,myGaussian(xVals,*popt[i*5]),color='r',linewidth=2,alpha=0.75)
name='red_'+str(myParams['slice'])
ax.set_xlabel('y (px)')
ax.set_ylabel('Intensity')
ax.set_xlim([100,400])
#ax.set_ylim([-50,250])
ax.set_xticks([100,200,300,400])
#ax[1].set_yticks([0,128,256])
fig.tight_layout()
fig.savefig(name+'.png',dpi=300)
#fig.show()
####
sdVals = []
sdValsSq = []
for opt in popt:
sdVals.append(opt[2])
for item in sdVals:
sdValsSq.append(item**2)
np.save(name,sdValsSq)
|
microncubed/diffusivity
|
scripts/08_line_scan.py
|
08_line_scan.py
|
py
| 1,602 |
python
|
en
|
code
| 0 |
github-code
|
50
|
9371337100
|
import requests
from bs4 import BeautifulSoup
def getHomeData(data):
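# Scrape the epoznan.pl home page: headline tiles (middlePosts), the latest post list (leftPosts/today), upcoming events, current cinema listings and the weather box.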
data.clear()
data['middlePosts'] = []
data['leftPosts'] = []
data['today'] = []
# data['todayEvents'] = []
# data['tomorrowEvents'] = []
# data['weekendEvents'] = []
data['futureEvents'] = []
data['inCinema'] = []
url = 'https://epoznan.pl'
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
middlePosts = soup.find_all(class_='masonryPosts__itemInner')
for article in middlePosts:
isArticleHasUpdate = False
articleTitle = article.find('h4').text.strip()
if 'AKTUALIZACJA' in articleTitle:
isArticleHasUpdate = True
articleTitle = articleTitle.replace('\n AKTUALIZACJA', '')
articleImgUrl = article.find(class_='masonryPosts__itemBg')['style'].split("('", 1)[1].split("')")[0]
articleURL = article['href']
articleID = articleURL.split('news-news-', 1)[1].split('-')[0]
data['middlePosts'].append({
'id': articleID,
'title': articleTitle,
'imgUrl': articleImgUrl,
'url': articleURL,
'update': isArticleHasUpdate
})
leftPosts = soup.find_all(class_='postItem')
for post in leftPosts:
title = post.find('h3').text
publishDate = post.find('a', class_='postItem__category').text
try:
imgUrl = post.find(class_='postItem__imageInner')['style'].split("('", 1)[1].split("')")[0]
except:
imgUrl = 'https://www.poznan.pl/mim/turystyka/pictures/epoznan,pic1,1017,75153,134326,show2.jpg'
url = post.find('a', class_='postItem__category')['href']
id = url.split('news-news-', 1)[1].split('-')[0]
try:
numberOfComments = int(post.find(class_='postItem__infoStat icon-chat').text)
except:
numberOfComments = 0
try:
numberOfPhotos = int(post.find(class_='postItem__infoStat icon-photos').text)
except:
numberOfPhotos = 0
update = False
if post.find(class_='postItem__infoTitle'):
update = True
data['leftPosts'].append({
'id': id,
'title': title,
'publishDate': publishDate,
'imgUrl': imgUrl,
'url': url,
'comments': numberOfComments,
'photos': numberOfPhotos,
'update': update,
})
fromToday = ''.join(x for x in publishDate if x.isdigit())
if int(fromToday) <= 12 or 'minut' in publishDate:
data['today'].append({
'id': id,
'title': title,
'publishDate': publishDate,
'imgUrl': imgUrl,
'url': url,
'comments': numberOfComments,
'photos': numberOfPhotos,
'update': update,
})
events = soup.find_all(class_='eventsList__item')
for event in events:
title = event.find('h4').text
if '\n' in title:
title = title.replace('\n ', '')
title = title.replace(' ', '')
try:
imgUrl = event.find(class_='eventsList__itemImage')['style'].split("('", 1)[1].split("')")[0]
except:
imgUrl = 'https://epoznan.pl/new_assets/img/culture/mettings.svg'
category = event.find(class_='eventsList__itemCategory').text
try:
infoLocation = event.find(class_='eventsList__itemInfo icon-location').text
except:
infoLocation = ''
try:
infoDate = event.find(class_='eventsList__itemInfo icon-time').text
except:
infoDate = ''
url = event.find('a')['href']
dataCategory = event['data-category']
dataCategoryName = ''
if dataCategory == '0':
dataCategoryName = 'todayEvents'
elif dataCategory == '1':
dataCategoryName = 'tomorrowEvents'
elif dataCategory == '2':
dataCategoryName = 'weekendEvents'
nextEvents = ''.join(x for x in infoDate if x.isdigit())
# if infoDate == 'dzisiaj' or infoDate == 'jutro' or int(nextEvents) < 30:
data['futureEvents'].append({
'title': title,
'dataCategory': dataCategory,
'category': category,
'infoLocation': infoLocation,
'infoDate': infoDate,
'imgUrl': imgUrl,
'url': url,
})
inCinema = soup.find_all(class_='cinemaList__item')
for movie in inCinema:
imgUrl = movie.find(class_='cinemaList__itemImage')['style'].split("('", 1)[1].split("')")[0]
url = movie.find(class_='cinemaList__itemLink')['href']
data['inCinema'].append({
'imgUrl': imgUrl,
'url': url,
})
try:
weatherInfo = soup.find(class_='weatherList__boxAlertInner').text
data['weatherInfo'] = weatherInfo
except:
data['weatherInfo'] = ''
temperatureIcon = soup.find_all(class_='weatherList__boxItemIcon')[0]['src']
temperatureCurrent = soup.find(class_='weatherList__boxItemCell weatherList__boxItemCell--textBig').text
temperatureMin = soup.find(class_='weatherList__boxItemCell weatherList__boxItemCell--textMedium').text
rain = soup.find_all(class_='weatherList__boxItemCell weatherList__boxItemCell--value')[0].text
wind = soup.find_all(class_='weatherList__boxItemCell weatherList__boxItemCell--value')[1].text + '/h'
airIcon = soup.find_all(class_='weatherList__boxItemIcon')[1]['src']
airQuality = soup.find_all(class_='weatherList__boxItemCell weatherList__boxItemCell--value')[2].text
airState = soup.find_all(class_='weatherList__boxItemCell weatherList__boxItemCell--value')[3].text
data['temperatureIcon'] = 'https://epoznan.pl/' + temperatureIcon
data['airIcon'] = 'https://epoznan.pl/' + airIcon
data['temperatureCurrent'] = temperatureCurrent
data['temperatureMin'] = temperatureMin
data['rain'] = rain
data['wind'] = wind
data['airQuality'] = airQuality
data['airState'] = airState
|
kwolarz/epoznan-backend
|
home.py
|
home.py
|
py
| 6,291 |
python
|
en
|
code
| 0 |
github-code
|
50
|
69946475675
|
from openerp.osv.orm import Model, BaseModel, fields, except_orm, FIELDS_TO_PGTYPES, LOG_ACCESS_COLUMNS # MetaModel, Model, TransientModel, AbstractModel
import types
import openerp.tools as tools
import logging
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
Model._sequence = 'ir_serial_id_seq'
for sub_class in Model.__subclasses__():
sub_class._sequence = 'ir_serial_id_seq'
from openerp.addons.base.ir.ir_actions import *
ir_actions_act_client._sequence = 'ir_serial_id_seq'
ir_actions_report_xml._sequence = 'ir_serial_id_seq'
ir_actions_act_window._sequence = 'ir_serial_id_seq'
ir_actions_act_url._sequence = 'ir_serial_id_seq'
ir_actions_server._sequence = 'ir_serial_id_seq'
ir_server_object_lines._sequence = 'ir_serial_id_seq'
FIELDS_TO_PGTYPES[fields.integer] = 'int8' # DECOD.IO
FIELDS_TO_PGTYPES[fields.many2one] = 'int8' # DECOD.IO
LOG_ACCESS_COLUMNS['create_uid'] = "BIGINT REFERENCES res_users ON DELETE RESTRICT"
LOG_ACCESS_COLUMNS['create_date'] = "TIMESTAMP without time zone DEFAULT (now() AT TIME ZONE 'UTC')"
LOG_ACCESS_COLUMNS['write_uid'] = "BIGINT REFERENCES res_users ON DELETE RESTRICT"
LOG_ACCESS_COLUMNS['write_date'] = "TIMESTAMP without time zone"
def MixIn(pyClass, mixInClass, makeAncestor=0):
if makeAncestor:
if mixInClass not in pyClass.__bases__:
pyClass.__bases__ = (mixInClass,) + pyClass.__bases__
else:
# Recursively traverse the mix-in ancestor classes in order to support inheritance
baseClasses = list(mixInClass.__bases__)
baseClasses.reverse()
for baseClass in baseClasses:
MixIn(pyClass, baseClass)
# Install the mix-in methods into the class
for name in dir(mixInClass):
if not name.startswith('__'): # skip private members
member = getattr(mixInClass, name)
if type(member) is types.MethodType:
member = member.im_func
setattr(pyClass, name, member)
class BaseModelExtend(object):
def get_related_fields_values(self, cr, uid, fields_ids, context=None): # DECOD.IO NEW
res = {}
for related_field in fields_ids.keys():
related_id = fields_ids[related_field]
field_path = related_field.split('.', 1)
model_field = field_path[0]
field_path = field_path[1]
model2 = self._columns[model_field]._obj # __contains__(self, name):
Model2 = self.pool.get(model2)
value = False
if related_id:
value = Model2.browse(cr, uid, related_id, context=context)
for field in field_path.split('.'):
if value[field] is None:
value = False
break
value = value[field]
res[related_field] = {'value': value or False}
return res
class BaseModelOverride(object):
def _inherits_reload_src(self):
""" Recompute the _inherit_fields mapping on each _inherits'd child model."""
if not self._sequence: # DECOD.IO db_uid
self._sequence = 'ir_serial_id_seq' # KGB don't mess with __init__
for obj in self.pool.models.values():
if self._name in obj._inherits:
obj._inherits_reload()
def _create_table(self, cr):
# ORIG: cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS' % (self._table,))
cr.execute("""CREATE TABLE "%s"
(id bigint NOT NULL DEFAULT nextval('ir_serial_id_seq'),
PRIMARY KEY(id)) WITHOUT OIDS
""" % (self._table,)) # DECOD.IO db_uid
cr.execute("""CREATE TRIGGER "%s"
AFTER INSERT OR UPDATE OR DELETE ON "%s"
FOR EACH ROW EXECUTE PROCEDURE oe_audit()
""" % (self._table+'_audit', self._table)) # DECOD.IO db_uid if self._log_access:
cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
_schema.debug("Table '%s': created", self._table)
def _m2m_raise_or_create_relation(self, cr, f):
m2m_tbl, col1, col2 = f._sql_names(self)
self._save_relation_table(cr, m2m_tbl)
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
if not cr.dictfetchall():
if f._obj not in self.pool:
raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
dest_model = self.pool[f._obj]
ref = dest_model._table
# ORIG: cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
cr.execute("""CREATE TABLE "%s" ("%s" BIGINT NOT NULL, "%s" BIGINT NOT NULL, UNIQUE("%s","%s"))
""" % (m2m_tbl, col1, col2, col1, col2)) # DECOD.IO db_uid
#cr.execute("""CREATE TRIGGER "%s"
# AFTER INSERT OR UPDATE OR DELETE ON "%s"
# FOR EACH ROW EXECUTE PROCEDURE oe_audit()
# """ % (self._table+'_audit', self._table)) # DECOD.IO db_uid
# create foreign key references with ondelete=cascade, unless the targets are SQL views
cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
if not cr.fetchall():
self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
if not cr.fetchall():
self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
cr.commit()
_schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
def _create_parent_columns(self, cr):
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" BIGINT' % (self._table,)) # DECOD.IO db_uid
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" BIGINT' % (self._table,)) # DECOD.IO db_uid
if 'parent_left' not in self._columns:
_logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
self._table)
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_left', 'INTEGER')
elif not self._columns['parent_left'].select:
_logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
self._table)
if 'parent_right' not in self._columns:
_logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
self._table)
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_right', 'INTEGER')
elif not self._columns['parent_right'].select:
_logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
self._table)
if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
_logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
self._parent_name, self._name)
cr.commit()
def _auto_end(self, cr, context=None): # Called after _auto_init chance ti fix integer to bigINT
""" Create the foreign keys recorded by _auto_init. """
for t, k, r, d in self._foreign_keys:
cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
cr.commit()
del self._foreign_keys
def _field_create(self, cr, context=None): # # DECOD.IO OVERRIDE db_uid nextval setval
""" Create entries in ir_model_fields for all the model's fields.
If necessary, also create an entry in ir_model, and if called from the
modules loading scheme (by receiving 'module' in the context), also
create entries in ir_model_data (for the model and the fields).
- create an entry in ir_model (if there is not already one),
- create an entry in ir_model_data (if there is not already one, and if
'module' is in the context),
- update ir_model_fields with the fields found in _columns
(TODO there is some redundancy as _columns is updated from
ir_model_fields in __init__).
"""
if context is None:
context = {}
cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
if not cr.rowcount:
# ORIG: cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
cr.execute('SELECT nextval(%s)', ('ir_serial_id_seq',)) # DECOD.IO db_uid
model_id = cr.fetchone()[0]
cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
else:
model_id = cr.fetchone()[0]
if 'module' in context:
name_id = 'model_'+self._name.replace('.', '_')
cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
if not cr.rowcount:
cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
(name_id, context['module'], 'ir.model', model_id)
)
cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
cols = {}
for rec in cr.dictfetchall():
cols[rec['name']] = rec
ir_model_fields_obj = self.pool.get('ir.model.fields')
# sparse field should be created at the end, as it depends on its serialized field already existing
model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
for (k, f) in model_fields:
vals = {
'model_id': model_id,
'model': self._name,
'name': k,
'field_description': f.string,
'ttype': f._type,
'relation': f._obj or '',
'select_level': tools.ustr(f.select or 0),
'readonly': (f.readonly and 1) or 0,
'required': (f.required and 1) or 0,
'selectable': (f.selectable and 1) or 0,
'translate': (f.translate and 1) or 0,
'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
'serialization_field_id': None,
}
if getattr(f, 'serialization_field', None):
# resolve link to serialization_field if specified by name
serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
if not serialization_field_id:
raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
vals['serialization_field_id'] = serialization_field_id[0]
# When its a custom field,it does not contain f.select
if context.get('field_state', 'base') == 'manual':
if context.get('field_name', '') == k:
vals['select_level'] = context.get('select', '0')
#setting value to let the problem NOT occur next time
elif k in cols:
vals['select_level'] = cols[k]['select_level']
if k not in cols:
# ORIG: cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
cr.execute('select nextval(%s)', ('ir_serial_id_seq',)) # DECOD.IO db_uid
id = cr.fetchone()[0]
vals['id'] = id
cr.execute("""INSERT INTO ir_model_fields (
id, model_id, model, name, field_description, ttype,
relation,state,select_level,relation_field, translate, serialization_field_id
) VALUES (
%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
)""", (
id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
vals['relation'], 'base',
vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
))
if 'module' in context:
name1 = 'field_' + self._table + '_' + k
cr.execute("select name from ir_model_data where name=%s", (name1,))
if cr.fetchone():
name1 = name1 + "_" + str(id)
cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
(name1, context['module'], 'ir.model.fields', id)
)
else:
for key, val in vals.items():
if cols[k][key] != vals[key]:
cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
cr.execute("""UPDATE ir_model_fields SET
model_id=%s, field_description=%s, ttype=%s, relation=%s,
select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
WHERE
model=%s AND name=%s""", (
vals['model_id'], vals['field_description'], vals['ttype'],
vals['relation'],
vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
))
break
|
BorgERP/edifact
|
server_base/osv/orm.py
|
orm.py
|
py
| 14,968 |
python
|
en
|
code
| 0 |
github-code
|
50
|
19666717613
|
import win32com.client
from pywintypes import com_error
from tkinter.filedialog import askopenfilenames
from pathlib import PurePath
o = win32com.client.Dispatch("Excel.Application")
wb_path = askopenfilenames(title = "Select Excel file")
o.Visible = False
i = 0
try:
wb = o.Workbooks.Open(PurePath(wb_path[i]))
ws_index_list = [1,2,3] # Select the sheet that you want to print
path_to_pdf = (r"c:/Users/cmeneses/Desktop/sample.pdf")
wb.WorkSheets(ws_index_list).Select()
wb.ActiveSheet.ExportAsFixedFormat(0, path_to_pdf)
ws_index_list = [4,5,6] # Select the sheet that you want to print
path_to_pdf = (r"c:/Users/cmeneses/Desktop/sample1.pdf")
wb.WorkSheets(ws_index_list).Select()
wb.ActiveSheet.ExportAsFixedFormat(0, path_to_pdf)
i += 1
except com_error as e:
print('failed.')
else:
print('Succeeded.')
|
cesarandres8911/Python_exercises
|
Facturacion_Expa/PrintToPdf.py
|
PrintToPdf.py
|
py
| 861 |
python
|
en
|
code
| 0 |
github-code
|
50
|
9073798892
|
"""
"""
from processing_components.calibration.calibration_control import calibrate_function
from processing_components.calibration.operations import apply_gaintable
from processing_components.visibility.gather_scatter import visibility_gather_channel
from processing_components.visibility.operations import divide_visibility, integrate_visibility_by_channel
def calibrate_list_serial_workflow(vis_list, model_vislist, calibration_context='TG', global_solution=True,
**kwargs):
""" Create a set of components for (optionally global) calibration of a list of visibilities
If global solution is true then visibilities are gathered to a single visibility data set which is then
self-calibrated. The resulting gaintable is then effectively scattered out for application to each visibility
set. If global solution is false then the solutions are performed locally.
:param vis_list:
:param model_vislist:
:param calibration_context: String giving terms to be calibrated e.g. 'TGB'
:param global_solution: Solve for global gains
:param kwargs: Parameters for functions in components
:return:
"""
def solve_and_apply(vis, modelvis=None):
return calibrate_function(vis, modelvis, calibration_context=calibration_context, **kwargs)[0]
if global_solution:
point_vislist = [divide_visibility(vis_list[i], model_vislist[i])
for i, _ in enumerate(vis_list)]
global_point_vis_list = visibility_gather_channel(point_vislist)
global_point_vis_list = integrate_visibility_by_channel(global_point_vis_list)
# This is a global solution so we only compute one gain table; call calibrate_function directly,
# since solve_and_apply above returns only the corrected visibility (this assumes
# calibrate_function returns a (visibility, gaintable) pair, as its [0] indexing suggests)
_, gt_list = calibrate_function(global_point_vis_list, None, calibration_context=calibration_context, **kwargs)
return [apply_gaintable(v, gt_list, inverse=True)
for v in vis_list]
else:
return [solve_and_apply(vis_list[i], model_vislist[i]) for i, v in enumerate(vis_list)]
|
rtobar/algorithm-reference-library
|
workflows/serial/calibration/calibration_serial.py
|
calibration_serial.py
|
py
| 1,996 |
python
|
en
|
code
| null |
github-code
|
50
|
38292239949
|
# normalized vocabulary for evidence_label
# 'FDA guidelines', 'preclinical', 'trials', 'NCCN guidelines', or
# 'European LeukemiaNet Guidelines'
# see https://docs.google.com/spreadsheets/d/1j9AKdv1k87iO8qH-ujnW3x4VusbGoXDjdb5apUqJsSI/edit#gid=1415903760
def evidence_label(evidence, association, na=False):
# CGI
# Drug status?? VICC group??
# cgi_a = ['clinical practice']
# cgi_b = ['clinical trials iii', 'clinical trials iv']
# cgi_c = ['clinical trials i', 'clinical trials ii', 'case reports']
# cgi_d = ['pre-clinical data']
# CGI
# Evidence level
cgi_a = ['cpic guidelines', 'european leukemianet guidelines', 'fda guidelines', 'nccn guidelines', 'nccn/cap guidelines'] # NOQA
cgi_b = ['late trials', 'late trials,pre-clinical']
cgi_c = ['early trials', 'case report', 'clinical trial',
'early trials,case report']
cgi_d = ['pre-clinical', 'clinical trials']
# JAX
jax_a = ['guideline', 'fda approved']
jax_b = ['phase iii', 'clinical study - cohort', 'clinical study - meta-analysis']
jax_c = ['phase i', 'phase ib', 'phase ib/ii', 'phase ii', 'clinical study', 'case reports/case series']
jax_d = ['phase 0', 'preclinical', 'preclinical - cell line xenograft', 'preclinical - cell culture', 'preclinical - pdx', 'preclinical - patient cell culture', 'preclinical - pdx & cell culture'] # NOQA
# PMKB
pmkb_a = ['1']
pmkb_b = []
pmkb_c = ['2']
pmkb_d = []
# CIVIC
civic_a = ['a']
civic_b = ['b']
civic_c = ['c']
civic_d = ['d', 'e']
# ONCOKB
oncokb_a = ['1', 'r1']
oncokb_b = ['2a']
oncokb_c = ['2b', '3a', '3b']
oncokb_d = ['4']
# molecularmatch
molecularmatch_a = ['1', '1a']
molecularmatch_b = ['1b']
molecularmatch_c = ['2', '2c']
molecularmatch_d = ['2d', '3', '4', '5']
# sage
sage_c = ['early clinical', 'case reports']
# molecularmatch_trials
# >>> phases
# set([u'Phase 1/Phase 2', u'Early Phase 1', u'Phase 2a',
# u'N/A', u'Phase 2b', u'Phase 2', u'Phase 3', u'Phase 0', u'Phase 1',
# u'Phase 4', u'Unknown', u'Not Applicable', u'Phase 2/Phase 3'])
molecularmatch_t_a = []
molecularmatch_t_b = ['phase 1/phase 3', 'phase 3', 'phase 4',
'phase 2/phase 3', 'phase 3/phase 4',
'phase 4/phase 4']
molecularmatch_t_c = ['early phase 1', 'phase 1', 'phase 2a', 'phase 2b',
'phase 1/phase 2', 'phase 2', 'phase 1 / phase 2',
'phase 1/phase 1']
molecularmatch_t_d = ['phase 0', 'n/a', 'unknown', 'not applicable']
ev_lab = {
'A': cgi_a + jax_a + pmkb_a + civic_a + oncokb_a + molecularmatch_a,
'B': cgi_b + jax_b + pmkb_b + civic_b + oncokb_b + molecularmatch_b +
molecularmatch_t_b,
'C': cgi_c + jax_c + pmkb_c + civic_c + oncokb_c + molecularmatch_c +
sage_c + molecularmatch_t_c, # NOQA
'D': cgi_d + jax_d + pmkb_d + civic_d + oncokb_d + molecularmatch_d +
molecularmatch_t_d
}
ev_lev = {
'A': 1,
'B': 2,
'C': 3,
'D': 4
}
for item in ev_lab:
for opt in ev_lab[item]:
if evidence and opt == evidence.lower():
association['evidence_label'] = item
association['evidence_level'] = ev_lev[item]
break
if 'evidence_label' not in association:
if na:
association['evidence_label'] = 'D'
# association['evidence_level'] = 'NA'
else:
association['evidence_label'] = evidence
# association['evidence_level'] = evidence
return association
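# A minimal usage sketch (assumption: run in the same module in which
# evidence_label is defined). 'phase iii' is listed in jax_b above, so the
# string should normalize to label 'B' / level 2.
if __name__ == '__main__':
    demo = evidence_label('Phase III', {}, na=True)
    print(demo)  # expected: {'evidence_label': 'B', 'evidence_level': 2}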
|
ohsu-comp-bio/g2p-aggregator
|
harvester/evidence_label.py
|
evidence_label.py
|
py
| 3,746 |
python
|
en
|
code
| 48 |
github-code
|
50
|
36392048280
|
from pwn import *
host, port = '10.10.147.219',3404
s = remote(host,port)
s.recvline()
while 1:
op = s.recvline()
op = op.decode('utf-8')
print(op)
if 'flag' in op:
print(op)
msg = s.recvall()
msg = msg.decode('utf-8')
print(msg)
break
solve = op.split(' ')
if 'add' == solve[1]:
res = int(solve[0])+int(solve[2])
if 'minus' == solve[1]:
res = int(solve[0])-int(solve[2])
if 'multiply' == solve[1]:
res = int(solve[0])*int(solve[2])
print(res)
s.sendline(str(res))
s.close()
|
AlionGreen/python-scripts
|
flag83.py
|
flag83.py
|
py
| 586 |
python
|
en
|
code
| 1 |
github-code
|
50
|
13146243692
|
import io_utils
import numpy as np
import pandas as pd
import shutil
"""
DataSets.load(data_set_name)->data, labels
DataSets.save_artificial(data, labels, features_labels)
load the already computed rank/weight by feature selector using cv strategy on data set(cv,D)
PrecomputedData.load(data_set_name, cv, assessment_method(weight/rank), feature_selector)
load the cv indices for data set [(train_indices, test_indices)] cv
PrecomputedData.load_cv(data_set,cv)
load the statistical analysis of weights of feature on data set by feature selector
Analysis.load_csv(data_set, cv, assessment_method, feature_method)
"""
class DataSets:
root_dir = ".."
data_sets = {
'colon': (
{
"path": "/COLON/COLON/colon.data",
},
{
"path": "/COLON/COLON/colon.labels",
"apply_transform": np.sign
}
),
'arcene': (
{
"path": "/ARCENE/ARCENE/arcene.data",
"apply_transform": np.transpose,
# "feat_labels": "/ARCENE/ARCENE/arcene_feat.labels"
},
{
'path': "/ARCENE/ARCENE/arcene.labels",
}
),
'dexter': (
{
# "feat_labels": "/DEXTER/DEXTER/dexter_feat.labels",
"path": "/DEXTER/DEXTER/dexter.data",
"method": "sparse_matrix",
"args": [20000]
},
{
"path": "/DEXTER/DEXTER/dexter.labels",
}
),
"dorothea": (
{
# "feat_labels": "/DOROTHEA/DOROTHEA/dorothea_feat.labels",
"path": "/DOROTHEA/DOROTHEA/dorothea.data",
"method": "sparse_binary_matrix",
"args": [100001],
"apply_transform": lambda x: x[:, :150]
},
{
"path": "/DOROTHEA/DOROTHEA/dorothea.labels",
"apply_transform": lambda x: x[:150]
}
),
# TODO feature labels not found
"gisette": (
{
# "feat_labels": "/GISETTE/GISETTE/gisette_feat.labels",
"path": "/GISETTE/GISETTE/gisette_valid.data",
"apply_transform": lambda x: np.transpose(x)[:, :200],
},
{
"path": "/GISETTE/GISETTE/gisette_valid.labels",
"apply_transform": lambda x: x[:200]
}
),
"artificial": (
{
"feat_labels": "/ARTIFICIAL/ARTIFICIAL/artificial_feat.labels",
"path": "/ARTIFICIAL/ARTIFICIAL/artificial.data.npy",
"method": "numpy_matrix",
},
{
"path": "/ARTIFICIAL/ARTIFICIAL/artificial.labels.npy",
"method": "numpy_matrix",
}
)
}
@staticmethod
def save_artificial(data, labels, feature_labels):
"""
save data to "../ARTIFICIAL/ARTIFICIAL/artificial.data"
save labels to "../ARTIFICIAL/ARTIFICIAL/artificial.labels"
save feature labels to "../ARTIFICIAL/ARTIFICIAL/artificial_feat.labels"
"""
PreComputedData.delete("artificial")
artificial_data_dir = DataSets.root_dir + "/ARTIFICIAL/ARTIFICIAL"
io_utils.mkdir(artificial_data_dir)
data_file_name = artificial_data_dir + "/artificial.data"
label_file_name = artificial_data_dir + "/artificial.labels"
feature_label_file_name = artificial_data_dir + "/artificial_feat.labels"
np.save(data_file_name, data)
np.save(label_file_name, labels)
np.savetxt(feature_label_file_name, feature_labels, fmt='%d')
@staticmethod
def load(data_set):
"""
load the data and return the data(D,N) and the labels(N,)
        if there are feature labels, the relevant features are at the top of the returned data
"""
data_info, labels_info = DataSets.data_sets[data_set]
labels = DataSets.__load_data_set_file(labels_info)
data = DataSets.__load_data_set_file(data_info) # D ,N
feature_labels = DataSets.load_features_labels(data_set)
if feature_labels is not None:
features = data[feature_labels == 1]
probes = data[feature_labels == -1]
data = np.vstack((features, probes))
return data, labels
@staticmethod
def __load_data_set_file(info):
data = getattr(io_utils, info.get('method', 'regular_matrix'))(
DataSets.root_dir + info['path'],
*info.get('args', []),
**info.get('kwargs', {})
)
apply_transform = info.get('apply_transform', False)
if apply_transform:
return apply_transform(data)
return data
@staticmethod
def load_features_labels(data_set):
if data_set not in DataSets.data_sets:
return None
data_info, _ = DataSets.data_sets[data_set]
feat_labels_filename = data_info.get('feat_labels', None)
if feat_labels_filename is not None:
return np.loadtxt(DataSets.root_dir + feat_labels_filename)
return None
class PreComputedData:
# the precomputed data is the weights or ranks(decided by assessment method) computed by
# feature_selector for every fold's training set for data set (cv,D)
@staticmethod
def load(data_set, cv, assessment_method, feature_selector):
# ../pre_computed_data/data_set/cv/assessment_method/feature_selector.npy
filename = PreComputedData.file_name(data_set, cv, assessment_method, feature_selector)
try:
return np.load(filename, allow_pickle=True)
except FileNotFoundError:
print("File " + filename + " not found")
raise
@staticmethod
def file_name(data_set, cv, assessment_method, feature_selector):
return "{data_dir}/{feature_selector}.npy".format(
data_dir=PreComputedData.dir_name(data_set, cv, assessment_method),
feature_selector=feature_selector.__name__
)
@staticmethod
def load_cv(data_set, cv):
# ../pre_computed_data/data_set/cv/indices.npy
file_name = PreComputedData.cv_file_name(data_set, cv)
try:
return np.load(file_name, allow_pickle=True)
except FileNotFoundError:
print("CV {} was never generated".format(cv.__name__))
raise
@staticmethod
def delete(data_set):
try:
shutil.rmtree(PreComputedData.root_dir(data_set))
except FileNotFoundError:
pass
@staticmethod
def cv_file_name(data_set, cv):
return PreComputedData.cv_dir(data_set, cv) + "/indices.npy"
@staticmethod
def dir_name(data_set, cv, assessment_method):
return "{cv_dir}/{method}".format(
cv_dir=PreComputedData.cv_dir(data_set, cv),
method=assessment_method
)
@staticmethod
def cv_dir(data_set, cv):
return "{data_set_dir}/{cv}".format(
data_set_dir=PreComputedData.root_dir(data_set),
cv=cv.__name__
)
@staticmethod
def root_dir(data_set):
return "{root_dir}/pre_computed_data/{data_set}".format(
root_dir=DataSets.root_dir,
data_set=data_set
)
class Analysis:
"""
    this class loads the statistical analysis of the feature weights computed by a feature selection algorithm for
    every fold of the cross validation on a data set
    (see analyse_weights.py for what the statistical analysis consists of)
"""
@staticmethod
def load_csv(data_set, cv, assessment_method, feature_method):
# ../precomputed_data/data_set/cv/assessment_method/feature_selector.csv
filename = Analysis.file_name(data_set, cv, assessment_method, feature_method) + ".csv"
try:
stats = pd.read_csv(filename)
return stats
except FileNotFoundError:
print("File " + filename + " not found")
raise
@staticmethod
def file_name(data_set, cv, assessment_method, feature_method):
return Analysis.dir_name(data_set, cv, assessment_method) + "/" + feature_method.__name__
@staticmethod
def dir_name(data_set, cv, method):
return "{root_dir}/pre_computed_data/{data_set}/{cv}/{method}".format(
root_dir=DataSets.root_dir,
method=method,
data_set=data_set,
cv=cv.__name__
)
|
WYBupup/EnsembleMethodsForFeatureSelection
|
data_sets.py
|
data_sets.py
|
py
| 8,594 |
python
|
en
|
code
| null |
github-code
|
50
|
2579283470
|
import dash
from dash import Dash, html, dcc, Input, Output, callback, State
import plotly.express as px
from dash import dash_table
import pandas as pd
import dash_bootstrap_components as dbc
import plotly.graph_objects as go
import sys
import json
import numerize
from numerize import numerize
from flask_caching import Cache
sys.path.insert(0, '../modules')
# app = Dash(__name__)
dash.register_page(__name__, path = "/")
# app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP, dbc.icons.BOOTSTRAP])
# sales = app.sales_data()
# # store_sales = dcc.Store(id = 'sales-store', data = sales_data.to_dict('records'))
# year_filter = sales["year"].unique().tolist()
# year_options = year_filter.sort()
# category_filter = sales["Product_Category"].unique().tolist()
# warehouse_filter = sales["Warehouse"].unique().tolist()
summary_table = pd.DataFrame.from_dict({
"categories":["X"], "orders":[10], "returns":[1], "net_sales":[9], "month_on_month":[2.5]
})
revSummary = ['Products Sold', 'Returns', 'Highest Grossing Warehouse', 'Best Performing Category']
layout = html.Div([
dcc.Store(id = "sales-data"),
dbc.Row([
dbc.Col([
dcc.Dropdown( placeholder = 'Year', id = 'date-time-filter',
className="dbc year-dropdown .Select-control", value = 2021)
], width=2),
dbc.Col([
html.Button( 'Download Report', id = 'downloader',
className="btn btn-info")
], width = 2),
], justify= 'end'),
html.Br(),
# row with summary cards
dbc.Row([
dbc.Col([
dbc.Row([
dbc.Col(
html.Div(
[
html.Div([
html.P("Orders", className="card-text"),
dbc.Row([
dbc.Col([
html.H5(className="card-title", id = "sum-of-orders"),
html.P("00.00 %", className="card-text")
], width = 8),
dbc.Col([
html.I(className="bi bi-cart-check fa-3x icon-main")
], width = 4)
])
], className="card-body")
], className= "card bg-light mb-3"
), width=4
),
dbc.Col(
html.Div(
[
html.Div([
html.P("Returns", className="card-text"),
dbc.Row([
dbc.Col([
html.H5( className="card-title", id = "sum-of-returns"),
html.P("00.00 %", className="card-text")
], width = 8),
dbc.Col([
html.I(className = "bi bi-arrow-left-square fa-3x icon-negative")
], width = 4)
])
], className="card-body")
], className= "card bg-light mb-3"
), width=4
),
dbc.Col(
html.Div(
[
html.Div([
html.P("Net Sales", className="card-text"),
dbc.Row([
dbc.Col([
html.H5("000", className="card-title", id = "net-sales"),
html.P("00.00 %", className="card-text")
], width = 8),
dbc.Col([
html.I(className = "bi bi-cash-coin fa-3x icon-positive")
], width = 4)
])
], className="card-body")
], className= "card bg-light mb-3"
), width=4
),
]),
dbc.Row([
dbc.Col(
html.Div(
[
html.Div([
html.H6("Monthly Sales", className="card-title"),
dcc.Graph( id = "monthly-sales-chart")
], className="card-body")
], className= "card bg-light mb-3"
), width=12
),
]),
],),
dbc.Col([
html.Div(
[
html.Div([
html.H6("Highlights", className="card-title"),
html.Br(),
dbc.Row([
dbc.Col([
html.I(className="bi bi-arrow-up-right-square-fill fa-2x icon-main opacity")
], width = 2),
dbc.Col([
dbc.Row([
dbc.Col(["Highest Grossing Category"])
]),
dbc.Row([
dbc.Col(["Name"], id ="highest-cat-name", className = "border-right", width =6),
dbc.Col(["Orders"], id ="highest-cat-orders", width =6),
# dbc.Col(["Revenue"], id ="highest-cat-revenue", width =4),
]),
# html.Div(["name"],),
# html.Div(["orders"], id ="most-purchased-orders"),
# html.Div(["revenue"], id ="most-purchased-revenue"),
], width = 10)
]),
html.Hr(),
dbc.Row([
dbc.Col([
html.I(className="bi bi-arrow-up-right-square-fill fa-2x icon-main opacity")
], width = 2),
dbc.Col([
dbc.Row([
dbc.Col(["Most Purchased Product"])
]),
dbc.Row([
dbc.Col(["Name"], id ="most-purchased-name", className = "border-right", width =6),
dbc.Col(["Orders"], id ="most-purchased-orders", width =6),
# dbc.Col(["Revenue"], id ="most-purchased-revenue", width =4),
]),
# html.Div(["name"],),
# html.Div(["orders"], id ="most-purchased-orders"),
# html.Div(["revenue"], id ="most-purchased-revenue"),
], width = 10)
]),
html.Hr(),
dbc.Row([
dbc.Col([
html.I(className="bi bi-arrow-down-left-square-fill fa-2x icon-main opacity")
], width = 2),
dbc.Col([
dbc.Row([
dbc.Col(["Least Purchased Product"])
]),
dbc.Row([
dbc.Col(["Name"], id ="least-purchased-name", className = "border-right", width =6),
dbc.Col(["Orders"], id ="least-purchased-orders", width =6),
# dbc.Col(["Revenue"], id ="least-purchased-revenue", width =4),
]),
# html.Div(["name"],),
# html.Div(["orders"], id ="most-purchased-orders"),
# html.Div(["revenue"], id ="most-purchased-revenue"),
], width = 10)
]),
html.Hr(),
dbc.Row([
dbc.Col([
html.I(className="bi bi-arrow-left-square-fill fa-2x icon-main opacity")
], width = 2),
dbc.Col([
dbc.Row([
dbc.Col(["Most Returned Product"])
]),
dbc.Row([
dbc.Col(["Name"], id ="most-returned-name", className = "border-right", width =6),
dbc.Col(["Orders"], id ="most-returned-orders", width =6),
# dbc.Col(["Revenue"], id ="most-returned-revenue", width =4),
]),
# html.Div(["name"],),
# html.Div(["orders"], id ="most-purchased-orders"),
# html.Div(["revenue"], id ="most-purchased-revenue"),
], width = 10)
]),
html.Br(),
], className="card-body"),
], className= "card border-light mb-3"
)
,
html.Div([
html.Div([
html.Div(["Gauge Chart"]),
dcc.Graph(id = "gauge-chart", className ='guage-chart')
], className = "card-body")
], className="card border-light mb-3 "),
],width=4, className= "fixed-cards"),
], className = "fixed-section"),
#row with graph and highlights card
dbc.Row([
dbc.Col(
html.Div(
[
html.Div([
html.H6("Top 10 Categories by Demand", className="card-title"),
html.Br(),
], className="card-body"),
html.Div([
], id = 'category-rating', className = "cat-card-rows" , style = {"overflow": "scroll"}),
], className= "card bg-light mb-3 second-section details-table-section "
), width=4
),
dbc.Col(
html.Div(
[
dbc.Row([
dbc.Col([
html.Div([
html.H6("Data", className="card-title"),
], className="card-body"),
], width=6),
dbc.Col([
dbc.Row([
dbc.Col([
dcc.Dropdown( placeholder = 'Warehouse', id = 'table-warehouse-filter',
className="dbc year-dropdown .Select-control")
], width=8),
dbc.Col([
html.Button( 'Filter', id = 'table-filter',
className="btn btn-info")
], width = 4),
])
], width=6)
], justify= "between", className="details-table-nav"),
html.Div(dbc.Table(id = 'data-table'), className = "table-box")
], className= "card bg-light mb-3 details-table-section second-section"
), width=8
, ),
], )
])
# @callback(Output('sales-data', 'cdata'))
# def store_sales_data(): # sourcery skip: inline-immediately-returned-variable
# return sales_data.reset_index().to_json(orient="split")
#year dropdown options
@callback(
Output("date-time-filter", "options"),
Input("sales-store", "data")
)
def filter_options(data):
# sourcery skip: inline-immediately-returned-variable
sales_data = pd.DataFrame(json.loads(data))
options = sales_data["year"]
options = options.unique()
return options
@callback(
Output("sales-data", "data"),
Input("date-time-filter", "value"),
Input("sales-store", "data")
)
def filtered_data(value, data):
# sourcery skip: inline-immediately-returned-variable
data = pd.DataFrame(json.loads(data))
new_df = data[data["year"] == value]
new_df = new_df.to_json(date_format='iso')
return new_df
@callback(
Output("table-warehouse-filter", "options"),
Input("sales-data", "data")
)
def warehouse_options(data):
# sourcery skip: inline-immediately-returned-variable
sales_data = pd.DataFrame(json.loads(data))
options = sales_data["Warehouse"]
options = options.unique()
return options
@callback(
[Output("sum-of-orders", "children"), Output("sum-of-returns", "children"), Output("net-sales", "children"), ],
Input("sales-data", "data")
)
def summary_cards(data):
data = pd.DataFrame(json.loads(data))
# data = data[data["year"] == year]
total_orders = data["Order_Demand"].sum()
total_returns = data["Returns"].sum()
net_sales = total_orders - total_returns
total_orders = numerize.numerize(total_orders)
total_returns = numerize.numerize(total_returns)
net_sales = numerize.numerize(net_sales)
return [total_orders, total_returns, net_sales]
# @callback(
# [Output("sum-of-returns", "children"), ],
# # Input("date-time-filter", "value"),
# Input("sales-data", "data")
# )
# def returns_stats(data):
# data = pd.DataFrame(json.loads(data))
# # data = data[data["year"] == year]
# total_returns = data["Returns"].sum()
# total_returns = numerize.numerize(total_returns)
# return [total_returns]
@callback(
[Output("highest-cat-name", "children"), Output("highest-cat-orders", "children")],
# Input("date-time-filter", "value"),
Input("sales-data", "data")
)
def cat_stats(data):
data = pd.DataFrame(json.loads(data))
# data = data[data["year"] == year]
high_cat = data[["Product_Category", "Order_Demand"]]
high_cat = high_cat.groupby(["Product_Category"]).sum("Order_Demand")
high_cat_name= high_cat.idxmax()
high_cat_orders = numerize.numerize(high_cat["Order_Demand"].max())
return [high_cat_name, high_cat_orders]
@callback(
[Output("most-purchased-name", "children"), Output("most-purchased-orders", "children")],
# Input("date-time-filter", "value"),
Input("sales-data", "data")
)
def prod_stats(data):
data = pd.DataFrame(json.loads(data))
# data = data[data["year"] == year]
most_purchased = data[["Product_Code", "Order_Demand"]]
most_purchased = most_purchased.groupby(["Product_Code"]).sum("Order_Demand")
most_purchased_name= most_purchased.idxmax()
most_purchased_orders = numerize.numerize(most_purchased["Order_Demand"].max())
return [most_purchased_name, most_purchased_orders]
@callback(
[Output("least-purchased-name", "children"), Output("least-purchased-orders", "children")],
# Input("date-time-filter", "value"),
Input("sales-data", "data")
)
def prod_stats_two(data):
data = pd.DataFrame(json.loads(data))
# data = data[data["year"] == year]
least_purchased = data[["Product_Code", "Order_Demand"]]
least_purchased = least_purchased.groupby(["Product_Code"]).sum("Order_Demand")
least_purchased_name= least_purchased.idxmin()
least_purchased_orders = numerize.numerize(least_purchased["Order_Demand"].min())
return [least_purchased_name, least_purchased_orders]
@callback(
[Output("most-returned-name", "children"), Output("most-returned-orders", "children")],
# Input("date-time-filter", "value"),
Input("sales-data", "data")
)
def prod_returns(data):
data = pd.DataFrame(json.loads(data))
# data = data[data["year"] == year]
most_returned = data[["Product_Code", "Returns"]]
most_returned = most_returned.groupby(["Product_Code"]).sum("Returns")
most_returned_name= most_returned.idxmax()
most_returned_orders = numerize.numerize(most_returned["Returns"].max())
return [most_returned_name, most_returned_orders]
@callback(
Output("monthly-sales-chart", "figure"),
# Input("date-time-filter", "value"),
Input("sales-data", "data")
)
def salesTrend(data):
plot_data = pd.DataFrame(json.loads(data))
# plot_data = plot_data[plot_data["year"] == date]
plot_data = plot_data.groupby(["month_year", "month"]).sum("Order_Demand")
plot_data = pd.DataFrame(plot_data).reset_index()
# plot_data['month'] = plot_data['month'].astype('str')
fig = px.histogram(plot_data, y="Order_Demand", x="month", template="cyborg", histfunc='sum',
labels = {'Order_Demand':'Orders', "month": "Month"})
fig.update_layout(
paper_bgcolor = '#222',
margin={'l':20, 'r':20, 'b':10},
font_color='white',
# font_size=18,
hoverlabel={'bgcolor':'black', 'font_size':12, },
bargap=.40 ,
# xaxis = {
# 'tickangle': 45
# }
)
fig.update_traces(
# marker_bgcolor="#93c"
marker = {
'color': '#93c',
}
)
fig.update_xaxes( # the y-axis is in dollars
dtick= 30,
showgrid=True,
tickmode = 'linear',
tickangle = 45,
tickfont=dict(size=10),
type = 'category',
showticklabels = True
)
return fig
@callback(
Output("gauge-chart", "figure"),
# Input("date-time-filter", "value"),
Input("sales-data", "data")
)
def gauge(data):
data = pd.DataFrame(json.loads(data))
# data = data[data["year"] == value]
gauge_data = data[["Warehouse", "Order_Demand"]]
gauge_data = gauge_data.groupby(["Warehouse"]).sum("Order_Demand").reset_index()
# labels = ["NET-DEMAND", "RETURNS"]
# values = [4500, 2500]
# Use `hole` to create a donut-like pie chart
fig = go.Figure(data=[go.Pie(labels= gauge_data["Warehouse"].apply(str.upper), values= gauge_data["Order_Demand"] , hole=.7)])
fig.update_traces(
marker_colors= ["#93c", "#f80"]
)
fig.update_layout(
margin={'l':20, 'r':20, 'b':0, 't':0},
height = 200
)
return fig
@callback(
Output("category-rating", "children"),
Input("sales-data", 'data')
)
def cat_rates(data): # sourcery skip: inline-immediately-returned-variable
data = pd.DataFrame(json.loads(data))
data = data[["Product_Category", "Order_Demand"]]
data = data.groupby(["Product_Category"], as_index = False).agg(
Demand = pd.NamedAgg(column = "Order_Demand", aggfunc = sum),
# Returns = pd.NamedAgg(column = "Returns", aggfunc = sum)
)
cat_rated = data.sort_values('Demand', ascending=False).head(10)
cards = [dbc.Row([
dbc.Row([
dbc.Col([
html.I(className="bi bi-arrow-up-right-square-fill icon-main opacity")
], width = 1),
dbc.Col([cat_rated.iloc[i,0],], className = 'border-right', width = 6),
dbc.Col([numerize.numerize(cat_rated.iloc[i,1]),], className = '', width = 5),
# dbc.Col([numerize.numerize(cat_rated.iloc[i,2]),], width = 3) ,
]),
html.Br(),
html.Hr()
]) for i in range(len(cat_rated))]
return cards
@callback(
Output("data-table", "children"),
# Input("date-time-filter", "value"),
Input('table-warehouse-filter', "value"),
Input("sales-data", "data")
)
def data_table(whse, data):
# sourcery skip: inline-immediately-returned-variable
data = pd.DataFrame(json.loads(data))
if whse is None:
df = data
else:
df = data[data["Warehouse"] == whse]
table = df[["Product_Category", "Order_Demand", "Returns"]]
table = table.groupby(["Product_Category"], as_index = False).agg(
Demand = pd.NamedAgg(column = "Order_Demand", aggfunc = sum),
Returns = pd.NamedAgg(column = "Returns", aggfunc = sum)
)
table["Net_Sales"] = table["Demand"] - table["Returns"]
# table["Month-on-month %"] = table["Net_Sales"].pct_change(axis = 'rows').round(2)
table[['Demand', 'Returns', 'Net_Sales']] = table[['Demand', 'Returns', 'Net_Sales']].applymap(lambda x: numerize.numerize(x))
table = table.rename(columns = {"Product_Category": "Category"})
display = dbc.Table.from_dataframe(table, striped=True, bordered=True, hover=True, index=True, responsive = True)
return display
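# Note (assumption): several callbacks above read Input("sales-store", "data"),
# but this page only defines dcc.Store(id="sales-data"). The multi-page app that
# registers this page is therefore expected to provide a dcc.Store with
# id="sales-store" holding the raw sales records as JSON.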
|
josephine-amponsah/retail-demand-forecaster
|
dash_app/pages/dashboard.py
|
dashboard.py
|
py
| 21,375 |
python
|
en
|
code
| 2 |
github-code
|
50
|
26570349832
|
"""
convergence.py
script contains functions to determine the convergence time of indus
CREATED ON: 12/10/2020
AUTHOR(S):
Bradley C. Dallin ([email protected])
** UPDATES **
TODO:
"""
##############################################################################
## IMPORTING MODULES
##############################################################################
## IMPORT OS
import os
## IMPORT SHUTIL
import shutil
## IMPORT NUMPY
import numpy as np
## FUNCTION TO SAVE AND LOAD PICKLE FILES
from sam_analysis.core.pickles import load_pkl, save_pkl
## IMPORT INDUS FUNCTION
from sam_analysis.indus.wham import IndusWham
## IMPORT HFE FUNCTION
from sam_analysis.indus.hydration_fe import HydrationFreeEnergy
##############################################################################
# FUNCTIONS AND CLASSES
##############################################################################
## CONVERGENCE CLASS
class Convergence:
"""class object used to compute convergence time from indus"""
def __init__( self,
sim_working_dir = None,
input_prefix = None,
time_range = ( 0., 5000., ),
time_step = 1000.,
equilibration_time = 2000.,
recompute_converge = False,
verbose = True,
**kwargs ):
## INITIALIZE VARIABLES IN CLASS
self.sim_working_dir = sim_working_dir
self.input_prefix = input_prefix
self.time_range = time_range
self.time_step = time_step
self.equilibration_time = equilibration_time
self.recompute_converge = recompute_converge
self.verbose = verbose
## REMOVE START AND END FROM KWARGS
kwargs.pop( "start", None )
kwargs.pop( "end", None )
## UPDATE OBJECT WITH KWARGS
self.__dict__.update(**kwargs)
    def __str__( self ):
        return "CONVERGENCE"
## INSTANCE PREPARING CONVERGE TIME
def prep_converge( self ):
"""FUNCTION PREPARING FOR CONVERGENCE"""
## PATH OUTPUT
self.output_dir = os.path.join( self.sim_working_dir, "output_files" )
## PATH WHAM
self.wham_dir = os.path.join( self.sim_working_dir, "wham" )
## PATH CONVERGE
self.converge_dir = os.path.join( self.wham_dir, "convergence" )
## WHAM FILE
self.wham_file = os.path.join( self.converge_dir, self.input_prefix + "_wham_{}.csv" )
## REMOVE CONVERGE DIRECTORY IF RECOMPUTE
if self.recompute_converge is True \
and os.path.exists( self.converge_dir ) is True:
shutil.rmtree( self.converge_dir )
## CREATE CONVERGE DIRECTORY PATH (IF NOT EXISTS)
if os.path.exists( self.converge_dir ) is not True:
os.mkdir( self.converge_dir )
## INSTANCE DETERMINING CONVERGE TIME
def time( self ):
"""FUNCTION TO PUT WHAM OUTPUT INTO READABLE FORMAT"""
## PREPARE OBJECT
self.prep_converge()
## PATH TO PKL
out_name = self.input_prefix + "_convergence_time.pkl"
path_pkl = os.path.join( self.output_dir, out_name )
if self.recompute_converge is True or \
os.path.exists( path_pkl ) is not True:
## INITIALIZE INDUS WHAM
wham = IndusWham( **self.__dict__ )
## INITIALZE HFE
hfe = HydrationFreeEnergy( **self.__dict__ )
## CREATE LIST WITH END TIMES
end_time = np.arange( self.equilibration_time,
self.time_range[-1],
self.time_step ) + self.time_step
## CREATE PLACE HOLDER
start_time = np.zeros_like( end_time )
equil_mu = np.zeros_like( end_time )
## LOOP THROUGH END TIMES
for ii, end in enumerate( end_time ):
## UPDATE WHAM FILE
start = end - self.equilibration_time
converge_file = self.wham_file.format( int(start) )
## PREPARE WHAM INPUTS
wham_input = [ self.equilibration_time, end, converge_file ]
## RUN WHAM
wham.compute( wham_input )
## COMPUTE HFE
hfe.recompute_fe = True
hfe.hfe( path_wham = converge_file )
## UPDATE
start_time[ii] = start
equil_mu[ii] = hfe.mu
## STORE RESULTS
results = [ start_time, equil_mu ]
## SAVE PKL
save_pkl( results, path_pkl )
else:
## LOAD PKL
results = load_pkl( path_pkl )
## STORE RESULTS IN CLASS
self.time = results[0]
self.mu = results[1]
## MAIN FUNCTION
def main(**kwargs):
"""Main function to run production analysis"""
## INITIALIZE CLASS
converge = Convergence( **kwargs )
## ANALYZE WHAM
converge.time()
## GATHER RESULTS
time = converge.time
mu = converge.mu
## RETURN RESULTS
return { "converge_time" : time,
"converge_mu" : mu }
#%%
##############################################################################
## MAIN SCRIPT
##############################################################################
if __name__ == "__main__":
## IMPORT CHECK SERVER PATH
from sam_analysis.core.check_tools import check_server_path
## TESTING DIRECTORY
test_dir = r"/mnt/r/python_projects/sam_analysis/sam_analysis/testing"
## SAM DIRECTORY
sam_dir = r"ordered_ch3_sam_indus"
## WORKING DIR
working_dir = os.path.join(test_dir, sam_dir)
## LOAD TRAJECTORY
path_traj = check_server_path( working_dir )
## LOAD TRAJECTORY
input_prefix = "sam_indus"
## WHAM KWARGS
wham_kwargs = { "spring_weak" : 2.0,
"spring_strong" : 8.5,
"tol" : 0.00001,
"temperature" : 300,
"recompute_wham" : True, }
## HFE KWARGS
hfe_kwargs = { "recompute_fe" : True }
## KWARGS
kwargs = {}
kwargs.update( wham_kwargs )
kwargs.update( hfe_kwargs )
## INITIALIZE EQUILIBRATION
converge = Convergence( sim_working_dir = path_traj,
input_prefix = input_prefix,
time_range = ( 0., 5000., ),
time_step = 1000.,
equilibration_time = 2000.,
recompute_converge = True,
verbose = True,
**kwargs )
## ANALYZE WHAM
converge.time()
## RESULTS
converge_time = converge.time
converge_mu = converge.mu
|
atharva-kelkar/hydrophobicity-features
|
sam_analysis/indus/convergence.py
|
convergence.py
|
py
| 7,275 |
python
|
en
|
code
| 0 |
github-code
|
50
|
25779239585
|
#!/usr/bin/python3
"""a Rectangle Class that models a rectangle"""
from models.base import Base
class Rectangle(Base):
"""a model of a rectangle"""
def __init__(self, width, height, x=0, y=0, id=None):
"""initialise a Rectangle instance"""
self.width = width
self.height = height
self.x = x
self.y = y
super().__init__(id)
@property
def width(self):
"""getter method for width attribute"""
return (self.__width)
@width.setter
def width(self, value):
"""setter and validator for the width attribute"""
if type(value) is not int:
raise TypeError("width must be an integer")
if value <= 0:
raise ValueError("width must be > 0")
self.__width = value
@property
def height(self):
"""getter method for height attribute"""
return (self.__height)
@height.setter
def height(self, value):
"""setter and validator for the height attribute"""
if type(value) is not int:
raise TypeError("height must be an integer")
if value <= 0:
raise ValueError("height must be > 0")
self.__height = value
@property
def x(self):
"""getter method for x attribute"""
return (self.__x)
@x.setter
def x(self, value):
"""setter and validator for the x attribute"""
if type(value) is not int:
raise TypeError("x must be an integer")
if value < 0:
raise ValueError("x must be >= 0")
self.__x = value
@property
def y(self):
"""getter method for width attribute"""
return (self.__y)
@y.setter
def y(self, value):
"""setter and validator for the height attribute"""
if type(value) is not int:
raise TypeError("y must be an integer")
if value < 0:
raise ValueError("y must be >= 0")
self.__y = value
def area(self):
"""calculate and return the area of the rectangle"""
return (self.width * self.height)
def display(self):
"""prints the rectangle instance with character #
the y value pushes the rows downwards and x pushes
the coulmns to the right"""
for line_space in range(self.y):
print()
for row in range(self.height):
for ln in range(self.x):
print(" ", end="")
for column in range(self.width):
print("#", end="")
print()
def update(self, *args, **kwargs):
"""assigns an argument to each attribute
if args in not empty, disregard kwargs
args assigns(id, width, height, x, y) in that order
"""
if args is not None and len(args) >= 1:
if len(args) == 1:
self.id = args[0]
if len(args) == 2:
self.id, self.width = args[0], args[1]
if len(args) == 3:
self.id, self.width, self.height = args[0], args[1], args[2]
if len(args) == 4:
self.id, self.width, self.height = args[0], args[1], args[2]
self.x = args[3]
if len(args) == 5:
self.id, self.width, self.height = args[0], args[1], args[2]
self.x, self.y = args[3], args[4]
else:
if kwargs is not None:
for key, value in kwargs.items():
if key == 'width':
self.width = value
elif key == 'height':
self.height = value
elif key == 'x':
self.x = value
elif key == 'y':
self.y = value
elif key == 'id':
self.id = value
def to_dictionary(self):
"""returns a dictionary representation of Rectangle"""
class_dict = {}
class_dict['id'] = self.id
class_dict['width'] = self.width
class_dict['height'] = self.height
class_dict['x'] = self.x
class_dict['y'] = self.y
return (class_dict)
def __str__(self):
"""implement the str magic method"""
return(f"[{self.__class__.__name__}] ({self.id})"
f" {self.x}/{self.y} - {self.width}/{self.height}")
|
gisconesheri2/alx-higher_level_programming
|
0x0C-python-almost_a_circle/models/rectangle.py
|
rectangle.py
|
py
| 4,388 |
python
|
en
|
code
| 0 |
github-code
|
50
|
15917277332
|
from django.conf import settings
from . import constants
import json
import requests
import webbrowser
import base64
import datetime
import os
class XeroAuthManager:
refresh_token = ''
access_token = ''
refresh_timestamp = 0
refresh_timeout = 1800
token_filename = ''
b64_id_secret = ''
tenant_id = ''
status_code = 0
error_codes = {}
xero_return_data = {}
valid_xero = False
def __init__(self):
self.token_filename = os.path.join(settings.BASE_DIR, 'apps/xero_toolkit/'+constants.XERO_TOKEN_FILENAME)
self.b64_id_secret = base64.b64encode(
bytes(constants.XERO_CLIENT_ID + ':' + constants.XERO_CLIENT_SECRET, 'utf-8')).decode('utf-8')
self.refresh_timestamp = datetime.datetime.now()
self._get_refresh_token()
def xero_setup_token_info(self, auth_code):
# auth_code = request.GET['code']
exchange_code_url = 'https://identity.xero.com/connect/token'
response = requests.post(exchange_code_url,
headers={
'Authorization': 'Basic ' + self.b64_id_secret
},
data={
'grant_type': 'authorization_code',
'code': auth_code,
'redirect_uri': constants.XERO_REDIRECT_URL
})
if response.status_code == 200:
json_response = response.json()
with open(self.token_filename, 'w') as outfile:
json.dump(json_response, outfile)
else:
self.__process_errors(response, exchange_code_url)
    def get_company_name(self):
        self._get_req('Organisation', '', '')
        return self.valid_xero
        # self._get_req('freds', '', '')
def set_contact_details(self, contact_data):
self._set_req('Contacts', '', '')
def save_to_xero(self, endpoint, xero_data):
self._set_req(endpoint, xero_data, '')
return self.valid_xero
def has_tokens(self):
return self.valid_xero
def has_xero_data(self):
return self.valid_xero
def get_error(self):
return self.error_codes
def get_xero_response(self):
return self.xero_return_data
def _get_refresh_token(self):
if os.path.isfile(self.token_filename) == False:
self._xero_first_login()
else:
self._xero_refresh_token()
def _xero_first_login(self):
self._debug('_xero_first_login')
auth_url = ('''https://login.xero.com/identity/connect/authorize?''' +
'''response_type=code''' +
'''&client_id=''' + constants.XERO_CLIENT_ID +
'''&redirect_uri=''' + constants.XERO_REDIRECT_URL +
'''&scope=''' + constants.XERO_SCOPE +
'''&state=123''')
self._debug('call url:' + auth_url )
webbrowser.open_new(auth_url)
self._debug('url called')
def _xero_refresh_token(self):
self._debug('_xero_refresh_token')
with open(self.token_filename) as json_file:
data = json.load(json_file)
dt_exp = self.refresh_timestamp
dt_now = datetime.datetime.now()
if dt_now > dt_exp:
self._debug('get new token')
old_token = data['refresh_token']
token_refresh_url = 'https://identity.xero.com/connect/token'
response = requests.post(token_refresh_url,
headers={
'Authorization': 'Basic ' + self.b64_id_secret,
'Content-Type': 'application/x-www-form-urlencoded'
},
data={
'grant_type': 'refresh_token',
'refresh_token': old_token
})
self._debug('call url:' + token_refresh_url)
self._debug('response code:' + str(response.status_code))
if response.status_code == 200:
json_response = response.json()
with open(self.token_filename, 'w') as outfile:
json.dump(json_response, outfile)
self.refresh_timeout = json_response['expires_in']
self.refresh_timestamp = dt_now + datetime.timedelta(seconds=self.refresh_timeout)
self.access_token = json_response['access_token']
self.valid_xero = True
else:
self.__process_errors(response, token_refresh_url)
else:
self.access_token = data['access_token']
if self.tenant_id == '':
self._xero_tenants()
def _xero_tenants(self):
connections_url = 'https://api.xero.com/connections'
response = requests.get(connections_url,
headers={
'Authorization': 'Bearer ' + self.access_token,
'Content-Type': 'application/json'
})
if response.status_code == 200:
json_response = response.json()
for tenants in json_response:
json_dict = tenants
self.tenant_id = json_dict['tenantId']
else:
self.__process_errors(response, connections_url)
def _get_req(self, url_endpoint, data, filters):
self._xero_refresh_token()
get_url = constants.XERO_BASE_URL + url_endpoint
response = requests.get(get_url,
headers={
'Authorization': 'Bearer ' + self.access_token,
'Xero-tenant-id': self.tenant_id,
'Accept': 'application/json'
})
if response.status_code == 200:
json_response = response.json()
if json_response['Status'] == 'OK':
self.xero_return_data = json_response
else:
self.valid_xero = False
self.status_code = json_response['Status']
else:
self.__process_errors(response, url_endpoint)
def _set_req(self, url_endpoint, data, filters):
self._xero_refresh_token()
post_url = constants.XERO_BASE_URL + url_endpoint
print(data)
tmp = json.dumps(data, default=str)
log_file = os.path.join(settings.BASE_DIR, 'apps/xero_toolkit/postlog.json')
with open(log_file, 'w') as outfile:
json.dump(data, outfile, default=str)
response = requests.post(post_url,
data=json.dumps(data, default=str),
headers={
'Authorization': 'Bearer ' + self.access_token,
'Xero-tenant-id': self.tenant_id,
'Content-type': 'application/json',
'Accept': 'application/json'
})
if response.status_code == 200:
json_response = response.json()
if json_response['Status'] == 'OK':
self.xero_return_data = json_response
else:
self.valid_xero = False
self.status_code = json_response['Status']
else:
self.__process_errors( response, 'post' + url_endpoint)
def __process_xero_response(self, json_response, endpoint):
self.valid_xero = False
def __process_errors(self, response, error_process):
self.valid_xero = False
self.status_code = response.status_code
self.error_codes['error_type']= response.reason
self.error_codes['error_process'] = error_process
self.error_codes['error_code'] = response.status_code
# log_file = os.path.join(settings.BASE_DIR, 'apps/xero_toolkit/xero_error.log')
# if os.path.exists(log_file):
# append_write = 'a' # append if already exists
# else:
# append_write = 'w' # make a new file if not
# with open(log_file, append_write) as outfile:
# outfile.write(error_process + "response:" + response + '\n')
# self.error_codes['error_number'] = json_response['status']
#self.error_codes['error_type'] = json_response['type']
#self.error_codes['error_detail'] = json_response['detail']
def _debug(self, debugline):
log_file = os.path.join(settings.BASE_DIR, 'apps/xero_toolkit/xero_error.txt')
if os.path.exists(log_file):
append_write = 'a' # append if already exists
else:
append_write = 'w' # make a new file if not
with open(log_file, append_write) as outfile:
outfile.write(debugline + '\n')
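# Usage sketch (assumptions: valid Xero credentials in constants.py and a prior
# browser-based consent flow so that the token file already exists):
#
#   xero = XeroAuthManager()
#   if xero.get_company_name():
#       organisation = xero.get_xero_response()
#   else:
#       print(xero.get_error())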
|
simonfroggatt/medusa_plinks
|
apps/xero_toolkit/xeromanager.py
|
xeromanager.py
|
py
| 9,096 |
python
|
en
|
code
| 0 |
github-code
|
50
|
13746146054
|
from time import sleep
import pytest
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
# Test 3A (arrange, act, assert)
@pytest.mark.parametrize("env", ["dev"])
def test_selenium_google(setup_function, env):
# get config from setup function (arrange)
config = setup_function
timeout = config["demo"][env]["selenium"]["timeout"]
# setting browser (arrange)
executable_path = "./drivers/geckodriver_mac_greater_60"
options = webdriver.FirefoxOptions()
options.headless = True
options.add_argument("--disable-gpu")
browser = webdriver.Firefox(executable_path=executable_path, options=options)
browser.implicitly_wait(timeout)
# operate browser (act)
browser.get(config["demo"][env]["selenium"]["host"])
WebDriverWait(browser, timeout, 2).until(EC.visibility_of(browser.find_element_by_css_selector("input[name='q']")))
search_txt = browser.find_element_by_css_selector("input[name='q']")
sleep(1)
search_txt.send_keys("selenium")
sleep(1)
search_txt.submit()
# get result (act)
WebDriverWait(browser, timeout, 2).until(EC.visibility_of(browser.find_element_by_css_selector("input[name='q']")))
search_txt = browser.find_element_by_css_selector("input[name='q']")
# assert
assert search_txt.get_attribute("value") == "selenium"
browser.quit()
|
CommonMarvel/full-stack-testing-starter
|
tests/selenium/demo/selenium_google_test.py
|
selenium_google_test.py
|
py
| 1,449 |
python
|
en
|
code
| 3 |
github-code
|
50
|
2188934878
|
'''
Given a positive integer num, write a function that returns True if num is a
perfect square and False otherwise.
Note: do not use any built-in library function such as sqrt.
Example 1:
Input: 16
Output: True
Example 2:
Input: 14
Output: False
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/valid-perfect-square
Copyright belongs to LeetCode. For commercial reprints please contact the
official site for authorization; for non-commercial reprints please credit the source.
'''
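# Worked trace of the binary-search Solution below for num = 16:
#   lager=16, less=0, mid=16//2+1=9 -> 9*9=81 > 16, so lager=9, mid=(9+0)//2=4
#   4*4 == 16 -> return True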
# Binary search
class Solution:
def isPerfectSquare(self, num: int) -> bool:
if num < 2:
return True
lager = num
less = 0
mid = num // 2 + 1
biaoshi = True
while (biaoshi):
if mid * mid == num:
return True
elif mid * mid > num:
lager = mid
mid = (lager + less) // 2
elif mid * mid < num:
less = mid
mid = (lager + less) // 2
if lager == mid or less == mid or lager == less:
biaoshi = False
return False
# Recursion
class Solution1:
def isPerfectSquare(self, num: int) -> bool:
# leetcode submit region end(Prohibit modification and deletion)
if num < 2:
return True
def is_sqrt(start, end, num):
if start * start == num or end * end == num:
return True
if end - start > 1:
if end * end > num > start * start:
return is_sqrt(start, (start + end) // 2, num) or is_sqrt((start + end) // 2, end, num)
return False
return is_sqrt(0, num, num)
if __name__ == "__main__":
for num in range(101):
# num = 27
print(str(num) + ':' + str(Solution().isPerfectSquare(num)))
|
sunxinzhao/LeetCode_subject
|
simple/number/367.py
|
367.py
|
py
| 1,857 |
python
|
zh
|
code
| 0 |
github-code
|
50
|
4140086112
|
import numpy as np
import matplotlib.pyplot as plt
from skimage import color
from sklearn.cluster import KMeans
import argparse
def change_format(img, back=False): # change picture from rgb to lab
if back:
return color.lab2rgb(img)
return color.rgb2lab(img)
def run_trans( # transform each cluster
want,
labels,
fix_labels,
want_mean,
want_std,
base_mean,
base_std
):
for j in range(len(want)):
want_label = labels[j]
base_label = fix_labels[want_label]
want[j] -= want_mean[want_label]
want[j] /= want_std[want_label]
want[j] *= base_std[base_label]
want[j] += base_mean[base_label]
def get_sep_label( # get relationship between each cluster
datas,
km,
cluster_num
):
ans = [[] for k in range(cluster_num)]
for k in range(len(datas)):
ans[km.labels_[k]].append(datas[k])
for k in range(cluster_num):
ans[k] = np.array(ans[k])
return ans
def trans_color( # main function
color,
image,
change_format,
cluster_num
):
base = color.reshape(-1, 3)
want = image.reshape(-1, 3)
want_data = image.reshape(-1, 3)
bm = np.mean(base, axis=0)
wm = 1./np.mean(want, axis=0)
base_km = KMeans(n_clusters=cluster_num).fit(base)
want_km = KMeans(n_clusters=cluster_num).fit(want)
base = change_format(color).reshape(-1, 3)
want = change_format(image).reshape(-1, 3)
base_mean, base_std, want_mean, want_std, qs = [], [], [], [], []
base_jall = get_sep_label(base, base_km, cluster_num)
want_jall = get_sep_label(want, want_km, cluster_num)
qs_jall = get_sep_label(want_data, want_km, cluster_num)
for j in range(cluster_num):
base_j = base_jall[j]
want_j = want_jall[j]
base_mean.append(np.mean(base_j, axis=0))
want_mean.append(np.mean(want_j, axis=0))
qs.append(np.mean(qs_jall[j], axis=0))
base_std.append(np.std(base_j, axis=0))
want_std.append(np.std(want_j, axis=0))
run_trans(
want,
want_km.labels_,
base_km.predict(np.array(qs)*wm*bm),
np.array(want_mean),
np.array(want_std),
np.array(base_mean),
base_std=np.array(base_std)
)
return change_format(want.reshape(image.shape), True)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--base_image", default=None,
type=str) # base image dir
parser.add_argument("-w", "--want_image", default=None,
type=str) # want image dir
parser.add_argument("-n", "--cluster_num", default=1,
type=int) # num of clusters
args = parser.parse_args()
base = plt.imread(args.base_image)/255
want = plt.imread(args.want_image)/255
ans = trans_color(base, want, change_format, args.cluster_num)
plt.imshow(ans)
plt.show()
if __name__ == "__main__":
main()
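# Example invocation sketch (image file names are placeholders):
#   python color_transfer.py -b style.jpg -w photo.jpg -n 3
# This transfers the per-cluster colour statistics of style.jpg onto photo.jpg
# using 3 k-means clusters (the flags are defined in main() above).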
|
cyzkrau/DIP_2021f
|
Color_Transfer/color_transfer.py
|
color_transfer.py
|
py
| 2,975 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22795758008
|
"""
THIS CODE IS REALLY SLOPPY AND BAD
I just wanted to solve this problem as fast as possible
I ranked 4,869 for both problems solved, in like 38 minutes
I just wanted to code it as fast as possible to see how quick
I could come up with a solution
"""
data = open("input.txt").read().splitlines()
test = data[0]
syntax = ["()", "[]", "{}", "<>"]
syntax = list(map(lambda x: (x[0], x[1]), syntax))
openers = [x[0] for x in syntax]
closers = [x[1] for x in syntax]
def firstError(line):
lastOpener = []
for c in line:
if c in openers:
lastOpener.append(c)
else:
expected = closers[openers.index(lastOpener[-1])]
if c != expected:
return c
else:
lastOpener.pop()
return None
def fix(line):
lastOpener = []
for c in line:
if c in openers:
lastOpener.append(c)
else:
lastOpener.pop()
lastOpener.reverse()
lastOpener = list(map(lambda x: closers[openers.index(x)], lastOpener))
return ''.join(lastOpener)
vals = {
")": 3,
"]": 57,
"}": 1197,
">": 25137
}
counts = {}
for d in data:
err = firstError(d)
if err:
if err not in counts:
counts[err] = 1
else:
counts[err] += 1
i = 0
for c in counts:
i += vals[c] * counts[c]
print(i)
incompletes = list(filter(lambda x: firstError(x) == None, data))
points = {
")": 1,
"]": 2,
"}": 3,
">": 4
}
scores = []
for i in incompletes:
score = 0
chars = fix(i)
for c in chars:
score *= 5
val = points[c]
score += val
#print(chars, score)
scores.append(score)
scores.sort()
print(scores[len(scores) // 2])
|
mattbruv/advent-of-code
|
src/2021/day10/day10.py
|
day10.py
|
py
| 1,751 |
python
|
en
|
code
| 0 |
github-code
|
50
|
38719597675
|
from googleplaces import GooglePlaces
from config import GOOGLE_API_KEY
from logic.nearby_util import address_to_latlng
class NearbySearchGoogle(object):
def __init__(self):
self.google_places = GooglePlaces(GOOGLE_API_KEY)
def find_nearby_places(self, search_keyword, address):
nearby_places = []
lat, lng = address_to_latlng(address)
query_result = self.google_places.nearby_search(
lat_lng={
'lat': lat,
'lng': lng,
},
keyword=search_keyword,
radius=20000,
)
for place in query_result.places:
place.get_details()
data = {
'name': place.name,
'id': place.place_id,
'phone': place.local_phone_number,
'url': place.website,
'rating': place.details.get('rating', None),
'price_level': place.details.get('price_level', None),
}
nearby_places.append(data)
return nearby_places
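# Usage sketch (assumptions: GOOGLE_API_KEY is configured and the address can be
# geocoded by address_to_latlng):
#
#   searcher = NearbySearchGoogle()
#   places = searcher.find_nearby_places("coffee", "Ithaca, NY")
#   for place in places:
#       print(place["name"], place["rating"])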
|
gkeswani92/live-review-places
|
logic/google_places/nearby.py
|
nearby.py
|
py
| 1,068 |
python
|
en
|
code
| 1 |
github-code
|
50
|
13722078540
|
import sys
import os
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src"))
)
import torch
import torch.nn.functional as F
import numpy as np
import imageio
import util
import warnings
from data import get_split_dataset
from render import NeRFRenderer
from model import make_model
from scipy.interpolate import CubicSpline
from scipy.spatial import distance_matrix
import tqdm
import cv2
from torch.autograd import Variable
os.environ["OPENCV_IO_ENABLE_OPENEXR"]="1"
import lpips
import matplotlib.pyplot as plt
loss_fn_vgg = lpips.LPIPS(net='vgg')
import pytorch_msssim
mse2psnr = lambda x : -10. * torch.log(x) / torch.log(torch.Tensor([10.]))
def extra_args(parser):
parser.add_argument(
"--subset", "-S", type=int, default=0, help="Subset in data to use"
)
parser.add_argument(
"--split",
type=str,
default="test",
help="Split of data to use train | val | test",
)
parser.add_argument(
"--source",
"-P",
type=str,
default="64",
help="Source view(s) in image, in increasing order. -1 to do random",
)
parser.add_argument(
"--num_views",
type=int,
default=40,
help="Number of video frames (rotated views)",
)
parser.add_argument(
"--num_src",
type=int,
default=3,
help="Number of source frames",
)
parser.add_argument(
"--elevation",
type=float,
default=-10.0,
help="Elevation angle (negative is above)",
)
parser.add_argument(
"--scale", type=float, default=1.0, help="Video scale relative to input size"
)
parser.add_argument(
"--radius",
type=float,
default=0.0,
help="Distance of camera from origin, default is average of z_far, z_near of dataset (only for non-DTU)",
)
parser.add_argument("--fps", type=int, default=30, help="FPS of video")
parser.add_argument("--validate_mesh", action='store_true')
parser.add_argument("--validate_nvs", action='store_true')
return parser
args, conf = util.args.parse_args(extra_args)
args.resume = True
device = util.get_cuda(args.gpu_id[0])
dset = get_split_dataset(
args.dataset_format, args.datadir, want_split=args.split, training=False
)
data = dset[args.subset]
data_path = data["path"]
print("Data instance loaded:", data_path)
images = data["images"] # (NV, 3, H, W)
poses = data["poses"] # (NV, 4, 4)
poses_origin = data['poses_origin']
def read_depth(filename):
depth_h = cv2.imread(filename ,cv2.IMREAD_UNCHANGED)[...,-1]
depth_h[depth_h==65504.] = 0.
downSample = 0.5
depth = cv2.resize(depth_h, None, fx=downSample, fy=downSample, interpolation=cv2.INTER_NEAREST)
mask = depth > 0
return depth
def farthest_point_sample(xyz, npoint):
"""
Input:
xyz: pointcloud data, [B, N, 3]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B, N, C = xyz.shape
centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
batch_indices = torch.arange(B, dtype=torch.long).to(device)
barycenter = torch.sum((xyz), 1)
barycenter = barycenter/xyz.shape[1]
barycenter = barycenter.view(B, 1, 3)
dist = torch.sum((xyz - barycenter) ** 2, -1)
farthest = torch.max(dist,1)[1]
for i in range(npoint):
centroids[:, i] = farthest
centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
dist = torch.sum((xyz - centroid) ** 2, -1)
mask = dist < distance
distance[mask] = dist[mask]
farthest = torch.max(distance, -1)[1]
return centroids
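# Shape sketch for the sampler above: for xyz of shape (1, 30, 3),
# farthest_point_sample(xyz, 3) returns a LongTensor of shape (1, 3) holding the
# indices of 3 mutually far-apart points; below it is used to spread the source
# views over the available camera positions.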
# ensure the same sampled frames as in mvsnerf and ibrnet
xyzs = data["poses_origin"][:,:3,-1]
distances = distance_matrix(xyzs, xyzs, p=2)
rank = np.argsort(distances[50])
num_select = 30
xyzs = xyzs[rank[:num_select]]
rank = rank[:num_select]
num_src = args.num_src
centroids = farthest_point_sample(torch.from_numpy(xyzs[None]), num_src)
source = [ rank[centroids[0][i].item()] for i in range(num_src)]
source = torch.Tensor(source).to(torch.long)
xyzs = data["poses_origin"][:,:3,-1]
distances = distance_matrix(xyzs, xyzs, p=2)
rank = np.argsort(distances[50])
centroids = farthest_point_sample(torch.from_numpy(xyzs[None]), 100)
ref_pair_idx = [ centroids[0][i].item() for i in range(100)]
for i in range(num_src):
ref_pair_idx.remove(source[i])
render_poses_idx = ref_pair_idx[16-num_src : 26-num_src]
data_bbox = data['bbox'][render_poses_idx]
render_poses = poses[render_poses_idx]
print("Sampled source: ", source)
focal = data["focal"]
if isinstance(focal, float):
# Dataset implementations are not consistent about
# returning float or scalar tensor in case of fx=fy
focal = torch.tensor(focal, dtype=torch.float32)
focal = focal[None]
c = data.get("c")
if c is not None:
c = c.to(device=device).unsqueeze(0)
NV, _, H, W = images.shape
if args.scale != 1.0:
Ht = int(H * args.scale)
Wt = int(W * args.scale)
if abs(Ht / args.scale - H) > 1e-10 or abs(Wt / args.scale - W) > 1e-10:
warnings.warn(
"Inexact scaling, please check {} times ({}, {}) is integral".format(
args.scale, H, W
)
)
H, W = Ht, Wt
net = make_model(conf["model"]).to(device=device)
net.load_weights(args)
renderer = NeRFRenderer.from_conf(
conf["renderer"], lindisp=dset.lindisp, eval_batch_size=args.ray_batch_size,
).to(device=device)
render_par = renderer.bind_parallel(net, args.gpu_id, simple_output=True).eval()
# Get the distance from camera to origin
z_near = dset.z_near
z_far = dset.z_far
print("Generating rays")
def validate_mesh(resolution=512, scene=0, savedir='', threshold=2., cat=None, scale_mats_np=None, network_query_fn=None):
import trimesh
os.makedirs(savedir, exist_ok=True)
vertices, triangles = extract_geometry(resolution=resolution, threshold=threshold, network_query_fn=network_query_fn)
if scale_mats_np is not None:
vertices = vertices * scale_mats_np[0, 0] + scale_mats_np[:3, 3][None]
mesh = trimesh.Trimesh(vertices, triangles)
mesh_path = os.path.join(savedir, cat+"_{:03d}".format(scene+1)+'.ply')
mesh.export(mesh_path)
print("mesh saved at " + mesh_path)
def extract_geometry(network_query_fn=None, resolution=128, threshold=0.001):
bound_min = torch.tensor([-1., -1., -1.]).cuda() * 0.85
bound_max = torch.tensor([1, 1, 1]).cuda() * 0.85
def query_func(pts):
viewdirs = torch.zeros_like(pts).cuda()
raw = network_query_fn.net(pts[None].cuda(), coarse=True, viewdirs=viewdirs[None])
return raw[...,-1]
return extract_geometry_(bound_min,
bound_max,
resolution=resolution,
threshold=threshold,
query_func=query_func)
def extract_geometry_(bound_min, bound_max, resolution, threshold, query_func, N = 64):
import mcubes
print('threshold: {}'.format(threshold))
u = extract_fields(bound_min, bound_max, resolution, query_func, N)
vertices, triangles = mcubes.marching_cubes(u, threshold)
b_max_np = bound_max.detach().cpu().numpy()
b_min_np = bound_min.detach().cpu().numpy()
vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :]
return vertices, triangles
def extract_fields(bound_min, bound_max, resolution, query_func, N = 64):
X = torch.linspace(bound_min[0], bound_max[0], resolution).split(N)
Y = torch.linspace(bound_min[1], bound_max[1], resolution).split(N)
Z = torch.linspace(bound_min[2], bound_max[2], resolution).split(N)
u = np.zeros([resolution, resolution, resolution], dtype=np.float32)
with torch.no_grad():
for xi, xs in enumerate(X):
for yi, ys in enumerate(Y):
for zi, zs in enumerate(Z):
xx, yy, zz = torch.meshgrid(xs, ys, zs)
pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1)
val = query_func(pts).reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy()
u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val
return u
render_rays = util.gen_rays(
render_poses,
W,
H,
focal * args.scale,
z_near,
z_far,
c=c * args.scale if c is not None else None,
).to(device=device)
# (NV, H, W, 8)
focal = focal.to(device=device)
# source = torch.tensor(list(map(int, args.source.split())), dtype=torch.long)
NS = len(source)
random_source = NS == 1 and source[0] == -1
assert not (source >= NV).any()
if renderer.n_coarse < 64:
# Ensure decent sampling resolution
renderer.n_coarse = 64
renderer.n_fine = 128
with torch.no_grad():
print("Encoding source view(s)")
if random_source:
src_view = torch.randint(0, NV, (1,))
else:
src_view = source
net.encode(
images[src_view].unsqueeze(0),
poses[src_view].unsqueeze(0).to(device=device),
focal,
c=c,
)
os.makedirs('visuals', exist_ok=True)
# render demo views
if args.validate_nvs:
print("Rendering",len(render_poses_idx) * H * W, "rays")
all_rgb_fine = []
all_depth = []
for rays in tqdm.tqdm(
torch.split(render_rays.view(-1, 8), args.ray_batch_size, dim=0)
):
rgb, _depth = render_par(rays[None])
all_rgb_fine.append(rgb[0])
all_depth.append(_depth[0])
_depth = None
rgb_fine = torch.cat(all_rgb_fine)
all_depth = torch.cat(all_depth)
frames = rgb_fine.view(-1, H, W, 3)
depths = all_depth.view(-1, H, W)
num_views = len(render_poses_idx)
for i in range(num_views):
idx = render_poses_idx[i]
image_gt = images[idx].permute(1,2,0) *0.5 + 0.5
image_pred = frames[i].cpu()
imageio.imwrite('visuals/{0}_{1}_{2}_pred_rgb.png'.format(str(args.subset), str(i), os.path.basename(dset.base_path)), (image_pred*255.).numpy().astype(np.uint8))
imageio.imwrite('visuals/{0}_{1}_{2}_gt_rgb.png'.format(str(args.subset), str(i), os.path.basename(dset.base_path)), (image_gt*255.).numpy().astype(np.uint8))
print("Writing video")
vid_name = "{:02}".format(args.subset)
vid_path = os.path.join('visuals', "video_" + vid_name + ".mp4")
imageio.mimwrite(vid_path, (frames.cpu().numpy() * 255).astype(np.uint8), fps=30, quality=8)
if args.validate_mesh:
print("Generating Mesh")
cat = os.path.basename(dset.base_path)
with torch.no_grad():
testsavedir = os.path.join('visuals')
if os.path.exists(testsavedir) == False:
os.mkdir(testsavedir)
validate_mesh(scene=args.subset, savedir=testsavedir, cat=cat, network_query_fn=render_par) # , scale_mats_np=scale_mats_np
|
omniobject3d/OmniObject3D
|
benchmarks/sparse_view_reconstruction/_pixelnerf/eval/gen_results.py
|
gen_results.py
|
py
| 11,388 |
python
|
en
|
code
| 365 |
github-code
|
50
|
38942092957
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from evalcompsite.models import Application, Apartment, House, Land, Comment
# Create your views here.
def home (request):
comments_list = Comment.objects.order_by('-id')[:10]
return render(request, 'home.html', {'comments_list' : comments_list})
def requestpage (request):
return render(request, 'request.html')
def price (request):
apartments = Apartment.objects.all()
houses = House.objects.all()
land_plot = Land.objects.all()
context = {
"apartments": apartments,
"houses": houses,
"land_plot": land_plot
}
return render(request, 'price.html', context)
def about (request):
return render(request, 'about.html')
def createrequest(request):
if request.method == "POST":
app = Application()
app.last_name = request.POST.get("last_name")
app.first_name = request.POST.get("first_name")
app.patronymic = request.POST.get("patronymic")
app.email = request.POST.get("email")
app.phone_number = request.POST.get("phone_number")
app.eval_object = request.POST.get("eval_object")
app.aim = request.POST.get("aim")
app.address = request.POST.get("address")
app.price = request.POST.get("price")
app.comment = request.POST.get("comment")
app.save()
return HttpResponseRedirect(reverse('evalcompsite:requestpage'))
def createcomment(request):
if request.method == "POST":
app = Comment()
app.name = request.POST.get("name")
app.comment = request.POST.get("comment")
app.save()
return HttpResponseRedirect(reverse('evalcompsite:home'))
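# A hedged alternative sketch (not part of the original app): the same create view expressed
# with a Django ModelForm, which validates and saves in one step. The field list simply mirrors
# the attributes assigned manually in createrequest() above; it has not been checked against
# the real Application model, so treat the names here as assumptions.
from django import forms
class ApplicationForm(forms.ModelForm):
    class Meta:
        model = Application
        fields = ["last_name", "first_name", "patronymic", "email", "phone_number",
                  "eval_object", "aim", "address", "price", "comment"]
def createrequest_with_form(request):
    if request.method == "POST":
        form = ApplicationForm(request.POST)
        if form.is_valid():
            form.save()
    return HttpResponseRedirect(reverse('evalcompsite:requestpage'))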
|
Ant1Hero3/Evalcompsite
|
mysite/apps/evalcompsite/views.py
|
views.py
|
py
| 1,600 |
python
|
en
|
code
| 0 |
github-code
|
50
|
2497937870
|
import pco
import cv2
# Start a live preview
CONFIGURATION = {
'exposure time': 10e-3,
'delay time': 0,
'roi': (0, 0, 2048, 2048),
'timestamp': 'ascii',
'pixel rate': 100_000_000,
'trigger': 'auto sequence',
'acquire': 'auto',
'noise filter': 'on',
'metadata': 'on',
'binning': (1, 1)
}
def live_preview(shutter_time=100, exposure=10):
'''
Generate a live preview of the camera by taking sequential images.
Args:
- shutter_time: Shutter time in milliseconds
- exposure: Exposure time in milliseconds
'''
camera = pco.Camera()
print("Started preview with exposure {:.2f} ms".format(exposure))
with camera as cam:
# Set camera parameters
while True:
cam.configuration = {
"exposure time": exposure * 1e-3,
"roi": (1, 1, 2048, 2048),
}
cam.record(mode="sequence")
image, meta = cam.image()
# Resize image 720 x 1024
image = cv2.resize(image, (1024, 720))
cv2.imshow("Live Preview", image)
if ord("q") == cv2.waitKey(shutter_time):
break
def main():
live_preview(exposure=20)
if __name__ == "__main__":
main()
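# A minimal follow-up sketch (assumptions: only the pco calls already used above, illustrative
# parameter values): the configuration needs to be applied only once, so it can be set before
# the loop, with record()/image() repeated per frame.
def live_preview_configured_once(exposure=10):
    with pco.Camera() as cam:
        cam.configuration = {"exposure time": exposure * 1e-3, "roi": (1, 1, 2048, 2048)}
        while True:
            cam.record(mode="sequence")                 # grab one frame set
            image, meta = cam.image()                   # fetch the latest frame
            cv2.imshow("Live Preview", cv2.resize(image, (1024, 720)))
            if cv2.waitKey(1) == ord("q"):
                break
        cv2.destroyAllWindows()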
|
asoronow/py-isi
|
camera.py
|
camera.py
|
py
| 1,289 |
python
|
en
|
code
| 0 |
github-code
|
50
|
26536310566
|
import requests
from bs4 import BeautifulSoup
import csv
import os
topics = ["love","inspirational","life","humor","books","reading","friendship","friends","truth"]
for topic in topics:
url = requests.get(f"http://quotes.toscrape.com/tag/{topic}")
quotes_deatils=[]
def main(url):
src = url.content
soup = BeautifulSoup(src, "lxml")
quotes = soup.find_all("div", {"class": "quote"})
def get_quote_details(quote):
quote_content = quote.find('span', {'class': 'text'}).text
author = quote.find('small', {'class': 'author'}).text
tags = [tag.text for tag in quote.find_all('a', {'class': 'tag'})]
quotes_deatils.append({"Quote":quote_content,"Author":author,"Tags":tags})
for quote in quotes:
get_quote_details(quote)
directory = f"csv_files/{topic}"
if not os.path.exists(directory):
os.makedirs(directory)
with open(f'csv_files/{topic}/{topic}.csv', 'w', newline='') as output_file:
keys = ['Quote', 'Author', 'Tags']
dict_writer = csv.DictWriter(output_file, fieldnames=keys)
dict_writer.writeheader()
dict_writer.writerows(quotes_deatils)
print("File created successfully.")
main(url)
|
nourmuhammed20/Web_Scrapping
|
Beautiful Soup/QuotesScrapper/Quotes_Scrapping.py
|
Quotes_Scrapping.py
|
py
| 1,262 |
python
|
en
|
code
| 0 |
github-code
|
50
|
19502019669
|
from abc import ABC, abstractmethod
from random import shuffle
import tensorflow as tf
from core.Log import log
from datasets import DataKeys
from datasets.Augmentors import parse_augmentors
from datasets.Resize import resize, ResizeMode, jointly_resize
from datasets.util.BoundingBox import encode_bbox_as_mask, get_bbox_from_segmentation_mask
from datasets.util.Normalization import normalize, unnormalize
class AbstractDataset(ABC):
def __init__(self, config, subset, num_classes):
self.summaries = []
self.config = config
self.subset = subset
self.n_classes = num_classes
self.use_bbox_guidance = config.bool("use_bbox_guidance", False)
self.use_mask_guidance = config.bool("use_mask_guidance", False)
self.use_laser_guidance = config.bool("use_laser_guidance", False)
self.use_clicks_guidance = config.bool("use_clicks_guidance", False)
self.epoch_length_train = config.int("epoch_length_train", -1)
self.epoch_length_val = config.int("epoch_length_val", -1)
self.shuffle_buffer_size = config.int("shuffle_buffer_size", 5000)
self.use_summaries = self.config.bool("use_summaries", False)
self.normalize_imgs = self.config.bool("normalize_imgs", True)
@abstractmethod
def n_examples_per_epoch(self):
if self.subset == "train" and self.epoch_length_train != -1:
return self.epoch_length_train
elif self.subset == "val" and self.epoch_length_val != -1:
return self.epoch_length_val
else:
return None
@abstractmethod
def create_input_tensors_dict(self, batch_size):
pass
def num_classes(self):
return self.n_classes
def load_example(self, input_filenames):
raw_example = self.load_raw_example(*input_filenames)
processed = self.process_raw_example(raw_example)
return processed
def process_raw_example(self, example):
example = self.postproc_example_initial(example)
example = self.augment_example_before_resize(example)
example = self.postproc_example_before_resize(example)
example = self.resize_example(example)
example = self.augment_example_after_resize(example)
example = self.postproc_example_before_assembly(example)
example = self.assemble_example(example)
return example
def load_raw_example(self, img_filename, label_filename=None, *args):
img_tensors = self.load_image(img_filename)
if not isinstance(img_tensors, dict):
img_tensors = {DataKeys.IMAGES: img_tensors}
label_tensors = self.load_annotation(img_tensors[DataKeys.IMAGES], img_filename, label_filename)
if not isinstance(label_tensors, dict):
label_tensors = {DataKeys.SEGMENTATION_LABELS: label_tensors}
# merge the two dicts
# the keys need to be disjoint!
for k in img_tensors.keys():
assert k not in label_tensors.keys()
example = img_tensors
example.update(label_tensors)
return example
def load_image(self, img_filename):
img_data = tf.read_file(img_filename)
img = tf.image.decode_image(img_data, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
img.set_shape((None, None, 3))
return img
def load_annotation(self, img, img_filename, annotation_filename):
ann_data = tf.read_file(annotation_filename)
ann = tf.image.decode_image(ann_data, channels=1)
ann.set_shape(img.get_shape().as_list()[:-1] + [1])
ann = self.postproc_annotation(annotation_filename, ann)
return ann
def postproc_annotation(self, ann_filename, ann):
return ann
def resize_example(self, tensors):
resize_mode_str = self.config.string("resize_mode_" + self.subset, "")
if resize_mode_str == "":
print("Using resize_mode_train for", self.subset, "since resize_mode_" + self.subset,
"not specified in the config", file=log.v1)
resize_mode_str = self.config.string("resize_mode_train")
size = self.config.int_list("input_size_" + self.subset, [])
if len(size) == 0:
size = self.config.int_list("input_size_train", [])
resize_mode = ResizeMode(resize_mode_str)
tensors = resize(tensors, resize_mode, size)
return tensors
def jointly_resize_examples(self, tensors_batch):
resize_mode_str = self.config.string("resize_mode_" + self.subset, "")
if resize_mode_str == "":
print("Using resize_mode_train for", self.subset, "since resize_mode_" + self.subset,
"not specified in the config", file=log.v1)
resize_mode_str = self.config.string("resize_mode_train")
size = self.config.int_list("input_size_" + self.subset, [])
if len(size) == 0:
size = self.config.int_list("input_size_train", [])
resize_mode = ResizeMode(resize_mode_str)
tensors_batch = jointly_resize(tensors_batch, resize_mode, size)
return tensors_batch
def augment_example_before_resize(self, tensors):
augmentors_str = self.config.string_list("augmentors_" + self.subset, [])
augmentors = parse_augmentors(augmentors_str, self.config)
for aug in augmentors:
tensors = aug.apply_before_resize(tensors)
return tensors
def augment_example_after_resize(self, tensors):
augmentors_str = self.config.string_list("augmentors_" + self.subset, [])
augmentors = parse_augmentors(augmentors_str, self.config)
for aug in augmentors:
tensors = aug.apply_after_resize(tensors)
return tensors
def jointly_augment_examples_before_resize(self, tensors_batch):
augmentors_str = self.config.string_list("augmentors_" + self.subset, [])
augmentors = parse_augmentors(augmentors_str, self.config)
for aug in augmentors:
tensors_batch = aug.batch_apply_before_resize(tensors_batch)
return tensors_batch
def jointly_augment_examples_after_resize(self, tensors_batch):
augmentors_str = self.config.string_list("augmentors_" + self.subset, [])
augmentors = parse_augmentors(augmentors_str, self.config)
for aug in augmentors:
tensors_batch = aug.batch_apply_after_resize(tensors_batch)
return tensors_batch
def postproc_example_initial(self, tensors):
if DataKeys.IMAGES in tensors and DataKeys.RAW_IMAGES not in tensors:
tensors[DataKeys.RAW_IMAGES] = tensors[DataKeys.IMAGES]
if DataKeys.IMAGES in tensors and DataKeys.RAW_IMAGE_SIZES not in tensors:
tensors[DataKeys.RAW_IMAGE_SIZES] = tf.shape(tensors[DataKeys.IMAGES])[0:2]
if DataKeys.SEGMENTATION_LABELS in tensors and DataKeys.BBOXES_y0x0y1x1 not in tensors:
print("deriving bboxes from segmentation masks", file=log.v5)
segmentation_labels = tensors[DataKeys.SEGMENTATION_LABELS]
bbox = get_bbox_from_segmentation_mask(segmentation_labels)
tensors[DataKeys.BBOXES_y0x0y1x1] = bbox
return tensors
def postproc_example_before_assembly(self, tensors):
tensors_postproc = tensors.copy()
if self.normalize_imgs:
tensors_postproc[DataKeys.IMAGES] = normalize(tensors[DataKeys.IMAGES])
return tensors_postproc
def postproc_example_before_resize(self, tensors):
tensors_postproc = tensors.copy()
if (self.use_bbox_guidance) \
and DataKeys.BBOXES_y0x0y1x1 in tensors and DataKeys.BBOX_GUIDANCE not in tensors:
bbox = tensors[DataKeys.BBOXES_y0x0y1x1]
img = tensors[DataKeys.IMAGES]
bbox_guidance = encode_bbox_as_mask(bbox, tf.shape(img))
tensors_postproc[DataKeys.BBOX_GUIDANCE] = bbox_guidance
return tensors_postproc
def assemble_example(self, tensors):
tensors_assembled = tensors.copy()
inputs_to_concat = [tensors[DataKeys.IMAGES]]
if self.use_bbox_guidance and DataKeys.BBOX_GUIDANCE in tensors:
print("using bbox guidance", file=log.v1)
bbox_guidance = tf.cast(tensors[DataKeys.BBOX_GUIDANCE], tf.float32)
inputs_to_concat.append(bbox_guidance)
if self.use_laser_guidance and DataKeys.LASER_GUIDANCE in tensors:
print("using laser guidance", file=log.v1)
laser_guidance = tf.cast(tensors[DataKeys.LASER_GUIDANCE], tf.float32)
inputs_to_concat.append(laser_guidance)
if self.use_clicks_guidance:
print("using guidance from clicks", file=log.v1)
neg_dist_transform = tensors[DataKeys.NEG_CLICKS]
pos_dist_transform = tensors[DataKeys.POS_CLICKS]
inputs_to_concat.append(neg_dist_transform)
inputs_to_concat.append(pos_dist_transform)
if self.use_mask_guidance:
print("using mask guidance", file=log.v1)
mask = tf.cast(tensors[DataKeys.SEGMENTATION_LABELS], tf.float32)
inputs_to_concat.append(mask)
if len(inputs_to_concat) > 1:
inputs = tf.concat(inputs_to_concat, axis=-1)
else:
inputs = inputs_to_concat[0]
tensors_assembled[DataKeys.INPUTS] = inputs
return tensors_assembled
def create_summaries(self, data):
if DataKeys.IMAGES in data:
if self.normalize_imgs:
unnormed = unnormalize(data[DataKeys.IMAGES])
else:
unnormed = data[DataKeys.IMAGES]
# allow visualization if only 2 color channels are there
if unnormed.shape[-1] == 2:
pad = tf.zeros_like(unnormed[:, :, :, :1])
unnormed = tf.concat([unnormed, pad], axis=3)
self.summaries.append(tf.summary.image(self.subset + "data/images", unnormed))
if DataKeys.SEGMENTATION_LABELS in data:
self.summaries.append(tf.summary.image(self.subset + "data/ground truth segmentation labels",
tf.cast(data[DataKeys.SEGMENTATION_LABELS], tf.float32)))
if DataKeys.SEGMENTATION_INSTANCE_LABELS in data:
self.summaries.append(tf.summary.image(self.subset + "data/ground truth segmentation instance labels",
tf.cast(data[DataKeys.SEGMENTATION_INSTANCE_LABELS], tf.float32)))
if DataKeys.BBOX_GUIDANCE in data:
self.summaries.append(tf.summary.image(self.subset + "data/bbox guidance",
tf.cast(data[DataKeys.BBOX_GUIDANCE], tf.float32)))
if DataKeys.SIGNED_DISTANCE_TRANSFORM_GUIDANCE in data:
self.summaries.append(tf.summary.image(self.subset + "data/signed_distance_transform_guidance",
data[DataKeys.SIGNED_DISTANCE_TRANSFORM_GUIDANCE]))
if DataKeys.UNSIGNED_DISTANCE_TRANSFORM_GUIDANCE in data:
self.summaries.append(tf.summary.image(self.subset + "data/unsigned_distance_transform_guidance",
data[DataKeys.UNSIGNED_DISTANCE_TRANSFORM_GUIDANCE]))
if DataKeys.LASER_GUIDANCE in data:
self.summaries.append(tf.summary.image(self.subset + "data/laser guidance",
tf.cast(data[DataKeys.LASER_GUIDANCE], tf.float32)))
class FileListDataset(AbstractDataset):
def __init__(self, config, dataset_name, subset, default_path, num_classes):
super().__init__(config, subset, num_classes)
self.inputfile_lists = None
self.fraction = config.float("data_fraction", 1.0)
self.data_dir = config.string(dataset_name + "_data_dir", default_path)
self._num_parallel_calls = config.int("num_parallel_calls", 32)
self._prefetch_buffer_size = config.int("prefetch_buffer_size", 20)
def _load_inputfile_lists(self):
if self.inputfile_lists is not None:
return
self.inputfile_lists = self.read_inputfile_lists()
assert len(self.inputfile_lists) > 0
for l in self.inputfile_lists:
assert len(l) > 0
# make sure all lists have the same length
assert all([len(l) == len(self.inputfile_lists[0]) for l in self.inputfile_lists])
if self.fraction < 1.0:
n = int(self.fraction * len(self.inputfile_lists[0]))
self.inputfile_lists = tuple([l[:n] for l in self.inputfile_lists])
def n_examples_per_epoch(self):
self._load_inputfile_lists()
n_examples = super().n_examples_per_epoch()
if n_examples is None:
return len(self.inputfile_lists[0])
else:
return n_examples
def create_input_tensors_dict(self, batch_size):
self._load_inputfile_lists()
if self.subset == "train":
# shuffle lists together, for this zip, shuffle, and unzip
zipped = list(zip(*self.inputfile_lists))
shuffle(zipped)
inputfile_lists_shuffled = tuple([x[idx] for x in zipped] for idx in range(len(self.inputfile_lists)))
else:
inputfile_lists_shuffled = self.inputfile_lists
tfdata = tf.data.Dataset.from_tensor_slices(inputfile_lists_shuffled)
if self.subset == "train":
tfdata = tfdata.shuffle(buffer_size=self.shuffle_buffer_size)
def _load_example(*input_filenames):
example = self.load_example(input_filenames)
# this has different sizes and therefore cannot be batched
if batch_size > 1:
if DataKeys.SEGMENTATION_LABELS_ORIGINAL_SIZE in example:
del example[DataKeys.SEGMENTATION_LABELS_ORIGINAL_SIZE]
if DataKeys.RAW_IMAGES in example:
del example[DataKeys.RAW_IMAGES]
return example
def _filter_example(tensors):
if DataKeys.SKIP_EXAMPLE in tensors:
return tf.logical_not(tensors[DataKeys.SKIP_EXAMPLE])
else:
return tf.constant(True)
tfdata = tfdata.map(_load_example, num_parallel_calls=self._num_parallel_calls)
tfdata = tfdata.filter(_filter_example)
tfdata = tfdata.repeat()
tfdata = self._batch(tfdata, batch_size)
tfdata = tfdata.prefetch(buffer_size=self._prefetch_buffer_size)
# TODO: maybe we can improve the performance like this
#tf.contrib.data.prefetch_to_device("/gpu:0", self._prefetch_buffer_size)
res = tfdata.make_one_shot_iterator().get_next()
if self.use_summaries:
self.create_summaries(res)
return res
def _batch(self, tfdata, batch_size):
if batch_size > 1:
tfdata = tfdata.batch(batch_size, drop_remainder=True)
elif batch_size == 1:
# like this we are able to retain the batch size in the shape information
tfdata = tfdata.map(lambda x: {k: tf.expand_dims(v, axis=0) for k, v in x.items()})
else:
assert False, ("invalid batch size", batch_size)
return tfdata
# Override to add extraction keys that will be used by trainer.
def get_extraction_keys(self):
return []
@abstractmethod
def read_inputfile_lists(self):
raise NotImplementedError
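# Illustrative (assumed) configuration sketch listing the keys this module reads through
# self.config; the concrete values below are placeholders rather than values from the
# repository's real experiment configs, and the resize-mode/augmentor names are guesses.
EXAMPLE_CONFIG = {
    "resize_mode_train": "fixed_size",      # parsed into ResizeMode by resize_example()
    "input_size_train": [256, 448],
    "augmentors_train": ["flip", "gamma"],  # parsed by parse_augmentors()
    "data_fraction": 1.0,                   # FileListDataset truncates its file lists with this
    "num_parallel_calls": 32,
    "prefetch_buffer_size": 20,
    "shuffle_buffer_size": 5000,
    "use_bbox_guidance": False,
    "normalize_imgs": True,
}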
|
VisualComputingInstitute/TrackR-CNN
|
datasets/Dataset.py
|
Dataset.py
|
py
| 14,251 |
python
|
en
|
code
| 511 |
github-code
|
50
|
24088044734
|
# Tutorial - Detect and Recognize Car License Plates Using Python
#
# First, you need to install Tesseract OCR on your Mac or PC
# On Mac: brew install tesseract
#
# Path to tesseract on Mac:
# /opt/homebrew/Cellar/tesseract/5.3.0/bin/tesseract
#
# pip install OpenCV-Python
# You will use this library for preprocessing the input image and displaying various output images.
#
# pip install imutils
# You will use this library to crop the original input image to a desired width.
#
# pip install pytesseract
# You will use this library to extract the characters of the license plate and convert them into strings.
# Import the libraries
import cv2
import imutils
import pytesseract
# Point pytesseract to the location where the Tesseract engine is installed
# Replace the string with the path to the tesseract executable on the Mac
# pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
pytesseract.pytesseract.tesseract_cmd = '/opt/homebrew/Cellar/tesseract/5.3.0/bin/tesseract'
# Read in the input image
IMAGE_1 = 'image1.jpeg'
IMAGE_2 = 'image2.jpeg'
IMAGE_3 = 'image3.jpeg'
IMAGE = 'image'
SUFFIX = '.jpeg'
DIR = 'img/'
X = 1
IMAGE_NAME = IMAGE + str(X) + SUFFIX
print(IMAGE_NAME)
FILE_PATH = DIR + IMAGE_NAME
print(FILE_PATH)
original_image = cv2.imread(FILE_PATH)
# Preprocess the image
# Resize the image width to 500 pixels.
# Then convert the image to grayscale as the canny edge detection function only works with grayscale images.
# Finally, call the bilateralFilter function to reduce the noise in the image.
original_image = imutils.resize(original_image, width=500 )
gray_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
gray_image = cv2.bilateralFilter(gray_image, 11, 17, 17)
# Perform edge detection
edged_image = cv2.Canny(gray_image, 30, 200)
# Find the contours
contours, new = cv2.findContours(edged_image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
img1 = original_image.copy()
cv2.drawContours(img1, contours, -1, (0, 255, 0), 3)
cv2.imshow("img1", img1)
# Sort the contours by area and keep the 100 largest
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:100]
# stores the license plate contour
screenCnt = None
img2 = original_image.copy()
# draws the 100 largest contours
cv2.drawContours(img2, contours, -1, (0, 255, 0), 3)
cv2.imshow("img2", img2)
# Loop over the sorted contours and keep the first one with four corners
idx = 7
for c in contours:
# approximate the license plate contour
contour_perimeter = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.018 * contour_perimeter, True)
# Look for contours with 4 corners
if len(approx) == 4:
screenCnt = approx
# find the coordinates of the license plate contour
x, y, w, h = cv2.boundingRect(c)
new_img = original_image [ y: y + h, x: x + w]
# stores the new image
cv2.imwrite('./'+str(idx)+'.png',new_img)
idx += 1
break
# draws the license plate contour on the original image
if screenCnt is None:
    raise SystemExit("No 4-corner contour was found - the license plate could not be located")
cv2.drawContours(original_image, [screenCnt], -1, (0, 255, 0), 3)
cv2.imshow("detected license plate", original_image)
# Convert the characters in the new image to a string
# filename of the cropped license plate image
cropped_License_Plate = './7.png'
cv2.imshow("cropped license plate", cv2.imread(cropped_License_Plate))
# converts the license plate characters to string
text = pytesseract.image_to_string(cropped_License_Plate, lang='eng')
# Display the output
print("License plate is: ", text)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
wacastel/python-license-reader
|
license_reader.py
|
license_reader.py
|
py
| 3,576 |
python
|
en
|
code
| 0 |
github-code
|
50
|
1436741061
|
import asyncio
class B(object):
def __init__(self):
self._value = 0
def value(self):
return self._value
class Counter1(B):
    async def add(self):
        # Read-modify-write split across an await: every coroutine reads the old value
        # before any of them writes it back, so increments are lost (a classic race).
        value = self._value + 1
        await asyncio.sleep(1)
        self._value = value
class Counter2(B):
    async def add(self):
        # The increment completes before the await, so no update is lost here.
        self._value += 1
        await asyncio.sleep(1)
def main():
c1 = Counter1()
res1 = asyncio.gather(*[c1.add() for i in range(100)])
c2 = Counter2()
res2 = asyncio.gather(*[c2.add() for i in range(100)])
loop = asyncio.get_event_loop()
all = asyncio.gather(res1, res2)
loop.run_until_complete(all)
loop.close()
print(c1.value(), c2.value())
if __name__ == '__main__':
main()
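# A follow-up sketch (the names below are new, not from this file): guarding the
# read-modify-write with an asyncio.Lock removes the lost-update behaviour that
# Counter1 demonstrates.
class Counter3(B):
    def __init__(self):
        super().__init__()
        self._lock = asyncio.Lock()
    async def add(self):
        async with self._lock:
            value = self._value + 1
            await asyncio.sleep(0)   # still yields control, but the lock serializes the update
            self._value = value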
|
YuanXianguo/Python-IT-Heima
|
网络编程/3、多任务编程/07协程/asyncio_demo/anheng.py
|
anheng.py
|
py
| 729 |
python
|
en
|
code
| 1 |
github-code
|
50
|
34524954211
|
# with open('file/pi_digits.txt') as file_object:
# contents=file_object.read()
# print(contents)
filename = '/Users/zhanghao/code/py-workspace/file_ReadOrWrite/pi_digits.txt'
#read row by row
with open(filename) as file_object:
for line in file_object:
print(line.rstrip())
print('--------------------------')
with open(filename) as file_object2:
lines=file_object2.readlines()
print(lines)
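# A small follow-up sketch: readlines() keeps the trailing '\n' on every element, which is why
# the list printed above shows newline characters. A comprehension gives clean strings instead.
with open(filename) as file_object3:
    clean_lines = [line.rstrip('\n') for line in file_object3]
print(clean_lines)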
|
zhanghao-esrichina/bigdata-project
|
basic_grammar/file_ReadOrWrite/file_reader.py
|
file_reader.py
|
py
| 429 |
python
|
en
|
code
| 0 |
github-code
|
50
|
15250899768
|
import os
from googletest.test import gtest_test_utils
# Command to run the googletest-shuffle-test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-shuffle-test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to googletest-shuffle-test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
Consecutive duplicates are removed.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0]
)
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0]
)
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations(
{TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'}, []
)[0]
)
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(
GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)]
)[0]
)
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(
GetTestsForAllIterations({}, [ShuffleFlag(), RandomSeedFlag(1)])[0]
)
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)]
)[0]
)
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations(
{TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)],
)[0]
)
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assertTrue(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assertTrue(
SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS
)
self.assertTrue(
SHUFFLED_FILTERED_TESTS != FILTERED_TESTS, SHUFFLED_FILTERED_TESTS
)
self.assertTrue(
SHUFFLED_SHARDED_TESTS != SHARDED_TESTS, SHUFFLED_SHARDED_TESTS
)
def testShuffleChangesTestCaseOrder(self):
self.assertTrue(
GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS),
)
self.assertTrue(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS),
)
self.assertTrue(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS),
)
self.assertTrue(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS),
)
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(
1,
SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,),
)
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(
1,
SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,),
)
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(
1,
SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,),
)
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(
1,
SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,),
)
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertTrue(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertTrue(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertTrue(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertTrue(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assertTrue(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assertTrue(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assertTrue(
test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,)
)
for test in SHARDED_TESTS:
self.assertTrue(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assertTrue(
not non_death_test_found,
'%s appears after a non-death test' % (test,),
)
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(
1,
test_cases.count(test_case),
'Test case %s is not grouped together in %s' % (test_case, tests),
)
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
# pylint: disable-next=unbalanced-tuple-unpacking
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]
)
)
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
tests_with_seed1 = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)]
)[0]
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
tests_with_seed2 = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)]
)[0]
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
tests_with_seed3 = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)]
)[0]
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
# pylint: disable-next=unbalanced-tuple-unpacking
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]
)
)
self.assertTrue(
tests_in_iteration1 != tests_in_iteration2, tests_in_iteration1
)
self.assertTrue(
tests_in_iteration1 != tests_in_iteration3, tests_in_iteration1
)
self.assertTrue(
tests_in_iteration2 != tests_in_iteration3, tests_in_iteration2
)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
tests1 = GetTestsForAllIterations(
{TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)],
)[0]
tests2 = GetTestsForAllIterations(
{TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)],
)[0]
tests3 = GetTestsForAllIterations(
{TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)],
)[0]
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
|
google/googletest
|
googletest/test/googletest-shuffle-test.py
|
googletest-shuffle-test.py
|
py
| 11,108 |
python
|
en
|
code
| 31,518 |
github-code
|
50
|
20755137339
|
#!/usr/env/bin python3
# Importing modules
from difflib import Match
from xml.dom import UserDataHandler
import cv2
from cv2 import RANSAC
from matplotlib.pyplot import axis
import numpy as np
import math
import os
from tqdm import tqdm
from scipy import ndimage as ndi
from skimage.feature import peak_local_max, corner_peaks
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
from Helper import *
class MyAutoPano():
def __init__(self, ImageSetPath, NumFeatures, ResultPath, TestName, ImageSetHeight=None, ImageSetWidth=None):
self.ImageCount = 0
self.ImageSetPath = ImageSetPath
self.ResultPath = ResultPath
os.makedirs(self.ResultPath, exist_ok = True)
self.NumFeatures = NumFeatures
if(not ImageSetHeight and not ImageSetWidth):
self.ImageSetResize = False
self.ImageSetHeight = cv2.imread(ImageSetPath[0]).shape[0]
self.ImageSetWidth = cv2.imread(ImageSetPath[0]).shape[1]
else:
self.ImageSetResize = True
self.ImageSetHeight = ImageSetHeight
self.ImageSetWidth = ImageSetWidth
# print(ImageSetHeight, ImageSetWidth)
self.ImageSet = list()
self.ImageSetGray = list()
self.Inliers = np.empty([0, 0, 1])
self.Homography = np.empty([0, 1, 3, 3])
self.BlendedImage = None
self.ImageSetRefId = None
self.TestName = TestName
# Toggles
self.Visualize = False
def createImageSet(self):
if(self.ImageSetResize):
[self.ImageSet.append(cv2.resize(cv2.imread(self.ImageSetPath[img]), None, fx=self.ImageSetHeight, fy=self.ImageSetWidth, interpolation=cv2.INTER_CUBIC)) for img in range(len(self.ImageSetPath))] # Reading images
else:
[self.ImageSet.append(cv2.imread(self.ImageSetPath[img])) for img in range(len(self.ImageSetPath))] # Reading images
[self.ImageSetGray.append(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)) for img in self.ImageSet] # Converting images to grayscale
self.ImageSet = np.array(self.ImageSet)
self.ImageSetGray = np.float32(np.array(self.ImageSetGray))
self.ImageSetRefId = int(len(self.ImageSet)/2) # Setting a reference to the anchor image
def computeHarrisCorners(self, Image, Visualize):
print("Computing Harris Corners...")
ImageGray = cv2.cvtColor(Image, cv2.COLOR_BGR2GRAY)
CornerScore = cv2.cornerHarris(ImageGray, 2, 3, 0.00001) # Computing corner probability using Harris corners
# CornerScore = cv2.normalize(CornerScore, None, -1.0, 1.0, cv2.NORM_MINMAX) # Normalizing
CornerScore[CornerScore<0.001*CornerScore.max()] = 0
CornerScore = cv2.dilate(CornerScore, None) # Dilating to mark corners
HarrisCorners = np.copy(Image)
HarrisCorners[CornerScore>0.001*CornerScore.max()]=[0,0,255] # Marking corners in RGB image
if(Visualize):
cv2.imshow("Harris Corners", HarrisCorners)
cv2.imshow("Corner Score", np.float32(CornerScore))
cv2.imwrite(self.ResultPath + self.TestName + '_Harris_' + str(self.ImageCount) + '.png', HarrisCorners)
cv2.waitKey(3)
return CornerScore, HarrisCorners
def computeShiTomasiCorners(self, Image, Visualize):
print("Computing Shi-Tomasi Corners...")
ImageGray = cv2.cvtColor(Image, cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(ImageGray, self.NumFeatures, 0.01, 3) # Computing corners using the Shi-Tomasi method
corners = np.int0(corners)
print("ShiTomasiCorners: %d"%(len(corners)))
ShiTomasiCorners = np.zeros(Image.shape[0:2])
ImageSetShiTomasiCorners = np.copy(Image)
for corner in corners: # Marking corners in RGB image
x,y = corner.ravel()
ShiTomasiCorners[y,x] = 255
cv2.circle(ImageSetShiTomasiCorners,(x,y),2,(0,0,255),-1)
if(Visualize):
cv2.imshow("Shi-Tomasi Corners", ImageSetShiTomasiCorners)
cv2.imshow("Corners", ShiTomasiCorners)
cv2.imwrite(self.ResultPath + self.TestName + '_Shi-Tomasi_' + str(self.ImageCount) + '.png', ImageSetShiTomasiCorners)
cv2.waitKey(3)
return ShiTomasiCorners, ImageSetShiTomasiCorners
def ANMS(self, Image, ImageCorners, Visualize):
print("Applying ANMS...")
ANMSCorners = list()
local_maximas = peak_local_max(ImageCorners, min_distance=1)
local_maximas = np.int0(local_maximas)
print("Local Maximas: %d"%len(local_maximas))
if(self.NumFeatures > len(local_maximas)):
self.NumFeatures = len(local_maximas)
r = [np.Infinity for i in range(len(local_maximas))]
ED = 0
for i in tqdm(range(len(local_maximas))):
for j in range(len(local_maximas)):
if(ImageCorners[local_maximas[j,0],local_maximas[j,1]] > ImageCorners[local_maximas[i,0],local_maximas[i,1]]):
ED = math.sqrt((local_maximas[j,0] - local_maximas[i,0])**2 + (local_maximas[j,1] - local_maximas[i,1])**2)
if(ED < r[i]):
r[i] = ED
ANMSCorners.append([r[i], local_maximas[i,0], local_maximas[i,1]])
ANMSCorners = sorted(ANMSCorners, reverse=True)
ANMSCorners = np.array(ANMSCorners[:self.NumFeatures])
print("ANMS Corners: %d"%len(ANMSCorners))
ImageSetLocalMaxima = np.copy(Image)
ImageSetANMS = np.copy(Image)
for local_maxima in local_maximas: # Marking corners in RGB image
y,x = local_maxima.ravel()
cv2.circle(ImageSetLocalMaxima,(x,y),2,(0,255,0),-1)
for i in range(self.NumFeatures): # Marking corners in RGB image
cv2.circle(ImageSetANMS,(int(ANMSCorners[i][2]),int(ANMSCorners[i][1])),2,(0,0,255),-1)
if(Visualize):
cv2.imshow("Local Max", ImageSetLocalMaxima)
cv2.imshow("ANMS", ImageSetANMS)
cv2.imwrite(self.ResultPath + self.TestName + '_ANMS_' + str(self.ImageCount) + '.png', ImageSetANMS)
cv2.waitKey(3)
return ANMSCorners, ImageSetLocalMaxima, ImageSetANMS
def featureDescriptor(self, Image, key_points, Visualize):
print("Retrieving feature patches...")
ImageGray = np.float32(cv2.cvtColor(Image, cv2.COLOR_BGR2GRAY))
patch_size = 40
features = list()
for point in range(len(key_points)):
patch = np.uint8(np.array(neighbors(ImageGray, 20, int(key_points[point][1]), int(key_points[point][2]))))
patch_gauss = cv2.resize(cv2.GaussianBlur(patch, (5,5), 0), None, fx=0.2, fy=0.2, interpolation=cv2.INTER_CUBIC)
patch_gauss = (patch_gauss - patch_gauss.mean())/(patch_gauss.std()+1e-20)
features.append(patch_gauss.flatten())
if(Visualize):
temp = cv2.circle(np.copy(Image),(int(key_points[point][2]), int(key_points[point][1])),2,(0,0,255),-1)
cv2.imshow("Feature", temp)
cv2.imshow("Patch", patch)
cv2.imshow("Patch gauss", patch_gauss)
# cv2.waitKey(3)
features = np.array(features)
return features
def featureMatching(self, Image0, Image1, Features0, Features1, ANMSCorners0, ANMSCorners1, Visualize):
print("Matching features...")
SSDs = list()
matches = list()
if(len(Features0) > len(Features1)):
N = len(Features1)
else:
N = len(Features0)
features = np.arange(N).tolist()
if(Visualize):
if(Image0.shape != Image1.shape):
temp_shape = np.vstack((Image0.shape, Image1.shape)).max(axis=0)
Image0_ = np.uint8(np.empty(temp_shape))
Image1_ = np.uint8(np.empty(temp_shape))
Image0_[0:Image0.shape[0],0:Image0.shape[1]] = Image0
Image1_[0:Image1.shape[0],0:Image1.shape[1]] = Image1
temp = np.hstack((Image0_, Image1_))
else:
temp = np.hstack((Image0, Image1))
for i in tqdm(range(N)):
SSDs.clear()
for j in features:
SSDs.append([sum((Features0[i] - Features1[j])**2), ANMSCorners1[j][1], ANMSCorners1[j][2]])
SSDs = sorted(SSDs)
# if((SSDs[0][0]/SSDs[1][0]) < 0.95):
# continue
matches.append([ANMSCorners0[i][1], ANMSCorners0[i][2], SSDs[0][1], SSDs[0][2]])
temp = cv2.circle(temp,(int(ANMSCorners0[i][2]), int(ANMSCorners0[i][1])),2,(0,0,255),-1)
temp = cv2.circle(temp,(int(SSDs[0][2])+Image1.shape[1], int(SSDs[0][1])),2,(0,0,255),-1)
temp = cv2.line(temp, (int(ANMSCorners0[i][2]), int(ANMSCorners0[i][1])), (int(SSDs[0][2])+Image1.shape[1], int(SSDs[0][1])), (0,255,0), 1)
if(Visualize):
cv2.imshow("Matches", temp)
cv2.imwrite(self.ResultPath + self.TestName + '_Matches_' + str(self.ImageCount) + '.png', temp)
if(Visualize):
cv2.waitKey(3)
print("Matches: %d"%len(matches))
matches = np.array(matches)
return matches, temp
def RANSAC(self, Matches, Image0, Image1, iterations, threshold, Visualize):
print("Performing RANSAC...")
max_inliers = 0
best_H = None
Inliers = list()
features = np.arange(len(Matches)).tolist()
for i in tqdm(range(iterations)):
feature_pairs = np.random.choice(features, 4, replace=False)
p1 = list()
p2 = list()
for j in range(len(feature_pairs)):
p1.append([Matches[feature_pairs[j]][1], Matches[feature_pairs[j]][0]])
p2.append([Matches[feature_pairs[j]][3], Matches[feature_pairs[j]][2]])
H = cv2.getPerspectiveTransform(np.float32(p1), np.float32(p2))
Hp1 = np.dot(H, np.vstack((Matches[:,1], Matches[:,0], np.ones([1,len(Matches)]))))
Hp1 = np.array(Hp1/(Hp1[2]+1e-20)).transpose()
Hp1 = np.delete(Hp1, 2, 1)
p2_ = list()
[p2_.append([Matches[x][3], Matches[x][2]]) for x in range(len(Matches))]
p2_ = np.array(p2_)
SSD = list()
[SSD.append(sum((p2_[x] - Hp1[x])**2)) for x in range(len(Matches))]
SSD = np.array(SSD)
SSD[SSD <= threshold] = 1
SSD[SSD > threshold] = 0
inliers = np.sum(SSD)
if(inliers > max_inliers):
max_inliers = inliers
Inliers = np.where(SSD == 1)
best_H = H
p1.clear()
p2.clear()
for i in Inliers[0]:
p1.append([Matches[i][1], Matches[i][0]])
p2.append([Matches[i][3], Matches[i][2]])
H, _ = cv2.findHomography(np.float32(p1), np.float32(p2), cv2.RANSAC, 1)
print("Inliers: %d"%max_inliers)
if(Image0.shape != Image1.shape):
temp_shape = np.vstack((Image0.shape, Image1.shape)).max(axis=0)
Image0_ = np.uint8(np.empty(temp_shape))
Image1_ = np.uint8(np.empty(temp_shape))
Image0_[0:Image0.shape[0],0:Image0.shape[1]] = Image0
Image1_[0:Image1.shape[0],0:Image1.shape[1]] = Image1
temp = np.hstack((Image0_, Image1_))
else:
temp = np.hstack((Image0, Image1))
for i in Inliers[0]:
temp = cv2.circle(temp,(int(Matches[i][1]), int(Matches[i][0])),2,(0,0,255),-1)
temp = cv2.circle(temp,(int(Matches[i][3])+Image1.shape[1], int(Matches[i][2])),2,(0,0,255),-1)
temp = cv2.line(temp, (int(Matches[i][1]), int(Matches[i][0])), (int(Matches[i][3])+Image1.shape[1], int(Matches[i][2])), (0,255,0), 1)
if(Visualize):
cv2.imshow("RANSAC", temp)
cv2.waitKey(3)
self.Homography = np.insert(self.Homography, len(self.Homography), np.array([best_H]), axis=0)
if(H is None):
H = best_H
return H, temp
def mean_blend(self, img1, img2):
assert(img1.shape == img2.shape)
locs1 = np.where(cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY) != 0)
blended1 = np.copy(img2)
blended1[locs1[0], locs1[1]] = 0
locs2 = np.where(cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY) != 0)
blended2 = np.copy(img1)
blended2[locs2[0], locs2[1]] = img2[locs2[0], locs2[1]]
blended = cv2.addWeighted(blended1, 0, blended2, 1.0, 0)
return blended
def stitchImages(self, Image0, Image1, H, Visualize):
print("Blending Images...")
h0, w0 = Image0.shape[:2]
h1, w1 = Image1.shape[:2]
c0 = np.float32([[0, 0], [0, h0], [w0, h0], [w0, 0]]).reshape(-1, 1, 2) # Points on Image 1
c1 = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2) # Points on Image 2
# print("Homography Matrix", H)
c0_ = cv2.perspectiveTransform(c0, H) # Points of Image 1 transformed
corners = np.concatenate((c0_, c1), axis = 0).reshape(8,2)
x_min, y_min = np.int0(np.min(corners, axis = 0))
x_max, y_max = np.int0(np.max(corners, axis = 0))
H_translate = np.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]]) # translate
Image0_Warped = cv2.warpPerspective(Image0, np.dot(H_translate, H), (x_max-x_min, y_max-y_min))
ImageStitched = np.copy(Image0_Warped)
idx = np.s_[-y_min:-y_min+h1, -x_min: -x_min+w1]
ImageStitched[idx] = self.mean_blend(ImageStitched[idx], Image1)
if(Visualize):
cv2.imshow("Stiched", ImageStitched)
cv2.waitKey(3)
return ImageStitched
def saveResults(self, ImageCount, Corners_0, Corners_1, ANMS_0, ANMS_1, Matches, RANSAC_, Stich):
cv2.imwrite(self.ResultPath + self.TestName + '_Corners_0_' + str(ImageCount) + '.png', Corners_0)
cv2.imwrite(self.ResultPath + self.TestName + '_Corners_1_' + str(ImageCount) + '.png', Corners_1)
cv2.imwrite(self.ResultPath + self.TestName + '_ANMS_0_' + str(ImageCount) + '.png', ANMS_0)
cv2.imwrite(self.ResultPath + self.TestName + '_ANMS_1_' + str(ImageCount) + '.png', ANMS_1)
cv2.imwrite(self.ResultPath + self.TestName + '_Matches_' + str(ImageCount) + '.png', Matches)
cv2.imwrite(self.ResultPath + self.TestName + '_RANSAC_' + str(ImageCount) + '.png', RANSAC_)
cv2.imwrite(self.ResultPath + self.TestName + '_Stich_' + str(ImageCount) + '.png', Stich)
def generatePanorama(self, Visualize):
print("Generating Panorama...")
self.createImageSet()
ImageSet = [x for x in self.ImageSet]
if(len(ImageSet)%2 != 0):
ImageSet = ImageSet[:len(ImageSet)//2+1]
else:
ImageSet = ImageSet[:len(ImageSet)//2]
PanoHalves = list()
half = self.ImageSetRefId//2
# if(half == 0):
# half = 1
for i in range(half+1):
for img in range(len(ImageSet)-1):
print("Stitching Frames %d & %d"%(img*(i+1), img*(i+1)+1))
ShiTomasiCorners0, Corners_0 = self.computeShiTomasiCorners(ImageSet[img], True)
ANMSCorners0, _, ANMS_0 = self.ANMS(ImageSet[img], ShiTomasiCorners0, True)
Features0 = self.featureDescriptor(ImageSet[img], ANMSCorners0, False)
ShiTomasiCorners1, Corners_1 = self.computeShiTomasiCorners(ImageSet[img+1], True)
ANMSCorners1, _, ANMS_1 = self.ANMS(ImageSet[img+1], ShiTomasiCorners1, True)
Features1 = self.featureDescriptor(ImageSet[img+1], ANMSCorners1, False)
Matches, Matches_ = self.featureMatching(ImageSet[img], ImageSet[img+1], Features0, Features1, ANMSCorners0, ANMSCorners1, True)
H, RANSAC_ = self.RANSAC(Matches, ImageSet[img], ImageSet[img+1], 1000, 5, True)
if(H is not None):
I = self.stitchImages(ImageSet[img], ImageSet[img+1], H, True)
else:
print("Not enough overlap, skipping image...")
continue
self.saveResults(self.ImageCount, Corners_0, Corners_1, ANMS_0, ANMS_1, Matches_, RANSAC_, I)
# cv2.imwrite(self.ResultPath + self.TestName + '_Stich_' + str(self.ImageCount) + '.png', I)
self.ImageCount += 1
ImageSet.append(I)
ImageSet = ImageSet[-img-1:]
PanoHalves.append(I)
ImageSet.clear()
ImageSet = [x for x in self.ImageSet]
ImageSet.reverse()
if(len(ImageSet)%2 != 0):
ImageSet = ImageSet[:len(ImageSet)//2+1]
else:
ImageSet = ImageSet[:len(ImageSet)//2]
for i in range(half+1):
for img in range(len(ImageSet)-1):
print("Stitching Frames %d & %d"%(img*(i+1), img*(i+1)+1))
ShiTomasiCorners0, Corners_0 = self.computeShiTomasiCorners(ImageSet[img], True)
ANMSCorners0, _, ANMS_0 = self.ANMS(ImageSet[img], ShiTomasiCorners0, True)
Features0 = self.featureDescriptor(ImageSet[img], ANMSCorners0, False)
ShiTomasiCorners1, Corners_1 = self.computeShiTomasiCorners(ImageSet[img+1], True)
ANMSCorners1, _, ANMS_1 = self.ANMS(ImageSet[img+1], ShiTomasiCorners1, True)
Features1 = self.featureDescriptor(ImageSet[img+1], ANMSCorners1, False)
Matches, Matches_ = self.featureMatching(ImageSet[img], ImageSet[img+1], Features0, Features1, ANMSCorners0, ANMSCorners1, True)
H, RANSAC_ = self.RANSAC(Matches, ImageSet[img], ImageSet[img+1], 1000, 5, True)
if(H is not None):
I = self.stitchImages(ImageSet[img], ImageSet[img+1], H, True)
else:
print("Not enough overlap, skipping image...")
continue
self.saveResults(self.ImageCount, Corners_0, Corners_1, ANMS_0, ANMS_1, Matches_, RANSAC_, I)
# cv2.imwrite(self.ResultPath + self.TestName + '_Stich_' + str(self.ImageCount) + '.png', I)
self.ImageCount += 1
ImageSet.append(I)
ImageSet = ImageSet[-img-1:]
PanoHalves.append(I)
print("Generating final panorama...")
PanoFirstHalf = PanoHalves[0]
PanoSecondHalf = PanoHalves[1]
ShiTomasiCorners0, Corners_0 = self.computeShiTomasiCorners(PanoFirstHalf, True)
ANMSCorners0, _, ANMS_0 = self.ANMS(PanoFirstHalf, ShiTomasiCorners0, True)
Features0 = self.featureDescriptor(PanoFirstHalf, ANMSCorners0, False)
# HarrisCorners1 = self.computeHarrisCorners(self.ImageSet[img+1], True)
ShiTomasiCorners1, Corners_0 = self.computeShiTomasiCorners(PanoSecondHalf, True)
ANMSCorners1, _, ANMS_1 = self.ANMS(PanoSecondHalf, ShiTomasiCorners1, True)
Features1 = self.featureDescriptor(PanoSecondHalf, ANMSCorners1, False)
Matches, Matches_ = self.featureMatching(PanoFirstHalf, PanoSecondHalf, Features0, Features1, ANMSCorners0, ANMSCorners1, True)
H, RANSAC_ = self.RANSAC(Matches, PanoFirstHalf, PanoSecondHalf, 2000, 10, True)
I = self.stitchImages(PanoFirstHalf, PanoSecondHalf, H, True)
cv2.waitKey(0)
self.saveResults(self.ImageCount, Corners_0, Corners_1, ANMS_0, ANMS_1, Matches_, RANSAC_, I)
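# Assumed usage sketch (the repository's actual Wrapper.py may differ, and the data layout
# below is hypothetical): collect the image paths, build the class, and let generatePanorama()
# run the full corner-detection / matching / RANSAC / stitching pipeline.
if __name__ == "__main__":
    import glob
    image_paths = sorted(glob.glob("../Data/Train/Set1/*.jpg"))
    pano = MyAutoPano(ImageSetPath=image_paths, NumFeatures=500,
                      ResultPath="../Results/", TestName="Set1")
    pano.generatePanorama(Visualize=False)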
|
tanujthakkar/MyAutoPano
|
Phase1/Code/MyAutoPano.py
|
MyAutoPano.py
|
py
| 19,697 |
python
|
en
|
code
| 2 |
github-code
|
50
|
21303784439
|
from post_traitement_function import *
#####################################
# End of the functions
#####################################
# Path to the result files
"""
This function call gathers all the information needed
for the post-processing step
"""
OptionTrace, OptionMultiTrace, chemin_multi_trace, Liste_chemin_sample, Liste_nom_fichiers, Liste_void_ratio = MiseEnPlacePostTraitement(sys.argv[1], sys.argv[2])
Liste_couleur_trace = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'w']
Liste_forme_trace = ['-', '*', '^', 'x', '+', '.']
"""
Cette boucle vise a tracer tous les graphs individuels, ie dans chque void ratio
"""
for i in range(0,len(Liste_chemin_sample)) :
if OptionTrace == False and OptionMultiTrace == False :
exit("""Les options : 'OptionsTrace' et 'OptionMultiTrace'
Situées dans le fichier input_data.py sont toutes les deux à False
Il faut en mettre au moins une sur True pour qu'il y ait un post traitement
Le code est donc stoppé """)
if not os.path.exists(Liste_chemin_sample[i]+'/resultat') :
os.mkdir(Liste_chemin_sample[i]+'/resultat')
if not os.path.exists(chemin_multi_trace+'/multi_resultat') :
os.mkdir(chemin_multi_trace+'/multi_resultat')
"""
info_list_parse() se situe dans le fichier post_traitement_function.py
Il permet de lire l'ensemble d'un fichier textet et d'acrire sont contenu dans une grande matrice
priori a ne pas modifier
"""
matrice=info_list_parse(Liste_chemin_sample[i]+'/'+Liste_nom_fichiers[i])
"""
getLineFromcolumn() se situe dans post_traitement_function.py
Fonctionne en colaboration avec info_list_parse, car il lit les informations écrite
dans la grosse matrice
--> a modifier en focntion de ce qu'on souhaite recuperer dans le fichier de résultat
Ici on recupere tout et on s'nemerde pas
"""
line_time , line_iter ,line_epsilon_zz ,line_deviatoric_strain, line_name, line_deviatoric_stress = getLineFromcolumn(matrice)
"""
Là on retravaille la forme de données pour avoir un beau graph
et que ça soit plus silmple pour comparer avec les résultats de Antoine
"""
for j in range (0, len(line_epsilon_zz)):
line_epsilon_zz[j] *= -1
line_deviatoric_stress[j] *=1./1000.
"""
Récuperations de certaines données à trracer ans le fichier Data_to_plot traité
Détail de la function dan sle fichier post_traitement_function.py
"""
r_mean, grains, liste_rad_pondere, liste_pourcentage_rad_inf = AnalyseGranulometrique(Liste_chemin_sample[i])
"""
Les fonctions ci-dessous servent à tracer différentes courbes
Le choix de courbe individuel ou multiple est fait dans les fonctions
"""
TraceDeviatoricStrain(line_epsilon_zz, line_deviatoric_strain, Liste_void_ratio[i], Liste_chemin_sample[i], chemin_multi_trace, Liste_couleur_trace , i, len(Liste_void_ratio), OptionTrace, OptionMultiTrace)
TraceDeviatoricStress(line_epsilon_zz, line_deviatoric_stress, Liste_void_ratio[i], Liste_chemin_sample[i], chemin_multi_trace, Liste_couleur_trace , i, len(Liste_void_ratio), OptionTrace, OptionMultiTrace)
TraceCourbeGranulometric( grains['rad'], liste_pourcentage_rad_inf,Liste_void_ratio[i], Liste_chemin_sample[i] ,chemin_multi_trace, Liste_couleur_trace , i, len(Liste_void_ratio), OptionTrace, OptionMultiTrace)
TraceCourbeGranulometricPondere(liste_rad_pondere, liste_pourcentage_rad_inf,Liste_void_ratio[i], Liste_chemin_sample[i] ,chemin_multi_trace, Liste_couleur_trace , i, len(Liste_void_ratio), OptionTrace, OptionMultiTrace)
TraceCourbeGranulometricEchelleLog(grains['rad'], liste_pourcentage_rad_inf,Liste_void_ratio[i], Liste_chemin_sample[i] ,chemin_multi_trace, Liste_couleur_trace , i, len(Liste_void_ratio), OptionTrace, OptionMultiTrace)
TraceCourbeGranulometricPondereEchelleLog(liste_rad_pondere, liste_pourcentage_rad_inf,Liste_void_ratio[i], Liste_chemin_sample[i] ,chemin_multi_trace, Liste_couleur_trace , i, len(Liste_void_ratio), OptionTrace, OptionMultiTrace)
print("Tous les post-traitements ont été effectués")
|
Raphael-Bouchard/etude_granulo
|
Post_Traitement/posttraitement.py
|
posttraitement.py
|
py
| 4,121 |
python
|
fr
|
code
| 0 |
github-code
|
50
|
14875311132
|
import abc
import importlib
class Plugins(abc.ABCMeta):
plugins = dict()
def __new__(metaclass, name, bases, namespace):
cls = abc.ABCMeta.__new__(metaclass, name, bases, namespace)
if isinstance(cls.name, str):
metaclass.plugins[cls.name] = cls
return cls
@classmethod
def get(cls, name):
if name not in cls.plugins:
print('Loading plugins from plugins.%s' % name)
importlib.import_module('plugins.%s' % name)
return cls.plugins[name]
class PluginBase(metaclass=Plugins):
@property
@abc.abstractmethod
def name(self):
raise NotImplementedError()
class SpamPlugin(PluginBase):
name = 'spam'
class EggsPlugin(PluginBase):
name = 'eggs'
print(EggsPlugin().name)
print(Plugins.plugins)
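# Small follow-on example: names registered at class-creation time can be fetched directly,
# while an unknown name would make Plugins.get() try importlib.import_module('plugins.<name>').
spam_cls = Plugins.get('spam')
print(spam_cls().name)   # -> 'spam'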
|
worasit/python-learning
|
mastering/metaclasses/automatically_registering_a_plugin_system.py
|
automatically_registering_a_plugin_system.py
|
py
| 812 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22715176363
|
from tkinter import *
from tkinter.font import Font
from PIL import ImageTk, Image # type: ignore
from bot_module import *
import selenium
import time
import sys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support import expected_conditions
ICON_PATH = "rsc/logo.jpeg"
def cria_gui():
'''
Creates the GUI for entering the contacts and the message. This function
can be called on its own, outside the scope of cria_gui_aviso().
'''
interface = Tk()
TITULO = "NPSP"
# Title, dimensions and icon
interface.title(TITULO)
X = interface.winfo_screenwidth()
interface.geometry("650x650+{}+30".format((X//2)-300))
icone = ImageTk.PhotoImage(Image.open(ICON_PATH))
interface.iconphoto(True, icone)
frame_contatos = Frame(interface)
frame_contatos.pack()
frame_mensagem = Frame(interface)
frame_mensagem.pack()
FONTE = Font(family="Helvetica", size=20)
fg_cor = "#083F38"
bg_cor = "#9BE1B6"
label_nome_contatos = Label(frame_contatos, text="Contatos",
font=FONTE, fg=fg_cor, bg=bg_cor)
label_nome_contatos.pack()
label_nome_mensagem = Label(frame_contatos, text="Mensagem",
font=FONTE, fg=fg_cor, bg=bg_cor)
text_area_height = "13"
contatos = Text(frame_contatos, height=text_area_height)
contatos.pack()
submeter_contatos = Button(frame_contatos, text="Enviar", command=lambda:escreve_texto(contatos.get(1.0, END), "contatos"))
submeter_contatos.pack()
mensagem = Text(frame_mensagem, height=text_area_height)
mensagem.pack()
submeter_mensagem = Button(frame_mensagem, text="Enviar", command=lambda:escreve_texto(mensagem.get(1.0, END), "mensagem"))
submeter_mensagem.pack()
label_nome_mensagem.pack()
finalizado = Button(interface, text="Finalizar", font=("Arial", 15),
command=acha_numero_envia_mensagem)
finalizado.pack(side=RIGHT)
interface.mainloop()
def cria_gui_aviso():
'''
Creates the initial window with the usage instructions for the rest of the program,
calling within its scope the function that creates the next window. In other words,
there is no need to call cria_gui() separately.
'''
GUI_AVISO_TITULO = "NPSP - Aviso"
DIMENSOES = (574, 530)
# Create the Tk instance and set the window title, dimensions and icon
interface = Tk()
interface.title(GUI_AVISO_TITULO)
X = interface.winfo_screenwidth()
interface.geometry("{}x{}+{}+50".format(DIMENSOES[0], DIMENSOES[1], (X//2)-300))
icone = ImageTk.PhotoImage(Image.open(ICON_PATH))
interface.iconphoto(True, icone)
canvas = Canvas(interface, width=DIMENSOES[0], height=DIMENSOES[1]-30)
canvas.pack()
img = ImageTk.PhotoImage(Image.open("rsc/aviso.png"))
canvas.create_image(0, 0, anchor=NW, image=img)
ok_button = Button(interface, text="Ok", font=("Arial",17), command=interface.destroy)
ok_button.pack()
interface.mainloop()
cria_gui()
def escreve_texto(texto, tipo):
'''
Writes the given text to the file named <tipo>.txt
Parameters: str, str
'''
with open(f"{tipo}.txt", "w+", encoding="utf-8") as f:
f.write(texto)
DRIVER_PATH = "chromedriver.exe"
TEMPO_DE_ESPERA = 300
contagem_clique_finalizar = 0
def pegar_contagem():
global contagem_clique_finalizar
contagem_clique_finalizar += 1
return contagem_clique_finalizar
def acha_contato_envia_mensagem():
'''
Finds each contact and sends the message
'''
contagem = pegar_contagem()
contatos = le_arquivo("contatos")
mensagem = le_arquivo("mensagem")
if contagem == 1:
global driver
driver = iniciar_driver(DRIVER_PATH)
driver.implicitly_wait(TEMPO_DE_ESPERA)
no_remember_me(driver)
for contato in contatos:
barra_de_pesquisa(contato, driver)
time.sleep(0.5)
envia_mensagem(mensagem, driver)
time.sleep(0.5)
def processa_numero(numero):
# TODO: normalize the number into the proper format. String manipulation
# 9 digits: number without the area code (DDD)
# 11 digits: with the area code
# 13 digits: with the country code 55
numero = numero.replace(" ", "").replace("(", "").replace(")", "").replace("-", "")
if len(numero) < 11:
return numero
elif len(numero) == 11:
return f"+55{numero}"
elif len(numero) > 11:
if numero[0] == "+":
return numero
else:
return f"+{numero}"
def acha_numero_envia_mensagem():
'''
Uses the https://wa.me/phonenumber URL scheme to open the chat and send the message
The number must be in international format
'''
contagem = pegar_contagem()
numeros = le_arquivo("contatos")
mensagem = le_arquivo("mensagem")
if contagem == 1:
global driver
driver = iniciar_driver(DRIVER_PATH)
#no_remember_me(driver)
try:
element = WebDriverWait(driver, 15).until(EC.presence_of_element_located((By.CLASS_NAME, "_13NKt copyable-text selectable-text")))
element.click()
except:
pass
print(numeros)
driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 't')
for i, numero in enumerate(numeros):
driver.implicitly_wait(TEMPO_DE_ESPERA)
numero = processa_numero(numero)
print(numero)
numero_entra_chat(numero, driver)
envia_mensagem(mensagem, driver)
#driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 't')
driver.execute_script('''window.open("http://google.com","_blank");''')
window_name = driver.window_handles[-1]
driver.switch_to.window(window_name=window_name)
sys.exit(0)
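# Illustrative expectations for processa_numero (a sketch added here, not from the original file):
# processa_numero('(21) 99999-9999')  -> '+5521999999999'   # 11 digits: area code present, prepend +55
# processa_numero('5521999999999')    -> '+5521999999999'   # 13 digits without '+': just prefix '+'
# processa_numero('99999-9999')       -> '999999999'        # fewer than 11 digits: returned unchanged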
|
Ygor-J/bot_whatsapp
|
gui.py
|
gui.py
|
py
| 5,846 |
python
|
pt
|
code
| 0 |
github-code
|
50
|
74541406236
|
from string import ascii_lowercase # the lowercase alphabet letters
n = int(input()) # number of words
cnt = 0 # counts how many group words there are
for i in range(n):
    s = input() # the word
    j = 0 # index into the string s
    alpha_list = list(ascii_lowercase) # list of lowercase alphabet letters
while j < len(s):
if s[j] in alpha_list:
            alpha = s[j] # store the character at s[j]
while j < len(s):
if s[j] != alpha:
break
j += 1
            alpha_list.remove(alpha) # remove that letter from the list of available letters
else:
break
if j == len(s):
cnt += 1
print(cnt)
# To check whether a list contains a particular element, use the syntax: if element in list:
# To build the list of lowercase letters, import ascii_lowercase from the string module.
# To remove a particular element from a list, use remove(element).
# If the statement on line 16 were placed before the if statement, an index error would occur.
|
chlgksdbs/Baekjoon-Online-Judge
|
Python/단계별로 풀어보기/06. 문자열/[1316] 그룹 단어 체커.py
|
[1316] 그룹 단어 체커.py
|
py
| 1,196 |
python
|
ko
|
code
| 0 |
github-code
|
50
|
28545062693
|
from django.urls import path
from .views import (
PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
UserPostListView
)
from . import views
# using namespace to avoid url name conflict
app_name = 'blog'
urlpatterns = [
path('', views.index, name='homepage'),
path('blog/', PostListView.as_view(), name='blog'),
path('user/<str:username>', UserPostListView.as_view(), name='user-posts'),
path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
path('post/new/', PostCreateView.as_view(), name='post-create'),
]
|
PhurbaGyalzen/THE-XLOG
|
blog/urls.py
|
urls.py
|
py
| 760 |
python
|
en
|
code
| 1 |
github-code
|
50
|
32770875146
|
import os, sys, re
if len(sys.argv) < 3:
print('python "file_path" "pattern"')
exit(0)
with open(sys.argv[1], 'r', encoding='utf-8') as f:
for line in f.readlines():
if re.search(sys.argv[2], line):
if line[-1] == '\n':
line = line[:-1]
if line[-2:] == " {":
line = line[:-2]
print(line)
|
tanght1994/helptanght
|
get_go_func_name.py
|
get_go_func_name.py
|
py
| 377 |
python
|
en
|
code
| 0 |
github-code
|
50
|
2481999619
|
from modules import Json
from datetime import timedelta, timezone, time
from logging import getLevelName, Logger
from os.path import isfile
from pydantic import BaseModel, Field, validator
from typing import Union, Optional
unique_key_list = []
# CRITICAL
# ERROR
# WARNING
# INFO
# DEBUG
# NOTSET
class LoggingConfig(BaseModel):
stream_level: Union[int, str]=Field(20, alias="stream-level")
file_level: Union[int, str]=Field(20, alias="file-level")
backup_count: int=Field(3, alias="backup-count")
file_name: str=Field(alias="file-name")
dir_path: str=Field("logs", alias="dir-path")
@validator("stream_level", "file_level")
def level_name_validator(cls, value):
if type(value) == int:
if value in range(0, 51, 10):
return value
else:
new_value = getLevelName(value)
if type(new_value) == int:
return new_value
raise ValueError(f"Illegal level name: \"{value}\"")
class Config:
extra = "ignore"
class WebConfig(BaseModel):
host: str
port: int
debug: bool
class DiscordConfig(BaseModel):
token: str
prefixs: list[str]
rcon_role: int=Field(alias="rcon-role")
class _RCONConfig(BaseModel):
host: str
port: int
password: str
timeout: float
class _DiscordChannels(BaseModel):
text_channel_id: int=Field(alias="text-channel-id")
class ARKTimeData(BaseModel):
time: Union[time, str]
clear_dino: bool=Field(False, alias="clear-dino")
method: str
@validator("time")
def time_validator(cls, value):
if type(value) != time:
if type(value) != str:
raise ValueError(f"Illegal time format: \"{value}\"")
value = time.fromisoformat(value)
if value.tzinfo == None:
value = value.replace(tzinfo=TIMEZONE)
return value
@validator("method")
def method_validator(cls, value: str):
if value.lower() in ["restart", "save", "stop", "start"]:
return value.lower()
raise ValueError(f"Illegal method: \"{value}\"")
class ARKServerConfig(BaseModel):
unique_key: str=Field(alias="unique-key")
dir_path: str=Field(alias="dir-path")
file_name: str=Field(alias="file-name")
display_name: str=Field(alias="display-name")
rcon_config: _RCONConfig=Field(alias="rcon")
discord_config: _DiscordChannels=Field(alias="discord")
time_table: list[ARKTimeData]=Field(alias="time-table")
logging_config: LoggingConfig=Field(alias="logging")
logger_name: Optional[str]=None
@validator("unique_key")
def unique_key_validator(cls, value):
if value in unique_key_list:
raise ValueError(f"Repeated unique-key: \"{value}\"")
unique_key_list.append(value)
return value
class MessageFilters(BaseModel):
startswith: tuple[str, ...]
include: tuple[str, ...]
endswith: tuple[str, ...]
class BroadcastMessage(BaseModel):
save: str
stop: str
start: str
restart: str
saving: str
saved: str
class StatusMessage(BaseModel):
running: str
stopped: str
starting: str
rcon_disconnect: str=Field(alias="rcon-disconnect")
network_disconnect: str=Field(alias="network-disconnect")
CONFIG: dict[str, Union[dict, str, int]] = {
"web": {
"host": "0.0.0.0",
"port": 5000,
"debug": False,
},
"discord": {
"token": "",
"prefixs": [],
"rcon-role": 0,
},
"servers": [
"servers-config/Server-Example.json",
],
"ark-message-filter": {
"startswith": [
"SERVER:",
"管理員指令",
],
"include": [
"被自動摧毀了!",
": Destroy ",
"has entered your zone.",
"馴養了 一隻",
"Souls were destroyed by ",
"Soul was destroyed by ",
"擊殺!",
"已死亡!",
"killed!",
"你的部落 killed",
"killed ,擊殺者:",
"認養了",
"摧毀了你的",
"拆除了",
"放生了'",
"你的部落馴養了一隻",
"冷藏了",
"加入了部落!",
],
"endswith": [],
},
"broadcast": {
"save": "伺服器將於 $TIME 分鐘後存檔。\nServer will save in $TIME min.",
"stop": "伺服器將於 $TIME 分鐘後關閉。\nServer will shutdown in $TIME min.",
"start": "啟動伺服器。\nStart Server.",
"restart": "伺服器將於 $TIME 分鐘後重啟。\nServer will restart in $TIME min.",
"saving": "儲存中...\nSaving...",
"saved": "儲存完成!\nWorld Saved!",
},
"status-message": {
"running": "🟢 運作中",
"stopped": "🔴 未開啟",
"starting": "🔵 正在啟動中",
"rcon-disconnect": "🟡 RCON失去連線",
"network-disconnect": "🟠 對外失去連線",
},
"logging": {
"main": {
"stream-level": "INFO",
"file-level": "INFO",
"backup-count": 3,
"file-name": "main",
"dir-path": "logs",
},
"discord": {
"stream-level": "WARNING",
"file-level": "INFO",
"backup-count": 3,
"file-name": "discord",
"dir-path": "logs",
},
"web": {
"stream-level": "INFO",
"file-level": "INFO",
"backup-count": 3,
"file-name": "web",
"dir-path": "logs",
},
"rcon": {
"stream-level": "INFO",
"file-level": "INFO",
"backup-count": 3,
"file-name": "rcon",
"dir-path": "logs",
},
},
"database": "data.db",
"low-battery": 30,
"timezone": 8,
}
try:
RAW_CONFIG: dict = Json.load("config.json")
for key, value in RAW_CONFIG.items():
if type(value) == dict:
for s_key, s_value in value.items():
CONFIG[key][s_key] = s_value
else:
CONFIG[key] = value
except: pass
finally:
Json.dump("config.json", CONFIG)
TIMEZONE: timezone = timezone(timedelta(hours=CONFIG["timezone"]))
WEB_CONFIG = WebConfig(**CONFIG["web"])
DISCORD_CONFIG = DiscordConfig(**CONFIG["discord"])
__server_list: list[ARKServerConfig] = [
ARKServerConfig(**Json.load(config_path)) for config_path in CONFIG["servers"]
]
SERVERS: dict[str, ARKServerConfig] = {
ark_server_config.unique_key: ark_server_config
for ark_server_config in __server_list
}
FILTERS = MessageFilters(**CONFIG["ark-message-filter"])
BROADCAST_MESSAGES = BroadcastMessage(**CONFIG["broadcast"])
STATUS_MESSAGES = StatusMessage(**CONFIG["status-message"])
SQL_FILE = CONFIG["database"]
LOGGING_CONFIG: dict[str, LoggingConfig] = {
key: LoggingConfig(**value)
for key, value in CONFIG["logging"].items()
}
for unique_key, server_config in SERVERS.items():
server_config.logger_name = f"{server_config.display_name}-{unique_key}"
LOGGING_CONFIG.update({
server_config.logger_name: server_config.logging_config
})
LOW_BATTERY: int = CONFIG["low-battery"]
if False:
from sqlite3 import connect
if not isfile("data.db"):
db = connect(SQL_FILE)
cursor = db.cursor()
cursor.execute("""
CREATE TABLE "Users" (
"discord_id" INTEGER NOT NULL UNIQUE,
"account" TEXT NOT NULL UNIQUE,
"password" TEXT NOT NULL,
"token" TEXT UNIQUE,
PRIMARY KEY("discord_id")
);
""")
db.commit()
cursor.close()
db.close()
|
AloneAlongLife/ARK-Server-Manager-Plus_3.0
|
configs/config.py
|
config.py
|
py
| 7,800 |
python
|
en
|
code
| 2 |
github-code
|
50
|
21381824031
|
from ReadData import *
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
x_train, y_train = readCSVData('data_final2.csv')
x_test, y_test = readTXTData('train.txt')
x_train = x_train + x_test[:450]
y_train = y_train + y_test[:450]
y_test = y_test[451:499]
x_test = x_test[451:499]
vectorizer = TfidfVectorizer(ngram_range=(1,2))
vectors = vectorizer.fit_transform(x_train)
# return vectors
# def fit(self):
# vectors = self.tfidf()
classifier = LinearSVC()
# train the classifier
classifier.fit(vectors, y_train)
vectors1 = vectorizer.transform(x_test)
predict = classifier.predict(vectors1)
ac = accuracy_score(y_test, predict)
print(ac)
|
VuHoangvn/Nhap-mon-hoc-may
|
train/tfidf_linearSVM.py
|
tfidf_linearSVM.py
|
py
| 758 |
python
|
en
|
code
| 0 |
github-code
|
50
|
24650671602
|
def chop(lst):
del lst[0]
del lst[-1]
def middle(lst):
list_changer = lst[1:]
del list_changer[-1]
return list_changer
first_list = [1, 2, 3, 4]
second_list = [1, 2, 3, 4]
chopped_list = chop(first_list)
print(first_list)
print(chopped_list)
mid_list = middle(second_list)
print(second_list)
print(mid_list)
|
XEvan-WiseX/CIS-104
|
module 8/08_01/ex_08_01.py
|
ex_08_01.py
|
py
| 353 |
python
|
en
|
code
| 0 |
github-code
|
50
|
27592687058
|
import common.input as input
import algorithm.lightweight.coreset as alc
import common.utils as utils
import matplotlib.pyplot as plt
import numpy as np
import statistics
from sklearn.cluster import KMeans
data = input.parse_txt("dataset/s-set/s3.txt")
opt = input.parse_txt("dataset/s-set/s3-label.pa")
centers = input.parse_txt("dataset/s-set/s3-cb.txt")
#Computing lightweight coreset
lwcs = alc.LightweightCoreset(data, 15, 0.1)
coreset, weights = lwcs.compute()
@utils.timeit
def test_no_coreset():
kmeans = KMeans(n_clusters=15, random_state = 0).fit(X=data)
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1])
cost = utils.cost_function(data, kmeans.labels_, kmeans.cluster_centers_)
return cost
@utils.timeit
def test_coreset():
kmeans = KMeans(n_clusters=15, random_state = 0).fit(X=coreset, sample_weight=weights)
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1])
cost = utils.cost_function(data, kmeans.predict(X=data), kmeans.cluster_centers_)
return cost
cost = test_no_coreset()
results = []
for i in range(100):
coreset, weights = lwcs.compute()
cost_cs = test_coreset()
results.append((cost-cost_cs)/cost)
print(statistics.mean(results))
# print("cost no coreset ", cost)
# print("cost coreset ", cost_cs)
# print("coreset improvment: {:.1%} ".format(np.abs(cost-cost_cs)/cost))
|
piotrhm/coreset
|
example.py
|
example.py
|
py
| 1,397 |
python
|
en
|
code
| 2 |
github-code
|
50
|
25314326878
|
# For testing.
#
# Concept: This module allows to trigger abnormal situations, to test the reaction of the software ("fault insertion testing").
# In the place in the software, where the fault shall be injected, add a condition like
# if (testsuite_faultinjection_is_triggered(TC_MY_TESTCASE_FOR_SOMETHING)):
# DoSomethingStrange()
# In normal software run, this condition is never fulfilled and does not disturb. If the related test case is activated,
# by setting testsuite_testcase_number = TC_MY_TESTCASE_FOR_SOMETHING below, the condition will fire and the fault is injected.
# A number of delay cycles can be configured with testsuite_delayCycles below.
#
# Detailed docs: see doc/testing_and_simulation.md
from udplog import udplog_log
from configmodule import getConfigValue, getConfigValueBool
# The list of test cases. Each must have a unique test case ID.
TC_NOTHING_TO_TEST = 0
TC_EVSE_ResponseCode_SequenceError_for_SessionSetup = 1
TC_EVSE_ResponseCode_Failed_for_CableCheckRes = 2
TC_EVSE_ResponseCode_SequenceError_for_ServiceDiscoveryRes = 3
TC_EVSE_ResponseCode_SequenceError_for_ServicePaymentSelectionRes = 4
TC_EVSE_ResponseCode_SequenceError_for_ContractAuthenticationRes = 5
TC_EVSE_ResponseCode_ServiceSelectionInvalid_for_ChargeParameterDiscovery = 6
TC_EVSE_ResponseCode_Failed_for_PreChargeRes = 7
TC_EVSE_ResponseCode_Failed_for_PowerDeliveryRes = 8
TC_EVSE_ResponseCode_Failed_for_CurrentDemandRes = 9
TC_EVSE_Timeout_during_CableCheck = 10
TC_EVSE_Timeout_during_PreCharge = 11
TC_EVSE_Shutdown_during_PreCharge = 12
TC_EVSE_Shutdown_during_CurrentDemand = 13
TC_EVSE_Malfunction_during_CurrentDemand = 14
TC_EVSE_Timeout_during_CurrentDemand = 15
TC_EVSE_GoodCase = 16
TC_EVSE_LastTest = 17
# variables
testsuite_testcase_number = 0
testsuite_delayCycles = 0
testsuite_TcTitle = "(title not initialized)"
# Counter variable for delaying the trigger
testsuite_counter = 0
def testsuite_printToTestLog(s):
fileOut = open('testresults.txt', 'a') # open the result file for appending
print(s, file=fileOut)
fileOut.close()
def testsuite_getTcNumber():
if (testsuite_testcase_number==0):
return "(no tests)"
else:
return str(testsuite_testcase_number) + testsuite_TcTitle
def testsuite_faultinjection_is_triggered(context):
global testsuite_counter, testsuite_testcase_number, testsuite_delayCycles
isTestcaseFired = False
if (context==testsuite_testcase_number): # if the call context is matching the intended test case
testsuite_counter += 1 # count the number of matching calls
isTestcaseFired = testsuite_counter>=testsuite_delayCycles # and fire the test case if the intended number is reached
if (isTestcaseFired):
print("[TESTSUITE] Fired test case " + str(context) + " TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT")
s = "[TESTSUITE] Fired test case " + str(context)
udplog_log(s, "testsuite")
return isTestcaseFired
def testsuite_choose_testcase():
global testsuite_counter, testsuite_testcase_number, testsuite_delayCycles
global testsuite_observedResult
global testsuite_expectedResult
global testsuite_TcTitle
if (not getConfigValueBool("testsuite_enable")):
testsuite_testcase_number = TC_NOTHING_TO_TEST
return
try:
if (testsuite_expectedResult is None):
testsuite_expectedResult = ""
except:
testsuite_expectedResult = ""
# as first step, before choosing the next test case, check the result of the ongoing test case
if (testsuite_expectedResult!=""):
s = "ExpectedResult: " + testsuite_expectedResult
s = s + ", ObservedResult: " + testsuite_observedResult
if (testsuite_expectedResult!=testsuite_observedResult):
s = "FAIL " + s
else:
s = "PASS " + s
print(s)
udplog_log(s, "testsuite")
x = "Result for Testcase " + str(testsuite_testcase_number) + " " + testsuite_TcTitle
testsuite_printToTestLog(x)
testsuite_printToTestLog(s)
if (testsuite_testcase_number<TC_EVSE_LastTest):
testsuite_testcase_number+=1
print("[TESTSUITE] Setting up test case " + str(testsuite_testcase_number) + " TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT")
s = "[TESTSUITE] Setting up test case " + str(testsuite_testcase_number)
udplog_log(s, "testsuite")
testsuite_counter = 0
testsuite_delayCycles = 5 # just a default
testsuite_expectedResult = "" # just a default
testsuite_observedResult = "" # just a default
testsuite_TcTitle = "(title missing)" # just a default
# For each test case, configure the test parameters and the expected result
if (testsuite_testcase_number == TC_EVSE_Timeout_during_CableCheck):
testsuite_delayCycles=0 # immediately timeout
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "Timeout during CableCheck shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_Timeout_during_PreCharge):
testsuite_delayCycles=0 # immediately timeout
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "Timeout during PreCharge shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_Shutdown_during_PreCharge):
testsuite_delayCycles=2 # shutdown after 2 ok-cycles
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "Shutdown during PreCharge shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_Shutdown_during_CurrentDemand):
testsuite_delayCycles=20 # shutdown after 20 ok-cycles
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "Shutdown during CurrentDemand shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_Malfunction_during_CurrentDemand):
testsuite_delayCycles=5 # malfunction after 5 ok-cycles
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "Malfunction during CurrentDemand shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_Timeout_during_CurrentDemand):
testsuite_delayCycles=30 # timeout after 30 ok-cycles
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "Timeout during CurrentDemand shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_ResponseCode_SequenceError_for_SessionSetup):
testsuite_delayCycles=0 # immediately
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "SequenceError in SessionSetup shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_ResponseCode_SequenceError_for_ServiceDiscoveryRes):
testsuite_delayCycles=0 # immediately
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "SequenceError in ServiceDiscoveryRes shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_ResponseCode_SequenceError_for_ServicePaymentSelectionRes):
testsuite_delayCycles=0 # immediately
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "SequenceError in ServicePaymentSelectionRes shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_ResponseCode_SequenceError_for_ContractAuthenticationRes):
testsuite_delayCycles=0 # immediately
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "SequenceError in ContractAuthenticationRes shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_ResponseCode_ServiceSelectionInvalid_for_ChargeParameterDiscovery):
testsuite_delayCycles=0 # immediately
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "ServiceSelectionInvalid in ChargeParameterDiscoveryshall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_ResponseCode_Failed_for_CableCheckRes):
testsuite_delayCycles=0 # immediately in the first message
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "Failed in CableCheckRes shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_ResponseCode_Failed_for_PreChargeRes):
testsuite_delayCycles=2 # after two ok cycles, we inject the fault in the third cycle
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "Failed in PreChargeRes shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_ResponseCode_Failed_for_PowerDeliveryRes):
testsuite_delayCycles=0 # immediately
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "Failed in PowerDeliveryRes shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_ResponseCode_Failed_for_CurrentDemandRes):
testsuite_delayCycles=10 # fire the fault after 10 ok-cycles
testsuite_expectedResult = "TSRS_SafeShutdownFinished"
testsuite_TcTitle = "Failed in CurrentDemandRes shall lead to SafeShutdown"
if (testsuite_testcase_number == TC_EVSE_GoodCase):
# Test case for the good case: Normal charging, no errors.
testsuite_delayCycles=0 # not relevant
testsuite_expectedResult = "TSRS_ChargingFinished"
testsuite_TcTitle = "Good case, normal charging without errors"
def testsuite_reportstatus(s):
# give the test status to the UDP, to inform the other side and to have it in the network log.
udplog_log(s, "testsuite")
pass
def testsuite_evaluateIpv4Packet(pkt):
# The testsuite listens to syslog messages which are coming from the other side,
# to know what is going on.
global testsuite_observedResult
if (len(pkt)>50):
protocol = pkt[23]
destinationport = pkt[36]*256 + pkt[37]
if ((protocol == 0x11) and (destinationport==0x0202)): # it is an UDP packet to the syslog port
baSyslog = pkt[46:]
strSyslog = ""
syslogLen = len(baSyslog)
if (syslogLen>100):
syslogLen=100
for i in range(0, syslogLen-1): # one less, remove the trailing 0x00
x = baSyslog[i]
if (x<0x20):
x=0x20 # make unprintable character to space.
strSyslog+=chr(x) # convert ASCII code to string
print("[Testsuite] received syslog packet: " + strSyslog)
if (strSyslog[0:5]=="TSRS_"):
# it is a TestSuiteReportStatus message.
testsuite_observedResult = strSyslog
if __name__ == "__main__":
print("Testing the mytestsuite")
print("nothing to do")
|
uhi22/pyPLC
|
mytestsuite.py
|
mytestsuite.py
|
py
| 11,226 |
python
|
en
|
code
| 66 |
github-code
|
50
|
75122449436
|
def BubbleSort(list):
# loop of [len(list)-1] times
for i in range(0,len(list)-1):
# will be changed to False later if swapping occurs
noSwap = True
# loop of [len(list)-1-i] times
for j in range(0,len(list)-i-1):
# swap
if list[j] > list[j+1]:
temp = list[j]
list[j] = list[j+1]
list[j+1] = temp
# swapping occured
noSwap = False
# in case of tracking
#print(i)
# if no swapping occured --> sorted list is ready
if noSwap:
break
print(list)
#%% Testing
myList = [0,0,0,-1,-0,1,2,3,2,1]
BubbleSort(myList)
|
amiralishd/Data-Structures-and-Algorithms
|
Sorting/BuubleSort.py
|
BuubleSort.py
|
py
| 699 |
python
|
en
|
code
| 0 |
github-code
|
50
|
19425085749
|
# implementation of disjoint-set data structure (very efficient!)
# each "set" is a tree, and the "set representative" is the tree root
# hence, two nodes are in the same set if root(u) == root(v)
# initially, everything is in its own set. hence parent(node) = node
nn = 10                   # number of nodes; set this to whatever the problem needs
parent = list(range(nn))  # list(), not range(): entries are reassigned during path compression
size = [1]*nn
# to find the root, start from the node and keep going to parent[node]. stop when parent[node] = node.
# in addition, we set "parent[node] = root(node)" so that next time we look for root(node), we'll get there in 1 step!
def root(node):
if not parent[node] == node:
parent[node] = root(parent[node])
return parent[node]
# to merge the sets, we can simply do parent[root(u)] = root(v)
# to ensure that tree heights are O(log n), we make the root of the smaller tree a child of the root of the larger tree
# (since a node's root can't change > log n times)
def merge(uu, vv):
root_uu, root_vv = root(uu), root(vv)
assert root_uu != root_vv, (root_uu, root_vv)
if size[root_uu] < size[root_vv]:
parent[root_uu] = root_vv
size[root_vv] += size[root_uu]
else:
parent[root_vv] = root_uu
size[root_uu] += size[root_vv]
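# --- Usage sketch (added for illustration; exercises the functions above) ---
if __name__ == "__main__":
    merge(0, 1)
    merge(1, 2)                  # allowed: root(1) != root(2) before this call
    print(root(0) == root(2))    # True  -> 0, 1 and 2 now share one representative
    print(root(3) == root(4))    # False -> 3 and 4 are still singleton sets
    print(size[root(0)])         # 3     -> the size is tracked at each root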
|
navkrishna21/my-algorithm-data-structure-code-snippets
|
DSU/DSU_implementation.py
|
DSU_implementation.py
|
py
| 1,184 |
python
|
en
|
code
| 1 |
github-code
|
50
|
26666624970
|
import yaml
import pytest
from pycheron.callPycheronMetric import callPycheron
from pycheron.db.sqllite_db import Database
def load_config_file(config_file):
with open(config_file, "r") as ymlfile:
        cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)  # explicit Loader (required by PyYAML >= 5.1)
return cfg
def ensure_config_values(config):
for key, val in list(config.items()):
if val == "None" and key != "session":
config[key] = None
def get_db(db_name, session):
return Database(db_name, session_name=session)
def basic_stats_test(data_base):
basic_stats = data_base.get_metric("basicStatsMetric")
assert basic_stats is not None
@pytest.mark.parametrize(
"config_file",
[
"test_callPycheronMetric/pycheronConfigTemplateDir.yaml",
"test_callPycheronMetric/pycheronConfigTemplateDirNotByDay.yaml",
"test_callPycheronMetric/pycheronConfigTemplateCalcIndNoPlot.yaml",
"test_callPycheronMetric/pycheronConfigTemplateCalcIndPlot.yaml",
"test_callPycheronMetric/pycheronConfigTemplateCalcAllPlot.yaml",
"test_callPycheronMetric/pycheronConfigTemplateCalcAllNoPlot.yaml",
"test_callPycheronMetric/pycheronConfigTemplate.yaml",
"test_callPycheronMetric/pycheronConfigTemplateWfdisc.yaml",
],
)
def test_callPycheronMetric(config_file):
cfg = load_config_file(config_file)
ensure_config_values(cfg)
callPycheron(
output_dir=cfg["output_dir"],
data=cfg["data"],
datatype=cfg["datatype"],
calcAll=cfg["calcAll"],
calcPsds=cfg["calcPsds"],
calcBasic=cfg["calcBasic"],
calcCorr=cfg["calcCorr"],
calcCrossCorr=cfg["calcCrossCorr"],
calcGap=cfg["calcGap"],
calcAmp=cfg["calcAmp"],
calcSNR=cfg["calcSNR"],
calcSOH=cfg["calcSOH"],
calcStalta=cfg["calcStalta"],
calcDcOffset=cfg["calcDcOffset"],
calcSpikes=cfg["calcSpikes"],
calcAllDeadChan=cfg["calcAllDeadChan"],
calcTransfer=cfg["calcTransfer"],
calcCal=cfg["calcCal"],
network=cfg["network"],
station=cfg["station"],
byDay=cfg["byDay"],
startdate=cfg["startdate"],
enddate=cfg["enddate"],
jul_start=cfg["jul_start"],
jul_end=cfg["jul_end"],
generateMasks=cfg["generateMasks"],
masksByTime=cfg["masksByTime"],
rmsThreshold=cfg["rmsThreshold"],
maxThreshold=cfg["maxThreshold"],
minThreshold=cfg["minThreshold"],
medianThreshold=cfg["medianThreshold"],
meanThreshold=cfg["meanThreshold"],
varianceThreshold=cfg["varianceThreshold"],
stdThreshold=cfg["stdThreshold"],
maxLagSecs=cfg["maxLagSecs"],
filt=cfg["filt"],
freqmin=cfg["freqmin"],
freqmax=cfg["freqmax"],
corners=cfg["corners"],
zerophase=cfg["zerophase"],
maxorder=cfg["maxorder"],
ba=cfg["ba"],
freq_passband=cfg["freq_passband"],
windowSecs=cfg["windowSecs"],
incrementSecs=cfg["incrementSecs"],
threshold=cfg["threshold"],
separateMasks=cfg["separateMasks"],
completeDay=cfg["completeDay"],
expLoPeriod=cfg["expLoPeriod"],
expHiPeriod=cfg["expHiPeriod"],
linLoPeriod=cfg["linLoPeriod"],
linHiPeriod=cfg["linHiPeriod"],
evalresp=cfg["evalresp"],
dcExpThreshold=cfg["dcExpThreshold"],
pctBelowNoiseThreshold=cfg["pctBelowNoiseThreshold"],
pctAboveNoiseThreshold=cfg["pctAboveNoiseThreshold"],
dcLinThreshold=cfg["dcLinThreshold"],
num_gaps=cfg["num_gaps"],
pctBelowNoiseThresholdRESP=cfg["pctBelowNoiseThresholdRESP"],
pctAboveNoiseThresholdRESP=cfg["pctAboveNoiseThresholdRESP"],
minRep=cfg["minRep"],
algorithmSNR=cfg["algorithmSNR"],
windowSecsSNR=cfg["windowSecsSNR"],
snrThreshold=cfg["snrThreshold"],
data_quality=cfg["data_quality"],
activity=cfg["activity"],
io_clock=cfg["io_clock"],
windowSize=cfg["windowSize"],
thresholdSpikes=cfg["thresholdSpikes"],
selectivity=cfg["selectivity"],
fixedThreshold=cfg["fixedThreshold"],
staSecs=cfg["staSecs"],
ltaSecs=cfg["ltaSecs"],
increment=cfg["increment"],
algorithmSTA=cfg["algorithmSTA"],
plots=cfg["plots"],
pdfModel=cfg["pdfModel"],
per_arr=cfg["per_arr"],
showNoiseModel=cfg["showNoiseModel"],
showMaxMin=cfg["showMaxMin"],
showMode=cfg["showMode"],
showMean=cfg["showMean"],
showMedian=cfg["showMedian"],
showEnvelope=cfg["showEnvelope"],
envelopeType=cfg["envelopeType"],
showSingle=cfg["showSingle"],
singleType=cfg["singleType"],
min_stations=cfg["min_stations"],
rank_by=cfg["rank_by"],
processesPSD=cfg["processesPSD"],
processesSpikes=cfg["processesSpikes"],
log=cfg["log"],
fortran=cfg["fortran"],
timespan=cfg["timespan"],
dcADF_win_size=cfg["dcADF_win_size"],
dcADF_pval_thresh=cfg["dcADF_pval_thresh"],
dcADF_threshold=cfg["dcADF_threshold"],
dcADF_use_thresh=cfg["dcADF_use_thresh"],
dcMean_win_size=cfg["dcMean_win_size"],
dcMean_thresh=cfg["dcMean_thresh"],
cal_metric_store=cfg["cal_metric_store"],
dcExpThresholdHour=cfg["dcExpThresholdHour"],
dcLinThresholdHour=cfg["dcLinThresholdHour"],
byHourOn=cfg["byHourOn"],
database=cfg["database"],
session=cfg["session"],
overwrite=cfg["overwrite"],
to_csv=cfg["to_csv"],
stationStartAt=cfg["stationStartAt"],
)
db_name = cfg["output_dir"] + "/" + cfg["database"]
db = get_db(db_name, cfg["session"])
basic_stats_test(db)
|
sandialabs/pycheron
|
pycheron/test_callPycheronMetric/test_callPycheronMetric.py
|
test_callPycheronMetric.py
|
py
| 5,805 |
python
|
en
|
code
| 20 |
github-code
|
50
|
41224875379
|
highest = 0
lowest = 1000000
arr = [567,123,76541,2123]
for i in range(len(arr)):
if arr[i] > highest:
highest = arr[i]
if arr[i] < lowest:
lowest = arr[i]
print(highest)
print(lowest)
|
jchh1998/practical-2
|
q4.py
|
q4.py
|
py
| 221 |
python
|
en
|
code
| 0 |
github-code
|
50
|
17945819781
|
from django.urls import path, re_path
from . import views
urlpatterns = [
path('login/', views.LoginPage, name = "login"),
path('logout/', views.LogoutUser, name = "logout"),
path('register/', views.RegisterUser, name = "register"),
path('', views.home, name = "home"),
]
|
DevSheila/DonationsPlatform
|
Donor_Login_Register/urls.py
|
urls.py
|
py
| 295 |
python
|
en
|
code
| 2 |
github-code
|
50
|
16164109080
|
from math import floor
from os import sep
with open(f'inputs{sep}day_1.txt') as rf: components = [int(l) for l in rf.readlines()]
def fuel_from_mass(mass):
return floor(mass / 3) - 2
def fuel_for_mass_including_fuel(mass):
fuel = fuel_from_mass(mass)
fuel_extra = fuel
while fuel_extra >= 0:
fuel_extra = fuel_from_mass(fuel_extra)
fuel += fuel_extra if fuel_extra > 0 else 0
return fuel
#Part 1:
print(sum([fuel_from_mass(c) for c in components]))
#Part 2:
print(sum([fuel_for_mass_including_fuel(c) for c in components]))
|
Nathansbud/AdventOfCode
|
2019/day_1.py
|
day_1.py
|
py
| 568 |
python
|
en
|
code
| 1 |
github-code
|
50
|
23360340548
|
# Accepted, but may exceed the time limit: this approach was designed with C++ performance in mind
n=int(input())
grid=[2]
for i in range(3,40000,2):
check=1
for j in range(3,int(i**0.5)+1):
if i%j==0:
check=0
break
if check:
grid.append(i)
for i in range(n):
ans=[1,1]
s,e=map(int,input().split())
for j in range(s,e+1):
temp=j
tpAn=1
if j in grid:
tpAn=2
elif j==1:
tpAn=1
else:
temp=j
for x in range(2,int(j**0.5)+1):
count=0
if x in grid and temp%x==0:
count=0
while temp%x==0:
temp=temp//x
count+=1
tpAn*=count+1
if temp==1:
break
if temp>1:
tpAn*=2
if tpAn>ans[1]:
ans=[j,tpAn]
if tpAn>ans[1]:
ans=[j,tpAn]
print("Between %d and %d, %d has a maximum of %d divisors."%(s,e,ans[0],ans[1]))
# This is the recursive Python version: the basic idea is the same, but instead of building a prime table it finds the prime factors recursively
# from math import sqrt
# from collections import Counter
# def primeFactors(n):
# i = 2
# while i<= sqrt(n):
# if n%i == 0:
# l = primeFactors(n/i)
# l.append(i)
# return l
# i+=1
# return [n]
# numTest = int(input())
# for itertest in range(numTest):
# L,U = map(int,input().split())
# it = L
# result,maxI = 0,-1
# while it<=U:
# f = primeFactors(it)
# counter = Counter(f)
# result1 = 1
# for c in counter:
# result1 *= counter[c] + 1
# if result1 > result:
# result = result1
# maxI = it
# it+=1
# if L == 1 and U == 1:
# print("Between %d and %d, %d has a maximum of 1 divisors."%(L,U,maxI))
# else:
# print("Between %d and %d, %d has a maximum of %d divisors."%(L,U,maxI,result))
|
Fergus4506/Mid1_2
|
DS_pr/UVA200-299/UVA294_CO.py
|
UVA294_CO.py
|
py
| 2,085 |
python
|
en
|
code
| 0 |
github-code
|
50
|
641907812
|
#!/usr/bin/python3
import sys
import signal
# Dictionary to store status code counts
status_code_counts = {
200: 0,
301: 0,
400: 0,
401: 0,
403: 0,
404: 0,
405: 0,
500: 0,
}
# Variables to keep track of total file size and line count
total_file_size = 0
line_count = 0
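# Expected input line format (illustrative; the parser below only relies on the
# status code and file size being the last two whitespace-separated fields):
#   <IP> - [<date>] "GET /projects/260 HTTP/1.1" <status code> <file size>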
def print_statistics():
"""
Print the computed statistics.
"""
print("Total file size:", total_file_size)
sorted_status_codes = sorted(status_code_counts.keys())
for status_code in sorted_status_codes:
count = status_code_counts[status_code]
if count > 0:
print("{}: {}".format(status_code, count))
def signal_handler(sig, frame):
"""
Signal handler for CTRL + C (keyboard interruption).
Print statistics and exit.
"""
print_statistics()
sys.exit(0)
# Register the signal handler for CTRL + C
signal.signal(signal.SIGINT, signal_handler)
try:
for line in sys.stdin:
line_count += 1
parts = line.split()
if len(parts) >= 10:
status_code = int(parts[-2])
file_size = int(parts[-1])
total_file_size += file_size
if status_code in status_code_counts:
status_code_counts[status_code] += 1
# Print statistics every 10 lines
if line_count % 10 == 0:
print_statistics()
except KeyboardInterrupt:
# Handle keyboard interruption (CTRL + C)
print_statistics()
|
Butawantemi/alx-higher_level_programming
|
0x0B-python-input_output/101-stats.py
|
101-stats.py
|
py
| 1,463 |
python
|
en
|
code
| 0 |
github-code
|
50
|
6991513011
|
from . import pluginbase
from . import transport
from twisted.internet import reactor
from twisted.python import log
import sys
def main():
if len(sys.argv) < 2:
print("Usage: %s <config dir>" % sys.argv[0])
sys.exit(1)
transportobj = transport.Transport()
boss = pluginbase.PluginBoss(sys.argv[1], transportobj)
observer = log.FileLogObserver(sys.stdout)
observer.timeFormat = "%Y-%m-%d %H:%M:%S"
log.startLoggingWithObserver(observer.emit)
log.msg("Abbott starting up!")
boss.load_all_plugins()
reactor.run()
if __name__ == "__main__":
main()
|
brownan/abbott
|
abbott/entrypt.py
|
entrypt.py
|
py
| 609 |
python
|
en
|
code
| 9 |
github-code
|
50
|
42693432422
|
class VRPSolution():
def __init__(self) -> None:
self.objVal = 0
self.vehicleNum = 0
self.pathSet = {}
self.solSet = {}
self.distance = {}
self.travelTime = {}
self.totalLoad = {}
self.pathNum = []
class SPPSolution():
def __init__(self,objVal=0,path=[],travelTime=0,travelTimeList=[],distance=0,load=0,loadList=[]) -> None:
self.status = False
self.objVal = objVal
self.path = path
self.travelTime = travelTime
self.travelTimeList = travelTimeList
self.distance = distance
self.load = load
self.loadList = loadList
|
clare1456/RL-for-CS
|
CG_test/solution.py
|
solution.py
|
py
| 676 |
python
|
en
|
code
| 0 |
github-code
|
50
|
31038894475
|
def add_time(start, duration, show_day = None):
def landing_day(day, days_later) :
# --- When show_day != None this function traverses the list of days and lands on target day
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
day = day.lower(); day = day.capitalize() # Clean up the day entry to match list entries
index = days.index(day)
count = 0
while count < days_later :
count += 1; index += 1
if index >= len(days) : index = 0
return days[index]
#--- BREAK APART | Split start for time & meridiem, time for hour & minute
#--- then duration time for hour and minute
meri_flag = False # Meridiem flip flag
time = start.split()[0]
meridiem = start.split()[1]
t_hour = int(time.split(":")[0])
t_minute = int(time.split(":")[1])
d_hour = int(duration.split(":")[0])
d_minute = int(duration.split(":")[1])
#--- Calculate new time. n_days = days to advance
new_minute = d_minute + t_minute
new_hour = d_hour + t_hour + (new_minute // 60)
#--- Calculate n_days
if (new_hour / 24) < 1 and meridiem == "AM" : n_days = 0
else : n_days = round(new_hour / 24)
if new_minute >= 60 : new_minute -= 60
if new_hour >= 12 :
meri_flag = True
flip_meri_count = (new_hour // 12)
if flip_meri_count % 2 == 1 :
if meridiem == "AM" : meridiem = "PM"
elif meridiem == "PM" : meridiem = "AM"
new_hour -= (12 * (new_hour // 12))
if new_hour == 0 : new_hour = 12
if show_day is not None :
new_time = f"{new_hour}:{new_minute:02} {meridiem}, {landing_day(show_day, n_days)}"
else :
new_time = f"{new_hour}:{new_minute:02} {meridiem}"
if meri_flag :
if n_days == 1 :
new_time = new_time + " (next day)"
elif n_days > 1 :
new_time = new_time + f" ({n_days} days later)"
return new_time
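# --- Example calls (added for illustration; expected outputs follow the
# freeCodeCamp "Time Calculator" exercise this function implements) ---
if __name__ == "__main__":
    print(add_time("3:00 PM", "3:10"))              # 6:10 PM
    print(add_time("11:30 AM", "2:32", "Monday"))   # 2:02 PM, Monday
    print(add_time("10:10 PM", "3:30"))             # 1:40 AM (next day)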
|
Packetouille/FCC_Python
|
Time Calculator/time_calculator.py
|
time_calculator.py
|
py
| 2,038 |
python
|
en
|
code
| 0 |
github-code
|
50
|
17439690818
|
from functools import partial
from django.db.models import query
from django.db.models.query import QuerySet
from requests.api import request
from rest_framework.decorators import action, authentication_classes
from rest_framework.mixins import ListModelMixin
from rest_framework.response import Response
from rest_framework import serializers, viewsets, status
from rest_framework.permissions import IsAdminUser, IsAuthenticated, AllowAny
from rest_framework.views import APIView
from garnbarn_api.serializer import AssignmentSerializer, CustomUserSerializer, TagSerializer
from garnbarn_api.authentication import FirebaseAuthIDTokenAuthentication
from rest_framework.decorators import action, permission_classes, api_view
from garnbarn_api.services.line import LineLoginPlatformHelper, LineApiError
import json
import pyotp
from django.db.models import Q
from datetime import datetime, date
from .models import Assignment, CustomUser, Tag
class CustomUserViewset(viewsets.ModelViewSet):
authentication_classes = [FirebaseAuthIDTokenAuthentication]
permission_classes = [IsAuthenticated]
serializer_class = CustomUserSerializer
def get_queryset(self):
uid = self.request.query_params.get('uid', None)
if uid:
try:
user = CustomUser.objects.get(uid=uid)
except CustomUser.DoesNotExist:
return None
else:
user = CustomUser.objects.get(uid=self.request.user.uid)
return user
def list(self, request, *args, **kwargs):
queryset = self.get_queryset()
if queryset is None:
return Response({"message": "User not found"}, status=status.HTTP_400_BAD_REQUEST)
serializer = self.get_serializer(queryset)
return Response(serializer.data)
@action(methods=['POST'], detail=False,
url_path="link", url_name="account-link")
def link(self, request, *args, **kwarg):
uid = request.user.uid
try:
request_payload = json.loads(request.body)
except json.JSONDecodeError:
return Response({
"message": "The body contain invalid json format."
}, status=status.HTTP_400_BAD_REQUEST)
if request_payload.get("platform") != "line":
return Response({
"message": "You didn't specify the platform or the platform you specify is not supported."
}, status=status.HTTP_400_BAD_REQUEST)
if not request_payload.get("credential"):
return Response({
"message": "No credential provided"
}, status=status.HTTP_400_BAD_REQUEST)
# Check if user already linked with LINE
if request.user.line:
return Response({
"message": "User already linked the account with LINE"
}, status=status.HTTP_400_BAD_REQUEST)
check_list = ["code", "clientId", "redirectUri"]
for item in check_list:
if not request_payload["credential"].get(item):
return Response({
"message": f"To link account with LINE, Field `{item}` in credential is required"
})
credential = request_payload["credential"]
line_login = LineLoginPlatformHelper()
try:
line_login.verify_login_code(
credential["code"], credential["redirectUri"], credential["clientId"])
line_profile = line_login.get_user_profile()
except LineApiError as e:
return Response(e.line_error_object, status=status.HTTP_400_BAD_REQUEST)
request.user.line = line_profile["userId"]
request.user.save()
return Response({}, status=status.HTTP_200_OK)
@action(methods=['POST'], detail=False,
url_path='unlink', url_name='unlink')
def unlink(self, request):
try:
request_payload = json.loads(request.body)
except json.JSONDecodeError:
return Response({
"message": "The body contain invalid json format."
}, status=status.HTTP_400_BAD_REQUEST)
if request_payload.get("platform") != "line":
return Response({
"message": "You didn't specify the platform or the platform you specify is not supported."
}, status=status.HTTP_400_BAD_REQUEST)
if not request.user.line:
return Response({
"message": "User is not linked the LINE account"
}, status=status.HTTP_400_BAD_REQUEST)
request.user.line = None
request.user.save()
return Response({}, status=status.HTTP_200_OK)
class AssignmentViewset(viewsets.ModelViewSet):
authentication_classes = [FirebaseAuthIDTokenAuthentication]
permission_classes = [IsAuthenticated]
serializer_class = AssignmentSerializer
def get_queryset(self):
user_data = self.request.user.uid
if self.request.query_params.get('fromPresent') == "true":
assignment = Assignment.objects.get_queryset().filter(
Q(author=user_data) | Q(tag__subscriber__icontains=user_data) | Q(tag__author=user_data))
assignment = assignment.exclude(
due_date__lt=date.today())
assignment = assignment.exclude(due_date=None).order_by('due_date')
else:
assignment = Assignment.objects.get_queryset().filter(
Q(author=user_data) | Q(tag__subscriber__icontains=user_data) | Q(tag__author=user_data)).order_by('id')
return assignment
def create(self, request, *args, **kwargs):
""" Create Assignment object.
Returns:
            If the given data contains all requirements
            (assignment_name is included and due_date > now),
            returns the assignment object in json.
            Else, returns a bad request status.
"""
serializer = AssignmentSerializer(data=request.data)
if not serializer.is_valid():
# Response 400 if the request body is invalid
return Response({
'message': serializer.errors
}, status=status.HTTP_400_BAD_REQUEST)
self.perform_create(serializer)
response = Response(serializer.data, status=status.HTTP_201_CREATED)
location_id = serializer.data.get('id')
response['Location'] = f"{request.path}" + f"{location_id}"
return response
def perform_create(self, serializer):
user_data = self.request.user.uid
serializer.save(author=CustomUser(uid=user_data))
def destroy(self, request, *args, **kwargs):
""" Remove assignment with specified id.
Returns:
{} with 200 status code.
"""
assignment = self.get_object()
assignment.delete()
return Response({}, status=status.HTTP_200_OK)
def partial_update(self, request, *args, **kwargs):
""" Update data of a specified assignment
Returns:
Assignment's object in json.
"""
serializer = AssignmentSerializer(
instance=self.get_object(), data=request.data, partial=True)
if not serializer.is_valid():
# Response 400 if the request body is invalid
return Response({
'message': serializer.errors
}, status=status.HTTP_400_BAD_REQUEST)
self.perform_update(serializer)
return Response(self.get_object().get_json_data(), status=status.HTTP_200_OK)
class TagViewset(viewsets.ModelViewSet):
authentication_classes = [FirebaseAuthIDTokenAuthentication]
permission_classes = [IsAuthenticated]
serializer_class = TagSerializer
def get_queryset(self):
user_data = self.request.user.uid
tag = Tag.objects.get_queryset().filter(Q(author=user_data) | Q(
subscriber__icontains=user_data)).order_by('id')
return tag
def create(self, request, *args, **kwargs):
"""Create Tag object.
Returns:
            If the given data contains all requirements
            (tag id and name are included),
            returns the tag object in json.
            Else, returns a bad request status.
"""
serializer = TagSerializer(data=request.data)
if not serializer.is_valid():
"""Response 400 if the request body is invalid"""
return Response({
'message': serializer.errors
}, status=status.HTTP_400_BAD_REQUEST)
random_secret_key = pyotp.random_base32()
self.perform_create(serializer, random_secret_key)
response_data = serializer.data
response_data["secretKeyTotp"] = random_secret_key
response = Response(response_data, status=status.HTTP_201_CREATED)
location_id = serializer.data.get('id')
response['Location'] = f"{request.path}" + f"{location_id}"
return response
def perform_create(self, serializer, secret_key):
user_data = self.request.user.uid
serializer.save(author=CustomUser(uid=user_data),
secret_key_totp=secret_key)
def destroy(self, request, *args, **kwargs):
"""Remove tag with specified id.
Returns:
{} with 200 status code.
"""
user_data = self.request.user.uid
tag = self.get_object()
if str(tag.author) == str(user_data):
tag.delete()
return Response({}, status=status.HTTP_200_OK)
else:
return Response({
'message': "Only Tag's author can delete the tag"
}, status=status.HTTP_400_BAD_REQUEST)
def partial_update(self, request, *args, **kwargs):
"""Update data of the specified tag
Returns:
Tag's object in json.
"""
data = request.data
serializer = TagSerializer(
instance=self.get_object(), data=data, partial=True)
if not serializer.is_valid():
"""Response 400 if the request body is invaild."""
return Response({
'message': serializer.errors
}, status=status.HTTP_400_BAD_REQUEST)
self.perform_update(serializer)
return Response(self.get_object().get_json_data(), status=status.HTTP_200_OK)
@action(methods=['post'], detail=True,
url_path="subscribe", url_name="subscribe")
def subscribe(self, request, *args, **kwargs):
tag = Tag.objects.get_queryset().get(id=self.kwargs.get('pk'))
try:
request_json = json.loads(request.body)
except json.JSONDecodeError:
return self.response_bad_request("The invalid json is passed in the body.")
if not request_json.get("code"):
return self.response_bad_request("The field `code` is required in the json body.")
otp_code = request_json["code"]
if not tag.secret_key_totp or tag.secret_key_totp == "":
return self.response_bad_request("This Tag doesn't contain the key that required for the verification process. Please re-create the tag.")
if not pyotp.TOTP(tag.secret_key_totp).verify(otp_code):
return self.response_bad_request("You enter an incorrect subscribe code.")
if not tag.author:
return self.response_bad_request("This Tag doesn't contain the author that required for the verification process. Please re-create the tag.")
if request.user.uid == tag.author.uid:
return self.response_bad_request("You can't subscribe to your own tag.")
if not tag.subscriber:
tag.subscriber = [request.user.uid]
elif request.user.uid in tag.subscriber:
return self.response_bad_request("User has already subscribed to this tag.")
elif tag.subscriber:
tag.subscriber.append(request.user.uid)
tag.save()
return Response({"message": f"user has subscribed to tag id {tag.id}"}, status.HTTP_200_OK)
@action(methods=['post', 'delete'], detail=True,
url_path="unsubscribe", url_name="unsubscribe")
def unsubscribe(self, request, *args, **kwargs):
tag = Tag.objects.get_queryset().get(id=self.kwargs.get('pk'))
if not tag.subscriber or request.user.uid not in tag.subscriber:
return Response({
"message": "User has not subscribe to this tag yet."
}, status=status.HTTP_400_BAD_REQUEST)
elif request.user.uid in tag.subscriber:
tag.subscriber.remove(request.user.uid)
if tag.subscriber == []:
tag.subscriber = None
tag.save()
return Response({"message": f"user has un-subscribed from tag id {tag.id}"}, status.HTTP_200_OK)
def response_bad_request(self, message):
return Response({
"message": message
}, status=status.HTTP_400_BAD_REQUEST)
|
GarnBarn/garnbarn-backend
|
garnbarn_api/views.py
|
views.py
|
py
| 12,945 |
python
|
en
|
code
| 0 |
github-code
|
50
|
39372159355
|
import random
from typing import Union
import pandas as pd
from .data_handler import get_interacted_products, DataHandler
from .models import PopularityBasedRecommender, ContentBasedRecommender
from .config import SEED, \
EVAL_RANDOM_SAMPLE_NON_INTERACTED_ITEMS
def hit_top_n(product_id: int, recommended_products: list[int], top_n: int) -> int:
return product_id in recommended_products[:top_n]
class ModelEvaluator:
def __init__(self, data_handler: DataHandler):
self.data_handler = data_handler
def get_not_interacted_products_sample(self, user_id: int, sample_size: int) -> set[int]:
interacted_products = get_interacted_products(user_id, self.data_handler.interactions_indexed)
all_products = set(self.data_handler.products['product_id'])
non_interacted_products = all_products - interacted_products
random.seed(SEED)
non_interacted_products_sample = random.sample(non_interacted_products, sample_size)
return set(non_interacted_products_sample)
def evaluate_model_for_user(self, model: Union[PopularityBasedRecommender, ContentBasedRecommender], user_id: int,
                                interactions: pd.DataFrame) -> dict[str, Union[int, float]]:
user_interactions = interactions.loc[user_id]
interacted_products = set(user_interactions['product_id'])
interacted_products_count = len(interacted_products)
user_recommendations = model.predict(user_id)
hits_at_5 = 0
hits_at_10 = 0
for product_id in interacted_products:
non_interacted_sample = self.get_not_interacted_products_sample(user_id,
EVAL_RANDOM_SAMPLE_NON_INTERACTED_ITEMS)
products_to_filter_recommendations = non_interacted_sample.union({product_id})
valid_recommendations = \
user_recommendations[user_recommendations['product_id'].isin(products_to_filter_recommendations)]
hits_at_5 += int(hit_top_n(product_id, valid_recommendations['product_id'].tolist(), 5))
hits_at_10 += int(hit_top_n(product_id, valid_recommendations['product_id'].tolist(), 10))
rate_at_5 = hits_at_5 / float(interacted_products_count)
rate_at_10 = hits_at_10 / float(interacted_products_count)
user_metrics = {
'hits@5_count': hits_at_5,
'hits@10_count': hits_at_10,
'interacted_count': interacted_products_count,
'recall@5': rate_at_5,
'recall@10': rate_at_10
}
return user_metrics
def evaluate(self, model: Union[PopularityBasedRecommender, ContentBasedRecommender], interactions: pd.DataFrame) \
-> dict[str, float]:
users_metrics = []
for idx, user_id in enumerate(list(interactions.index.unique().values)):
user_metrics = self.evaluate_model_for_user(model, user_id, interactions)
users_metrics.append(user_metrics)
detailed_results = pd.DataFrame(users_metrics).sort_values('interacted_count', ascending=False)
global_rate_at_5 = detailed_results['hits@5_count'].sum() / float(detailed_results['interacted_count'].sum())
global_rate_at_10 = detailed_results['hits@10_count'].sum() / float(detailed_results['interacted_count'].sum())
global_metrics = {
'rate@5': global_rate_at_5,
'rate@10': global_rate_at_10
}
return global_metrics
|
justleon/IUM-Recommendation-Tool
|
recommender/model_evaluator.py
|
model_evaluator.py
|
py
| 3,511 |
python
|
en
|
code
| 0 |
github-code
|
50
|
42577385818
|
'''
FOR PIECES IS QUICKER THAN
'''
class Solution(object):
def licenseKeyFormatting(self, S, K):
"""
:type S: str
:type K: int
:rtype: str
"""
S=list(S.replace("-", "").upper())
ans=[]
a=0
upper = len(S)
lower = len(S)-K
if len(S)<=K:
return "".join(S)
while (lower>=0 ):
ans.append("".join(S[lower:upper]))
upper=lower
lower = lower-K
if lower<0 and upper>0:
ans.append("".join(S[0:upper]))
break
return "-".join(ans[::-1])
|
RamonRomeroQro/ProgrammingPractice
|
code/LISCENCEKEY.py
|
LISCENCEKEY.py
|
py
| 708 |
python
|
en
|
code
| 1 |
github-code
|
50
|
42680711409
|
def pgcd(a,b):
if b == 0:
return a
else:
r = a % b
return pgcd(b,r)
def ppcm(a,b):
d = pgcd(a,b)
return int(a*b / d)
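# The loop below folds ppcm over 1..20, i.e. it computes lcm(1, 2, ..., 20),
# which is the answer to Project Euler problem 5.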
p = 1
for i in range(20):
p = ppcm(p,i+1)
print(p)
|
IThinkThereforeISuffer/ProjectEuler
|
Problem005.py
|
Problem005.py
|
py
| 180 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28569552447
|
def max_subarray_sum(arr, window_size):
left = 0
right = window_size - 1
# Calculate the initial sum
# slicing is inclusive of 'left' index and exclusive of the 'right+1' index
max_sum = sum(arr[left:right + 1])
current_sum = max_sum
# Slide the window
while right < len(arr) - 1:
# Move the window one position to the right
left += 1
right += 1
# Update the current sum
# current_sum = 7 - 2 + 5 = 10
current_sum = current_sum - arr[left - 1] + arr[right]
# Update the maximum sum if necessary
max_sum = max(max_sum, current_sum)
return max_sum
# Example usage
arr = [2, 4, 1, 5, 3, 7, 2, 8]
arr1 = [1, 2, 5, 2, 8, 1, 5]
window_size = 3
max_sum = max_subarray_sum(arr, window_size)
max_sum1 = max_subarray_sum(arr1, window_size)
print("Maximum sum of subarray of size", window_size, ":", max_sum)
print("Maximum sum of subarray of size", window_size, ":", max_sum1)
|
aakashmanjrekar11/leetcode
|
3. Sliding Window/MaxSubarray.py
|
MaxSubarray.py
|
py
| 1,013 |
python
|
en
|
code
| 0 |
github-code
|
50
|
25192975718
|
import magma
import coreir
_cache = None
def CoreIRContext(reset=False) -> coreir.Context:
global _cache
if not reset and _cache is not None:
return _cache
if reset:
magma.frontend.coreir_.ResetCoreIR()
c = magma.backend.coreir.coreir_runtime.coreir_context()
if reset:
c.load_library("commonlib")
_cache = c
return c
|
rdaly525/MetaMapper
|
metamapper/__init__.py
|
__init__.py
|
py
| 376 |
python
|
en
|
code
| 4 |
github-code
|
50
|
30332519273
|
def plot_carpet_ts(timeseries, modules, atlas=None, background_file=None, nskip=0, size=(950, 800),
subplot=None, title=None, output_file="regts.png"):
"""
Adapted from: https://github.com/poldracklab/niworkflows
    Plot an image representation of voxel intensities across time, also known
    as the "carpet plot" or "Power plot". See Jonathan Power, Neuroimage
    2017 Jul 1; 154:150-158.
    Parameters
    ----------
    timeseries : str
        Path to a TSV file with one column per region (rows are timepoints).
        Each column is z-scored before plotting.
    modules : array-like
        One module/network label per region, used to colour the sidebar
        (and the atlas legend, when an atlas is given).
    atlas : str, optional
        Path to a labelled atlas image; if given, a slice legend is rendered.
    background_file : str, optional
        Background image for the legend slices (defaults to the atlas itself).
    subplot : matplotlib SubplotSpec, optional
        The subplot used to display the plot. If None, the complete
        figure is used.
    title : string, optional
        The title displayed on the figure.
    output_file : string, or None, optional
        The name of an image file to export the plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot
        is saved to a file, and the display is closed.
"""
import numpy as np
import nibabel as nb
import pandas as pd
import os
import matplotlib.pyplot as plt
from matplotlib import gridspec as mgs
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap
import PUMI.utils.globals as glb
from nilearn.plotting import plot_img
legend = False
if atlas:
legend = True
# actually load data
timeseries = pd.read_csv(timeseries, sep="\t")
#normalise all timeseries
v = (None, None)
timeseries = (timeseries - timeseries.mean()) / timeseries.std()
v = (-2, 2)
timeseries = timeseries.transpose()
minimum = np.min(timeseries)
maximum = np.max(timeseries)
myrange = maximum - minimum
modules = pd.Series(modules).values
lut = pd.factorize(modules)[0]+1
# If subplot is not defined
if subplot is None:
subplot = mgs.GridSpec(1, 1)[0]
# Define nested GridSpec
wratios = [2, 120, 20]
gs = mgs.GridSpecFromSubplotSpec(1, 2 + int(legend), subplot_spec=subplot,
width_ratios=wratios[:2 + int(legend)],
wspace=0.0)
mycolors = ListedColormap(cm.get_cmap('Set1').colors[:7][::-1])
# Segmentation colorbar
ax0 = plt.subplot(gs[0])
ax0.set_yticks([])
ax0.set_xticks([])
lutt=pd.DataFrame({'1': lut})
ax0.imshow(lutt, interpolation='none', aspect='auto',
cmap=mycolors, vmin=0, vmax=8)
ax0.grid(False)
ax0.spines["left"].set_visible(False)
ax0.spines["bottom"].set_color('none')
ax0.spines["bottom"].set_visible(False)
# Carpet plot
ax1 = plt.subplot(gs[1])
ax1.imshow(timeseries, interpolation='nearest', aspect='auto', cmap='gray',
vmin=v[0], vmax=v[1])
ax1.grid(False)
ax1.set_yticks([])
ax1.set_yticklabels([])
# Set 10 frame markers in X axis
interval = max((int(timeseries.shape[-1] + 1) // 10, int(timeseries.shape[-1] + 1) // 5, 1))
xticks = list(range(0, timeseries.shape[-1])[::interval])
ax1.set_xticks(xticks)
ax1.set_xlabel('time')
#ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()], fontsize=5)
# Remove and redefine spines
for side in ["top", "right"]:
# Toggle the spine objects
ax0.spines[side].set_color('none')
ax0.spines[side].set_visible(False)
ax1.spines[side].set_color('none')
ax1.spines[side].set_visible(False)
ax1.yaxis.set_ticks_position('left')
ax1.xaxis.set_ticks_position('bottom')
ax1.spines["bottom"].set_visible(False)
ax1.spines["left"].set_color('none')
ax1.spines["left"].set_visible(False)
if legend:
gslegend = mgs.GridSpecFromSubplotSpec(
5, 1, subplot_spec=gs[2], wspace=0.0, hspace=0.0)
if not background_file:
background_file = atlas#glb._FSLDIR_ + "/data/standard/MNI152_T1_2mm_brain.nii.gz" #TODO: works only for 3mm atlas
background = nb.load(background_file)
atlas = nb.load(atlas)
nslices = background.shape[-1]
#coords = np.linspace(int(0 * nslices), int(0.99 * nslices), 5).astype(np.uint8)
coords = [-40, 20, 0, 20, 40] #works in MNI space
lut2 = lut
lut2 = np.array([0] + lut2.tolist())
relabeled=lut2[np.array(atlas.get_data(), dtype=int)]
atl = nb.Nifti1Image(relabeled, atlas.get_affine())
for i, c in enumerate(coords):
ax2 = plt.subplot(gslegend[i])
plot_img(atl, bg_img=background, axes=ax2, display_mode='z',
annotate=False, cut_coords=[c], threshold=0.1, cmap=mycolors,
interpolation='nearest', vmin=1, vmax=7)
if output_file is not None:
figure = plt.gcf()
figure.savefig(output_file, bbox_inches='tight')
plt.close(figure)
figure = None
return os.getcwd() + '/' + output_file
return [ax0, ax1], gs
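# --- Hypothetical usage sketch (added; the file paths and module labels below are
# placeholders, not files shipped with PUMI) ---
if __name__ == "__main__":
    example_modules = ["DMN", "DMN", "Visual", "Motor"]   # one label per ROI column in the TSV
    plot_carpet_ts(timeseries="regional_timeseries.tsv",
                   modules=example_modules,
                   atlas="atlas_labels_MNI_3mm.nii.gz",
                   output_file="regts.png")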
|
spisakt/PUMI
|
plot/timeseries.py
|
timeseries.py
|
py
| 5,187 |
python
|
en
|
code
| 4 |
github-code
|
50
|
2864635986
|
import turtle, random, time
def polygon(sides, length, color):
turtle.penup()
if sides == 4:
turtle.setposition(-length/2, -length/2)
elif sides == 3:
turtle.setposition(-(length/2), -(length/4*(3**(1/2))))
else:
turtle.setposition(-length/2, -length/2)
turtle.pendown()
turtle.color(color)
turtle.begin_fill()
for i in range(sides):
turtle.forward(length)
turtle.left(360//sides)
turtle.end_fill()
#input() # to stop the function
time.sleep(10)
def ddAlanHesaplama(a,b):
return a * b
def kareAlanHesaplama(a):
return ddAlanHesaplama(a,a)
#print("Dikdörtgen Alanı :", ddAlanHesaplama(1,3), "\nKare Alanı :", kareAlanHesaplama(3))
#polygon(4, 300, "darkred")
polygon(5, 100, "blue")
|
mehmetcoban13/deneme
|
deneme3.py
|
deneme3.py
|
py
| 783 |
python
|
en
|
code
| 0 |
github-code
|
50
|
8361252944
|
import argparse
import json
import sys
"""
Tries to link items within a datacrate by looking for values that reference
a known title or name
"""
parser = argparse.ArgumentParser()
parser.add_argument("infile", nargs="?", type=argparse.FileType("r"), default=sys.stdin)
parser.add_argument(
"outfile", nargs="?", type=argparse.FileType("w"), default=sys.stdout
)
parser.add_argument(
"-m", "--mapping", type=argparse.FileType("r"), help="JSON mapping file"
)
parser.add_argument("-n", "--no-link", default=False, action='store_true', help="Don't try to cross-link items (eg for Omeka S exports)")
parser.add_argument("-r", "--remove-omeka-namespace", default=False, action='store_true', help="Remove and keys from the Omeka S namespace after mapping")
args = vars(parser.parse_args())
catalog = json.load(args["infile"])
if args["mapping"]:
mapper = json.loads(args["mapping"].read())
else:
mapper = {}
# Build a table of names
names = {}
# Look up external mapping table and add new data
items_to_remove = []
for item in catalog["@graph"]:
keys = list(item.keys())
for k in keys:
if k in mapper:
item[mapper[k]] = item[k]
items_to_remove.append(k)
# Remove old data
for item in catalog["@graph"]:
for k in items_to_remove:
if k in item:
item.pop(k)
for k, v in item.items():
if not isinstance(v, list):
v = [v]
for val in v:
if k == "title" or k == "name":
if isinstance(val, dict):
if "@value" in val:
names[val["@value"]] = item["@id"]
elif "@label" in val:
names[val["@label"]] = item["@id"]
else:
names[val] = item["@id"]
# Cross-link values to known titles/names unless --no-link was given
if not args["no_link"]:
    for item in catalog["@graph"]:
        for k, v in item.items():
            if not isinstance(v, list):
                v = [v]
            if k not in ["title", "name", "@id", "@type"]:
                item[k] = []
                for val in v:
                    if str(val) in names:
                        item[k].append({"@id": names[val], "@label": val})
                    else:
                        item[k].append(val)
                if len(item[k]) == 1:
                    item[k] = item[k][0]
if args["remove_omeka_namespace"]:
for item in catalog["@graph"]:
to_remove = set()
for k, v in item.items():
if not isinstance(v, list):
v = [v]
if k.startswith("o:"):
to_remove.add(k)
for remove_key in to_remove:
item.pop(remove_key)
with args["outfile"] as new:
new.write(json.dumps(catalog, indent=2))
|
UTS-eResearch/omeka-datacrate-tools
|
doctor_datacrate.py
|
doctor_datacrate.py
|
py
| 2,735 |
python
|
en
|
code
| 1 |
github-code
|
50
|
37386530480
|
from typing import List, Optional
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from app.crud.base import CRUDBase
from app.models.charity_project import CharityProject
class CRUDCharityProject(CRUDBase):
async def get_charity_project_by_name(
self,
charity_project_name: str,
session: AsyncSession
) -> Optional[CharityProject]:
charity_project = await session.execute(
select(self.model).where(
self.model.name == charity_project_name
)
)
return charity_project.scalars().first()
async def get_projects_by_completion_rate(
self,
session: AsyncSession,
) -> List[CharityProject]:
projects = await session.execute(
select(CharityProject).where(
CharityProject.fully_invested
)
)
return sorted(
projects.scalars().all(),
key=lambda obj: obj.close_date - obj.create_date
)
charity_project_crud = CRUDCharityProject(CharityProject)
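# Hypothetical usage sketch (not part of the original module): both helpers are
# meant to be awaited with an active AsyncSession supplied by the application;
# the attribute names used below (name, close_date, create_date) come from the
# queries above.
async def _example_report(session: AsyncSession) -> None:
    closed_projects = await charity_project_crud.get_projects_by_completion_rate(session)
    for project in closed_projects:
        print(project.name, project.close_date - project.create_date)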
|
MrGorkiy/QRkot_spreadsheets
|
app/crud/charity_project.py
|
charity_project.py
|
py
| 1,105 |
python
|
en
|
code
| 0 |
github-code
|
50
|
11234697527
|
import logging
import time
from selenium import webdriver
from selenium.common.exceptions import *
from reali_web.base_action import base_actions_data
from reali_web.consumer_pages.sign.sign_in import sign_in_page
from reali_web.consumer_pages.buy.homes import homes_page
ba = base_actions_data
si = sign_in_page.SignInPage
hp = homes_page.HomesPage
class BaseFunctions:
@staticmethod
def base_navigation(driver: webdriver, url: str):
try:
driver.get(url)
            logging.info('navigation occurred')
            print(f'navigation occurred to {url}')
        except WebDriverException as err:
            logging.error('navigation did not occur')
            print(f'navigation did not occur to {url}')
raise err
@staticmethod
def base_login(driver: webdriver, url: str, email: str, password: str):
try:
BaseFunctions.base_navigation(driver, url)
hp.validate_navigation_to_homes_primary_page(driver)
hp.click_on_sign_in(driver)
assert si.validate_in_sign_in_page(driver)
si.send_keys_to_email(driver, email)
si.send_keys_to_password(driver, password)
si.click_on_sign_in_button_in_sign_in_page(driver)
assert hp.validate_navigation_to_homes_primary_page(driver)
logging.info('login was successful')
print('login was successful')
return True
except (AssertionError, NoSuchFrameException, ElementClickInterceptedException, NoSuchElementException) as err:
            logging.error('login was not initiated successfully')
            print('login was not initiated successfully')
raise err
@staticmethod
def tear_down(driver: webdriver):
try:
driver.quit()
time.sleep(5)
logging.info('closed the chrome instance')
print('closed the chrome instance')
except WebDriverException as err:
            logging.error('did not manage to close the chrome session')
            print('did not manage to close the chrome session')
raise err
@staticmethod
def handle_to_current_window(driver: webdriver):
try:
window_handle = driver.current_window_handle
logging.info('got handle_to_current_window')
print('got handle_to_current_window')
return window_handle
except NoSuchFrameException as err:
logging.error('did not get handle_to_current_window')
print('did not get handle_to_current_window')
raise err
@staticmethod
def get_windows_handles(driver: webdriver):
try:
time.sleep(3)
windows_list = driver.window_handles
logging.info('got all different windows handles')
print('got all different windows handles')
return windows_list
except NoSuchFrameException as err:
logging.error('did not get all different windows handles')
print('did not get all different windows handles')
raise err
@staticmethod
def switch_to_window(driver, handle):
try:
time.sleep(5)
driver.switch_to.window(handle)
logging.info('switch_to new window')
print('switch_to new window')
except InvalidSwitchToTargetException as err:
logging.error('did not managed to switch_to new window')
print('did not managed to switch_to new window')
raise err
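# Hypothetical usage sketch (not part of the original module): the helpers
# expect a live Selenium WebDriver instance; the URL and credentials below are
# placeholders, not values from this project.
#
#   driver = webdriver.Chrome()
#   BaseFunctions.base_login(driver, "https://example.com", "[email protected]", "password")
#   BaseFunctions.tear_down(driver)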
|
ErezShamay/WebAutomation
|
base_action/base_actions.py
|
base_actions.py
|
py
| 3,541 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22033385109
|
import ontoload as oL
# appends to a file the initial network map
def writeMap(fileString, networkMap):
# opens the file object to append
f = open(fileString, "a")
for i in range(networkMap.arrayYSize):
for j in range(networkMap.arrayXSize):
f.write(networkMap.modArray[i][j].name)
f.write(" Dir: ")
if networkMap.modArray[i][j].sensor == 'e':
f.write(" ")
else:
f.write(networkMap.modArray[i][j].sensorDir)
f.write(" ")
f.write("\n")
f.write("\n")
f.close()
#gets the text input for the rearrangement
def inputRead():
    # for testing purposes the input is read from a .txt file
fileName = "inputFile.txt"
f = open(fileName, "r")
# reads the input
response = f.read()
# print(response)
return response
# opens the text file and writes down the title
def writeMain(networkMap, fileStr):
f = open(fileStr, "w+")
f.write("This is the current map of the network;\n\n")
f.close()
writeMap(fileStr, networkMap)
# writes the introduction and changes the elements
def writeUpdated(fileStr, networkMap, response):
element, direction = networkMap.inputOntoCheck(response)
currentY, currentX, finalY, finalX = networkMap.changeModAndDir(element, direction)
introText = f"This is the updated map of the network, the {element} was requested to point {direction};\n"
modulePosText = f"The module was originally at ({currentX}, {currentY}) and was moved to ({finalX}, {finalY})\n\n"
f = open(fileStr, "a")
f.write(introText)
f.write(modulePosText)
f.close()
writeMap(fileStr, networkMap)
networkMap = oL.ontoNet()
networkMap.createNetwork()
networkMap.createOntoMap()
fileStr = "outputFile.txt"
writeMain(networkMap, fileStr)
response = inputRead()
writeUpdated(fileStr, networkMap, response)
|
atomicdork/FinalProject
|
inputMain.py
|
inputMain.py
|
py
| 1,970 |
python
|
en
|
code
| 0 |
github-code
|
50
|
3040956
|
def insertion_sort(l):
    # Insert each element into its correct position within the already-sorted prefix l[:i].
    for i in range(1, len(l)):
        pos = i
        curr_ele = l[i]
        # Shift larger elements one slot to the right until the insertion point is found.
        while pos > 0 and curr_ele < l[pos - 1]:
            l[pos] = l[pos - 1]
            pos = pos - 1
        l[pos] = curr_ele
l=list(map(int,input().split()))
insertion_sort(l)
print(l)
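# Minimal non-interactive check (illustrative addition, not in the original
# script): sort a fixed list in place with the same routine.
demo = [5, 2, 9, 1, 5, 6]
insertion_sort(demo)
print(demo)  # -> [1, 2, 5, 5, 6, 9]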
|
Indu1115/Searchings-and-Sortings
|
insertion_sort.py
|
insertion_sort.py
|
py
| 241 |
python
|
en
|
code
| 0 |
github-code
|
50
|
3638274714
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 9 06:13:31 2019
@author: [email protected]
"""
import sys
import os
import random
import collections
import operator
from collections import OrderedDict
import re, copy, pyperclip, simpleSubCipher, wordPatterns, makeWordPatterns
nonLettersOrSpacePattern = re.compile(r'[^A-Z\s]')  # raw string avoids an invalid-escape warning
#check to see if output file already exists
if os.path.exists(sys.argv[2]):
os.remove(sys.argv[2])
LETTERS = ' .ABCDEFGHIJKLMNOPQRSTUVWXYZ'
lettersList = set(LETTERS)
def main():
mostFreqToLeast = ' ETOARNIHSLDCUBGWFYPMK.XJQVZ'
first = ' ETOARNIHSLDCUBGWFYPMK.XJQVZ'
with open(sys.argv[1], 'r') as f_in:
message0 = f_in.read()
#print(contents)
letter_freq = {}
for i in message0.upper():
if i in letter_freq:
letter_freq[i] += 1
else:
letter_freq[i] = 1
#for key,val in letter_freq.items():
# print(key, "=>", val)
#print("--------------------")
matched_letters = {}
#remove all keys that do not match letter set
for (key,val) in letter_freq.items():
if key in lettersList:
#print(key)
matched_letters[key] =val
#for (key,val) in matched_letters.items():
# print(key, "=>", val)
#print("--------------------")
sortByVals = OrderedDict(sorted(matched_letters.items(), key=lambda x: x[1], reverse = True))
#a = sorted(matched_letters.keys())
#print(a)
#print()
#for k, v in sortByVals.items():
# print (k, ":" , v)
#print(sortByVals)
#sortByVals['G'] = 'F'
#print(sortByVals)
charsB = ''
#create list of letters from most freq to least
for k in sortByVals.keys():
#print(k)
charsB += k
#
#print("after swap:",new)
#
#for i in range(0, len(mostFreqToLeast)):
# if(mostFreqToLeast[i] == 'B'):
# index1 = i
# elif(mostFreqToLeast[i] == ebook[1].upper()):
# index2 = i
#
#print(mostFreqToLeast)
#new1 = swap(mostFreqToLeast, index1,index2)
#print(new1)
#for i in range(0, len(new1)):
# if(new1[i] == 'O'):
# index1 = i
# elif(new1[i] == ebook[2].upper()):
# index2 = i
#
#new2 = swap(new1, index1,index2)
#for i in range(0, len(new2)):
# if(new2[i] == 'K'):
# index1 = i
# elif(new2[i] == ebook[4].upper()):
# index2 = i
#
#new3 = swap(new2, index1,index2)
#for i in range(0, len(new3)):
# if(new3[i] == ' '):
# index1 = i
# elif(new3[i] == ebook[5].upper()):
# index2 = i
#
#new4 = swap(new3, index1,index2)
a = list(sortByVals.keys())
#charsB = a
message = ''
charsA = mostFreqToLeast
#charsB = list(sortByVals)
charsA, charsB = charsB, charsA
# Loop through each symbol in message:
for symbol in message0:
if symbol.upper() in charsA:
# Encrypt/decrypt the symbol:
symIndex = charsA.find(symbol.upper())
if symbol.isupper():
message += charsB[symIndex].upper()
else:
message += charsB[symIndex].lower()
else:
# Symbol is not in LETTERS; just add it
message += symbol
#
##print(translated)
#with open(translated, "a") as f_out:
# f_out.write(translated)
#translated = message
ebook = ''
for i in range(0, len(message)):
if(message[i] == '#'):
ebook += message[i-6]
ebook += message[i-5]
ebook += message[i-4]
ebook += message[i-3]
ebook += message[i-2]
ebook += message[i-1]
hashy = i
break
web = ''
for i in range(0, len(message)):
if((message[i] == '/') and (message[i+1] == '/')
and message[i-1] == ':'):
web += message[i+2]
web += message[i-2]
web += message[i-3]
web += message[i-4]
web += message[i+5]
break
star = ''
for i in range(0, len(message)):
if(message[i] == '*' and message[i+1] == '*'
and message[i+2] == '*'):
star += message[i+4] #S
star += message[i+5] #T
star += message[i+6] #A star[2]
star += message[i+7] #R
star += message[i+8] #T
star += message[i+15] #I
star += message[i+14] #H
star += message[i+11] #F
star += message[i+21] #j
star += message[i+23] #c
star += message[i+26] #g
break
for i in range(0, len(mostFreqToLeast)):
if(mostFreqToLeast[i].upper() == 'S'):
ii=i
if(mostFreqToLeast[i].upper() == star[0].upper()):
jj=i
mostFreqToLeast = swap(mostFreqToLeast,ii,jj)
for i in range(0, len(mostFreqToLeast)):
if(mostFreqToLeast[i].upper() == 'T'):
ii=i
if(mostFreqToLeast[i].upper() == star[1].upper()):
jj=i
mostFreqToLeast = swap(mostFreqToLeast,ii,jj)
for i in range(0, len(mostFreqToLeast)):
if(mostFreqToLeast[i].upper() == 'A'):
ii=i
if(mostFreqToLeast[i].upper() == star[2].upper()):
jj=i
mostFreqToLeast = swap(mostFreqToLeast,ii,jj)
for i in range(0, len(mostFreqToLeast)):
if(mostFreqToLeast[i].upper() == 'R'):
ii=i
if(mostFreqToLeast[i].upper() == star[3].upper()):
jj=i
mostFreqToLeast = swap(mostFreqToLeast,ii,jj)
for i in range(0, len(mostFreqToLeast)):
if(mostFreqToLeast[i].upper() == 'B'):
ii=i
if(mostFreqToLeast[i].upper() == ebook[1].upper()):
jj=i
mostFreqToLeast = swap(mostFreqToLeast,ii,jj)
#for i in range(0, len(mostFreqToLeast)):
# if(mostFreqToLeast[i].upper() == 'O'):
# ii=i
# if(mostFreqToLeast[i].upper() == ebook[2].upper()):
# jj=i
#mostFreqToLeast = swap(mostFreqToLeast,ii,jj)
#for i in range(0, len(mostFreqToLeast)):
# if(mostFreqToLeast[i].upper() == 'F'):
# ii=i
# if(mostFreqToLeast[i].upper() == star[7].upper()):
# jj=i
#mostFreqToLeast = swap(mostFreqToLeast,ii,jj)
for i in range(0, len(mostFreqToLeast)):
if(mostFreqToLeast[i].upper() == 'K'):
ii=i
if(mostFreqToLeast[i].upper() == ebook[4].upper()):
jj=i
mostFreqToLeast = swap(mostFreqToLeast,ii,jj)
print(first)
print(mostFreqToLeast)
#print(mostFreqToLeast)
#print(mostFreqToLeast)
print(star[2])
#
#newtrans = ''
#for i in translated:
# if(i.upper() == ebook[1].upper()):
# if i.isupper():
# newtrans += 'B'
# else:
# newtrans += 'b'
# elif(i.upper() == ebook[0].upper()):
# if i.isupper():
# newtrans += 'E'
# else:
# newtrans += 'e'
# elif(i.upper() == ebook[2].upper()):
# if i.isupper():
# newtrans += 'O'
# else:
# newtrans += 'o'
# elif(i.upper() == ebook[4].upper()):
# if i.isupper():
# newtrans += 'K'
# else:
# newtrans += 'k'
# elif(i.upper() == web[0].upper()):
# if i.isupper():
# newtrans += 'W'
# else:
# newtrans += 'w'
# elif(i.upper() == web[1].upper()):
# if i.isupper():
# newtrans += 'P'
# else:
# newtrans += 'p'
# elif(i.upper() == web[2].upper()):
# if i.isupper():
# newtrans += 'T'
# else:
# newtrans += 't'
# #elif(i.upper() == web[3].upper()):
# # if i.isupper():
# # newtrans += 'H'
# # else:
# # newtrans += 'h'
# elif(i.upper() == web[4].upper()):
# if i.isupper():
# newtrans += '.'
# else:
# newtrans += '.'
# elif(i.upper() == star[0].upper()):
# if i.isupper():
# newtrans += 'S'
# else:
# newtrans += 's'
# elif(i.upper() == star[2].upper()):
# if i.isupper():
# newtrans += 'A'
# else:
# newtrans += 'a'
# elif(i.upper() == star[3].upper()):
# if i.isupper():
# newtrans += 'R'
# else:
# newtrans += 'r'
# elif(i.upper() == star[5].upper()):
# if i.isupper():
# newtrans += 'I'
# else:
# newtrans += 'i'
# elif(i.upper() == star[6].upper()):
# if i.isupper():
# newtrans += 'H'
# else:
# newtrans += 'h'
# elif(i.upper() == star[7].upper()):
# if i.isupper():
# newtrans += 'F'
# else:
# newtrans += 'f'
# elif(i.upper() == star[8].upper()):
# if i.isupper():
# newtrans += 'J'
# else:
# newtrans += 'j'
# elif(i.upper() == star[9].upper()):
# if i.isupper():
# newtrans += 'C'
# else:
# newtrans += 'c'
# elif(i.upper() == star[10].upper()):
# if i.isupper():
# newtrans += 'G'
# else:
# newtrans += 'g'
# elif(i.upper() == charsB[0].upper()):
# if i.isupper():
# newtrans += ' '
# else:
# newtrans += ' '
#
# else:
# newtrans += i
#
translated = ''
charsA = mostFreqToLeast
#charsB = list(sortByVals)
charsA, charsB = charsB, charsA
# Loop through each symbol in message:
for symbol in message:
if symbol.upper() in charsA:
# Encrypt/decrypt the symbol:
symIndex = charsA.find(symbol.upper())
if symbol.isupper():
translated += charsB[symIndex].upper()
else:
translated += charsB[symIndex].lower()
else:
# Symbol is not in LETTERS; just add it
translated += symbol
# for x in range(0, len(newtrans)):
# if(newtrans[hashy-x] == ':' and (newtrans[hashy-x-13]=='\\n')):
# print(newtrans[hashy-x-1])
# print(newtrans[hashy-x-2])
# print(newtrans[hashy-x-3])
# print(newtrans[hashy-x-4])
# print(newtrans[hashy-x-5])
#
# print(newtrans[hashy-x-6])
# print(newtrans[hashy-x-7])
# print(newtrans[hashy-x-8])
# print(newtrans[hashy-x-9])
# print(newtrans[hashy-x-10])
#
# print(newtrans[hashy-x-11])
# print(newtrans[hashy-x-12])
# print(newtrans[hashy-x-13])
# break
#print("hello")
##print(a)
#
#
#
#
#for i in range(0, len(message)):
# if(message[i] == '#'):
# print(message[i-1],message[i-2],message[i-3],message[i-4],message[i-5],message[i-6])
#
# Determine the possible valid ciphertext translations:
# print('Hacking...')
#letterMapping = hackSimpleSub(newtrans)
#
# # Display the results to the user:
# print('Mapping:')
# print(letterMapping)
# print()
#
# print('Original ciphertext:')
# print(translated)
# print()
# print('Copying hacked message to clipboard:')
#hackedMessage = decryptWithCipherletterMapping(newtrans, letterMapping)
# pyperclip.copy(hackedMessage)
# print(hackedMessage)
with open(sys.argv[2], 'a') as f_out:
print(translated, file=f_out)
#print("before swap:", mostFreqToLeast)
#
##print(ebook.upper())
#
#index1 = 0
#index2 = 0
#
#for i in range(0, len(mostFreqToLeast)):
# if(mostFreqToLeast[i] == 'E'):
# index1 = i
# elif(mostFreqToLeast[i] == ebook[0].upper()):
# index2 = i
#new = swap(mostFreqToLeast, index1,index2)
#
#translated = ''
#charsA = mostFreqToLeast
##charsB = list(sortByVals)
#charsA, charsB = charsB, charsA
#
## Loop through each symbol in message:
##for symbol in message:
## if symbol.upper() in charsA:
## # Encrypt/decrypt the symbol:
## symIndex = charsA.find(symbol.upper())
# if symbol.isupper():
# translated += charsB[symIndex].upper()
# else:
# translated += charsB[symIndex].lower()
# else:
# # Symbol is not in LETTERS; just add it
# translated += symbol
#with open(sys.argv[2], 'a') as f_out:
# print(translated, file=f_out)
def swap(s, i, j):
lst = list(s)
lst[i], lst[j] = lst[j], lst[i]
return ''.join(lst)
def getBlankCipherletterMapping():
# Returns a dictionary value that is a blank cipherletter mapping.
return {'A': [], 'B': [], 'C': [], 'D': [], 'E': [], 'F': [], 'G': [], 'H': [], 'I': [], 'J': [], 'K': [], 'L': [], 'M': [], 'N': [], 'O': [], 'P': [], 'Q': [], 'R': [], 'S': [], 'T': [], 'U': [], 'V': [], 'W': [], 'X': [], 'Y': [], 'Z': [], ' ': [], '.': []}
def addLettersToMapping(letterMapping, cipherword, candidate):
# The `letterMapping` parameter is a "cipherletter mapping" dictionary
# value that the return value of this function starts as a copy of.
# The `cipherword` parameter is a string value of the ciphertext word.
# The `candidate` parameter is a possible English word that the
# cipherword could decrypt to.
# This function adds the letters of the candidate as potential
# decryption letters for the cipherletters in the cipherletter
# mapping.
for i in range(len(cipherword)):
if candidate[i] not in letterMapping[cipherword[i]]:
letterMapping[cipherword[i]].append(candidate[i])
def intersectMappings(mapA, mapB):
# To intersect two maps, create a blank map, and then add only the
# potential decryption letters if they exist in BOTH maps.
intersectedMapping = getBlankCipherletterMapping()
for letter in LETTERS:
# An empty list means "any letter is possible". In this case just
# copy the other map entirely.
if mapA[letter] == []:
intersectedMapping[letter] = copy.deepcopy(mapB[letter])
elif mapB[letter] == []:
intersectedMapping[letter] = copy.deepcopy(mapA[letter])
else:
# If a letter in mapA[letter] exists in mapB[letter], add
# that letter to intersectedMapping[letter].
for mappedLetter in mapA[letter]:
if mappedLetter in mapB[letter]:
intersectedMapping[letter].append(mappedLetter)
return intersectedMapping
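# Illustrative check of the mapping intersection (hypothetical, not in the
# original script): if one candidate mapping says X -> H (from cipherword 'XY'
# decrypting to 'HE') and another also allows X -> H (from 'XZ' -> 'HI'),
# intersecting them keeps X -> ['H'], while letters constrained by only one
# map keep that map's candidates.
#
#   mapA = getBlankCipherletterMapping(); addLettersToMapping(mapA, 'XY', 'HE')
#   mapB = getBlankCipherletterMapping(); addLettersToMapping(mapB, 'XZ', 'HI')
#   intersectMappings(mapA, mapB)['X']   # -> ['H']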
def removeSolvedLettersFromMapping(letterMapping):
# Cipherletters in the mapping that map to only one letter are
# "solved" and can be removed from the other letters.
# For example, if 'A' maps to potential letters ['M', 'N'], and 'B'
# maps to ['N'], then we know that 'B' must map to 'N', so we can
# remove 'N' from the list of what 'A' could map to. So 'A' then maps
# to ['M']. Note that now that 'A' maps to only one letter, we can
# remove 'M' from the list of letters for every other
# letter. (This is why there is a loop that keeps reducing the map.)
loopAgain = True
while loopAgain:
# First assume that we will not loop again:
loopAgain = False
# `solvedLetters` will be a list of uppercase letters that have one
# and only one possible mapping in `letterMapping`:
solvedLetters = []
for cipherletter in LETTERS:
if len(letterMapping[cipherletter]) == 1:
solvedLetters.append(letterMapping[cipherletter][0])
        # If a letter is solved, then it cannot possibly be a potential
# decryption letter for a different ciphertext letter, so we
# should remove it from those other lists:
for cipherletter in LETTERS:
for s in solvedLetters:
if len(letterMapping[cipherletter]) != 1 and s in letterMapping[cipherletter]:
letterMapping[cipherletter].remove(s)
if len(letterMapping[cipherletter]) == 1:
# A new letter is now solved, so loop again.
loopAgain = True
return letterMapping
def hackSimpleSub(message):
intersectedMap = getBlankCipherletterMapping()
cipherwordList = nonLettersOrSpacePattern.sub('', message.upper()).split()
for cipherword in cipherwordList:
# Get a new cipherletter mapping for each ciphertext word:
candidateMap = getBlankCipherletterMapping()
wordPattern = makeWordPatterns.getWordPattern(cipherword)
if wordPattern not in wordPatterns.allPatterns:
continue # This word was not in our dictionary, so continue.
# Add the letters of each candidate to the mapping:
for candidate in wordPatterns.allPatterns[wordPattern]:
addLettersToMapping(candidateMap, cipherword, candidate)
# Intersect the new mapping with the existing intersected mapping:
intersectedMap = intersectMappings(intersectedMap, candidateMap)
# Remove any solved letters from the other lists:
return removeSolvedLettersFromMapping(intersectedMap)
def decryptWithCipherletterMapping(ciphertext, letterMapping):
# Return a string of the ciphertext decrypted with the letter mapping,
# with any ambiguous decrypted letters replaced with an _ underscore.
# First create a simple sub key from the letterMapping mapping:
key = ['x'] * len(LETTERS)
for cipherletter in LETTERS:
if len(letterMapping[cipherletter]) == 1:
# If there's only one letter, add it to the key.
keyIndex = LETTERS.find(letterMapping[cipherletter][0])
key[keyIndex] = cipherletter
else:
ciphertext = ciphertext.replace(cipherletter.lower(), '_')
ciphertext = ciphertext.replace(cipherletter.upper(), '_')
key = ''.join(key)
# With the key we've created, decrypt the ciphertext:
return simpleSubCipher.decryptMessage(key, ciphertext)
if __name__ == '__main__':
main()
|
liemthanhho/FS2019Security
|
pa02/betterSubCrack.py
|
betterSubCrack.py
|
py
| 18,650 |
python
|
en
|
code
| 0 |
github-code
|
50
|
16397307831
|
import pandas as pd
from sqlalchemy import create_engine
db_uri = 'mysql+pymysql://root:Cooperboy0071985@localhost/mysql7'
engine = create_engine(db_uri, echo=False) # enter your password and database names here
df = pd.read_csv(r'C:\Absenteeism_predictions2.csv', sep=',', quotechar='\'', encoding='utf8')  # raw string keeps the Windows path backslash literal
print(df)
sample_sql_database = df.to_sql('sample_database3', con=engine,index=False,if_exists='append')
sample_sql_database = engine.execute("SELECT * FROM sample_database3").fetchall()
print(sample_sql_database)
#df.to_sql('Table_name',con=engine,index=False,if_exists='append') # Replace Table_name with your sql table name
|
rocooper7/6.CodFac
|
1.ORM_MySQL/alchemysql/Ejemplos_Simples/carga_simple.py
|
carga_simple.py
|
py
| 638 |
python
|
en
|
code
| 0 |
github-code
|
50
|
73085357595
|
from ariadne import MutationType
from api.models import *
from api.types import *
import io
from api.models.pytorch import *
import soundfile as sf
import numpy as np
mutation = MutationType()
@mutation.field("classifyHeatBeatSound")
async def create_session_resolver(obj, info, input):
try:
ext = "."+str(input['audio'].filename).split('.')[-1]
if not ext in allowed_extensions:
return {
"ok" : False,
"error": {
"field" : 'audio',
"message" : f'Only audios with extensions ({", ".join(allowed_extensions)}) are allowed.'
}
}
audio = await input['audio'].read()
audio = io.BytesIO(audio)
waveform, samplerate = sf.read(file=audio, dtype='float32')
waveform = torch.from_numpy(np.array([waveform]))
res = predict_sound(hbsc_model, waveform, device)
return {
'ok': True,
'prediction': res.to_json()
}
except Exception as e:
print(e)
return {
"ok": False,
"error":{
"field": 'server',
'message': "Something went wrong on the server."
}
}
|
CrispenGari/HBSC
|
server/api/resolvers/mutations/__init__.py
|
__init__.py
|
py
| 1,237 |
python
|
en
|
code
| 2 |
github-code
|
50
|
6956852359
|
#removeAndSaveListElements.py
list1 = ["red", "blue", "orange", "black", "white", "golden"]
list2 = ["nose", "ice", "fire", "cat", "mouse", "dog"]
print("lists before deletion: ")
len_list1 = len(list1)
len_list2 = len(list2)
if len_list1 == len_list2:
for i in range(len_list1):
print(list1[i], "\t", list2[i])
#print("\n")
print()
list1_res = list1[0]
list2_res = list2[5]
list1.remove(list1_res)
list2.remove(list2_res)
len_list1 = len(list1)
len_list2 = len(list2)
print("lists after deletion: ")
if len_list1 == len_list2:
for i in range(len_list1):
print(list1[i],"\t", list2[i])
print()
print("Residual 1\tResidual 2")
print(list1_res + "\t\t" + list2_res)
|
MattKrepp1/Econ-411
|
Learn-Python-for-Stats-and-Econ-master/In Class Projects/In Class Examples Spring 2019/Section 2/removeAndSaveListElements.py
|
removeAndSaveListElements.py
|
py
| 692 |
python
|
en
|
code
| 0 |
github-code
|
50
|
30980467641
|
import os
import zipfile
from .preprocessor import Preprocessor
class ZipFeedback(Preprocessor):
def __init__(self):
super(ZipFeedback, self).__init__()
def preprocess(self, path, resources):
self.feedback_zip = os.path.split(resources['feedback_zip'])[-1]
self.src = path
self.dst = os.path.join(resources['path'], self.feedback_zip)
with zipfile.ZipFile(self.dst, 'w') as zf:
for root, _, files in os.walk(self.src):
for file in files:
src = os.path.join(root, file)
arcname = os.path.join(os.path.splitext(self.feedback_zip)[0], os.path.relpath(src, start=self.src))
zf.write(src, arcname)
return resources['path'], resources
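# Hypothetical usage sketch (not part of the original module): `preprocess`
# zips everything under `path` into resources['path']/<feedback_zip>; the
# directory and file names below are placeholders.
#
#   zipper = ZipFeedback()
#   out_path, resources = zipper.preprocess(
#       "submitted/feedback",
#       {"feedback_zip": "feedback.zip", "path": "exchange/outbound"},
#   )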
|
DigiKlausur/ilias2nbgrader
|
ilias2nbgrader/preprocessors/zipfeedback.py
|
zipfeedback.py
|
py
| 798 |
python
|
en
|
code
| 2 |
github-code
|
50
|
17149426371
|
from concurrent.futures import ProcessPoolExecutor
import sys
import numpy as np
from pycocotools.cocoeval import COCOeval
import torch
import torch.distributed as dist
import utils
from dataloaders.dataloader import create_eval_dataloader
from dataloaders.prefetcher import eval_prefetcher
import config
from box_coder import build_ssd300_coder
class Evaluator:
def __init__(self, config):
self.config = config
self.eval_count = 0
self._dataloader = None
self.fetch_dataloader()
self.ret = []
self.overlap_threshold = 0.50
self.nms_max_detections = 200
self.encoder = build_ssd300_coder(config.fast_nms)
def fetch_dataloader(self):
if self._dataloader is None:
self._dataloader, self.inv_map, self.cocoGt = create_eval_dataloader(config)
return self._dataloader
def evaluate_coco(self, final_results, cocoGt):
if self.config.use_coco_ext:
cocoDt = cocoGt.loadRes(final_results, use_ext=True)
E = COCOeval(cocoGt, cocoDt, iouType='bbox', use_ext=True)
else:
cocoDt = cocoGt.loadRes(final_results)
E = COCOeval(cocoGt, cocoDt, iouType='bbox')
E.evaluate()
E.accumulate()
E.summarize()
print("Current AP: {:.5f} AP".format(E.stats[0]))
return E.stats[0]
def evaluate(self, trainer):
self.eval_count += 1
eval_dataloader = eval_prefetcher(iter(self._dataloader),
torch.cuda.current_device(),
config.pad_input,
config.nhwc,
config.fp16)
trainer.model.eval()
ret = []
with torch.no_grad():
for batch in eval_dataloader:
img, img_id, img_size = batch
_, ploc, plabel = trainer.inference(img)
# torch.save({
# "bbox": ploc,
# "scores": plabel,
# "criteria": self.overlap_threshold,
# "max_output": self.nms_max_detections,
# }, "decode_inputs_{}.pth".format(config.local_rank))
# exit()
for idx in range(ploc.shape[0]):
# ease-of-use for specific predictions
ploc_i = ploc[idx, :, :].unsqueeze(0)
plabel_i = plabel[idx, :, :].unsqueeze(0)
result = self.encoder.decode_batch(ploc_i, plabel_i, self.overlap_threshold, self.nms_max_detections)[0]
htot, wtot = img_size[0][idx].item(), img_size[1][idx].item()
loc, label, prob = [r.cpu().numpy() for r in result]
for loc_, label_, prob_ in zip(loc, label, prob):
ret.append([img_id[idx], loc_[0] * wtot, \
loc_[1] * htot,
(loc_[2] - loc_[0]) * wtot,
(loc_[3] - loc_[1]) * htot,
prob_,
self.inv_map[label_]])
trainer.model.train()
ret = np.array(ret).astype(np.float32)
if self.config.distributed:
ret_copy = torch.tensor(ret).cuda()
ret_sizes = [torch.tensor(0).cuda() for _ in range(config.n_gpu)]
torch.distributed.all_gather(ret_sizes, torch.tensor(ret_copy.shape[0]).cuda())
max_size = 0
sizes = []
for s in ret_sizes:
max_size = max(max_size, s.item())
sizes.append(s.item())
ret_pad = torch.cat([ret_copy, torch.zeros(max_size - ret_copy.shape[0], 7, dtype=torch.float32).cuda()])
other_ret = [torch.zeros(max_size, 7, dtype=torch.float32).cuda() for i in range(config.n_gpu)]
torch.distributed.all_gather(other_ret, ret_pad)
cat_tensors = []
for i in range(config.n_gpu):
cat_tensors.append(other_ret[i][:sizes[i]][:])
final_results = torch.cat(cat_tensors).cpu().numpy()
else:
final_results = ret
if utils.is_main_process():
eval_ap = self.evaluate_coco(final_results, self.cocoGt)
return eval_ap
else:
return 0
|
Deep-Spark/DeepSparkHub
|
cv/detection/ssd/pytorch/base/train/evaluator.py
|
evaluator.py
|
py
| 4,399 |
python
|
en
|
code
| 28 |
github-code
|
50
|
589528131
|
def findsumofdiv(n):
    # Sum of all proper divisors of n (divisors strictly smaller than n).
teiler = []
summe = 0
for i in range(1, n):
if n % i == 0:
teiler.append(i)
for i in range(len(teiler)):
summe += teiler[i]
return summe
def amicable_numbers(eingabe):
    # Sum all amicable numbers below `eingabe`: a and b are amicable when
    # findsumofdiv(a) == b, findsumofdiv(b) == a and a != b.
numbers = []
for j in range(1, eingabe):
x = findsumofdiv(j)
if j == findsumofdiv(x) and j != x:
numbers.append(j)
ergebnis = 0
for a in range(len(numbers)):
ergebnis += numbers[a]
return ergebnis
print(amicable_numbers(20000))
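# Quick sanity check (illustrative addition, not in the original script): the
# classic amicable pair is (220, 284), so the divisor-sum helper should map
# each number of the pair to the other.
print(findsumofdiv(220), findsumofdiv(284))  # -> 284 220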
|
h3Nn35/ProjectEuler
|
21 Amicable_Numbers/Amicable_Numbers.py
|
Amicable_Numbers.py
|
py
| 525 |
python
|
de
|
code
| 0 |
github-code
|
50
|
35705183638
|
import networkx as nx
import matplotlib.pyplot as plt
from random import randint, randrange, shuffle
class GraphGen:
BORNEMIN = 1
BORNEMAX = 10
def __init__(self, min, max):
"""Class generator"""
self.BORNEMIN = min
self.BORNEMAX = max
#Generation for test purposes
def generate_test_sample_1(self, graph):
for n in range(1, 8):
graph.add_node(str(n), pos=1, dist=9999, visited=False)
graph.add_edge("1", "2", weight=3)
graph.add_edge("1", "7", weight=2)
graph.add_edge("2", "7", weight=4)
graph.add_edge("2", "3", weight=2)
graph.add_edge("3", "4", weight=2)
graph.add_edge("3", "5", weight=5)
graph.add_edge("5", "6", weight=1)
return graph
#Generation for test purposes
def generate_test_sample_2(self, graph, cities):
for n in range(len(cities)):
graph.add_node(cities[n], pos=(
randint(1, 1000), randint(1, 1000)), dist=9999, visited=False)
for i in range(3):
graph.add_edge(cities[0],cities[i], weight=randint(self.BORNEMIN, self.BORNEMAX))
graph.add_edge(cities[len(cities)-1],cities[len(cities)-i-1], weight=randint(self.BORNEMIN, self.BORNEMAX))
for j in range(1, len(cities)-1):
for k in range(1, len(cities)-1):
graph.add_edge(cities[j], cities[k], weight=randint(self.BORNEMIN, self.BORNEMAX))
return graph
    #Graph where each node randomly gets one or two outgoing edges with random weights
def generate_cities(self, graph, cities):
for n in range(len(cities)):
graph.add_node(cities[n], pos=(
randint(1, 1000), randint(1, 1000)), dist=9999, visited=False)
for c_node in range(len(cities)):
for ite in range(randrange(1,3)):
while True:
target_node = randrange(len(cities))
#Avoid self edges and multiple edges on the same nodes
if(target_node != c_node and not (graph.has_edge(cities[c_node], cities[target_node]))): break
graph.add_edge(cities[c_node],cities[target_node], weight=randint(self.BORNEMIN, self.BORNEMAX))
return graph
#Switch statement for which generation method to choose
def gen_method(self, method, graph, **kwargs):
json = kwargs.get("json", None)
if(method == "test_sample_1"):
self.generate_test_sample_1(graph)
elif(method == "test_sample_2"):
self.generate_test_sample_2(graph, json)
elif(method == "cities"):
while True:
self.generate_cities(graph, json)
if nx.is_connected(graph): break
return graph
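# Hypothetical usage sketch (not part of the original module): build a small
# connected "cities" graph and print its weighted edges. The city names are
# made up for illustration; `nx` comes from the imports above.
if __name__ == "__main__":
    cities = ["Paris", "Lyon", "Nice", "Lille", "Nantes"]
    generator = GraphGen(1, 10)
    graph = generator.gen_method("cities", nx.Graph(), json=cities)
    for u, v, data in graph.edges(data=True):
        print(f"{u} -- {v} (weight={data['weight']})")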
|
thomasbarrepitous/Dijkstra_Example
|
generation.py
|
generation.py
|
py
| 2,768 |
python
|
en
|
code
| 0 |
github-code
|
50
|
15974308023
|
from pathGenerator import *
class BFS:
x_cord = [0, 1, 0, -1]
y_cord = [1, 0, -1, 0]
def __init__(self,matrix, startX, startY,goalX,goalY,grid,n,size):
self.grid = grid
self.x = startX
self.y = startY
self.goal_x = goalX
self.goal_y = goalY
self.matrix = matrix
self.size = n
self.grid_size = size
def bfs(self,visited,x,y,p):
queue = []
queue.append([x,y])
visited[x][y] = 1
while queue:
d = queue.pop(0)
for i in range(0,4):
x_temp = BFS.x_cord[i] + d[0]
y_temp = BFS.y_cord[i] + d[1]
if x_temp == self.goal_x and y_temp == self.goal_y:
return True
if (x_temp >=0 and x_temp < self.size and y_temp >=0 and y_temp <self.size
and visited[x_temp][y_temp] == 0 and self.matrix[x_temp][y_temp]==1):
p.mark_path(x_temp,y_temp)
visited[x_temp][y_temp]=1
queue.append([x_temp,y_temp])
return False
def bfsUtil(self):
visited = [[0 for x in range(self.size)] for y in range(self.size)]
p = PathGenerator(self.grid,self.grid_size)
self.bfs(visited,self.x,self.y,p)
def solve(self):
self.bfsUtil()
|
pranjalvithlani/maze-runner
|
BFS.py
|
BFS.py
|
py
| 1,355 |
python
|
en
|
code
| 1 |
github-code
|
50
|
23952412165
|
def example(Simulator):
from csdl import Model, GraphRepresentation
class ExampleImplicit2(Model):
def initialize(self):
self.parameters.declare('nlsolver')
def define(self):
# define internal model that defines a residual
from csdl import NewtonSolver, ScipyKrylov
solver_type = self.parameters['nlsolver']
quadratic = Model()
a = quadratic.declare_variable('a')
b = quadratic.declare_variable('b')
c = quadratic.declare_variable('c')
x = quadratic.declare_variable('x')
u = quadratic.declare_variable('u')
# test_var = x**2
# quadratic.register_output('test_var', test_var*2.0)
# temp = quadratic.declare_variable('temp')
# quadratic.connect(test_var.name, 'temp')
# ax2 = a*temp
# quadratic.register_output('t', a*1.0)
ax2 = a * x**2
au2 = a * u**2
y = x - (-ax2 - c) / b
v = u - (-au2 - c * 2) / b
quadratic.register_output('y', y)
quadratic.register_output('v', v)
# from csdl_om import Simulator
# sim = Simulator(quadratic)
# sim.visualize_implementation()
# exit()
# SOLUTION: x [0.38742589]
# SOLUTION: u [0.66666667]
solve_quadratic = self.create_implicit_operation(quadratic)
if solver_type == 'bracket':
solve_quadratic.declare_state('x',
residual='y',
val=0.34,
bracket=(0, 0.5))
solve_quadratic.declare_state('u',
residual='v',
val=0.4,
bracket=(0, 1.0))
else:
solve_quadratic.declare_state('x', residual='y', val=0.34)
solve_quadratic.declare_state('u', residual='v', val=0.4)
if solver_type == 'newton':
solve_quadratic.nonlinear_solver = NewtonSolver(
solve_subsystems=False)
else:
raise ValueError(
f'solver type {solver_type} is unknown.')
# solve_quadratic.linear_solver = csdl.LinearBlockGS()
solve_quadratic.linear_solver = ScipyKrylov()
aa = self.create_input('a', val=1.5)
bb = self.create_input('b', val=2.0)
cc = self.create_input('c', val=-1.0)
xx, uu = solve_quadratic(aa, bb, cc)
self.register_output('f', xx * 3.0 + uu * 3.0 + 0.5 * aa)
rep = GraphRepresentation(ExampleImplicit2())
sim = Simulator(rep)
sim.run()
return sim, rep
|
LSDOlab/csdl
|
csdl/examples/valid/ex_promotions_implicit2.py
|
ex_promotions_implicit2.py
|
py
| 3,010 |
python
|
en
|
code
| 5 |
github-code
|
50
|
72051187675
|
import requests
import pandas as pd
def fetch_bitcoin_data(api_key):
url = f'https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_DAILY&symbol=BTC&market=USD&apikey={api_key}'
response = requests.get(url)
data = response.json()
# Process the JSON data into a pandas DataFrame
df = pd.DataFrame.from_dict(data['Time Series (Digital Currency Daily)'], orient='index')
df = df.rename(columns={
'1a. open (USD)': 'Open',
'2a. high (USD)': 'High',
'3a. low (USD)': 'Low',
'4a. close (USD)': 'Close',
'5. volume': 'Volume'
}).astype(float)
df.index = pd.to_datetime(df.index)
return df
if __name__ == '__main__':
# Replace 'your_api_key' with your actual Alpha Vantage API key
api_key = 'N8IN2LVTV9ZJQ71V'
bitcoin_df = fetch_bitcoin_data(api_key)
print(bitcoin_df.head())
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
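# Hypothetical follow-up sketch (not part of the original script): derive daily
# simple returns from the close prices of the DataFrame built above; 'Close' is
# the column renamed in fetch_bitcoin_data.
if __name__ == '__main__':
    returns = bitcoin_df['Close'].sort_index().pct_change().dropna()
    print(returns.tail())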
|
stenuuesoo/beatifulbtc
|
main.py
|
main.py
|
py
| 933 |
python
|
en
|
code
| 0 |
github-code
|
50
|
18727133010
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
execute operations between columns and save in a given column
"""
import my_functions as mf
import numpy as np
import pandas as pd
import re
def parse(argv):
"""
    This function accepts a list of strings, creates and fills a parser instance
    and returns a populated namespace
Parameters
----------
argv: list of strings
list to be parsed
output: namespace
---------
"""
import argparse as ap
import argparse_custom as apc
description = """Execute operations between columns and save in a column.
Example:
c3+c6-1: add column 3 to column 6 and subtract 1.
"""
p = ap.ArgumentParser(description=description,
#formatter_class=apc.RawDescrArgDefHelpFormatter)
formatter_class=ap.ArgumentDefaultsHelpFormatter)
p.add_argument("operation", action="store",
help="""Operation to execute between columns.""")
p.add_argument("to_col", action="store", type=int,
help="""Column where the operation is saved. If it is larger than
the number of columns, the result is appended""")
p.add_argument("ifname", nargs='+', action=apc.file_exists(),
help="""Input file name(s)""")
p = apc.version_verbose(p, '0.1')
p.add_argument("-s", "--substitute", action="store", type=float,
help="""If given, substitutes the content of the columns used for
the operation with '%(dest)s'. This is executed before copying the
result of the operation to the desired column""")
p, group = apc.insert_or_replace(p)
p, group = apc.overwrite_or_skip(p)
p, pandas = apc.pandas_group(p)
p.add_argument("--fmt", default="%7.6e", action=apc.StoreFmt, nargs='+',
help="Format of the output files.")
description = """Parameters related to the parallel computation"""
p, parallel = apc.parallel_group( p, description=description )
return p.parse_args(args=argv)
#end def parse(argv):
def columns_operations(f, operations, to_column, **kwargs):
"""
    read the file, perform the desired operations between columns, and write
    the result to a file.
Signature:
columns_operations("file.dat", 'c3+c6-1', 4)
#read file, add content of column 3 and column 6, subtract 1 and save
the result in column 4.
Parameters
----------
f: file object or string
file containing the catalogue
operations: string
columns with operations to perform
to_columns int
save there the result of operations
output
------
none
accepted kwargs that affects the function
+substitute: substitute the content of the columns involved with operations
with this value, if not None
+verbose: verbose mode [True|False]
+replace: replace string *replace[0]* with *replace[1]* in f.name
+insert: insert string *insert[0]* before *insert[1]* in f.name
+skip: existing file names skipped [True|False]
+overwrite: existing file names overwritten [True|False]
+pandas: use pandas for the input
+chunks: chunksize in pandas.read_table
+fmt: format of the output file
"""
ofile = mf.create_ofile_name(f, **kwargs) # create the output file name
if kwargs['verbose']:
print("Processing file '{}'".format(f))
pattern = re.compile(r"c(\d+?)") #re pattern with the columns name
if kwargs['pandas']: #expression to evaluate to execute the operation
to_evaluate = pattern.sub("cat[\\1]", operations)
else:
to_evaluate = pattern.sub("cat[:,\\1]", operations)
#columns used in the operation
columns_read = [int(s) for s in pattern.findall(operations)]
# read the input catalogue
if kwargs['pandas']:
if kwargs['chunks'] is None:
cat = pd.read_table(f, header=None,
skiprows=mf.n_lines_comments(f), sep='\s')
new_column = eval(to_evaluate) # do the operation
if kwargs['substitute'] is not None:
cat[columns_read] = kwargs['substitute']
cat[to_column] = new_column #copy the result of the operation
np.savetxt(ofile, cat, fmt=kwargs['fmt'], delimiter='\t')
else:
chunks = pd.read_table(f, header=None, sep='\s',
skiprows=mf.n_lines_comments(f), chunksize=kwargs['chunks'])
with open(ofile, 'w') as of:
for cat in chunks:
new_column = eval(to_evaluate) # do the operation
if kwargs['substitute'] is not None:
cat[columns_read] = kwargs['substitute']
cat[to_column] = new_column #copy the result of the operation
np.savetxt(of, cat, fmt=kwargs['fmt'], delimiter='\t')
else:
cat = np.loadtxt(f)
new_column = eval(to_evaluate) # do the operation
if kwargs['substitute'] is not None:
cat[:, columns_read] = kwargs['substitute']
cat[:, to_column] = new_column #copy the result of the operation
np.savetxt(ofile, cat, fmt=kwargs['fmt'], delimiter='\t')
#end def columns_operations(f, operations, to_column, **kwargs):
if __name__ == "__main__": #if it's the main
import sys
args = parse(sys.argv[1:])
    #if parallel computation is required, check that IPython.parallel.Client
    #is installed and that the ipcluster has been started
if args.parallel :
from ipython_parallel import Load_balanced_view as Lbv
        parallel_env = Lbv() #initialize the object with all my parallel stuff
args.parallel = parallel_env.is_parallel_enabled()
if(args.parallel == False): # if: parallel
for fn in args.ifname: #file name loop
columns_operations(fn, args.operation, args.to_col, **vars(args))
else: # else: parallel
imports = ['import numpy as np', 'import my_functions as mf',
"import pandas as pd", "import re"]
parallel_env.exec_on_engine(imports)
initstatus = parallel_env.get_queue_status() #get the initial status
#submit the jobs and save the list of jobs
import os
runs = [parallel_env.apply(columns_operations, os.path.abspath(fn), args.operation,
args.to_col, **vars(args)) for fn in args.ifname]
if args.verbose : #if some info is required
parallel_env.advancement_jobs(runs, update=args.update,
init_status=initstatus)
else: #if no info at all is wanted
parallel_env.wait(jobs=runs) #wait for the end
#just check for any error
results = [r.result for r in runs]
#clear the variable in the parallel environment to avoid filling up memory
parallel_env.clear_cache()
#end if: parallel
exit()
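# Illustrative aside (not part of the original script): the regex substitution
# used in columns_operations() turns the user-supplied operation string into an
# expression over the loaded array, e.g. for the numpy branch:
#
#   >>> re.compile(r"c(\d+?)").sub("cat[:,\\1]", "c3+c6-1")
#   'cat[:,3]+cat[:,6]-1'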
|
montefra/montefra_PhD_Python
|
Catalogues/columns_operations.py
|
columns_operations.py
|
py
| 6,883 |
python
|
en
|
code
| 0 |
github-code
|
50
|
4639108049
|
from typing import Callable, Optional, Sequence, Tuple, Union, overload
from faker import Faker
from faker.generator import Generator
from faker.providers import BaseProvider
from faker.providers.python import Provider
from ..base import DEFAULT_FORMAT_FUNC, BytesValue, FileMixin, StringValue
from ..registry import FILE_REGISTRY
from ..storages.base import BaseStorage
from ..storages.filesystem import FileSystemStorage
__author__ = "Artur Barseghyan <[email protected]>"
__copyright__ = "2022-2023 Artur Barseghyan"
__license__ = "MIT"
__all__ = ("CsvFileProvider",)
FAKER = Faker()
class CsvFileProvider(BaseProvider, FileMixin):
"""CSV file provider.
Usage example:
.. code-block:: python
from faker import Faker
from faker_file.providers.csv_file import CsvFileProvider
FAKER = Faker()
FAKER.add_provider(CsvFileProvider)
file = FAKER.csv_file()
Usage example with options:
.. code-block:: python
file = FAKER.csv_file(
prefix="zzz",
num_rows=100,
data_columns=('{{name}}', '{{sentence}}', '{{address}}'),
include_row_ids=True,
)
Usage example with `FileSystemStorage` storage (for `Django`):
.. code-block:: python
from django.conf import settings
from faker_file.storages.filesystem import FileSystemStorage
file = FAKER.csv_file(
storage=FileSystemStorage(
root_path=settings.MEDIA_ROOT,
rel_path="tmp",
),
prefix="zzz",
num_rows=100,
)
"""
extension: str = "csv"
@overload
def csv_file(
self: "CsvFileProvider",
storage: Optional[BaseStorage] = None,
basename: Optional[str] = None,
prefix: Optional[str] = None,
header: Optional[Sequence[str]] = None,
data_columns: Tuple[str, ...] = ("{{name}}", "{{address}}"),
num_rows: int = 10,
include_row_ids: bool = False,
content: Optional[str] = None,
encoding: Optional[str] = None,
format_func: Callable[
[Union[Faker, Generator, Provider], str], str
] = DEFAULT_FORMAT_FUNC,
raw: bool = True,
**kwargs,
) -> BytesValue:
...
@overload
def csv_file(
self: "CsvFileProvider",
storage: Optional[BaseStorage] = None,
basename: Optional[str] = None,
prefix: Optional[str] = None,
header: Optional[Sequence[str]] = None,
data_columns: Tuple[str, ...] = ("{{name}}", "{{address}}"),
num_rows: int = 10,
include_row_ids: bool = False,
content: Optional[str] = None,
encoding: Optional[str] = None,
format_func: Callable[
[Union[Faker, Generator, Provider], str], str
] = DEFAULT_FORMAT_FUNC,
**kwargs,
) -> StringValue:
...
def csv_file(
self: "CsvFileProvider",
storage: Optional[BaseStorage] = None,
basename: Optional[str] = None,
prefix: Optional[str] = None,
header: Optional[Sequence[str]] = None,
data_columns: Tuple[str, ...] = ("{{name}}", "{{address}}"),
num_rows: int = 10,
include_row_ids: bool = False,
content: Optional[str] = None,
encoding: Optional[str] = None,
format_func: Callable[
[Union[Faker, Generator, Provider], str], str
] = DEFAULT_FORMAT_FUNC,
raw: bool = False,
**kwargs,
) -> Union[BytesValue, StringValue]:
"""Generate a CSV file with random text.
:param storage: Storage. Defaults to `FileSystemStorage`.
:param basename: File basename (without extension).
:param prefix: File name prefix.
:param header: The ``header`` argument expects a list or a tuple of
strings that will serve as the header row if supplied.
:param data_columns: The ``data_columns`` argument expects a list or a
tuple of string tokens, and these string tokens will be passed to
:meth:`parse()
<faker.providers.python.Provider.parse>`
for data generation. Argument Groups are used to pass arguments
to the provider methods. Both ``header`` and ``data_columns`` must
be of the same length.
:param num_rows: The ``num_rows`` argument controls how many rows of
data to generate, and the ``include_row_ids`` argument may be set
to ``True`` to include a sequential row ID column.
:param include_row_ids:
:param content: File content. If given, used as is.
:param encoding: Encoding.
:param format_func: Callable responsible for formatting template
strings.
:param raw: If set to True, return `BytesValue` (binary content of
the file). Otherwise, return `StringValue` (path to the saved
file).
:return: Relative path (from root directory) of the generated file
or raw content of the file.
"""
# Generic
if storage is None:
storage = FileSystemStorage()
filename = storage.generate_filename(
extension=self.extension,
prefix=prefix,
basename=basename,
)
if self.generator is None:
self.generator = Faker()
# Specific
if content is None:
content = self.generator.csv(
header=header,
data_columns=data_columns,
num_rows=num_rows,
include_row_ids=include_row_ids,
)
else:
content = format_func(self.generator, content)
data = {"content": content, "filename": filename, "storage": storage}
if raw:
raw_content = BytesValue(content.encode("utf8"))
raw_content.data = data
return raw_content
storage.write_text(filename, content, encoding=encoding)
# Generic
file_name = StringValue(storage.relpath(filename))
file_name.data = data
FILE_REGISTRY.add(file_name)
return file_name
|
barseghyanartur/faker-file
|
src/faker_file/providers/csv_file.py
|
csv_file.py
|
py
| 6,214 |
python
|
en
|
code
| 74 |
github-code
|
50
|
33187085436
|
#!/usr/bin/python
# coding=utf-8
import sys
import random
from PyQt4 import QtGui,QtCore
from math import *
class GameWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.setWindowTitle('24 point game')
self.resize(400, 250)
self.widget = QtGui.QWidget(self)
self.setWindow()
def setWindow(self):
self.num = []
self.num_bak = []
self.expression = []
self.label = []
hbox0 = QtGui.QHBoxLayout()
for i in range(0, 4):
label = QtGui.QLabel()
label.setAlignment(QtCore.Qt.AlignCenter)
num = random.randint(1, 10) #[1, 10] 1<=x<=10
self.num.append(num)
self.num_bak.append(num)
self.expression.append(str(num))
label.setText(str(num))
label.setStyleSheet(
'background-color: rgb(47, 130, 208);'
'color: rgb(246, 253, 251);'
'font: 75 18pt "Axure Handwriting";'
)
hbox0.addWidget(label)
self.label.append(label)
self.expressionWid = QtGui.QLineEdit()
self.submitWid = QtGui.QPushButton('submit', self)
self.no_solutionWid = QtGui.QPushButton('no solution', self)
self.answerWid = QtGui.QPushButton('see answer', self)
self.nextWid = QtGui.QPushButton('next puzzle', self)
self.statusbarWid = self.statusBar()
self.connect(self, QtCore.SIGNAL("messageToStatusbar(QString)"), self.statusbarWid, QtCore.SLOT("showMessage(QString)"))
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(self.submitWid)
hbox1.addWidget(self.no_solutionWid)
hbox1.addWidget(self.answerWid)
hbox1.addWidget(self.nextWid)
vbox = QtGui.QVBoxLayout()
vbox.addLayout(hbox0)
vbox.addWidget(self.expressionWid)
vbox.addLayout(hbox1)
self.widget.setLayout(vbox)
self.setCentralWidget(self.widget)
self.connect(self.submitWid, QtCore.SIGNAL('clicked()'), self.checkExpression)
self.connect(self.no_solutionWid, QtCore.SIGNAL('clicked()'), self.checkNoSolution)
self.connect(self.answerWid, QtCore.SIGNAL('clicked()'), self.getAnswer)
self.connect(self.nextWid, QtCore.SIGNAL('clicked()'), self.nextPuzzle)
def nextPuzzle(self):
self.num = []
self.num_bak = []
self.expression = []
for i in range(0, 4):
label = self.label[i]
num = random.randint(1, 10)
self.num.append(num)
self.num_bak.append(num)
self.expression.append(str(num))
label.setText(str(num))
self.emit(QtCore.SIGNAL("messageToStatusbar(QString)"), " ")
self.expressionWid.clear()
def checkNoSolution(self):
""" 检查是否无解
"""
self.num = []
self.expression = []
for i in range(len(self.num_bak)):
self.num.append(self.num_bak[i])
self.expression.append(str(self.num_bak[i]))
rt = self.caculate(4);
self.num = []
for i in range(len(self.num_bak)):
self.num.append(self.num_bak[i])
if rt == True:
self.emit(QtCore.SIGNAL("messageToStatusbar(QString)"), "WRONG, please try again!")
else:
self.emit(QtCore.SIGNAL("messageToStatusbar(QString)"), "RIGHT, you have done a great job!")
def checkExpression(self):
""" 计算输入的表达式结果
"""
tmp_expression = str(self.expressionWid.text())
try:
self.exp_string = unicode(tmp_expression)
result = eval(self.exp_string)
except:
self.emit(QtCore.SIGNAL("messageToStatusbar(QString)"), "expression invalid")
return
if result == 24 and self.expressionLegal():
self.success()
else:
self.failure()
def expressionLegal(self):
""" 检查表达式是否合法
"""
self.tmp_num = []
length = len(self.exp_string)
i = 0
while i < length:
if self.exp_string[i].isdigit() == True:
self.tmp_num.append(int(self.exp_string[i]))
if self.exp_string[i] == '1' and (i+1) < length :
if self.exp_string[i+1] == '0':
self.tmp_num[-1] = 10
i += 1
i += 1
if len(self.tmp_num) != 4:
return False
self.num.sort()
self.tmp_num.sort()
for i in range(4):
if self.tmp_num[i] != self.num[i]:
return False
return True
def success(self):
self.emit(QtCore.SIGNAL("messageToStatusbar(QString)"), "SUCCESS, congratulations!")
def failure(self):
self.emit(QtCore.SIGNAL("messageToStatusbar(QString)"), "FAILURE, check each number and the sum")
def getAnswer(self):
self.num = []
self.expression = []
for i in range(len(self.num_bak)):
self.num.append(self.num_bak[i])
self.expression.append(str(self.num_bak[i]))
rt = self.caculate(4);
self.num = []
for i in range(len(self.num_bak)):
self.num.append(self.num_bak[i])
self.num = []
for i in range(len(self.num_bak)):
self.num.append(self.num_bak[i])
if rt == True:
self.emit(QtCore.SIGNAL("messageToStatusbar(QString)"), "answer is %s" %(self.expression[0]))
else:
self.emit(QtCore.SIGNAL("messageToStatusbar(QString)"), "no solution")
return rt
def caculate(self, n = 4):
""" 计算24点
"""
if n == 1:
if abs(self.num[0]- 24.0) < 1E-6:
return True
else:
return False
for i in range(0, n-1):
for j in range(i+1, n):
a = self.num[i]
b = self.num[j]
self.num[j] = self.num[n-1]
expa = self.expression[i]
expb = self.expression[j]
self.expression[j] = self.expression[n-1]
# a+b
tmp = ['(', expa, '+', expb, ')']
self.expression[i] = ''.join(tmp)
self.num[i] = float(a)+float(b)
if self.caculate(n-1) == True:
return True
# a*b
tmp = ['(', expa, '*', expb, ')']
self.expression[i] = ''.join(tmp)
self.num[i] = float(a)*float(b)
if self.caculate(n-1) == True:
return True
# a-b
tmp = ['(', expa, '-', expb, ')']
self.expression[i] = ''.join(tmp)
self.num[i] = float(a)-float(b)
if self.caculate(n-1) == True:
return True
# b-a
tmp = ['(', expb, '-', expa, ')']
self.expression[i] = ''.join(tmp)
self.num[i] = float(b)-float(a)
if self.caculate(n-1) == True:
return True
# a/b
if b != 0:
tmp = ['(', expa, '/', expb, ')']
self.expression[i] = ''.join(tmp)
self.num[i] = float(a)/float(b)
if self.caculate(n-1) == True:
return True
# b/a
if a != 0:
tmp = ['(', expb, '/', expa, ')']
self.expression[i] = ''.join(tmp)
self.num[i] = float(b)/float(a)
if self.caculate(n-1) == True:
return True
self.num[i] = a
self.num[j] = b
self.expression[i] = expa
self.expression[j] = expb
return False
def startGame():
app = QtGui.QApplication(sys.argv)
window = GameWindow()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
startGame()
|
wuxx/python
|
24_point_game/24_point.py
|
24_point.py
|
py
| 8,304 |
python
|
en
|
code
| 1 |
github-code
|
50
|
24360907604
|
from typing import Any
start_id_number = 1
last_id_number = 151
stats = ['id', 'height', 'weight']
def get_stats():
return stats
def compare(stat_name, user_stat_value, opponent_stat_value):
if user_stat_value > opponent_stat_value:
return [1, user_stat_value]
elif user_stat_value == opponent_stat_value:
return [0, 0]
else:
return [-1, opponent_stat_value]
def get_url(pokemon_id):
url = "https://pokeapi.co/api/v2/pokemon/{}".format(pokemon_id)
return url
def read_json(response, pokemon_id) -> dict[str, Any]:
pokemon = response.json()
return {
'name': pokemon['name'],
'id': pokemon['id'],
'height': pokemon['height'],
'weight': pokemon['weight'],
'available': 1,
}
def print_details(pokemon):
print(' ID : {}'.format(pokemon['id']))
print(' NAME : {}'.format(pokemon['name']))
print(' HEIGHT : {}'.format(pokemon['height']))
print(' WEIGHT : {}'.format(pokemon['weight']))
print('AVAILABLE : {}'.format(("NO" if pokemon['available'] == 0 else "YES")))
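# Minimal usage sketch (hypothetical, not part of the original module):
# compare() returns [1, value] when the user's stat wins, [-1, value] when the
# opponent's stat wins, and [0, 0] on a draw.
if __name__ == '__main__':
    print(get_stats())                 # ['id', 'height', 'weight']
    print(compare('height', 7, 4))     # [1, 7]
    print(compare('weight', 60, 90))   # [-1, 90]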
|
angelRep/CFG-Python-Project
|
cfg-python-project/pokemon.py
|
pokemon.py
|
py
| 1,147 |
python
|
en
|
code
| 0 |
github-code
|
50
|
5164596817
|
valor = int(input('Valor a ser sacado: R$'))
total = valor
ced = 50  # start with the 50 note
totced = 0
while True:
    if total >= ced:  # if the remaining amount still covers the current note
        total = total - ced  # subtract one note of this value from the amount
        totced = totced + 1  # add 1 to the count of notes of this value
    else:
        if totced > 0:  # only report note values that were actually dispensed
            print(f'Total de {totced} cedulas de R${ced}')  # total notes dispensed of 50, 20, 10...
        if ced == 50:  # after the 50 notes, move on to the 20 note
            ced = 20
        elif ced == 20:  # after the 20 notes, move on to the 10 note
            ced = 10
        elif ced == 10:
            ced = 1
        totced = 0  # reset the note counter whenever the note value changes
    if total == 0:  # once the full amount has been dispensed, leave the loop
break
print('fim')
|
GabrielBrotas/Python
|
modulo 2/Exercicios/Ex071.2 - Caixa eletronico.py
|
Ex071.2 - Caixa eletronico.py
|
py
| 899 |
python
|
pt
|
code
| 0 |
github-code
|
50
|
34416853907
|
import logging
import numpy as np
from abcpy.backends import BackendDummy as Backend
from abcpy.continuousmodels import Uniform
# from abcpy.backends import BackendMPI as Backend # to use MPI
from abcpy.distances import Euclidean
from abcpy.output import Journal
from src.distance import WeightedDistance
from src.models import SEI4RD
from src.statistic import ExtractSingleTimeseries2DArray
from src.utils import ABC_inference, determine_eps, generate_samples
logging.basicConfig(level=logging.INFO)
#results_folder = "results/SEI4RD_england_infer_1Mar_23May/"
results_folder = "results/SEI4RD_england_infer_1Mar_31Aug/"
print(results_folder)
# load files
#data_folder = "data/england_inference_data_1Mar_to_23May/"
data_folder = "data/england_inference_data_1Mar_to_31Aug/"
# alpha_home = np.load(data_folder + "alpha_home.npy")
alpha_home = 1 # fix this
alpha_work = np.load(data_folder + "mobility_work.npy")
alpha_other = np.load(data_folder + "mobility_other.npy")
alpha_school = np.load(data_folder + "mobility_school.npy")
england_pop = np.load(data_folder + "england_pop.npy", allow_pickle=True)
contact_matrix_home_england = np.load(data_folder + "contact_matrix_home_england.npy")
contact_matrix_work_england = np.load(data_folder + "contact_matrix_work_england.npy")
contact_matrix_school_england = np.load(data_folder + "contact_matrix_school_england.npy")
contact_matrix_other_england = np.load(data_folder + "contact_matrix_other_england.npy")
observation_england = np.load(data_folder + "observed_data.npy", allow_pickle=True)
#print(observation_england.shape)
# the last column of the array above represents the number of Ic people in our model.
# parameters
n = 5 # number of age groups
dt = 0.1 # integration timestep
T = alpha_other.shape[0] - 1 # horizon time in days (needs to be 1 less than the number of days in observation)
total_population = england_pop # population for each age group
# 16th March: Boris Johnson asked old people to isolate; we then learn a new alpha from the 18th March:
lockdown_day = 17
# alpha_home = np.repeat(alpha_home, int(1 / dt), axis=0)
# note: np.int was removed from recent NumPy releases; the builtin int is equivalent here
alpha_work = np.repeat(alpha_work, int(1 / dt), axis=0)
alpha_other = np.repeat(alpha_other, int(1 / dt), axis=0)
alpha_school = np.repeat(alpha_school, int(1 / dt), axis=0)
# ABC model (priors need to be fixed better):
# beta = Uniform([[0], [1]], name='beta') # controls how fast the epidemics grows. Related to R_0
beta = Uniform([[0], [0.5]], name='beta') # controls how fast the epidemics grows. Related to R_0
d_L = Uniform([[1], [10]], name='d_L') # average duration of incubation
d_C = Uniform([[1], [10]], name='d_C') # average time before going to clinical
d_R = Uniform([[1], [10]], name='d_R') # average recovery time
d_RC = Uniform([[4], [14]], name='d_RC') # average recovery time from clinical state
d_D = Uniform([[1], [10]], name='d_D') # average duration of infected clinical state (resulting in death)
p01 = Uniform([[0], [.3]], name="p01") # restrict priors a bit
p02 = Uniform([[0], [.5]], name="p02")
p03 = Uniform([[0], [1]], name="p03")
p04 = Uniform([[0], [1]], name="p04")
p05 = Uniform([[0.5], [1]], name="p05")
p11 = Uniform([[0], [1]], name="p11")
p12 = Uniform([[0], [1]], name="p12")
p13 = Uniform([[0], [1]], name="p13")
p14 = Uniform([[0], [1]], name="p14")
p15 = Uniform([[0.5], [1]], name="p15")
initial_exposed = Uniform([[0], [500]], name="initial_exposed")
alpha_123 = Uniform([[0.3], [1]], name="alpha_123")
alpha_4 = Uniform([[0], [1]], name="alpha_4")
alpha_5 = Uniform([[0], [1]], name="alpha_5")
model = SEI4RD(
[beta, d_L, d_C, d_R, d_RC, d_D, p01, p02, p03, p04, p05, p11, p12, p13, p14, p15, initial_exposed, alpha_123,
alpha_4, alpha_5], tot_population=total_population, T=T, contact_matrix_school=contact_matrix_school_england,
contact_matrix_work=contact_matrix_work_england, contact_matrix_home=contact_matrix_home_england,
contact_matrix_other=contact_matrix_other_england, alpha_school=alpha_school, alpha_work=alpha_work,
alpha_home=alpha_home, alpha_other=alpha_other, modify_alpha_home=False, dt=dt, return_once_a_day=True,
return_observation_only_with_hospitalized=True, learn_alphas_old=True, lockdown_day=lockdown_day)
true_parameters_fake_1 = [0.05, 5, 7, 5, 5, 6, 0.06, .1, .2, .3, .4, .1, .2, .3, .4, .5, 50, .4, .3, 0.3]
# print(len(true_parameters_fake_1))
observation_england_1 = model.forward_simulate(true_parameters_fake_1, 1)
true_parameters_fake_2 = [0.05, 5, 7, 5, 5, 6, 0.05, .1, .2, .3, .4, .1, .2, .3, .4, .5, 50, .4, .3, 0.3]
observation_england_2 = model.forward_simulate(true_parameters_fake_2, 1)
print(observation_england_1[0].shape)
# we define now the statistics and distances:
rho = 1 # multiplier to decrease importance of past
distances_list = []
# this has to be used if the model returns already the correct observations (return_observation_only=True)
for i in range(n):
distances_list.append(
Euclidean(ExtractSingleTimeseries2DArray(index=i, rho=rho, end_step=-1))) # deceased
# now add the distance on the number of hospitalized people, that needs to discard the first 17 elements because there
# is no data on that before the 18th March.
distances_list.append(Euclidean(ExtractSingleTimeseries2DArray(index=5, rho=rho, start_step=19, end_step=-1)))
# define a weighted distance:
# max values of the daily counts: 1., 9., 73., 354., 462., 17933.
# we could use the inverse of them as weights; I think however the last timeseries have less noise as they are sampled
# from larger numbers, so they should be slightly less important.
weights = [1, 1, 1, 2, 2, .1]
# weights = [1.0 / 1 * 0.75, 1.0 / 9 * 0.75, 1.0 / 68 * 0.85, 1.0 / 338, 1.0 / 445, 1.0 / 4426]
# weights = [1, 0.1, 0.01, 0.005, 0.005, 0.005]
final_distance = WeightedDistance(distances_list, weights=weights)
print("dist", final_distance.distance(observation_england_1, [observation_england]))
# define backend
backend = Backend()
# # generate 100 samples from which to find the starting epsilon value as the 20th percentile of the distances
param, samples = generate_samples(model, 10, num_timeseries=6, two_dimensional=True)
eps = determine_eps(samples, dist_calc=final_distance, quantile=0.5) # * 1000
print("epsilon", eps)
# you can keep running the sequential algorithms from previously saved journal files.
start_journal_path = None
# start_journal_path = results_folder + "seicicsr.jrl"
# jrnl_start = Journal.fromFile(start_journal_path)
# eps = jrnl_start.configuration["epsilon_arr"][-1] # use the last step eps? Maybe should reduce that as well..
# inference1
print("Inference 1")
jrnl = ABC_inference("PMCABC", model, observation_england, final_distance, eps=eps, n_samples=500, n_steps=5,
backend=backend, full_output=1, journal_file=start_journal_path, epsilon_percentile=10,
journal_file_save=results_folder + "journal_1")
# jrnl = ABC_inference("SABC", model, observation_england, final_distance, eps=eps, n_samples=100000, n_steps=10,
# backend=backend, full_output=1, journal_file=start_journal_path, beta=2,
# delta=0.2, v=0.3, ar_cutoff=0.01, resample=None, n_update=None, )
jrnl.save(results_folder + "PMCABC_inf1.jrl")
print("Posterior mean: ", jrnl.posterior_mean())
#
# inference2
print("Inference 2")
epsilon_percentile = 50
if "epsilon_arr" in jrnl.configuration.keys():
eps = np.percentile(jrnl.distances[-1], epsilon_percentile)
print("using epsilon from last step...")
start_journal_path = results_folder + "PMCABC_inf1.jrl"
jrnl = ABC_inference("PMCABC", model, observation_england, final_distance, eps=eps, n_samples=500, n_steps=10,
backend=backend, full_output=1, journal_file=start_journal_path,
epsilon_percentile=epsilon_percentile,
journal_file_save=results_folder + "journal_2")
# save the journal
jrnl.save(results_folder + "PMCABC_inf2.jrl")
#
print("Inference 3")
jrnl = Journal.fromFile(results_folder + "journal_2.jrl")
epsilon_percentile = 70
if "epsilon_arr" in jrnl.configuration.keys():
eps = np.percentile(jrnl.distances[-1], epsilon_percentile)
print("using epsilon from last step: ", eps)
start_journal_path = results_folder + "journal_2.jrl"
jrnl = ABC_inference("PMCABC", model, observation_england, final_distance, eps=eps, n_samples=500, n_steps=10,
backend=backend, full_output=1, journal_file=start_journal_path,
epsilon_percentile=epsilon_percentile,
journal_file_save=results_folder + "journal_3")
# save the journal
jrnl.save(results_folder + "PMCABC_inf3.jrl")
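# --- Editor's hedged sketch (not part of the project): the actual distance is
# assembled from ExtractSingleTimeseries2DArray and WeightedDistance in
# src.statistic / src.distance, which are not shown here. The two helpers below
# only illustrate the idea of a past-discounted per-series distance combined by
# per-series weights; the exact semantics of rho are an assumption.
def discounted_euclidean(sim, obs, rho=1.0):
    sim, obs = np.asarray(sim, dtype=float), np.asarray(obs, dtype=float)
    k = np.arange(len(sim))[::-1]  # exponent 0 for the most recent time point
    return float(np.sqrt(np.sum((rho ** k) * (sim - obs) ** 2)))

def combined_distance(sim_series, obs_series, weights, rho=1.0):
    # weighted sum of per-timeseries distances, mirroring weights = [1, 1, 1, 2, 2, .1]
    return sum(w * discounted_euclidean(s, o, rho)
               for w, s, o in zip(weights, sim_series, obs_series))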
|
OptimalLockdown/MobilitySEIRD-England
|
inference_SEI4RD_england_data.py
|
inference_SEI4RD_england_data.py
|
py
| 8,689 |
python
|
en
|
code
| 2 |
github-code
|
50
|
37152546209
|
#########################################################################
# Dusi's Thesis #
# Algorithmic Discrimination and Natural Language Processing Techniques #
#########################################################################
# This experiment computes the perplexity of a sentence for an MLM BERT
# Perplexity is a measure of anomaly for sentences
import gc
import os
import pickle
import numpy as np
import torch
from datasets import Dataset
import settings
from src.models.templates import TemplatesGroup
from src.models.trained_model_factory import TrainedModelForMaskedLMFactory
from src.experiments.mlm_gender_prediction_finetuned import eval_group, occupation_token
from src.parsers import jobs_parser
from src.parsers.article_inference import infer_indefinite_article
EXPERIMENT_NAME: str = "mlm_gender_perplexity"
FOLDER_OUTPUT: str = settings.FOLDER_RESULTS + "/" + EXPERIMENT_NAME
FOLDER_OUTPUT_IMAGES: str = FOLDER_OUTPUT + "/" + settings.FOLDER_IMAGES
FOLDER_OUTPUT_TABLES: str = FOLDER_OUTPUT + "/" + settings.FOLDER_TABLES
def compute_perplexity_for_group(model, tokenizer, templates_group: TemplatesGroup, occupations: list[str], targets: list[str]) -> np.ndarray:
"""
Computes perplexity metric for all the sentences and all the targets given as input.
Returns a numpy array with dimensions [# templates, # occupations, # gender]
:param model: The model used to compute probability and loss of masked words.
:param tokenizer: The tokenizer working with that model.
:param templates_group: The group of sentences to analyze.
:param occupations: The list of occupations replacing the "$ART_OCC" token.
:param targets: The target words "he" and "she"
:return: The numpy array of computed perplexities
"""
scores: np.ndarray = np.zeros(shape=(len(templates_group.templates), len(occupations), len(targets)))
for i, tmpl in enumerate(templates_group.templates):
for j, occ in enumerate(occupations):
art_occ: str = infer_indefinite_article(occ) + ' ' + occ
masked_sentence = tmpl.sentence.replace(occupation_token, art_occ)
for k, targ in enumerate(targets):
sentence = masked_sentence.replace(settings.TOKEN_MASK, targ)
scores[i, j, k] = compute_perplexity_for_text(model, tokenizer, text=sentence)
return scores
def compute_perplexity_for_text(model, tokenizer, text) -> float:
tensor_input = tokenizer(text, return_tensors='pt')['input_ids'].to(settings.pt_device)
# tensor([[ 101, 2769, 4263, 872, 102]])
repeat_input = tensor_input.repeat(tensor_input.size(-1) - 2, 1)
# tensor([[ 101, 2769, 4263, 872, 102],
# [ 101, 2769, 4263, 872, 102],
# [ 101, 2769, 4263, 872, 102]])
mask = torch.ones(tensor_input.size(-1) - 1, device='cuda').diag(1)[:-2]
# tensor([[0., 1., 0., 0., 0.],
# [0., 0., 1., 0., 0.],
# [0., 0., 0., 1., 0.]])
masked_input = repeat_input.masked_fill(mask == 1, 103).to(settings.pt_device)
# tensor([[ 101, 103, 4263, 872, 102],
# [ 101, 2769, 103, 872, 102],
# [ 101, 2769, 4263, 103, 102]])
labels = repeat_input.masked_fill(masked_input != 103, -100).to(settings.pt_device)
# tensor([[-100, 2769, -100, -100, -100],
# [-100, -100, 4263, -100, -100],
# [-100, -100, -100, 872, -100]])
"""
for token, inp, lab in zip(tensor_input[0], torch.unsqueeze(masked_input, 1), torch.unsqueeze(labels, 1)):
res = model(inp, labels=lab)
loss = res['loss'].cpu().detach().numpy()
print(f"{token}: loss = {loss}")
"""
res = model(masked_input, labels=labels)
loss = res['loss'].cpu().detach().numpy()
# print("Sentence loss: ", loss)
score = np.exp(loss)
return score
def launch() -> None:
# Chosen model
model_name = settings.DEFAULT_BERT_MODEL_NAME
factory = TrainedModelForMaskedLMFactory(model_name=model_name)
training_samples: list[int] = [0, 500, 1000, 2000, 5000, 10000, 20000]
# occs_list = random.sample(ONEWORD_OCCUPATIONS, 1000)
# occs_list = ["nurse", "secretary", "engineer", "plumber", ]
occs_list = jobs_parser.get_words_list()
results = Dataset.from_dict(mapping={'occupation': occs_list})
for samples_number in training_samples:
gc.collect()
scores_dump_file = settings.FOLDER_SAVED_DATA + '/' + EXPERIMENT_NAME + f'/fine-tuned-{samples_number}-scores.bin'
scores: np.ndarray
if os.path.exists(scores_dump_file):
with open(scores_dump_file, "rb") as f:
scores = pickle.load(f)
else:
# Retrieving saved models from a previous experiment
saved_model_ft_path = settings.FOLDER_SAVED_MODELS + f"/mlm_gender_prediction_finetuned/mlm_gender_prediction_{model_name}_{samples_number}"
model = factory.get_model(load_or_save_path=saved_model_ft_path)
print(f"Current model trained on: {samples_number} samples")
scores = compute_perplexity_for_group(model=model, tokenizer=factory.tokenizer,
templates_group=eval_group, occupations=occs_list,
targets=eval_group.targets)
# The ndarray <scores> has dimensions [# templates, # occupations, # gender]
# We average the results for the templates:
scores = np.mean(scores, axis=0)
# Now, the ndarray <scores> has dimensions [# occupations, # gender]
# Saving a data checkpoint
with open(scores_dump_file, "wb") as f:
pickle.dump(scores, f)
# Adding scores to the resulting dataset
for k, targ in enumerate(eval_group.targets):
col_name: str = f'fine-tuned-{samples_number}-{targ}'
print("\tAdding column: ", col_name)
results = results.add_column(name=col_name, column=scores[:, k])
with open(FOLDER_OUTPUT_TABLES + '/perplexity_binary_results.bin', "wb") as f:
pickle.dump(results, f)
table_path = FOLDER_OUTPUT_TABLES + '/perplexity_results.' + settings.OUTPUT_TABLE_FILE_EXTENSION
results.to_csv(path_or_buf=table_path, sep=settings.OUTPUT_TABLE_COL_SEPARATOR)
"""
with open(table_path, "w") as f:
print(results.features.values(), sep=settings.OUTPUT_TABLE_COL_SEPARATOR, file=f)
for row in results:
print(row, file=f)
"""
return
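# --- Editor's hedged sketch (not part of the project): a self-contained version
# of the pseudo-perplexity computed by compute_perplexity_for_text(), without the
# project's settings module. The model name "bert-base-uncased" and running on
# CPU are assumptions for illustration only.
import numpy as np
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

def pseudo_perplexity(text: str, model_name: str = "bert-base-uncased") -> float:
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForMaskedLM.from_pretrained(model_name)
    ids = tokenizer(text, return_tensors="pt")["input_ids"]
    n = ids.size(-1) - 2                             # real tokens, excluding [CLS]/[SEP]
    repeated = ids.repeat(n, 1)                      # one copy of the sentence per token
    mask = torch.eye(n + 2, dtype=torch.bool)[1:-1]  # mask a different position in each copy
    masked = repeated.masked_fill(mask, tokenizer.mask_token_id)
    labels = repeated.masked_fill(~mask, -100)       # score only the masked position
    with torch.no_grad():
        loss = model(masked, labels=labels).loss     # mean cross-entropy over masked tokens
    return float(np.exp(loss.item()))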
|
MicheleDusi/AlgorithmicDiscrimination_MasterThesis
|
src/experiments/mlm_gender_perplexity.py
|
mlm_gender_perplexity.py
|
py
| 6,127 |
python
|
en
|
code
| 0 |
github-code
|
50
|
12408247569
|
import math
A, B, C, D = map(int, input().split())
def lcm(x, y):
    # fractions.gcd was removed in Python 3.9; math.gcd is the supported replacement
    return (x * y) // math.gcd(x, y)
LCM_CD = lcm(C, D)
# inclusion-exclusion over [A, B]; using (A - 1) keeps A itself counted when it is divisible
C_Count = B // C - (A - 1) // C
D_Count = B // D - (A - 1) // D
CD_Count = B // LCM_CD - (A - 1) // LCM_CD
# print(C_Count, D_Count, CD_Count)  # debug output; leave commented out for submission
print(B - A + 1 - C_Count - D_Count + CD_Count)
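# --- Editor's worked example (hedged, illustration only): for A=4, B=9, C=2, D=3
# the range holds 6 numbers; multiples of 2 are {4, 6, 8}, multiples of 3 are
# {6, 9}, multiples of lcm(2, 3) = 6 are {6}, so the answer is 6 - 3 - 2 + 1 = 2
# (the numbers 5 and 7 are divisible by neither).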
|
ritzcr/AtCoder
|
practice/abc131_c.py
|
abc131_c.py
|
py
| 313 |
python
|
en
|
code
| 0 |
github-code
|
50
|
39098615988
|
"""
1.6 - String Compression: Implement a method to perform basic string compression using the
counts of repeated characters. For example, the string aabcccccaaa would become a2b1c5a3. If
the "compressed" string would not become smaller than the original string, your method should
return the original string. You can assume the string has only uppercase and lowercase letters (a-z).
Time Complexity: O(n)
Space Complexity: O(n)
Author: Jonah Beers
"""
string = 'aabcccccaaa'
string2 = 'aabbcc'
def compress_string(string):
if len(string) < 3:
return string
curr_char, count = '', 0
compressed_string = ''
for char in string:
if char != curr_char:
if count != 0:
compressed_string += (curr_char+str(count))
curr_char = char
count = 1
else:
count += 1
compressed_string += (curr_char+str(count))
    if len(compressed_string) >= len(string):  # also covers the case where "compression" makes it longer
return string
return compressed_string
# Driver
print('The compressed string for', string, 'is:', compress_string(string))
print('The compressed string for', string2, 'is:', compress_string(string2))
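# --- Editor's note (hedged): the length guard above matters for inputs whose
# "compressed" form would be longer than the original, e.g. 'abcd' -> 'a1b1c1d1';
# the problem statement asks for the original string in that case.
# print('The compressed string for', 'abcd', 'is:', compress_string('abcd'))  # expected: abcd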
|
jonahb13/cracking-the-coding-interview
|
solutions/ch1_arrays_and_strings/1_6.py
|
1_6.py
|
py
| 1,170 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28612035618
|
import torch
import torch.optim as optim
import matplotlib.pyplot as plt
from data import get_dataloader
from model import EncoderAndDecoder
from train import train_model
from infer import get_private_pred
train_dataloader, valid_dataloader = get_dataloader()
model = EncoderAndDecoder()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
optimizer = optim.Adam(model.parameters(), lr=0.0004)
exp_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.95)
save_dir = "melt_pred.pth"
start_epoch, end_epoch = 0, 200
train_losses, valid_losses = train_model(model, optimizer, exp_scheduler, device, save_dir,
start_epoch, end_epoch, train_dataloader, valid_dataloader)
plt.plot(train_losses["mae"][10: ])
plt.plot(valid_losses["mae"][10: ])
plt.show()
plt.plot(train_losses["f1"][10: ])
plt.plot(valid_losses["f1"][10: ])
plt.show()
plt.plot(train_losses["mae_over_f1"][10: ])
plt.plot(valid_losses["mae_over_f1"][10: ])
plt.show()
predictor = EncoderAndDecoder()
# load_state_dict() returns a key-matching report, not the module, so load into the instance
predictor.load_state_dict(torch.load(save_dir))
predictor.to(device)
predictor.eval()
get_private_pred(predictor, "submission.csv")  # run inference with the reloaded best weights
|
Hyonchori/CONTESTS
|
북극 해빙예측 AI 경진대회/main.py
|
main.py
|
py
| 1,163 |
python
|
en
|
code
| 2 |
github-code
|
50
|
8015886566
|
import math
operations = {
'(': -1,
')': 1,
'+': 2,
'-': 2,
'*': 3,
'/': 3,
'^': 4
}
def is_sign(ch: chr) -> bool:
return ch in operations.keys()
def is_bracket(ch: chr) -> bool:
return ch == '(' or ch == ')'
def is_num(ch: chr) -> bool:
return ch in '0123456789.'
def priority_of(ch: chr) -> int:
return operations.get(ch)
def is_priority_reduction(sequence: list, next_char: chr) -> bool:
return len(sequence) > 0 and priority_of(sequence[-1]) >= priority_of(next_char)
def is_empty(num: str) -> bool:
return num == ''
def is_trigonometrical_function(ch: chr) -> bool:
return ch in 'sincostgctg'
def separate_trigonometrical_function(sequence: str, cursor: int) -> (str, int):
trigonometrical_function = ''
bracket_count = 0
while is_trigonometrical_function(sequence[cursor]):
trigonometrical_function += sequence[cursor]
cursor += 1
inner_expression = ''
while cursor < len(sequence) and (bracket_count > 0 or inner_expression == ''):
if sequence[cursor] == '(':
bracket_count += 1
elif sequence[cursor] == ')':
bracket_count -= 1
inner_expression += sequence[cursor]
cursor += 1
cursor -= 1
value_param = calculate(inner_expression)
return str(calc_trigonometrical_function(trigonometrical_function, value_param)), cursor
def calc_trigonometrical_function(function: str, param: float) -> float:
if function == 'tg':
return math.tan(param)
if function == 'ctg':
return 1 / math.tan(param)
if function == 'sin':
return math.sin(param)
if function == 'cos':
return math.cos(param)
def calculate(expression: str) -> float:
nums_stack = []
operation_stack = []
cur_num = ''
i = -1
size = len(expression) - 1
while i < size:
i += 1
el = expression[i]
if is_sign(el):
# case when '-' stay at the start of a sequence
if el == '-' and (i == 0 or is_sign(expression[i - 1])):
cur_num += el
continue
# case when opening bracket stay at the start of an expression Exp:(a+b)*c...
if not is_empty(cur_num):
nums_stack.append(float(cur_num))
cur_num = ''
while el != '(' and is_priority_reduction(operation_stack, el):
fusion(nums_stack, operation_stack)
if el == ')':
operation_stack.pop()
else:
operation_stack.append(el)
elif is_num(el):
cur_num += el
elif is_trigonometrical_function(el):
cur_num, i = separate_trigonometrical_function(expression, i)
if not is_empty(cur_num):
nums_stack.append(float(cur_num))
while len(operation_stack) > 0:
fusion(nums_stack, operation_stack)
return nums_stack.pop()
def fusion(nums_stack: list, operation_stack: list):
n2 = nums_stack.pop()
n1 = nums_stack.pop()
operation = operation_stack.pop()
if operation == '+':
nums_stack.append(n1 + n2)
if operation == '-':
nums_stack.append(n1 - n2)
if operation == '*':
nums_stack.append(n1 * n2)
if operation == '/':
try:
nums_stack.append(n1 / n2)
except ZeroDivisionError:
if n1 > 0:
nums_stack.append(float('inf'))
else:
nums_stack.append(float('-inf'))
if operation == '^':
nums_stack.append(pow(n1, n2))
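# --- Editor's hedged usage sketch (not part of the original module): a few spot
# checks of the evaluator above; the expected values are plain arithmetic and
# the precedence/associativity follow the operations table at the top.
if __name__ == "__main__":
    assert calculate("2+3*4") == 14      # '*' binds tighter than '+'
    assert calculate("(2+3)*4") == 20    # brackets override precedence
    assert calculate("2^3^1") == 8       # '^' is evaluated left to right here
    assert abs(calculate("sin(0)+cos(0)") - 1.0) < 1e-9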
|
JoreKut/gui-plotter
|
util/service/calculator.py
|
calculator.py
|
py
| 3,587 |
python
|
en
|
code
| 0 |
github-code
|
50
|
132330470
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 12:14:25 2020
@author: paulg
"""
"""
Created on Fri Jun 12 16:23:42 2020
@author: paulg
"""
import pyrealsense2 as rs
import numpy as np
import cv2
import imutils
from skimage.metrics import structural_similarity as compare_ssim  # compare_ssim moved out of skimage.measure in newer scikit-image
#####################################################
## Volume measurement ##
#####################################################
# THIS ALGORITHM MEASURES THE GEOMETRIC PROPERTIES OF A SHAPE: AREA, DEPTH, VOLUME, BOUNDING BOX
# method=0: contour extraction with variable erosion and dilation
# method=1: contour extraction with a depth filter
############# Functions #############
def Str_sim_in(background,image,gau_blur):
grayA = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
if gau_blur==1:
grayA= cv2.GaussianBlur(grayA, (3, 3), 0)
grayB= cv2.GaussianBlur(grayB, (3, 3), 0)
# compute the Structural Similarity Index (SSIM) between the two
# images, ensuring that the difference image is returned
(score, diff) = compare_ssim(grayA, grayB, full=True)
diff = (diff * 255).astype("uint8")
# threshold the difference image, followed by finding contours to
# obtain the regions of the two input images that differ
thresh = cv2.threshold(diff, 80, 255,
cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cv2.imshow('diff', thresh)
return cnts
def empty(a):
    # no-op callback required by cv2.createTrackbar
pass
def contour_extrac2(image,dila,ero):
# contour extraction for the filter depth method
    # conversion to grayscale
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
gray=cv2.GaussianBlur(gray,(7,7),0)
    # thresholding is used here because the depth filter turns the background white
thresh= cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)[1]
    # edge detection is used because the black-and-white image suits it (the object is in black);
    # inverting every pixel (binary NOT) could also work
edged = cv2.Canny(thresh, 50, 100)
edged = cv2.dilate(edged, None, iterations=dila)
edged = cv2.erode(edged, None, iterations=ero)
cnts = cv2.findContours( edged,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )
cnts = imutils.grab_contours(cnts)
return cnts
def contour_extrac(image,dila,ero):
# contour extraction when the depth mask is not used
# more accurate erode=1 and dila=1
    # for some objects with a noisy background, erode=3 and dila=6 (or 5) works better, but gives a slightly too large area
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
gray=cv2.GaussianBlur(gray,(7,7),0)
edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=dila)
edged = cv2.erode(edged, None, iterations=ero)
cnts = cv2.findContours( edged,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )
cnts = imutils.grab_contours(cnts)
return cnts
def geo_property(depth_image,cnt,color_image,dist_came_back,length_per_pixel,area_per_pixel):
area=cv2.contourArea(cnt)
if area>500:
cv2.drawContours(color_image,[cnt],-1,(0,255,0),2)
#(x, y, w, h) = cv2.boundingRect(cnt)
rect = cv2.minAreaRect(cnt)
pos=rect[0]
size=rect[1]
x=int(pos[0])
y=int(pos[1])
w=min(size)
h=max(size)
cimg = np.zeros_like(depth_image)
cv2.drawContours(cimg, [cnt],-1, color=255, thickness=-1)
# Access the image pixels and create a 1D numpy array then add to list
#the condition is extremely important
pts = np.where((cimg == 255)*(depth_image!=0))
depth=dist_came_back-np.mean(depth_image[pts[0], pts[1]])
correct_factor=(dist_came_back-depth)/dist_came_back
w_real=w*length_per_pixel*correct_factor
h_real=h*length_per_pixel*correct_factor
area_real=area*area_per_pixel*(correct_factor**2)
print(area_real,h_real,w_real,depth*100)
cv2.putText(color_image, 'area='+ "%.3fcm^2" %(area_real), (x-100, y-100), 1, 1.2, (0, 0, 255))
cv2.putText(color_image,"depth="+ "%.3fcm" %(depth*100), (x-100, y-70), 1, 1.2, (0, 0, 255))
return color_image
def geo_property2(depth_image,cnt,color_image,depth_image_ini,length_per_pixel,area_per_pixel):
area=cv2.contourArea(cnt)
if area>500:
cv2.drawContours(color_image,[cnt],-1,(0,255,0),1)
#(x, y, w, h) = cv2.boundingRect(cnt)
rect = cv2.minAreaRect(cnt)
pos=rect[0]
size=rect[1]
x=int(pos[0])
y=int(pos[1])
w=min(size)
h=max(size)
cimg = np.zeros_like(depth_image)
cv2.drawContours(cimg, [cnt],-1, color=255, thickness=-1)
# Access the image pixels and create a 1D numpy array then add to list
#the condition is extremely important
pts = np.where((cimg == 255)*(depth_image!=0))
dist_came_back=np.mean(depth_image_ini[pts[0], pts[1]])
depth=dist_came_back-np.mean(depth_image[pts[0], pts[1]])
correct_factor=(dist_came_back-depth)/dist_came_back
w_real=w*length_per_pixel*correct_factor
h_real=h*length_per_pixel*correct_factor
area_real=area*area_per_pixel*(correct_factor**2)
print(area_real,h_real,w_real)
cv2.putText(color_image, 'area='+str(area_real), (x, y), 1, 1, (0, 255, 0))
#cv2.putText(color_image,"depth="+ "%.3fcm" %(depth*100), (x-50, y+20), 1, 1, (0, 255, 0))
return color_image
############# Functions #############
############# Calibration #############
#need to be adjusted depending on the setup
# distance of the camera from the background
dist_came_back=0.86
# area and length of a pixel at the background level. Try to measure these as precisely as possible
# studio 1
# area_per_pixel=100/12640
# length_per_pixel=10/113.7
#error
#area_per_pixel=100/13000
#length_per_pixel=10/115
#studio2 correct
# area_per_pixel=100/4950
# length_per_pixel=10/71.5
area_per_pixel=100/5190.8
length_per_pixel=10/72.7
# filter on struc_similarity index 1=yes other=nont
#if you want to work on the full window chose x1=None
x1,x2,y1,y2=120,420,100,390
filt_stu=0
############# Calibration #############
############# Camera parameter #############
# Create a pipeline
pipeline = rs.pipeline()
#Create a config and configure the pipeline to stream
# different resolutions of color and depth streams
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 10)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 10)
# Start streaming
profile = pipeline.start(config)
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: " , depth_scale)
init_bg0=None
init_bg1=None
init_bg2=None
# We will be removing the background of objects more than
# clipping_distance_in_meters meters away
# Create an align object
# rs.align allows us to perform alignment of depth frames to other frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)
############# Camera parameter #############
############# Trackbar creation #############
cv2.namedWindow("Trackbars")
cv2.resizeWindow("Trackbars",640,240)
#method 0 without backgroud extraction method 1 with background extraction
cv2.createTrackbar("Method","Trackbars",1,2,empty)
cv2.createTrackbar("Dilate","Trackbars",1,15,empty)
cv2.createTrackbar("Erode","Trackbars",1,15,empty)
cv2.createTrackbar("Max_dist","Trackbars",530,1000,empty)
cv2.createTrackbar("Min_dist","Trackbars",10,1000,empty)
############# Trackbar creation #############
############# Real time video analysis #############
num_im=70
try:
while True:
# Wait for a coherent pair of frames: depth and color
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
depth_frame = aligned_frames.get_depth_frame()
color_frame = aligned_frames.get_color_frame()
if not depth_frame or not color_frame:
continue
# Apply colormap on depth image (image must be converted to 8-bit per pixel first)
colorizer = rs.colorizer()
colorized_depth = np.asanyarray(colorizer.colorize(depth_frame).get_data())
depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()
# Convert images to numpy arrays
depth_image = np.asanyarray(depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
if x1!=None:
depth_image = depth_image[x1:x2,y1:y2]
color_image = color_image[x1:x2,y1:y2]
colorized_depth=colorized_depth[x1:x2,y1:y2]
#choose the method
method=cv2.getTrackbarPos("Method","Trackbars")
if method==0:
if init_bg0==None:
depth_image_ini=depth_image*depth_scale
init_bg0=1
#scale the depth
depth_image = depth_image * depth_scale
dilate=cv2.getTrackbarPos("Dilate","Trackbars")
erode=cv2.getTrackbarPos("Erode","Trackbars")
cnts=contour_extrac(color_image,dilate,erode)
for cnt in cnts:
#color_image=geo_property2(depth_image,cnt,color_image,depth_image_ini,length_per_pixel,area_per_pixel)
color_image=geo_property(depth_image,cnt,color_image,dist_came_back,length_per_pixel,area_per_pixel)
if method==1:
if init_bg1==None:
depth_image_ini=depth_image*depth_scale
init_bg1=1
# Remove background - Set pixels further than clipping_distance to filt_color
# max_clipping_distance=cv2.getTrackbarPos("Max_dist","Trackbars")/(10000*depth_scale)
# min_clipping_distance=cv2.getTrackbarPos("Min_dist","Trackbars")/(10000*depth_scale)
max_clipping_distance=cv2.getTrackbarPos("Max_dist","Trackbars")/(1000)
min_clipping_distance=cv2.getTrackbarPos("Min_dist","Trackbars")/(1000)
filt_color = 255
depth_image=depth_image*depth_scale
depth_image_3d = np.dstack((depth_image,depth_image,depth_image))#depth image is 1 channel, color is 3 channels
dist_1D= abs(depth_image_ini-depth_image)
dist_3D=np.dstack((dist_1D,dist_1D,dist_1D))
bg_removed = np.where((depth_image_3d!=0)*((dist_3D > max_clipping_distance) | (dist_3D< min_clipping_distance)), filt_color, color_image)
#the depth_image_3d!=0 is to remove the point equal to zero
# bg_removed = np.where((depth_image_3d > max_clipping_distance) | (depth_image_3d < min_clipping_distance), filt_color, color_image)
dilate=cv2.getTrackbarPos("Dilate","Trackbars")
erode=cv2.getTrackbarPos("Erode","Trackbars")
cnts=contour_extrac2(bg_removed,dilate,erode)
#scale the depth
for cnt in cnts:
color_image=geo_property(depth_image,cnt,color_image,dist_came_back,length_per_pixel,area_per_pixel)
cv2.imshow('bg removed', bg_removed)
if method==2:
if init_bg2==None:
first_frame=color_image
depth_image_ini=depth_image*depth_scale
init_bg2=1
#scale the depth
cnts=Str_sim_in(first_frame,color_image,filt_stu)
depth_image = depth_image * depth_scale
test=depth_image
for cnt in cnts:
#color_image=geo_property2(depth_image,cnt,color_image,depth_image_ini,length_per_pixel,area_per_pixel)
color_image=geo_property(depth_image,cnt,color_image,dist_came_back,length_per_pixel,area_per_pixel)
# Show images
images = np.hstack((color_image, colorized_depth))
cv2.namedWindow('Align Example', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Align Example', images)
key = cv2.waitKey(1)
# Press esc or 'q' to close the image window
if key & 0xFF == ord('q') or key == 27:
cv2.destroyAllWindows()
break
if key & 0xFF == ord('s'):
cv2.imwrite('images5/bootle_cnt_color'+str(num_im)+'.jpg', color_image)
cv2.imwrite('images5/bootle_depth'+str(num_im)+'.jpg', colorized_depth)
num_im+=1
finally:
pipeline.stop()
############# Real time video analysis #############
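# --- Editor's hedged sketch (not part of the original script): the
# similar-triangles correction used inside geo_property(), in isolation.
# length_per_pixel is calibrated at the background plane, so a size measured on
# an object closer to the camera must be scaled by
# (camera-to-object distance) / (camera-to-background distance).
# The example numbers in the comment are assumptions.
def real_length(pixels, length_per_pixel, dist_cam_background, object_depth):
    dist_cam_object = dist_cam_background - object_depth
    correction = dist_cam_object / dist_cam_background
    return pixels * length_per_pixel * correction

# e.g. real_length(100, 10 / 72.7, 0.86, 0.10) gives the physical length (in the
# calibration units, presumably cm) of a 100-pixel span measured on an object
# sitting 10 cm above the background plane.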
|
pgredigui/recycleye
|
object_size/depth_combined_methods2.py
|
depth_combined_methods2.py
|
py
| 13,337 |
python
|
en
|
code
| 0 |
github-code
|
50
|
11042381240
|
import pygame
from settings import *
from tile import Tile
from player import Player
from debug import debug
from filereader import import_csv_layout
from weapon import weapon
from ui import UI
class Level:
def __init__(self):
self.display_surface=pygame.display.get_surface()
#sprite group setup
self.visible_sprites=YCamera()
self.obstacle_sprites=pygame.sprite.Group()
self.current_attack=None
self.UI=UI()
#sprite setup
self.create_map()
def create_map(self):
layout = {
'boundary': import_csv_layout('map/Map_FloorBlocks.csv'),
'flowers': import_csv_layout('map/Map_Extra.csv'),
'details': import_csv_layout('map/Map_Details.csv')
}
file_paths = {
'567': 'details/shadow.png',
'166': 'details/house0.png',
'167': 'details/house1.png',
'168': 'details/house2.png',
'169': 'details/house3.png',
'170': 'details/house4.png',
'126': 'details/house5.png',
'127': 'details/house6.png',
'128': 'details/house7.png',
'129': 'details/house8.png',
'130': 'details/house9.png',
'86': 'details/house10.png',
'87': 'details/house11.png',
'88': 'details/house12.png',
'89': 'details/house13.png',
'90': 'details/house14.png',
'46': 'details/house15.png',
'47': 'details/house16.png',
'48': 'details/house17.png',
'49': 'details/house18.png',
'50': 'details/house19.png',
'6': 'details/house20.png',
'7': 'details/house21.png',
'8': 'details/house22.png',
'9': 'details/house23.png',
'10': 'details/house24.png',
'1125': 'details/flag0.png',
'1085': 'details/flag1.png',
}
for style,layout in layout.items():
for row_index,row in enumerate(layout):
for col_index,col in enumerate(row):
if col!='-1' and col!='2':
x = col_index*TILESIZE
y = row_index*TILESIZE
if style=='boundary':
Tile((x,y),[self.obstacle_sprites],'invisible')
if style=='flowers':
Tile((x,y),[self.visible_sprites,self.obstacle_sprites],'flowers',pygame.image.load('map/Bush.png').convert_alpha())
if style=='details':
file_path = file_paths.get(col)
if file_path:
Tile((x,y),[self.visible_sprites,self.obstacle_sprites],'house',pygame.image.load(file_path).convert_alpha())
self.player=Player((1460,470),[self.visible_sprites],self.obstacle_sprites,self.create_attack,self.destroy_weapon)
def run(self):
self.visible_sprites.drawing(self.player)
self.visible_sprites.update()
self.UI.display(self.player)
debug(self.player.status)
def create_attack(self):
self.current_attack=weapon(self.player,self.visible_sprites)
def destroy_weapon(self):
if self.current_attack:
self.current_attack.kill()
self.current_attack=None
class YCamera(pygame.sprite.Group):
def __init__(self):
super().__init__()
self.display_surface=pygame.display.get_surface()
self.half_width=self.display_surface.get_size()[0] // 2
self.half_height=self.display_surface.get_size()[1] // 2
self.offset=pygame.math.Vector2()
self.floor_surf=pygame.image.load('map/map6.png')
self.floor_rect=self.floor_surf.get_rect(topleft=(0,0))
def drawing(self,player):
#getting the offset
self.offset.x=player.rect.centerx - self.half_width
self.offset.y=player.rect.centery - self.half_height
floor_offset_pos= self.floor_rect.topleft-self.offset
self.display_surface.blit(self.floor_surf,floor_offset_pos)
for sprite in sorted(self.sprites(),key=lambda sprite: sprite.rect.centery):
offset_pos=sprite.rect.topleft - self.offset
self.display_surface.blit(sprite.image,offset_pos)
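# --- Editor's hedged sketch (not part of the original module): the camera math
# used by YCamera.drawing() in isolation. Keeping the player centred means
# subtracting (player_centre - half_screen) from every world position before
# blitting; sorting sprites by rect.centery then gives the simple top-down
# depth effect.
def to_screen(world_topleft, player_center, screen_size):
    offset_x = player_center[0] - screen_size[0] // 2
    offset_y = player_center[1] - screen_size[1] // 2
    return (world_topleft[0] - offset_x, world_topleft[1] - offset_y)

# to_screen((1500, 500), (1460, 470), (1280, 720)) -> (680, 390)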
|
NadirAli1403/TreasureHuntOOP
|
level.py
|
level.py
|
py
| 4,476 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22385220928
|
from solfege.lib.pattern import Scale
IONIAN = Scale(
[2, 2, 1, 2, 2, 2, 1],
"ionian"
)
ionian = IONIAN
dorian = IONIAN.mode(1, 'dorian')
phrygian = IONIAN.mode(2, 'phrygian')
lydian = IONIAN.mode(3, 'lydian')
mixolydian = IONIAN.mode(4, 'mixolydian')
aeolian = IONIAN.mode(5, 'aeolian')
locrian = IONIAN.mode(6, 'locrian')
modes_of_major = [
ionian,
dorian,
phrygian,
lydian,
mixolydian,
aeolian,
locrian,
]
modes_of_minor = [
aeolian,
locrian,
ionian,
dorian,
phrygian,
lydian,
mixolydian,
]
BLUES = Scale(
[3, 2, 1, 1, 3, 2],
'blues scale'
)
KLEZMER = Scale(
[1, 3, 1, 2, 1, 2, 2],
'klezmer scale'
)
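# --- Editor's hedged note (not part of the original module): Scale.mode(n, name)
# presumably rotates the interval pattern by n steps (the Scale class itself
# lives in solfege.lib.pattern and is not shown here). The helper below shows
# the rotation on plain lists.
def rotate(intervals, n):
    return intervals[n:] + intervals[:n]

# rotate([2, 2, 1, 2, 2, 2, 1], 1) -> [2, 1, 2, 2, 2, 1, 2]   (dorian)
# rotate([2, 2, 1, 2, 2, 2, 1], 5) -> [2, 1, 2, 2, 1, 2, 2]   (aeolian / natural minor)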
|
pvarsh/solfege
|
solfege/exercises/scales.py
|
scales.py
|
py
| 689 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28215338548
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class Visita(models.Model):
_name = 'hospital.visita'
_rec_name = 'data'
data = fields.Date(required=True)
# MANY TO ONE
historial_id = fields.Many2one('hospital.historial', ondelete='cascade', string='Historial', required=True)
# ONE TO ONE: MALALTIA
malaltia_id = fields.Many2one('hospital.malaltia', compute='compute_malaltia', inverse='malaltia_inverse')
malaltia_ids = fields.One2many('hospital.malaltia', 'visita_id')
@api.one
@api.depends('malaltia_ids')
def compute_malaltia(self):
if len(self.malaltia_ids) > 0:
self.malaltia_id = self.malaltia_ids[0]
@api.one
def malaltia_inverse(self):
if len(self.malaltia_ids) > 0:
# delete previous reference
malaltia = self.env['hospital.malaltia'].browse(self.malaltia_ids[0].id)
malaltia.visita_id = False
# set new reference
self.malaltia_id.visita_id = self
# ONE TO ONE: METGE
metge_id = fields.Many2one('hospital.metge', compute='compute_metge', inverse='metge_inverse')
metge_ids = fields.One2many('hospital.metge', 'visita_id')
@api.one
@api.depends('metge_ids')
def compute_metge(self):
if len(self.metge_ids) > 0:
self.metge_id = self.metge_ids[0]
@api.one
def metge_inverse(self):
if len(self.metge_ids) > 0:
# delete previous reference
metge = self.env['hospital.metge'].browse(self.metge_ids[0].id)
metge.visita_id = False
# set new reference
self.metge_id.visita_id = self
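# --- Editor's hedged note (not part of the original model): Odoo has no native
# One2one field, so the pattern above emulates one with a Many2one
# (malaltia_id / metge_id) computed from a One2many (malaltia_ids / metge_ids),
# plus an inverse method that clears the previous back-reference before
# pointing the newly selected record's visita_id at this visita.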
|
unexpectedprojectz/hospital
|
models/model_visita.py
|
model_visita.py
|
py
| 1,654 |
python
|
en
|
code
| 0 |
github-code
|
50
|
40229712570
|
import FWCore.ParameterSet.Config as cms
CfgNavigationSchoolESProducer = cms.ESProducer("CfgNavigationSchoolESProducer",
ComponentName = cms.string('CfgNavigationSchool'),
SimpleMagneticField = cms.string(''),
# SimpleMagneticField = cms.string('ParabolicMf'),
)
_defaultPSetWithIn=cms.PSet(IN = cms.vstring(''),OUT = cms.vstring(''))
_defaultPSetInverseRelation=cms.PSet(OUT = cms.vstring(''))
_defaultPSet=_defaultPSetWithIn;
parts={}
parts["TIB%d"]=4
parts["TOB%d"]=6
parts["TEC%d_pos"]=9
parts["TEC%d_neg"]=9
parts["TID%d_pos"]=3
parts["TID%d_neg"]=3
parts["PXB%d"]=3
parts["PXF%d_pos"]=2
parts["PXF%d_neg"]=2
import copy
for p in parts.keys():
for i in range(1,parts[p]+1):
setattr(CfgNavigationSchoolESProducer,p%(i,),copy.copy(_defaultPSet))
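# --- Editor's note (hedged, derived from the loop above): the setattr calls
# attach one PSet per layer name, e.g. TIB1..TIB4, TOB1..TOB6, TEC1_pos..TEC9_pos,
# TEC1_neg..TEC9_neg, TID1_pos..TID3_pos, TID1_neg..TID3_neg, PXB1..PXB3,
# PXF1_pos..PXF2_pos and PXF1_neg..PXF2_neg, each initialised to a copy of
# _defaultPSetWithIn (aliased as _defaultPSet).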
|
cms-sw/cmssw
|
RecoTracker/TkNavigation/python/CfgNavigationSchool_cfi.py
|
CfgNavigationSchool_cfi.py
|
py
| 956 |
python
|
en
|
code
| 985 |
github-code
|
50
|